Hi @jerryzh168!
The error I have is:

```
NotImplementedError: Could not run 'quantized::conv2d.new' with arguments from the 'CPU' backend
```
The inference is done on the following input:
```python
dummy_input = torch.rand((1, 3, 512, 512), dtype=torch.float32, device="cpu")
```
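For completeness, the failing call looks roughly like this (`model_q` is the converted model from the quantization steps further down; this snippet is illustrative, not verbatim):

```python
# Illustrative only: model_q is the converted (quantized) model.
with torch.no_grad():
    out, h = model_q(dummy_input)  # raises the NotImplementedError above
```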
The modified model for quantization is:
```python
import torch
import torch.nn as nn
from collections import namedtuple


# Define the Model
class ResNet(nn.Module):
    def __init__(self, config, output_dim):
        super().__init__()
        # QuantStub converts tensors from floating point to quantized
        # self.quant = torch.quantization.QuantStub()
        block, n_blocks, channels = config
        self.in_channels = channels[0]
        assert len(n_blocks) == len(channels) == 4
        self.conv1 = nn.Conv2d(
            3,
            self.in_channels,
            kernel_size=(7, 7),
            stride=(2, 2),
            padding=3,
            bias=False,
        )
        self.bn1 = nn.BatchNorm2d(self.in_channels)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self.get_resnet_layer(block, n_blocks[0], channels[0])
        self.layer2 = self.get_resnet_layer(block, n_blocks[1], channels[1], stride=2)
        self.layer3 = self.get_resnet_layer(block, n_blocks[2], channels[2], stride=2)
        self.layer4 = self.get_resnet_layer(block, n_blocks[3], channels[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(self.in_channels, output_dim)
        # DeQuantStub converts tensors from quantized to floating point
        # self.dequant = torch.quantization.DeQuantStub()

    def get_resnet_layer(self, block, n_blocks, channels, stride=1):
        layers = []
        # Downsample the identity path when the channel count changes
        downsample = self.in_channels != block.expansion * channels
        layers.append(block(self.in_channels, channels, stride, downsample))
        for _ in range(1, n_blocks):
            layers.append(block(block.expansion * channels, channels))
        self.in_channels = block.expansion * channels
        return nn.Sequential(*layers)

    def forward(self, x):
        # manually specify where tensors will be converted from floating
        # point to quantized in the quantized model
        # x = self.quant(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        h = x.view(x.shape[0], -1)
        x = self.fc(h)
        # manually specify where tensors will be converted from quantized
        # to floating point in the quantized model
        # x = self.dequant(x)
        return x, h


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1, downsample=False):
        super().__init__()
        # QuantStub converts tensors from floating point to quantized
        self.quant = torch.quantization.QuantStub()
        self.conv1 = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
        )
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        if downsample:
            conv = nn.Conv2d(
                in_channels, out_channels, kernel_size=1, stride=stride, bias=False
            )
            bn = nn.BatchNorm2d(out_channels)
            downsample = nn.Sequential(conv, bn)
        else:
            downsample = None
        self.downsample = downsample
        # DeQuantStub converts tensors from quantized to floating point
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        # manually specify where tensors will be converted from floating
        # point to quantized in the quantized model
        x = self.quant(x)
        i = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        if self.downsample is not None:
            i = self.quant(i)
            i = self.downsample(i)
            i = self.dequant(i)
        x += i
        x = self.relu(x)
        # manually specify where tensors will be converted from quantized
        # to floating point in the quantized model
        x = self.dequant(x)
        return x


ResNetConfig = namedtuple("ResNetConfig", ["block", "n_blocks", "channels"])
resnet18_config = ResNetConfig(
    block=BasicBlock, n_blocks=[2, 2, 2, 2], channels=[64, 128, 256, 512]
)
```
About the qconfig settings:
```python
backend = "fbgemm"  # x86 machine
torch.backends.quantized.engine = backend
model_q.qconfig = torch.quantization.get_default_qconfig(backend)
```
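In case it helps, the conversion itself follows the standard eager-mode post-training flow, roughly like this (the calibration input here is a placeholder for my real calibration data):

```python
# Rough sketch of the eager-mode PTQ steps run after setting the qconfig.
model_q.eval()
torch.quantization.prepare(model_q, inplace=True)   # insert observers
with torch.no_grad():
    model_q(dummy_input)                            # calibration pass (placeholder input)
torch.quantization.convert(model_q, inplace=True)   # swap modules for quantized versions
```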
Depending on where I place QuantStub and DeQuantStub, I obtain either the error above or a different one.
Thanks a lot for your help!
Please let me know if you need any additional information.