I’m new to PyTorch, so I’m likely doing something silly! I’m trying to do a simple 2D convolution with quantized weights where the scale and zero point are set manually. I assumed that FixedQParamsObserver would be suitable for this.
import torch
from torch import nn
import torch.ao.quantization as quantization

class M(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = quantization.QuantStub()
        self.conv = nn.Conv2d(4, 12, 3, stride=1, padding=1, padding_mode='reflect', bias=False)
        self.dequant = quantization.DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.dequant(x)
        return x
weight_fq = quantization.FakeQuantize.with_args(observer=quantization.FixedQParamsObserver,
                                                quant_min=-8,
                                                quant_max=7,
                                                dtype=torch.qint8,
                                                qscheme=torch.per_tensor_symmetric,
                                                scale=1/16,
                                                zero_point=0)

act_fq = quantization.FakeQuantize.with_args(observer=quantization.MinMaxObserver,
                                             quant_min=0,
                                             quant_max=255,
                                             dtype=torch.quint8,
                                             qscheme=torch.per_tensor_affine,
                                             reduce_range=False)  # NOT USED
model = M()
model.train()
model.qconfig = quantization.QConfig(activation=act_fq, weight=weight_fq)
model_prepared = quantization.prepare_qat(model)
However, the last line gives me an error:

TypeError: __init__() got an unexpected keyword argument 'factory_kwargs'
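In case it helps, I can trigger what looks like the same error by constructing the observer directly. This is only my guess at what is happening under the hood (I'm assuming prepare_qat forwards a factory_kwargs argument through FakeQuantize to the observer's constructor):

import torch.ao.quantization as quantization

# Guessed minimal repro: pass the factory_kwargs keyword (which I assume
# prepare_qat forwards via FakeQuantize) straight to the observer.
obs = quantization.FixedQParamsObserver(scale=1/16, zero_point=0, factory_kwargs={})
# TypeError: __init__() got an unexpected keyword argument 'factory_kwargs'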
What am I doing wrong?