Hi,
I am trying to apply static quantization to a model that has an nn.ModuleList() object as one of its modules.
To fuse the layers, we need to pass the layer names as strings to `torch.quantization.fuse_modules`.
So I tried:
import torch
import torch.nn as nn

class MyModule(nn.Module):
    def __init__(self):
        super(MyModule, self).__init__()
        self.quant = torch.quantization.QuantStub()
        self.linears = nn.ModuleList([nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10), nn.ReLU()])
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        # ModuleList can act as an iterable, or be indexed using ints
        x = self.quant(x)
        x = self.linears[0](x)
        x = self.linears[1](x)
        x = self.linears[2](x)
        x = self.linears[3](x)
        x = self.dequant(x)
        return x
model_fp32 = MyModule()
model_fp32.eval()
input_fp32 = torch.randn(4, 10, 10)
out = model_fp32(input_fp32)
model_fp32.qconfig = torch.quantization.get_default_qconfig('fbgemm')
model_fp32_prepared = torch.quantization.fuse_modules(model_fp32, ["linears[0]","linears[1]" ])
This throws: AttributeError: 'MyModule' object has no attribute 'linears[0]'
What is the correct way to refer to layers inside an nn.ModuleList when calling fuse_modules?
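Looking at model_fp32.named_modules(), the children show up as linears.0, linears.1, etc., so my guess is that fuse_modules expects those dotted paths rather than Python indexing syntax, grouped into Linear+ReLU pairs. Something like this (untested guess on my side, not sure if this is the intended way):

model_fp32_fused = torch.quantization.fuse_modules(
    model_fp32,
    [["linears.0", "linears.1"],   # first Linear + ReLU pair, using dotted names?
     ["linears.2", "linears.3"]],  # second Linear + ReLU pair
)
print(model_fp32_fused)

Is this the right way to name submodules of an nn.ModuleList, or is there a different recommended approach?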