Hello, I’m having problems exporting a very simple quantized model to ONNX. The error message I’m seeing is:
AttributeError: 'torch.dtype' object has no attribute 'detach'
The cause is that ('fc1._packed_params.dtype', torch.qint8) ends up in the state_dict (see the quick check after the example below).
I asked on a previous (and old) thread if there was a solution and the answer was that this could be solved in the latest version of PyTorch. So I installed 1.7.0.dev20200705+cpu, but no joy.
I’ve pasted the example below.
Any thoughts on whether this is a fault on my part, a bug, or simply not supported would be greatly appreciated.
# Import libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Needed for quantization
from torch.quantization import QuantStub, DeQuantStub
import torch.quantization


class Net(nn.Module):
    def __init__(self):
        # Create instance of base class
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 10)  # Inputs, outputs

        # Optimizer parameters
        self.learning_rate = 0.01
        self.epochs = 10
        self.log_interval = 10
        self.batch_size = 200

        # Needed for quantization, per the PyTorch examples
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

        # Training-related objects
        self.optimizer = optim.SGD(self.parameters(), lr=self.learning_rate, momentum=0.9)
        self.criterion = nn.NLLLoss()

    def forward(self, x, save_intermediate=False, count=0):
        x1 = self.quant(x)     # fp32 -> quantized
        x2 = self.fc1(x1)
        x3 = self.dequant(x2)  # quantized -> fp32
        return x3


net = Net()
net.qconfig = torch.quantization.default_qconfig
torch.quantization.prepare(net, inplace=True)
torch.quantization.convert(net, inplace=True)

torch.onnx.export(net,
                  torch.zeros([1, 784]),
                  'simple.onnx',
                  opset_version=11,
                  verbose=True)
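For reference, here is a quick check that confirms the offending entry (run it after convert() and before the export; the exact state_dict keys may vary across PyTorch versions):

# List the state_dict entries that are not tensors. After convert(), the
# quantized Linear saves its packed-param dtype as a plain torch.dtype,
# which the ONNX exporter later tries to call .detach() on.
for key, value in net.state_dict().items():
    if not isinstance(value, torch.Tensor):
        print(key, value)
# Output includes: fc1._packed_params.dtype torch.qint8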