Can anyone explain why this ONNX export fails, and whether they can propose a fix. I am doing adaptive filtering. There is a network that predicts a set of filters, one for each batch, then each filter is convolved with the input. I’ve posted an issue on github but i am calling on the community to help me.
class Localiser(nn.Module):
def __init__(self, nfeats):
super(Localiser, self).__init__()
self.net = nn.Sequential(nn.Conv1d(1,128,3,padding=1),
nn.AdaptiveAvgPool1d(1))
self.fc = nn.Linear(128,nfeats)
def forward(self, x):
x = self.net(x)
x = x.flatten(1)
x = self.fc(x)
return x
@torch.jit.script
def adaptive_filter(x, w, padding: int):
B = x.shape[0]
w = w.unsqueeze(1)
x = x.view(1,B,-1)
x = F.conv1d(x, w, padding=padding, groups=B)
x = x.view(B,1,-1)
return x
class Equalizer(nn.Module):
def __init__(self, ntaps):
super(Equalizer, self).__init__()
self.loc = Localiser(ntaps)
self.ntaps = ntaps
def forward(self,x):
w = self.loc(x)
x = adaptive_filter(x, w, self.ntaps//2)
return x
x1 = torch.randn(4, 1, 1024)
x2 = torch.randn(8, 1, 512) * 10
model = Equalizer(13)
y2 = model(x2)
torch.onnx.export(model,
(x1,),
"file.onnx",
opset_version=12,
input_names=['x'],
output_names=['y'],
dynamic_axes={'x': [0,2], 'y': [0,2]})
print("exported")