Hello

I am still trying to get a custom conv2d implementation going. I thought its construction was going well, but there is a bug I just can't get my head around. Whenever I try to run the convolution I get the following error:
Traceback (most recent call last):
  File "C:\Users\Ron Rödel\PycharmProjects\lbcnnn\main.py", line 76, in <module>
    p(r)
  File "C:\Users\Ron Rödel\PycharmProjects\lbcnnn\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Ron Rödel\PycharmProjects\lbcnnn\main.py", line 63, in forward
    x = F.sigmoid(self.conv[j](x))
  File "C:\Users\Ron Rödel\PycharmProjects\lbcnnn\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Ron Rödel\PycharmProjects\lbcnnn\venv\lib\site-packages\torch\nn\modules\conv.py", line 463, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "C:\Users\Ron Rödel\PycharmProjects\lbcnnn\venv\lib\site-packages\torch\nn\modules\conv.py", line 459, in _conv_forward
    return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Given groups=1, weight of size [3, 2, 3, 3], expected input[2, 1, 5, 5] to have 2 channels, but got 1 channels instead

Process finished with exit code 1
If I change the input to [2, 2, 5, 5], nothing meaningful changes (I mention this because, to my understanding, the second dimension sets the number of channels, but when I do that the dims just get weirder).
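To illustrate what I mean by the dims getting weirder, here is a minimal shape trace for a single layer with the same i=2, o=3, m=3 I use below:

import torch
import torch.nn as nn

conv = nn.Conv2d(2, 3, 3, bias=False)  # in_channels=2, out_channels=3, 3x3 kernel
x = torch.ones(2, 2, 5, 5)             # batch of 2, 2 channels, 5x5
print(conv(x).shape)                   # torch.Size([2, 3, 3, 3])

So after one such layer the tensor has 3 channels and the 5x5 spatial size has shrunk to 3x3, which is presumably why a second Conv2d(2, 3, 3) then complains about the channel count.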
Here is how I currently build my custom conv2d (getIfromU and getJfromU are my own helpers that map a mask index to kernel coordinates):
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class Lbcnn(nn.Module):
    def __init__(self, i, o, m):
        super(Lbcnn, self).__init__()
        # one fixed "mask" convolution per off-center kernel position
        self.conv = nn.ModuleList([nn.Conv2d(i, o, m, bias=False) for y in range(m * m - 1)])
        # freeze the mask weights (setting requires_grad on the ModuleList itself has no effect)
        for p in self.conv.parameters():
            p.requires_grad = False
        for z in range(o):
            for s in range(i):
                for u in range(m * m - 1):
                    with torch.no_grad():
                        # -1 in the center of every kernel
                        self.conv[u].weight[z][s][math.floor(m / 2)][math.floor(m / 2)] = -1
                        # zero out all off-center positions ...
                        for t in range(m * m - 1):
                            self.conv[u].weight[z][s][getIfromU(t, m)][getJfromU(t, m)] = 0
                        # ... then put a single 1 at the position belonging to mask u
                        self.conv[u].weight[z][s][getIfromU(u, m)][getJfromU(u, m)] = 1
        masks = m * m - 1
        # 1x1 convolution that should combine the mask responses
        self.comb = nn.Conv2d(masks * i, o, 1)
The weight manipulation itself does what I want; I just cannot find the dimensioning error.
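For example, this is how I have been checking the masks:

p = Lbcnn(2, 3, 3)
print(p.conv[0].weight[0, 0])
# 3x3 kernel with -1 in the center, a single 1 at the position that
# getIfromU/getJfromU assign to mask 0, and zeros everywhere else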
(In case it is needed, here are the forward function and how I call it:)
    def forward(self, x):
        m = self.conv[0].kernel_size[0]
        # apply the masks one after another, each followed by a sigmoid
        for j in range(m * m - 1):
            x = F.sigmoid(self.conv[j](x))
        # combine with the 1x1 convolution
        x = self.comb(x)
        return x
p = Lbcnn(2, 3, 3)          # i=2 input channels, o=3 output channels, 3x3 kernels
r = torch.ones(2, 2, 5, 5)  # batch of 2, 2 channels, 5x5
p(r)
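My current suspicion is that the loop in forward is where the dimensions go wrong: the first mask turns the [2, 2, 5, 5] input into [2, 3, 3, 3] (o=3 channels), the second mask still expects i=2 channels, and self.comb, which is sized for masks*i = 16 channels, never receives an input like that at all. If the idea is instead to apply every mask to the same input and let the 1x1 convolution combine them, I imagine forward would have to look roughly like this sketch (untested, and it assumes the masks are built as nn.Conv2d(i, i, m, bias=False) rather than nn.Conv2d(i, o, m, bias=False), so that the concatenation really has masks*i channels):

    def forward(self, x):
        # run every mask on the SAME input instead of chaining them
        outs = [F.sigmoid(conv(x)) for conv in self.conv]
        # concatenate along the channel axis: (N, masks * i, H, W)
        x = torch.cat(outs, dim=1)
        # the 1x1 combination conv maps masks * i channels down to o
        return self.comb(x)

Is that the right way to wire the masks together, or can the chained version be fixed some other way?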
Any help would be greatly appreciated!