I am making an encoder decoder model but this error occur in my encoder.
<ipython-input-2-2b8ee621aeff> in forward(self, x)
29
30 def forward(self,x):
---> 31 output=self.encoder(x)
32 ok=self.decoder(output)
33
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
<ipython-input-3-b65547714ad9> in forward(self, x)
61
62
---> 63 conv2a = self.conv2a(pool1)
64 rect2a = self.leaky_relu(conv2a)
65 conv2b = self.conv2b(rect2a)
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in forward(self, input)
318 def forward(self, input):
319 return F.conv2d(input, self.weight, self.bias, self.stride,
--> 320 self.padding, self.dilation, self.groups)
321
322
TypeError: conv2d(): argument 'input' (position 1) must be Tensor, not tuple
My CODE
class GRUNet(nn.Module):
def __init__(self):
print('\nGruInitializing')
super(GRUNet,self).__init__()
self.batch_size, self.img_w, self.img_h=1,128,128
self.input_shape = (self.batch_size, 3, self.img_w, self.img_h)
#number of filters for each convolution layer in the encoder
self.n_convfilter = [96, 128, 256, 256, 256, 256]
#the dimension of the fully connected layer
self.n_fc_filters = [1024]
#number of filters for each 2d convolution layer in the decoder
self.n_deconvfilter = [128, 128, 128, 64, 32, 2]
self.conv2d_filter_shape = (self.n_deconvfilter[0], self.n_deconvfilter[0], 3, 3, 3)
self.encoder=encoder(self.input_shape,self.n_convfilter,\
self.n_fc_filters,self.conv2d_filter_shape)
self.decoder=decoder(self.n_deconvfilter)
def forward(self,x):
output=self.encoder(x)
ok=self.decoder(output)
And the encoder structure:
class encoder(nn.Module):
def __init__(self,input_shape,n_convfilter,\
n_fc_filters, conv2d_filter_shape):
print("\ninitalizing \"encoder\"")
super(encoder,self).__init__()
#conv1
self.conv1a = Conv2d(input_shape[1], n_convfilter[0], 7, padding=3)
self.conv1b = Conv2d(n_convfilter[0], n_convfilter[0], 3, padding=1)
#conv2
self.conv2a = Conv2d(n_convfilter[0], n_convfilter[1], 3, padding=1)
self.conv2b = Conv2d(n_convfilter[1], n_convfilter[1], 3, padding=1)
self.conv2c = Conv2d(n_convfilter[0], n_convfilter[1], 1)
def forward(self, x):
#x is the input and the size of x is (batch_size, channels, heights, widths).
conv1a = self.conv1a(x)
rect1a = self.leaky_relu(conv1a)
conv1b = self.conv1b(rect1a)
rect1 = self.leaky_relu(conv1b)
pool1 = self.pool(rect1)
conv2a = self.conv2a(pool1)
rect2a = self.leaky_relu(conv2a)
conv2b = self.conv2b(rect2a)
rect2 = self.leaky_relu(conv2b)
conv2c = self.conv2c(pool1)
res2 = conv2c + rect2
pool2 = self.pool(res2)
I am Testing it on an Tensor(randomly generated array):
arr=np.random.randn(1,3,128,128)
arr=torch.tensor(arr)
model(arr)