Hello,
I have image sequences for 300 persons; each person has 8 images, and each image has 6 channels. I pass each sequence to my network as a tensor of shape [8, 6, 128, 64]. My network is given below:
import torch
import torch.nn as nn

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.CNNLayer1 = nn.Sequential(
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=4),
            nn.Tanh(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.CNNLayer2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=4),
            nn.Tanh(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.CNNLayer3 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=4),
            nn.Tanh(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.DropoutLayer = nn.Dropout(0.6)
        self.FullyConnected = nn.Linear(1, 8, 32*19*11)
        # here I have to put an RNN and then convert the sequence to a
        # [1, 8, channels*width*height] matrix
        # after the RNN I want to get a [1, 128] feature vector for the whole
        # sequence, i.e. the 8 images should yield one vector

    def forward(self, inp):
        # print(inp.shape)  # prints torch.Size([1, 8, 6, 128, 64]) [batchSize, sequenceLength, channels, H, W]
        out = self.CNNLayer1(inp[0])  # drop the batch dim, treat the 8 images as a batch
        out = self.CNNLayer2(out)
        out = self.CNNLayer3(out)
        out = self.DropoutLayer(out)
        print(out.shape)  # prints torch.Size([8, 32, 19, 11]) [sequenceLength, channels, H, W]
        out = self.FullyConnected(out)
        return out
When I run it, this error arises:
RuntimeError: size mismatch, m1: [4864 x 11], m2: [1 x 8] at /pytorch/aten/src/THC/generic/THCTensorMathBlas.cu:266
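
From the error, I suspect two things: nn.Linear's signature is (in_features, out_features, bias), so nn.Linear(1, 8, 32*19*11) actually builds a 1-in / 8-out layer (the 32*19*11 just lands in the bias argument), and the [8, 32, 19, 11] dropout output is never flattened before the matrix multiply (m1: [4864 x 11] is 8*32*19 rows by 11 columns). Here is a minimal sketch of the head I think I need, assuming the [8, 32, 19, 11] shape printed above; the 128 is just my target feature size:

import torch
import torch.nn as nn

# nn.Linear(1, 8, 32*19*11) makes a 1-in / 8-out layer and treats
# 32*19*11 as the (truthy) bias flag; I think it should be:
fc = nn.Linear(32 * 19 * 11, 128)   # (in_features, out_features)

x = torch.randn(8, 32, 19, 11)      # stand-in for the CNN output of one sequence
x = x.view(x.size(0), -1)           # flatten to [8, 32*19*11]
print(fc(x).shape)                  # torch.Size([8, 128]), one vector per image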
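
And for the RNN part in the comments, this is the kind of thing I have in mind; the GRU and the hidden size of 128 are just my assumptions, not something I have settled on. The 8 per-image vectors from the linear layer above become a length-8 sequence with batch size 1, and the last hidden state is the single descriptor for the whole sequence:

import torch
import torch.nn as nn

rnn = nn.GRU(input_size=128, hidden_size=128, batch_first=True)

per_image = torch.randn(8, 128)     # stand-in for the linear layer's output
seq = per_image.unsqueeze(0)        # [1, 8, 128]: a batch of one sequence
output, h_n = rnn(seq)              # h_n: [num_layers, 1, 128]
feature = h_n[-1]                   # [1, 128], one vector for all 8 images
print(feature.shape)                # torch.Size([1, 128])

Does this look like the right way to wire it in after the CNN layers?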