RuntimeError: mat1 and mat2 shapes cannot be multiplied (128x16 and 64x32)

Explanation: I created a model with five inputs: three CNN branches and two linear branches. Their outputs are concatenated and fed to a final layer for binary classification. The shapes of the input images are (32,128), (16,64), and (8,32), and the two linear inputs have 64 and 16 features respectively.

Here is the code of the model that I used:

class multi_net(nn.Module):
  """Five-branch network: three CNN branches on image inputs and two
  linear (MLP) branches on vector inputs, concatenated into a final
  binary classifier.

  Expected input shapes (batch-first, single-channel images):
    input_1: (N, 1, 32, 128)
    input_2: (N, 1, 16, 64)
    input_3: (N, 1, 8, 32)
    input_4: (N, 64)
    input_5: (N, 16)

  Returns logits of shape (N, 2).

  NOTE(review): the reported runtime error (mat1 128x16 vs mat2 64x32)
  means the tensor passed as input_4 had 16 features — i.e. the 4th and
  5th inputs were most likely swapped at the call site; verify the
  argument order in the training loop.
  """

  def __init__(self):
    super(multi_net, self).__init__()
    # conv branch 1 (input_1, 32x128)
    self.conv_1_1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1)
    self.conv_1_2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
    self.conv_1_3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
    # conv branch 2 (input_2, 16x64)
    self.conv_2_1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1)
    self.conv_2_2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
    self.conv_2_3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
    # conv branch 3 (input_3, 8x32)
    self.conv_3_1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1)
    self.conv_3_2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
    # linear branch 1 (input_4): 64 -> 32 -> 16 -> 8
    self.fc_1_1 = nn.Linear(64, 32)
    self.fc_1_2 = nn.Linear(32, 16)
    self.fc_1_3 = nn.Linear(16, 8)
    # linear branch 2 (input_5): 16 -> 8 -> 8
    self.fc_2_1 = nn.Linear(16, 8)
    # FIX: fc_2_1 outputs 8 features, so fc_2_2 must take 8, not 16.
    self.fc_2_2 = nn.Linear(8, 8)
    # Final classifier. Concatenated feature widths per branch:
    #   branch 1: 64 ch * 4 * 16 = 4096  (three 2x2 pools on 32x128)
    #   branch 2: 64 ch * 4 * 16 = 4096  (two 2x2 pools on 16x64)
    #   branch 3: 32 ch * 4 * 16 = 2048  (one 2x2 pool on 8x32)
    #   branch 4: 8, branch 5: 8
    # FIX: the original (8 + 8 + 256 + 64 + 64) = 400 did not match.
    self.fc_cat_1 = nn.Linear(4096 + 4096 + 2048 + 8 + 8, 40)  # = 10256
    self.fc_cat_2 = nn.Linear(40, 2)

  def forward(self, input_1, input_2, input_3, input_4, input_5):
    # Branch 1: three conv+relu+pool stages, then flatten -> (N, 4096).
    ch1 = F.max_pool2d(F.relu(self.conv_1_1(input_1)), (2, 2))
    ch1 = F.max_pool2d(F.relu(self.conv_1_2(ch1)), (2, 2))
    ch1 = F.max_pool2d(F.relu(self.conv_1_3(ch1)), (2, 2))
    ch1 = torch.flatten(ch1, 1)
    # Branch 2: two conv+relu+pool stages, then a bare conv -> (N, 4096).
    # NOTE(review): the last conv has no relu/pool — presumably intentional;
    # confirm, since branch 1 applies relu+pool after every conv.
    ch2 = F.max_pool2d(F.relu(self.conv_2_1(input_2)), (2, 2))
    ch2 = F.max_pool2d(F.relu(self.conv_2_2(ch2)), (2, 2))
    ch2 = torch.flatten(self.conv_2_3(ch2), 1)
    # Branch 3: one conv+relu+pool stage, then a bare conv -> (N, 2048).
    ch3 = F.max_pool2d(F.relu(self.conv_3_1(input_3)), (2, 2))
    ch3 = torch.flatten(self.conv_3_2(ch3), 1)
    # Branch 4: three linear layers (no activations in the original) -> (N, 8).
    ch4 = self.fc_1_3(self.fc_1_2(self.fc_1_1(input_4)))
    # Branch 5: two linear layers -> (N, 8).
    ch5 = self.fc_2_2(self.fc_2_1(input_5))
    # Fuse all branches and classify.
    con_cat = torch.cat((ch1, ch2, ch3, ch4, ch5), dim=1)
    con_cat = F.relu(self.fc_cat_1(con_cat))
    return self.fc_cat_2(con_cat)

Error Traceback:

Traceback (most recent call last):
File "/content/the_newtork.py", line 214, in
train(model, 2, my_train_loader, device)
File "/content/the_newtork.py", line 198, in train
outputs = model(img__1, img__2, img__3, img__4, img__5)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/content/the_newtork.py", line 149, in forward
ch4 = self.fc_1_1(input_4)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py", line 114, in forward
return F.linear(input, self.weight, self.bias)

The mismatch is raised in ch4 = self.fc_1_1(input_4). The error "128x16 and 64x32" means input_4 has shape (128, 16) — batch 128 with 16 features — while fc_1_1 = nn.Linear(64, 32) expects 64 input features. Since the fifth input is the one with 16 features, input_4 and input_5 were almost certainly swapped at the call site (model(..., img__4, img__5)). Separately, the model has two latent bugs that will surface once that is fixed: fc_2_2 = nn.Linear(16, 8) must take 8 in_features because fc_2_1 outputs 8, and fc_cat_1's in_features (400) does not match the concatenated width of the flattened conv branches (4096 + 4096 + 2048 + 8 + 8 = 10256 for the stated image sizes).