What causes this error??mat1 and mat2 shapes cannot be multiplied

What causes this error?
RuntimeError: mat1 and mat2 shapes cannot be multiplied (322560x1 and 46080x7)
x = torch.ones(128,1,50,432)
class Net(nn.Module):

def __init__(self):

    super(Net, self).__init__()

    self.conv1 = nn.Conv2d(1, 60,kernel_size=(1, 15),stride=(1,3))

    self.conv2 = nn.Conv2d(60, 60, kernel_size=(1, 4),stride=(1,2))

    self.conv3 = nn.Conv2d(60, 60, kernel_size=(30,1),stride=(1,3))

    self.conv4 = nn.Conv2d(60, 90, kernel_size=(1, 3),stride=(1,1))

    self.conv5 = nn.Conv2d(90, 120, kernel_size=(1, 1),stride=(1,1))

    self.pool = nn.MaxPool2d(kernel_size=(1, 2),stride=(1,2))

    self.soft = nn.Softmax()

    self.li=nn.Linear(128*120*3,7)

def forward(self, x):

  x = self.conv1(x)

  print(x.shape)

  x = self.pool(x)

  print(x.shape)

  x = self.conv2(x)

  print(x.shape)

  x = self.pool(x)

  print(x.shape)

  x = self.conv3(x)

  print(x.shape)

  x = self.conv4(x)

  print(x.shape)

  x = self.pool(x)

  print(x.shape)

  x = self.conv5(x)

  print(x.shape)

  x = self.pool(x)

  print(x.shape)

  x = self.li(x)

  print(x.shape)

  x = self.soft(x)

  return x

U have to mention the dimension parameter in the nn.softmax

You should flatten your input before you pass it to the linear layer, and make sure that the linear layer input size is not the minibatch size, which it seems like it might be in your implementation.

in other words, if you change the linear layer to this:
self.li=nn.Linear(120*21,7) # the 128 from before looks like the minibatch size, which doesn't belong here

And add a flattening step before the linear layer in your forward pass, like so:

x = nn.Flatten(1, -1)(x)
x = self.li(x)

Then your code will run.