RuntimeError: size mismatch, m1: [4 x 512], m2: [1024 x 512]

Hi,
I am facing this issue. How do I fix it?
This is my forward function:

def forward(self, input, time_window=100):
    c1_mem = c1_spike = torch.zeros(batch_size, 16, 28, 28, device=device)
    print(c1_mem.shape)
    c2_mem = c2_spike = torch.zeros(batch_size, 32, 14, 14, device=device)
    print(c2_mem.shape)
    c3_mem = c3_spike = torch.zeros(batch_size, 64, 7, 7, device=device)
    print(c3_mem.shape)

    h1_mem = h1_spike = h1_sumspike = torch.zeros(batch_size, 512, device=device)
    print(h1_mem.shape)
    h2_mem = h2_spike = h2_sumspike = torch.zeros(batch_size, 10, device=device)
    print(h2_mem.shape)

    for step in range(time_window):  # simulation time steps

        # x = input.expand(input.data.shape[0], 3, 28, 28)
        # print(x.shape)
        x = input > torch.rand(input.size(), device=device)  # prob. firing
        # print(x.shape)

        c1_mem, c1_spike = mem_update(self.conv1, x.float(), c1_mem, c1_spike)
        # print(c1_mem.shape)

        x = F.avg_pool2d(c1_spike, 2)

        c2_mem, c2_spike = mem_update(self.conv2, x, c2_mem, c2_spike)

        x = F.avg_pool2d(c2_spike, 2)

        c3_mem, c3_spike = mem_update(self.conv3, x, c3_mem, c3_spike)
        # print(c3_mem.shape)

        x = F.avg_pool2d(c3_spike, 4)

        x = x.view(-1, 512)
        # print(x.shape)

        h1_mem, h1_spike = mem_update(self.fc1, x, h1_mem, h1_spike)
        h1_sumspike += h1_spike
        h2_mem, h2_spike = mem_update(self.fc2, h1_spike, h2_mem, h2_spike)
        h2_sumspike += h2_spike

    outputs = h2_sumspike / time_window
    return outputs

The error is quite clear: for the matrix multiplication inside the linear layer to go through, the shapes would have to be m1: [4 x 512] and m2: [512 x 1024], i.e. the inner dimensions must match. Your model's fully connected layer expects 1024 input features, while the conv layers output 512.
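For reference, here is a minimal sketch (not your actual model) that reproduces the same kind of error by feeding 512 features into a linear layer that expects 1024:

import torch
import torch.nn as nn

fc = nn.Linear(1024, 512, bias=False)  # expects 1024 input features (weight is [512, 1024])
x = torch.randn(4, 512)                # flattened activations with only 512 features
out = fc(x)                            # raises a RuntimeError about mismatched shapes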
Share your model too.

This is my model:
class SCNN(nn.Module):
    def __init__(self):
        super(SCNN, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)

        self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)

        self.conv3 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)

        self.fc1 = nn.Linear(128, 1024, bias=False)

        self.fc2 = nn.Linear(1024, 10)

A better way to write this, since it flattens the output of the conv layer, is:

shape = torch.prod(torch.tensor(x.shape[1:])).item()
x = x.view(-1, shape)
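An equivalent form that keeps the batch dimension explicit (assuming the batch dimension comes first) is:

x = x.view(x.size(0), -1)          # flatten everything except the batch dimension
# or equivalently:
x = torch.flatten(x, start_dim=1)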

Also, this line:

self.fc1 = nn.Linear(128, 1024, bias=False)

should be:

self.fc1 = nn.Linear(<number of flattened features from the last layer>, 1024, ...)
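If you are unsure what that number is, one way to get it is to push a single dummy input through the conv/pool stack and read off the flattened size. A minimal sketch, assuming model is an instance of your SCNN and using the pooling sizes from your forward function; the 3x32x32 input shape here is only a placeholder and should match your real data:

import torch
import torch.nn.functional as F

def count_flat_features(model, input_shape=(3, 32, 32)):
    # Pass one dummy sample through conv1/conv2/conv3 with the same
    # average pooling as the forward pass, then count the features
    # that would reach the first fully connected layer.
    with torch.no_grad():
        x = torch.zeros(1, *input_shape)
        x = F.avg_pool2d(model.conv1(x), 2)
        x = F.avg_pool2d(model.conv2(x), 2)
        x = F.avg_pool2d(model.conv3(x), 4)
    return x.view(1, -1).size(1)

Alternatively, on recent PyTorch versions, nn.LazyLinear(1024) can infer the in_features automatically on the first forward pass.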

Further, read this post for a better understanding.