ValueError: Expected input batch_size (1225) to match target batch_size (25)

Hi, I’m getting the following error. I’ve tried every solution I could find, but nothing works.

I’m using the VGG16 architecture with 3-channel images and 9 classes.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class VGG16(nn.Module):
    def __init__(self, num_classes):
        super(VGG16, self).__init__()
        self.block_1 = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        self.block_2 = nn.Sequential(
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=128,
                      out_channels=128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        self.block_3 = nn.Sequential(
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        self.block_4 = nn.Sequential(
            nn.Conv2d(in_channels=256,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        self.block_5 = nn.Sequential(
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, num_classes)
        )

        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                m.weight.detach().normal_(0, 0.05)
                if m.bias is not None:
                    m.bias.detach().zero_()
            elif isinstance(m, torch.nn.Linear):
                m.weight.detach().normal_(0, 0.05)
                m.bias.detach().zero_()

    def forward(self, x):
        x = self.block_1(x)
        x = self.block_2(x)
        x = self.block_3(x)
        x = self.block_4(x)
        x = self.block_5(x)
        logits = self.classifier(x.view(-1, 512))
        probas = F.softmax(logits, dim=1)
        return logits, probas
```

Replace `x.view(-1, 512)` with `x.view(x.size(0), -1)` so the batch dimension is preserved when you flatten. After `block_5`, a 224×224 input gives a 512×7×7 feature map, and `x.view(-1, 512)` folds the 7×7 = 49 spatial positions of every sample into the batch dimension, which is exactly why your batch of 25 turns into 25 × 49 = 1225 inputs against 25 targets. Once you flatten per sample, the first layer of `self.classifier` also needs to accept 512 * 7 * 7 features instead of 512.
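Here is a minimal sketch of the idea, assuming 224×224 inputs so that `block_5` outputs a 512×7×7 map (the 7×7 is inferred from your error: 1225 / 25 = 49):

```python
import torch
import torch.nn as nn

# Fake "block_5 output": a batch of 25 with a 512 x 7 x 7 feature map,
# i.e. the shapes implied by the error message.
x = torch.randn(25, 512, 7, 7)

print(x.view(-1, 512).shape)        # torch.Size([1225, 512]) -> 25 * 49 rows
print(x.view(x.size(0), -1).shape)  # torch.Size([25, 25088]) -> batch kept

# The classifier's first layer then has to take 512 * 7 * 7 = 25088 features:
classifier = nn.Sequential(
    nn.Linear(512 * 7 * 7, 4096),
    nn.ReLU(True),
    nn.Linear(4096, 4096),
    nn.ReLU(True),
    nn.Linear(4096, 9)  # your 9 classes
)
logits = classifier(x.view(x.size(0), -1))
print(logits.shape)  # torch.Size([25, 9]) -- matches the 25 targets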