RuntimeError in PyTorch Model with Multi-Class Classification

Hello,

I’m currently working on a PyTorch model that classifies 722 x 722 images into 9 classes. However, I keep encountering the following runtime error:

RuntimeError: The size of tensor a (32) must match the size of tensor b (9) at non-singleton dimension 1

I’m not sure how to resolve this issue. Here’s a brief overview of my model’s structure:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from making_dataset import *

class Net(nn.Module):
    def __init__(self):

        super(Net, self).__init__()

        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

        self.fc1 = nn.Linear(16 * 53 * 53, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 9)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        print(x.shape)
        x = x.view(-1, 16 * 53 * 53)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

train_loader, valid_loader, test_loader = get_dataloader(32, 0.7, 0.15)


for epoch in range(2):  

    net.train()  
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()


    net.eval()  
    valid_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():  
        for data in valid_loader:
            inputs, labels = data
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            valid_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()


    print(f'Epoch {epoch+1}, Train loss: {loss:.3f}, Valid loss: {valid_loss/len(valid_loader):.3f}, Accuracy: {100 * correct / total:.2f}%')

print('Finished Training')

Replace x = x.view(-1, 16 * 53 * 53) with x = x.view(x.size(0), -1). The former silently changes the batch dimension whenever the flattened feature count per image doesn't equal 16 * 53 * 53, so the model's outputs end up with a different batch size than the labels, which is what triggers the mismatch inside CrossEntropyLoss. Note that 16 * 53 * 53 would only be correct for 224 x 224 inputs; with 722 x 722 images the feature map after the second pooling layer is 16 x 177 x 177 (722 → 718 → 359 → 355 → 177), so self.fc1 also needs in_features = 16 * 177 * 177 or it will fail with a new shape error.
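
For reference, here is a minimal sketch of the model with both changes applied, assuming the single-channel 722 x 722 inputs described above (the shape comments trace one image through the network):

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)       # 722 -> 718
        self.pool = nn.MaxPool2d(2, 2)        # 718 -> 359, and later 355 -> 177
        self.conv2 = nn.Conv2d(6, 16, 5)      # 359 -> 355

        self.fc1 = nn.Linear(16 * 177 * 177, 120)  # matches the 16 x 177 x 177 feature map
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 9)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # (N, 6, 359, 359)
        x = self.pool(F.relu(self.conv2(x)))  # (N, 16, 177, 177)
        x = x.view(x.size(0), -1)             # keep the batch dimension intact
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Quick sanity check with a dummy batch of 2 images:
net = Net()
dummy = torch.randn(2, 1, 722, 722)
print(net(dummy).shape)  # expected: torch.Size([2, 9])

With the batch dimension preserved, the outputs have shape (batch_size, 9) and line up with the labels in criterion(outputs, labels).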