RuntimeError: The size of tensor a (26) must match the size of tensor b (4) at non-singleton dimension 1

Hello All,

I am a beginner in PyTorch, and I am getting the following error after using a multilabel output:
“RuntimeError: The size of tensor a (26) must match the size of tensor b (4) at non-singleton dimension 1”

in this line:
correct += (pred.argmax(1) == y).type(torch.float).sum().item()

I have reached a dead end and cannot understand the issue. I have tried different solutions found on the PyTorch forum, as it seems to be a common error.
Please help.

The input data has size [128, 2000], and the labels have size [128, 4]. The data is numeric.

My code is below:

Define the model, loss, and optimizer:

class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        #self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(D_in, H),
            nn.ReLU(),  # rectified linear unit activation function
            nn.Linear(H, H),
            nn.ReLU(),
            nn.Linear(H, H),
            nn.ReLU(),
            nn.Linear(H, D_out),
            nn.Softmax())

    def forward(self, x):
        #x = self.flatten(x)
        x = self.linear_relu_stack(x)
        logits = nn.functional.softmax(x)

        return logits

model = NeuralNetwork()

loss_fn = torch.nn.MSELoss(reduction='sum')
#loss_fn = nn.CrossEntropyLoss()
#optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
#optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
losses=[]

def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)
        losses.append(loss.item())
        print(f" pred: {pred} , y: {y}")

        # Backpropagation
        optimizer.zero_grad()  # zero the gradient buffers
        loss.backward()
        optimizer.step()  # does the update

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}] ")

def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    with torch.no_grad():
        for X, y in dataloader:
            pred = model(X)

            print(f" pred: {pred} , y: {y}")

            test_loss += loss_fn(pred, y).item()
            pred.size
            y.size
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
            print(f" pred: {pred} , y: {y}")

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)

print("Done!")

I would start by checking whether the shape of the model output matches what you expect (for a classification task it would typically be [batch_size, num_classes]).
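
Since y has shape [128, 4], the labels look one-hot encoded (or genuinely multilabel), while pred.argmax(1) collapses each prediction to a single class index of shape [batch]. Comparing a [batch] tensor with a [batch, 4] tensor is exactly the broadcast mismatch in the error message. Here is a minimal sketch of both possible fixes; it assumes the shapes from your post and uses random stand-in data instead of your dataloader:

import torch

# Stand-ins for model(X) and y, matching the shapes in the post:
# pred is [26, 4] (per-class probabilities), y is [26, 4] (one-hot targets).
pred = torch.rand(26, 4)
y = torch.zeros(26, 4)
y[torch.arange(26), torch.randint(0, 4, (26,))] = 1.0  # fake one-hot labels

# pred.argmax(1) has shape [26]; comparing it with y ([26, 4]) broadcasts
# 26 against 4, which raises the reported RuntimeError.

# If y is one-hot (exactly one 1 per row), compare class indices instead:
correct = (pred.argmax(1) == y.argmax(1)).float().sum().item()

# If y is genuinely multilabel (several 1s per row possible), threshold the
# probabilities and require all four outputs to match:
correct_multilabel = ((pred > 0.5) == y.bool()).all(dim=1).float().sum().item()

As an aside, the posted model applies softmax twice (once via nn.Softmax() in the Sequential and again via nn.functional.softmax in forward); and if this is a single-label task, the usual setup would be nn.CrossEntropyLoss with raw logits (no softmax layer) and integer class indices as y.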