Given normalized_shape=[*], expected input with shape [*, *], but got input of size[*]

While training the model as below, I got this error: Given normalized_shape=[13], expected input with shape [*, 13], but got input of size[477].

Can anybody advise how to overcome this error?
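
For context: nn.LayerNorm(13) normalizes over the last dimension of its input and requires that dimension to be exactly 13, so a 1-D input of length 477 fails the shape check. A minimal sketch that reproduces the error and shows an input shape that passes (the tensors here are illustrative, not from the actual model):

import torch
from torch import nn

layer_norm = nn.LayerNorm(13)   # normalized_shape=[13]

bad = torch.randn(477)          # last dimension is 477, not 13
# layer_norm(bad)               # raises the RuntimeError quoted above

good = torch.randn(36, 13)      # [batch, 13]: last dimension matches
out = layer_norm(good)          # works; out.shape is torch.Size([36, 13])

So the first thing to check is the shape of the tensor that reaches the LayerNorm layer inside the model.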

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model.to(device)
criterion.to(device)
model.train()

epochs = 1000
print_every = 50  # epochs
# threshold = 0.5  # prediction value greater than this is interpreted as a prediction of 1

for e in range(epochs):
    running_train_loss = 0
    running_train_accuracy = 0
    running_train_recall = 0
    running_train_precision = 0
    running_train_f1 = 0
    for features, labels in trainloader:
        features = features.to(device)
        labels = labels.to(device)
        
        output = model(features)
        loss = criterion(output, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        current_loss = loss.item()
        predictions = output.argmax(dim=1)
        current_accuracy = (predictions == labels).float().mean().item()
        # Guard against batches with no positive labels / positive predictions,
        # which would otherwise produce NaN from a mean over an empty tensor
        current_recall = (predictions == labels)[labels == 1].float().mean().item() if (labels == 1).sum() != 0 else 0
        current_precision = (predictions == labels)[predictions == 1].float().mean().item() if (predictions == 1).sum() != 0 else 0
        current_f1 = 2 * current_recall * current_precision / (current_recall + current_precision) if current_recall > 0 and current_precision > 0 else 0

        running_train_loss += current_loss * features.shape[0]
        running_train_accuracy += current_accuracy * features.shape[0]
        running_train_recall += current_recall * features.shape[0]
        running_train_precision += current_precision * features.shape[0]
        running_train_f1 += current_f1 * features.shape[0]
    running_train_loss /= len(trainloader.sampler)
    running_train_accuracy /= len(trainloader.sampler)
    running_train_recall /= len(trainloader.sampler)
    running_train_precision /= len(trainloader.sampler)
    running_train_f1 /= len(trainloader.sampler)

    model.eval()
    running_validation_loss = 0
    running_validation_accuracy = 0
    running_validation_recall = 0
    running_validation_precision = 0
    running_validation_f1 = 0
    with torch.no_grad():
        for features, labels in validationloader:
            features = features.to(device)
            labels = labels.to(device)

            output = model(features)
            loss = criterion(output, labels)

            current_loss = loss.item()
            predictions = output.argmax(dim=1)
            current_accuracy = (predictions == labels).float().mean().item()
            # Same guards as in the training loop to avoid NaN on empty means
            current_recall = (predictions == labels)[labels == 1].float().mean().item() if (labels == 1).sum() != 0 else 0
            current_precision = (predictions == labels)[predictions == 1].float().mean().item() if (predictions == 1).sum() != 0 else 0
            current_f1 = 2 * current_recall * current_precision / (current_recall + current_precision) if current_recall > 0 and current_precision > 0 else 0

            running_validation_loss += current_loss * features.shape[0]
            running_validation_accuracy += current_accuracy * features.shape[0]
            running_validation_recall += current_recall * features.shape[0]
            running_validation_precision += current_precision * features.shape[0]
            running_validation_f1 += current_f1 * features.shape[0]
    running_validation_loss /= len(validationloader.sampler)
    running_validation_accuracy /= len(validationloader.sampler)
    running_validation_recall /= len(validationloader.sampler)
    running_validation_precision /= len(validationloader.sampler)
    running_validation_f1 /= len(validationloader.sampler)
    model.train()
    
    train_logs['losses'].append(running_train_loss)
    train_logs['accuracies'].append(running_train_accuracy)
    train_logs['recalls'].append(running_train_recall)
    train_logs['precisions'].append(running_train_precision)
    train_logs['f1_scores'].append(running_train_f1)
    validation_logs['losses'].append(running_validation_loss)
    validation_logs['accuracies'].append(running_validation_accuracy)
    validation_logs['recalls'].append(running_validation_recall)
    validation_logs['precisions'].append(running_validation_precision)
    validation_logs['f1_scores'].append(running_validation_f1)
    
    if e == 0 or (e + 1) % print_every == 0:
        print(f"Epoch : {e + 1}")
        print(f"Training Loss : {running_train_loss}")
        print(f"Training Accuracy : {round(running_train_accuracy * 100, 2)}%")
        print(f"Training Recall : {round(running_train_recall * 100, 2)}%")
        print(f"Training Precision : {round(running_train_precision * 100, 2)}%")
        print(f"Training F1 Score : {round(running_train_f1, 4)}")
        print(f"Validation Loss : {running_validation_loss}")
        print(f"Validation Accuracy : {round(running_validation_accuracy * 100, 2)}%")
        print(f"Validation Recall : {round(running_validation_recall * 100, 2)}%")
        print(f"Validation Precision : {round(running_validation_precision * 100, 2)}%")
        print(f"Validation F1 Score : {round(running_validation_f1, 4)}")
        print()
    
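As an aside, averaging per-batch precision and recall (even weighted by batch size, as above) only approximates the epoch-level metrics; accumulating raw confusion counts and computing the metrics once per epoch is exact. A minimal sketch under the same assumption as the code above (binary classes 0/1, reusing the names from the loop):

with torch.no_grad():
    tp = fp = fn = 0
    for features, labels in trainloader:
        predictions = model(features.to(device)).argmax(dim=1).cpu()
        tp += ((predictions == 1) & (labels == 1)).sum().item()  # true positives
        fp += ((predictions == 1) & (labels == 0)).sum().item()  # false positives
        fn += ((predictions == 0) & (labels == 1)).sum().item()  # false negatives
    precision = tp / (tp + fp) if tp + fp else 0
    recall = tp / (tp + fn) if tp + fn else 0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0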

Hi,

Which line is giving you this error? Could you post the full traceback?
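
In the meantime, a quick way to locate the mismatch is to print the batch shape right before the forward pass (illustrative snippet, reusing the names from your loop):

for features, labels in trainloader:
    print(features.shape)  # LayerNorm(13) needs this to end in 13, e.g. [batch, 13]
    model(features.to(device))
    break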