Need help resolving a ValueError and batch size configuration

Hi, I’m new to PyTorch and ML and trying to figure this out, but I’m not sure how to resolve this error. I know there’s a batch size mismatch, but I’m not sure where else to configure it. I don’t want to use DataLoaders but am trying to mimic them. Any help would be appreciated <3

This is the error I’m getting:

```
Traceback (most recent call last):
  File "Project/src/ClassifierInterpretability/models/convolutional_neural_network.py", line 162, in <module>
    train()
  File "Project/src/ClassifierInterpretability/models/convolutional_neural_network.py", line 130, in train
    loss = criterion(outputs, y.unsqueeze(1))
  File "~\anaconda3\envs\env_pytorch\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "~\anaconda3\envs\env_pytorch\lib\site-packages\torch\nn\modules\loss.py", line 1152, in forward
    label_smoothing=self.label_smoothing)
  File "~\anaconda3\envs\env_pytorch\lib\site-packages\torch\nn\functional.py", line 2846, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
ValueError: Expected input batch_size (1) to match target batch_size (4).
```

My code:

```python

from src.ClassifierInterpretability.preprocessing import X_test, y_test, X_train, y_train
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
import time

'''
Learning Multiple Layers of Features from Tiny Images, Alex Krizhevsky, 2009.

CIFAR classes
0: airplane
1: automobile
2: bird
3: cat
4: deer
5: dog
6: frog
7: horse
8: ship
9: truck
'''
#Determining GPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'

X_train = X_train.astype(np.float32)
X_train /= 255.0

#CNN Architecture
class NeuralNet(nn.Module):
    def __init__(self):
        super(NeuralNet, self).__init__()

        self.cnn_layers = Sequential(
            # Defining a 2D convolution layer
            Conv2d(1, 4, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(4),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
            # Defining another 2D convolution layer
            Conv2d(4, 4, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(4),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
        )

        self.linear_layers = Sequential(
            Linear(3 * 32 * 32, 10)
        )

    # Defining the forward pass
    def forward(self, x):
        x = self.cnn_layers(x)
        x = x.view(x.size(0), -1)
        x = self.linear_layers(x)
        return x

#define Model
net = NeuralNet()
print('NET')
print(net)
#Defining the loss function
criterion = nn.CrossEntropyLoss()

#defining the optimizer
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)


#CONFIGURE:
load_Pretrained = False
filename = '' #To input filename for pretrained model

#TRAINING
def train():
    # X_train was already converted to float32 when we normalized it above
    X_train_trch, Y_train_trch = torch.tensor(X_train), torch.tensor(y_train.astype(np.int64))
    X_test_trch, Y_test_trch = torch.tensor(X_test.astype(np.float32)), torch.tensor(y_test.astype(np.int64))

    for epoch in range(2):  # loop over the dataset multiple times

        running_loss = 0.0
        batch_size = 4
        for i in range(0, len(X_train), batch_size):

            # slice a mini-batch of inputs and labels
            X = X_train_trch[i:i + batch_size].unsqueeze(0)
            y = Y_train_trch[i:i + batch_size]

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
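            # note: X already gained a leading dim from the unsqueeze(0) above,
            # and the call below adds another one (see the reply at the end)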
            outputs = net(X.unsqueeze(0))

            loss = criterion(outputs, y.unsqueeze(1))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if (i // batch_size) % 2000 == 1999:    # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training - Saving model...')
    save_mdl(net, epoch)

#SAVE model
def save_mdl(net, epoch):
    filename = "model: epoch" + str(epoch + 1) + ".pth"
    torch.save(net.state_dict(), filename)

'''
TO LOAD
model.load_state_dict(torch.load(filename))
model.eval()
'''
def load_mdl(filename):
    net.load_state_dict(torch.load(filename))
    net.eval()

if load_Pretrained:
    load_mdl(filename)
    print('Pretrained model loaded')
    train() # May need to write new training for pre-trained
else:
    train()

```

Looks like you are calling unsqueeze(0) on X twice, which is what causes this.
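
The slice `X_train_trch[i:i + batch_size]` already has a leading batch dimension of size 4, so the first `unsqueeze(0)` turns it into shape `[1, 4, ...]`, and the second one in `net(X.unsqueeze(0))` adds yet another dim. That is why the model output has batch size 1 while the target has batch size 4. `nn.CrossEntropyLoss` also expects the target to be a 1-D tensor of class indices with shape `[batch_size]`, so the `y.unsqueeze(1)` should go too.

Here is a minimal sketch of the corrected inner loop, assuming `X_train_trch` is already laid out as `[N, C, H, W]` (variable names taken from your code):

```python
for i in range(0, len(X_train_trch), batch_size):
    X = X_train_trch[i:i + batch_size]  # shape [batch_size, C, H, W] -- slicing already batches
    y = Y_train_trch[i:i + batch_size]  # shape [batch_size], class indices 0-9

    optimizer.zero_grad()
    outputs = net(X)                    # shape [batch_size, 10], no extra unsqueeze needed
    loss = criterion(outputs, y)        # CrossEntropyLoss wants a 1-D target, no unsqueeze(1)
    loss.backward()
    optimizer.step()
```

Two more things to check once the batch dimensions line up (guesses, since I can’t see your preprocessing): if `X_train` comes out of CIFAR loading as `[N, 32, 32, 3]`, you’ll need `X = X.permute(0, 3, 1, 2)` to get channels-first, and the first conv should then be `Conv2d(3, 4, ...)` rather than `Conv2d(1, 4, ...)`. Also, after two 2x2 max-pools a 32x32 input shrinks to 8x8, so the linear layer should be `Linear(4 * 8 * 8, 10)` instead of `Linear(3 * 32 * 32, 10)`.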