Expected input batch_size (378) to match target batch_size (756)

I am getting this error for the MLP I am creating. Please help (I am new to this)!

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data

def hidden_init(layer):
    # compute a uniform init range from the size of the layer's weight matrix
    fan_in = layer.weight.data.size()[0]
    lim = 0.5 / np.sqrt(fan_in)
    return (-lim, lim)

class Network(nn.Module):
    def __init__(self, input_dim, hidden1_dim, hidden2_dim, seed, init):
        super(Network, self).__init__()
        # have a seed for reproducibility
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(input_dim, hidden1_dim)
        self.fc2 = nn.Linear(hidden1_dim, hidden2_dim)
        self.fc3 = nn.Linear(hidden2_dim, 3)
        self.nonlin = nn.ReLU()
        if init:
            self.reset_parameters()
            
    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        
    def forward(self, x):
        h1 = self.nonlin(self.fc1(x))
        h2 = self.nonlin(self.fc2(h1))
        h3 = self.fc3(h2)
        return h3

def train(model, loss_criterion, optimizer, data_loader):
    losses = []
    avg_loss = []
    for epoch in range(0, 100):
        model.train()
        epoch_loss = 0.0
        for batch_no, (inputs, target_labels) in enumerate(data_loader):
            print(batch_no)
            optimizer.zero_grad()
            vals = model(inputs)
            target_labels = target_labels.view(-1)
            loss = loss_criterion(vals, target_labels//2)
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            
        losses.append(epoch_loss)
        avgl = epoch_loss / 1318.0  # 1318 = number of training samples
        avg_loss.append(avgl)
        
        print("Epoch: {} Loss: {:.4f} AvgLoss {:.4f}".format(epoch, epoch_loss, avgl))

    return losses, avg_loss

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class MyDataset(data.Dataset):
    def __init__(self, size, inputdf, labeldf):
        super(MyDataset, self).__init__()
        self.size = size
        self.inputdf = inputdf
        self.labeldf = labeldf

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        # drop the first column of each row, convert to a tensor, and move it to the device
        x = torch.from_numpy(self.inputdf.iloc[index, 1:].to_numpy()).float().to(device)
        y = torch.from_numpy(self.labeldf.iloc[index, 1:].to_numpy()).long().to(device)
        return x, y

data_train = MyDataset(1318, train_final, Y)
data_test = MyDataset(378, test_final, Y_test)
data_train_loader = data.DataLoader(data_train, batch_size=378, shuffle=True, num_workers=1)
data_test_loader = data.DataLoader(data_test, batch_size=378, shuffle=False, num_workers=1)

model = Network(41, 1000, 300, 5000, True)  # input_dim, hidden1_dim, hidden2_dim, seed, init
optimizer = optim.SGD(model.parameters(), lr=0.0001)
lc = torch.nn.CrossEntropyLoss()
#lc = nn.MSELoss()

total_loss, avg_loss = train(model, lc, optimizer, data_train_loader)
test_total_loss, correct, y_true, y_pred = test(model, data_test_loader, lc)

The last block of code is giving the error:

0
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-123-94b11632a3db> in <module>()
      6 #lc = nn.MSELoss()
      7 
----> 8 total_loss, avg_loss = train(model, lc, optimizer, data_train_loader)
      9 test_total_loss, correct, y_true, y_pred = test(model, data_test_loader, lc)

4 frames
<ipython-input-119-1970596fbe6c> in train(model, loss_criterion, optimizer, data_loader)
     10             vals= model(inputs)
     11             target_labels = target_labels.view(-1)
---> 12             loss = loss_criterion(vals, target_labels//2)
     13             epoch_loss += loss.item()
     14             loss.backward()

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/loss.py in forward(self, input, target)
    960     def forward(self, input: Tensor, target: Tensor) -> Tensor:
    961         return F.cross_entropy(input, target, weight=self.weight,
--> 962                                ignore_index=self.ignore_index, reduction=self.reduction)
    963 
    964 

/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
   2466     if size_average is not None or reduce is not None:
   2467         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2468     return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
   2469 
   2470 

/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
   2260     if input.size(0) != target.size(0):
   2261         raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).'
-> 2262                          .format(input.size(0), target.size(0)))
   2263     if dim == 2:
   2264         ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

ValueError: Expected input batch_size (378) to match target batch_size (756).

Most likely this view operation is wrong and creates the additional targets:

target_labels = target_labels.view(-1)
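
You can verify this with a quick shape check (a minimal sketch; the [378, 2] target shape is my assumption based on the error message):

import torch

target_labels = torch.randint(0, 6, (378, 2))  # stand-in target with an extra column
print(target_labels.view(-1).shape)  # torch.Size([756]) -> matches the reported target batch_size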

Since you are using nn.CrossEntropyLoss as the criterion, the target should have the shape [batch_size] and contain class indices in the range [0, nb_classes-1] for a multi-class classification use case.
Based on the target_labels shape, I assume your target has the shape [batch_size, 2] before the view operation is applied, which is wrong.
If the target is one-hot encoded, use target = torch.argmax(target, dim=1) to create the expected shape.
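
Applied to your training loop, the fix would look roughly like this (a sketch, not tested against your data; I dropped the //2 remapping for clarity, so re-apply it after the argmax if your raw labels need it):

vals = model(inputs)                                 # [batch_size, 3]
if target_labels.dim() > 1:                          # e.g. one-hot or multi-column targets
    target_labels = torch.argmax(target_labels, dim=1)
loss = loss_criterion(vals, target_labels)           # target now has shape [batch_size]

Also double-check the iloc[index, 1:] slicing in MyDataset.__getitem__: if labeldf holds an index column plus a single label column, then iloc[index, 1] (a single column, not a slice) would already give you one class index per sample.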