Help understanding the effect of the batch_size value in a DataLoader

Hello everyone, I have the following code:

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
# custom modules providing layer.LinearMax/LinearMin, pm.NeuralNet and normalisation() are omitted here

class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.input_size = input_size
        self.max = layer.LinearMax(1, input_size)
        self.min = layer.LinearMin(1, input_size)
        self.l1 = nn.Linear(2, 64)#, dtype=torch.complex64)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(64, 64)
        self.l3 = nn.Linear(64, 1)#, dtype=torch.complex64)

    def forward(self, x):
        maximum_out = self.max(x)
        minimum_out = self.min(x)

        concatenate = torch.cat((maximum_out, minimum_out))
        
        out = self.l1(concatenate)
        
        out = self.relu(out.real)# + 1j * self.relu(out.imag)
        out = self.l2(out)
        out = self.relu(out)
        out = self.l3(out)

        return out

class MyDataset(Dataset):
    def __init__(self, data,rul, window):
        self.data = data
        self.rul = rul
        self.window = window

    def __getitem__(self, index):
        x = torch.from_numpy(self.data[index:index+self.window]).to(torch.float32)
        y = torch.tensor(np.min(self.rul[index:index+self.window])).to(torch.float32)
        return x, y

    def __len__(self):
        return len(self.data) - self.window




def training_loop_pytorch(train_loader, model, num_epochs, learning_rate):
    n_total_steps = len(train_loader)
    # loss and optimizer
    criterion = nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training loop
    for epoch in range(num_epochs):
        for i, (vibration, rul) in enumerate(train_loader):
            X = vibration
            y = rul

            # forward pass and loss
            y_predicted = model(X)
            loss = criterion(y_predicted, y)

            # backward pass
            loss.backward()

            # update
            optimizer.step()
            optimizer.zero_grad()

            if (i + 1) % 10000 == 0:
                print(f'epoch {epoch+1}/{num_epochs}, step {i+1}/{n_total_steps}, loss={loss.item():.4f}')

    return model

def leave_one_out(vibration_list, rul_list):
    value_for_rul_normalization = 50000
    prediction_list_custom = []
    input_size = 500
    model = pm.NeuralNet(input_size, 64, 1)
    num_epochs = 1
    learning_rate = 0.001
    #batch_size_value = 1
    for i in range(len(vibration_list)):
        print("Cross validation: {}".format(i + 1))

        test = vibration_list[i]
        print(test.shape)
        dataset_test = MyDataset(test, rul_list[i], 500)
        test_loader = DataLoader(dataset=dataset_test, shuffle=False)

        # Build the training set from every run except the held-out one
        train = []
        train_rul = []
        for k in range(6):
            if k != i:
                train.extend(vibration_list[k])
                train_rul.extend(rul_list[k])

        # Extract the mean and the std to normalize the data
        train_mean = np.mean(train)
        train_std = np.std(train)
        train_norm, rul_norm = normalisation(train, train_rul, train_mean, train_std, value_for_rul_normalization)

        dataset_train = MyDataset(train_norm, rul_norm, 500)
        train_loader = DataLoader(dataset=dataset_train, shuffle=True)
        model_trained = training_loop_pytorch(train_loader, model, num_epochs, learning_rate)

        with torch.no_grad():
            output_list = []
            for vibration, labels in test_loader:
                outputs = model_trained(vibration)
                output_list.append(outputs.numpy())
            reconstructed_signal = []
            for iteration in range(len(output_list)):
                reconstructed_signal.extend(output_list[iteration])
        prediction_list_custom.append(reconstructed_signal)
    return prediction_list_custom

This works without any warnings or errors. My question is the following: when I change the value of

batch_size_value = 1

to something else (for example 64), a UserWarning is raised (because the target size is 64 while the input size is 1). Is there a way to change the batch_size value without triggering this warning? Also, if possible, an explanation of how the DataLoader works beyond what the docs provide (I have trouble understanding them), or links explaining it, would be nice.
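
For clarity, the change I mean is just passing the (currently commented-out) batch_size_value to the two DataLoader calls above, roughly like this:

batch_size_value = 64
train_loader = DataLoader(dataset=dataset_train, batch_size=batch_size_value, shuffle=True)
test_loader = DataLoader(dataset=dataset_test, batch_size=batch_size_value, shuffle=False)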
Thanks

The warning is most likely raised by an operation that performs (unwanted) broadcasting, such as nn.MSELoss, when the model output and target do not have the same shape.
Check the model output shape with a batch size of 1 and >1 and make sure no dimensions are removed.
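
To expand a bit on how the batching works: the DataLoader draws batch_size indices, calls your Dataset's __getitem__ for each of them, and the default collate_fn stacks the returned tensors along a new dim0. In your case each target is a scalar, so with batch_size=64 the target batch has shape [64], while the model output would typically be [64, 1]; nn.L1Loss / nn.MSELoss will then broadcast the two and emit the warning. Here is a small self-contained sketch with dummy data and a plain nn.Linear standing in for your model (so the exact shapes from your custom layers may differ):

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

class DummySet(Dataset):
    # 256 samples, each a window of 500 values with a scalar target
    def __init__(self, n=256, window=500):
        self.x = torch.randn(n, window)
        self.y = torch.rand(n)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return self.x[index], self.y[index]   # shapes: [500] and scalar

loader = DataLoader(DummySet(), batch_size=64, shuffle=True)
model = nn.Linear(500, 1)                     # stand-in for the real model
criterion = nn.L1Loss()

x, y = next(iter(loader))
print(x.shape, y.shape)                       # torch.Size([64, 500]) torch.Size([64])

out = model(x)
print(out.shape)                              # torch.Size([64, 1])

# criterion(out, y) would broadcast [64, 1] against [64] to [64, 64]
# and raise the UserWarning about mismatched input/target sizes.

loss = criterion(out, y.unsqueeze(1))         # target is now [64, 1] -> no warning
print(loss.item())

Making the shapes match explicitly, e.g. via target.unsqueeze(1) or output.squeeze(1), silences the warning and computes the loss you actually intend.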

Hello, sorry for the late response. The error was caused by an operation in one of my custom layers. I fixed the custom layer, which resolved the error.

Thanks!