Why i have "RuntimeError: each element in list of batch should be of equal size"

Hy all. i have this train class

import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import StepLR
import numpy as np
from dataset import Dataset
from net import *
from torch.utils.data import DataLoader
from torchnet.meter import AverageValueMeter
from torchnet.logger import VisdomPlotLogger, VisdomSaver


def train(model, train_loader, valid_loader, exp_name = "MLP121",  lr=0.00001, epochs=1000, wd = 0.000001):

    criterionX = nn.MSELoss()
    criterionZ = nn.MSELoss()
    optimizer = Adam(params=model.parameters(),lr = lr, weight_decay=wd)
    scheduler = StepLR(optimizer, step_size=100, gamma=0.5)#per ogni 100 epochs, lr si divide per due

    # meters
    lossX_meter = AverageValueMeter()
    lossZ_meter = AverageValueMeter()
    lossT_meter = AverageValueMeter()

    # device
    device = "cuda" if torch.cuda.is_available() else "cpu"

    model.to(device)
    loader = {"train": train_loader, "test": valid_loader}


    loss_X_logger = VisdomPlotLogger('line', env=exp_name, opts={'title': 'LossX', 'legend': ['train', 'test']})
    loss_Z_logger = VisdomPlotLogger('line', env=exp_name, opts={'title': 'LossZ', 'legend': ['train', 'test']})
    loss_T_logger = VisdomPlotLogger('line', env=exp_name, opts={'title': 'Total_Loss', 'legend': ['train', 'test']})
    visdom_saver = VisdomSaver(envs=[exp_name])

    last_best_loss = np.inf #for early stopping for best model
    for e in range(epochs):
        for mode in ["train", "test"]:

            lossX_meter.reset()
            lossZ_meter.reset()
            lossT_meter.reset()
            model.train() if mode == "train" else model.eval()
            with torch.set_grad_enabled(mode == "train"):  # abilitiamo i gradienti in training

                for i, batch in enumerate(loader[mode]):
                    x = batch["Array"].to(device)
                    dx = batch['Movement'][:,0].float().to(device)
                    dz = batch['Movement'][:,1].float().to(device)

                    output = model(x)


                    #out1, out2 = model(x)
                    out1, out2 = output[:,0], output[:,1]

                    #out2 = out2 / torch.sqrt((out2 ** 2).sum(1)).view(-1, 1)

                    l1 = criterionX(out1, dx)
                    l2 = criterionZ(out2, dz)
                    loss = l1+l2

                    if mode == "train":
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                    else:
                        if loss < last_best_loss:
                            torch.save(model.state_dict(), 'Best_%s.pth' % exp_name)
                            last_best_loss = loss



                    n = x.shape[0]  # numero di elementi nel batch


                    lossX_meter.add(l1.item() * n, n)#update meter to ploot
                    lossZ_meter.add(l2.item() * n, n)
                    lossT_meter.add(loss.item()* n, n)


                    if mode == "train":

                        loss_X_logger.log(e + (i + 1) / len(loader[mode]), lossX_meter.value()[0], name=mode)
                        loss_Z_logger.log(e + (i + 1) / len(loader[mode]), lossZ_meter.value()[0], name=mode)
                        loss_T_logger.log(e + (i + 1) / len(loader[mode]), lossT_meter.value()[0], name=mode)

            loss_X_logger.log(e + (i + 1) / len(loader[mode]), lossX_meter.value()[0], name=mode)
            loss_Z_logger.log(e + (i + 1) / len(loader[mode]), lossZ_meter.value()[0], name=mode)
            loss_T_logger.log(e + (i + 1) / len(loader[mode]), lossT_meter.value()[0], name=mode)


        scheduler.step()

        #save visdom environment
        visdom_saver.save()

        # conserviamo solo l'ultimo modello sovrascrivendo i vecchi, salviamo anche il best di volta in volta
        torch.save(model.state_dict(), '%s.pth' % exp_name)



    return model





def start_all():

    model = DeepMLPRegressor(15, 2)



    #Define train dataset and loader
    #
    train_dataset = Dataset('../../Dataset/121/','121_Train.csv', 'raw/')
    valid_dataset = Dataset('../../Dataset/121/','121_Validation.csv', 'raw/')


    train_loader = DataLoader(train_dataset, batch_size=16, num_workers=2)
    valid_loader = DataLoader(valid_dataset, batch_size=16, num_workers=2)

    model_trained = train(model, train_loader, valid_loader, exp_name="MLP_121_interpolated_datas+Time", epochs=700)


And my dataset class:

from torch.utils.data.dataset import Dataset
import numpy as np
import pandas as pd
from os import path
import torch

# CREAZIONE DELLA CLASSE CHE CI PERMETTE DI CARICARE LE FEATURES

class Dataset(Dataset):

    """
    Carica il dataset per l'eseprimento  che ci permette di caricare le feature
    """

    def __init__(self, base_path, csv_Name, pathNPArray):

        """Input:
            dataset CSV
        """
        self.base_path = base_path
        path_finale = path.join(base_path, csv_Name)
        self.pathNPArry = pathNPArray
        self.file = pd.read_csv(path_finale)

    def __getitem__(self, index):
        names = []
        array = torch.empty(15,1)
        coordinateX = 0
        coordinateY = 0
        if index == 0 or index <3:
            idx, name, coordinateX, coordinateY = self.file.iloc[index]
            name = int(name)
            names.append(name)
            array1 = torch.from_numpy(np.load(self.base_path +  self.pathNPArry + str(name) + ".npy").astype(np.float32))
            array2 = torch.from_numpy(np.load(self.base_path +  self.pathNPArry + str(name) + ".npy").astype(np.float32))
            array3 = torch.from_numpy(np.load(self.base_path +  self.pathNPArry + str(name) + ".npy").astype(np.float32))

            array = torch.cat((array1, array2, array3))

        elif index == len(self.file)-1  or (len(self.file)-1 > index  and index <= len(self.file)-4):
            _, name, coordinateX, coordinateY = self.file.iloc[index]
            name = int(name)
            names.append(name)
            array1 = torch.from_numpy(np.load(self.base_path + self.pathNPArry + str(name) + ".npy").astype(np.float32))
            array2 = torch.from_numpy(np.load(self.base_path + self.pathNPArry + str(name) + ".npy").astype(np.float32))
            array3 = torch.from_numpy(np.load(self.base_path + self.pathNPArry + str(name) + ".npy").astype(np.float32))
            array = torch.cat((array1, array2, array3))

        elif index == 3933:
            _, name1, coordinateX1, coordinateY1 = self.file.iloc[index]
            _, name2, coordinateX2, coordinateY2 = self.file.iloc[index+1]
            _, name3, coordinateX3, coordinateY3 = self.file.iloc[index+2]
            name1 = int(name1)
            name2 = int(name2)
            name3 = int(name3)
            names.append(name1)
            names.append(name2)
            names.append(name3)
            array1 = torch.from_numpy(np.load(self.base_path + self.pathNPArry + str(name1) + ".npy").astype(np.float32))
            array2 = torch.from_numpy(np.load(self.base_path + self.pathNPArry + str(name2) + ".npy").astype(np.float32))
            array3 = torch.from_numpy(np.load(self.base_path + self.pathNPArry + str(name3) + ".npy").astype(np.float32))
            array = torch.cat((array1, array2, array3))
            coordinateX = np.mean([coordinateX1, coordinateX2, coordinateX3])
            coordinateY = np.mean([coordinateY1, coordinateY2, coordinateY3])

        else:
            pass
        return {"ID":names, "Array": array,'Movement': np.array([coordinateX, coordinateY], dtype= 'float')}


    def __len__(self):
        return len(self.file)

This is my net class:

from torch import nn
class DeepMLPRegressor(nn.Module):
    def __init__(self, in_features, out_features):
        """Costruisce un regressore MLP.
            Input:
                in_features: numero di feature in input
                out_classes: numero di feature in uscita
                hidden_units: numero di unità nel livello nascosto """
        super(DeepMLPRegressor, self).__init__()

        self.model = nn.Sequential(nn.Linear(in_features, 32),
                                   nn.ReLU(),
                                   nn.Linear(32, 64),
                                   nn.ReLU(),
                                   nn.Linear(64, 128),
                                   nn.ReLU(),
                                   nn.Linear(128, 64),
                                   nn.ReLU(),
                                   nn.Linear(64, 32),
                                   nn.ReLU(),
                                   nn.Linear(32, out_features))

    def forward(self, x):
        return self.model(x)

I have a dataset with numpy arrays of 5 element each one.
i would load three array as one element and i have this error.

Every array contains 5 element and i 'm sure because in previous experiments (when in dataloader i loaded one single array ) i had no runtime error.

can you help me? Please :slight_smile:

1 Like

Solved with

elif index == len(self.file)-1  or (index < len(self.file)  and index > len(self.file)-3):

in class dataset.