Feeding dataloader batch to model is not working

Hi, I am trying to load my custom dataset with a DataLoader and a custom collate_fn to train a neural network, but I get an error saying the input should be a tensor, not a list:

def my_collate(batch):
    data = [item[0] for item in batch]
    target = [item[1] for item in batch]
    return [data, target]

class bsds_dataset(Dataset):
    def __init__(self, ds_main, ds_energy):
        self.dataset1 = ds_main
        self.dataset2 = ds_energy
    
    def __getitem__(self, index):
        x1 = self.dataset1[index]
        x2 = self.dataset2[index]
        
        return x1, x2
    
    def __len__(self):
        return len(self.dataset1)

generic_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.ToPILImage(),
    #transforms.CenterCrop(size=128),
    #transforms.Lambda(lambda x: myimresize(x, (128, 128))),
    transforms.ToTensor(),
    #transforms.Normalize((0., 0., 0.), (6, 6, 6))
])

original_imagefolder = './images/whole'
target_imagefolder = './results/whole'

original_ds = ImageFolder(original_imagefolder, transform=generic_transform)
energy_ds = ImageFolder(target_imagefolder, transform=generic_transform)

dataset = bsds_dataset(original_ds, energy_ds)
loader = DataLoader(dataset, batch_size=16, collate_fn=my_collate)

epochs = 2
model = UNet(1, depth=5, merge_mode='concat')
model.cuda()
loss = torch.nn.MSELoss()
criterion_pixelwise = torch.nn.L1Loss()

loss.cuda()
criterion_pixelwise.cuda()

optimizer = optim.SGD(model.parameters(), lr=0.001)

Tensor = torch.cuda.FloatTensor

for epoch in range(epochs):
    for i, batch in enumerate(loader):
        original, target = batch
        out = model(original)

I have written the dataset and the collate_fn, but I don't know how to access each instance of the batch.

The error:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-147-d1dea1bc00f8> in <module>
     15     for i, batch in enumerate(loader):
     16         original, target = batch
---> 17         out = model(original)

C:\Anaconda3\envs\torchgpu\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    491             result = self._slow_forward(*input, **kwargs)
    492         else:
--> 493             result = self.forward(*input, **kwargs)
    494         for hook in self._forward_hooks.values():
    495             hook_result = hook(self, input, result)

<ipython-input-7-5f743c3455c4> in forward(self, x)
     89         # encoder pathway, save outputs for merging
     90         for i, module in enumerate(self.down_convs):
---> 91             x, before_pool = module(x)
     92             encoder_outs.append(before_pool)
     93 

C:\Anaconda3\envs\torchgpu\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    491             result = self._slow_forward(*input, **kwargs)
    492         else:
--> 493             result = self.forward(*input, **kwargs)
    494         for hook in self._forward_hooks.values():
    495             hook_result = hook(self, input, result)

<ipython-input-5-26a0f7e21ea6> in forward(self, x)
     14 
     15     def forward(self, x):
---> 16         x = F.relu(self.conv1(x))
     17         x = F.relu(self.conv2(x))
     18         before_pool = x

C:\Anaconda3\envs\torchgpu\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    491             result = self._slow_forward(*input, **kwargs)
    492         else:
--> 493             result = self.forward(*input, **kwargs)
    494         for hook in self._forward_hooks.values():
    495             hook_result = hook(self, input, result)

C:\Anaconda3\envs\torchgpu\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
    336                             _pair(0), self.dilation, self.groups)
    337         return F.conv2d(input, self.weight, self.bias, self.stride,
--> 338                         self.padding, self.dilation, self.groups)
    339 
    340 

TypeError: conv2d(): argument 'input' (position 1) must be Tensor, not list

Please help. Thanks a lot!

It would help if you posted your custom collate function my_collate :wink:

With that said, at the end of your my_collate, try calling torch.stack on the list of images (and on the targets as well), so the model receives batched tensors instead of Python lists.
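For reference, here is a minimal sketch of what that could look like for your setup. It assumes the code as posted: both underlying datasets are ImageFolders, which yield (image, class_index) tuples, so each batch item is a pair of such tuples with the image tensor at index 0. It also assumes all images share the same spatial size; torch.stack will fail otherwise (you may need to re-enable your CenterCrop / resize transform).

import torch

def my_collate(batch):
    # Each item is ((original_img, label), (energy_img, label)), since both
    # underlying ImageFolder datasets return (image, class_index) pairs.
    data = torch.stack([item[0][0] for item in batch])    # (B, C, H, W)
    target = torch.stack([item[1][0] for item in batch])  # (B, C, H, W)
    return data, target

Since the model lives on the GPU, you also need to move the batch there in the training loop, e.g. out = model(original.cuda()). Alternatively, you could likely drop my_collate entirely: the default collate_fn already stacks tensors and recurses into tuples, so it would batch the nested (image, label) pairs for you.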

It’s done, sorry. Would you please check it again?

Also answered here.