I have an image dataset. When I iterate over my DataLoader, I get the following error:
  File "resnet_cub.py", line 138, in test
    for i, (input_x, target) in enumerate(val_loader):
  File "/root/Util/miniconda/envs/py3.5/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 179, in __next__
    batch = self.collate_fn([self.dataset[i] for i in indices])
  File "/root/Util/miniconda/envs/py3.5/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 109, in default_collate
    return [default_collate(samples) for samples in transposed]
  File "/root/Util/miniconda/envs/py3.5/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 109, in <listcomp>
    return [default_collate(samples) for samples in transposed]
  File "/root/Util/miniconda/envs/py3.5/lib/python3.5/site-packages/torch/utils/data/dataloader.py", line 112, in default_collate
    .format(type(batch[0]))))
TypeError: batch must contain tensors, numbers, dicts or lists; found <class 'PIL.Image.Image'>
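As far as I can tell, the collate step is choking on the raw PIL image that my __getitem__ returns. Here is a small repro I wrote to check that (my own sketch, not part of my script; it only assumes torch and PIL are installed):

import torch
from PIL import Image
from torch.utils.data.dataloader import default_collate

# Two samples that are already tensors: default_collate stacks them fine.
tensor_batch = [torch.zeros(3, 8, 8), torch.zeros(3, 8, 8)]
print(default_collate(tensor_batch).size())   # -> (2, 3, 8, 8)

# Two samples that are still PIL images: this raises the same TypeError
# as in my traceback, because default_collate cannot stack PIL images.
pil_batch = [Image.new('RGB', (8, 8)), Image.new('RGB', (8, 8))]
default_collate(pil_batch)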
My dataloader code is this:
import os

import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader


class MyDataset(Dataset):
    def __init__(self, root, set, transform=None, target_transform=None):
        # Each line of train1.txt / test1.txt is "<image filename> <integer label>".
        if set == 'train':
            fname = os.path.join(root, 'train1.txt')
        elif set == 'test':
            fname = os.path.join(root, 'test1.txt')
        fn = open(fname, 'r')
        imgs = []
        for line in fn.readlines():
            words = line.split()
            imgs.append((words[0], int(words[1])))
        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = Image.open(os.path.join(self.root, 'images', fn)).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, torch.Tensor(label)

    def __len__(self):
        return len(self.imgs)
def test():
    val_dataset = MyDataset(dataroot, 'train')
    val_loader = DataLoader(dataset=val_dataset, batch_size=8, shuffle=False)
    pre_label = []
    for i, (input_x, target) in enumerate(val_loader):
        input_var = torch.autograd.Variable(input_x)
        target_var = torch.autograd.Variable(target)
        print('.....')
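For completeness, this is how I would construct the loader with a transform if the missing ToTensor() conversion is indeed the problem (a sketch only, assuming torchvision is installed and using the same dataroot as in test(); I have not confirmed this is the right fix):

import torchvision.transforms as transforms
from torch.utils.data import DataLoader

# Resize so every image has the same shape (otherwise they cannot be stacked
# into one batch), then ToTensor() to turn each PIL image into a FloatTensor.
val_transform = transforms.Compose([
    transforms.Resize((224, 224)),   # called transforms.Scale in very old torchvision
    transforms.ToTensor(),
])

val_dataset = MyDataset(dataroot, 'train', transform=val_transform)
val_loader = DataLoader(dataset=val_dataset, batch_size=8, shuffle=False)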