DataLoader NotImplemented Error

Dataloader code:

class HRFDataset(Dataset):
    def __init__(self, image_paths, target_paths, train=True):
        self.image_paths = image_paths
        self.target_paths = target_paths

    def transform(self, image, mask):
    # Resize

        imageTransform= transforms.Compose([
        transforms.Resize(size=(256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # imagenet
        ])

        maskTransform= transforms.Compose([
        transforms.Resize(size=(256, 256)),
        transforms.ToTensor()
        ])

        image= imageTransform(image)
        mask= maskTransform(mask)

        return image, mask

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        x, y = self.transform(image, mask)
        return x, y

Initializing Dataset

# Creating training paths for dataset
extra_val="0"
train_dir="./dataset/images/"
mask_dir="./dataset/mask/"
train_array=[]
val_array=[]
for i in range(1,11):
    if(i<10):
        train_array.append(train_dir+extra_val+str(i)+"_dr.JPG")
        train_array.append(train_dir+extra_val+str(i)+"_g.jpg")
        train_array.append(train_dir+extra_val+str(i)+"_h.jpg")
    else:
        train_array.append(train_dir+str(i)+"_dr.JPG")
        train_array.append(train_dir+str(i)+"_g.jpg")
        train_array.append(train_dir+str(i)+"_h.jpg")

for i in range(1,11):
    if(i<10):
        val_array.append(mask_dir+extra_val+str(i)+"_dr_mask.tif")
        val_array.append(mask_dir+extra_val+str(i)+"_g_mask.tif")
        val_array.append(mask_dir+extra_val+str(i)+"_h_mask.tif")
    else:
        val_array.append(mask_dir+str(i)+"_dr_mask.tif")
        val_array.append(mask_dir+str(i)+"_g_mask.tif")
        val_array.append(mask_dir+str(i)+"_h_mask.tif")

# Creating Validation paths dataset



valid_image=[]
valid_mask=[]
for i in range(1,11):
    if(i<10):
        valid_image.append(train_dir+extra_val+str(i)+"_dr.JPG")
        valid_image.append(train_dir+extra_val+str(i)+"_g.jpg")
        valid_image.append(train_dir+extra_val+str(i)+"_h.jpg")
    else:
        valid_image.append(train_dir+str(i)+"_dr.JPG")
        valid_image.append(train_dir+str(i)+"_g.jpg")
        valid_image.append(train_dir+str(i)+"_h.jpg")

for i in range(1,11):
    if(i<10):
        valid_mask.append(mask_dir+extra_val+str(i)+"_dr_mask.tif")
        valid_mask.append(mask_dir+extra_val+str(i)+"_g_mask.tif")
        valid_mask.append(mask_dir+extra_val+str(i)+"_h_mask.tif")
    else:
        valid_mask.append(mask_dir+str(i)+"_dr_mask.tif")
        valid_mask.append(mask_dir+str(i)+"_g_mask.tif")
        valid_mask.append(mask_dir+str(i)+"_h_mask.tif")

train_set= HRFDataset(train_array,val_array)
val_set= HRFDataset(valid_image,valid_mask)
print train_set

Output of print statement:
Out[45]: <main.HRFDataset object at 0x7fe7d659d550>

Implementing Dataloader

from torch.utils.data import Dataset, DataLoader

image_datasets = {
    'train': train_set, 'val': val_set
}

batch_size = 10

dataloaders = {
    'train': DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0),
    'val': DataLoader(val_set, batch_size=batch_size, shuffle=True, num_workers=0)
}

dataset_sizes = {
    x: len(image_datasets[x]) for x in image_datasets.keys()
}

dataset_sizes

But at this stage i am getting an error:

NotImplementedError Traceback (most recent call last)
in ()
8
9 dataloaders = {
—> 10 ‘train’: DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0),
11 ‘val’: DataLoader(val_set, batch_size=batch_size, shuffle=True, num_workers=0)
12 }

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/dataloader.pyc in init(self, dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn)
800 if sampler is None:
801 if shuffle:
–> 802 sampler = RandomSampler(dataset)
803 else:
804 sampler = SequentialSampler(dataset)

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/sampler.pyc in init(self, data_source, replacement, num_samples)
58
59 if self.num_samples is None:
—> 60 self.num_samples = len(self.data_source)
61
62 if not isinstance(self.num_samples, int) or self.num_samples <= 0:

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/dataset.pyc in len(self)
18
19 def len(self):
—> 20 raise NotImplementedError
21
22 def add(self, other):

NotImplementedError:

Can’t seem to find the problem. Looking forward to quick help.

i resolved the upper given problem by making shuffle bool false in DataLoader function. But now i am getting following error.

NotImplementedError                       Traceback (most recent call last)
<ipython-input-54-2dda8680dbda> in <module>()
     13 
     14 dataset_sizes = {
---> 15     x: len(image_datasets[x]) for x in image_datasets.keys()
     16 }
     17 

<ipython-input-54-2dda8680dbda> in <dictcomp>((x,))
     13 
     14 dataset_sizes = {
---> 15     x: len(image_datasets[x]) for x in image_datasets.keys()
     16 }
     17 

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/dataset.pyc in __len__(self)
     18 
     19     def __len__(self):
---> 20         raise NotImplementedError
     21 
     22     def __add__(self, other):

NotImplementedError: 

on following code:

from torch.utils.data import Dataset, DataLoader

image_datasets = {
    'train': train_set, 'val': val_set
}

batch_size = 10

dataloaders = {
    'train': DataLoader(train_set, batch_size=batch_size, shuffle=False, num_workers=0),
    'val': DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=0)
}

dataset_sizes = {
    x: len(image_datasets[x]) for x in image_datasets.keys()
}

dataset_sizes

Edit 1
Resolved this error by making image_datasets[x] to image_datasets[x].image_paths in the third last statement

I can’t fetch batch from dataloader now.

inputs, masks = next(iter(dataloaders['train']))

I am getting the following error:

NotImplementedError                       Traceback (most recent call last)
<ipython-input-19-fbb099c4b6b9> in <module>()
     13 # Get a batch of training data
     14 
---> 15 inputs, masks = next(iter(dataloaders['train']))
     16 
     17 print(inputs.shape, masks.shape)

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/dataloader.pyc in __iter__(self)
    817 
    818     def __iter__(self):
--> 819         return _DataLoaderIter(self)
    820 
    821     def __len__(self):

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/dataloader.pyc in __init__(self, loader)
    582             # prime the prefetch loop
    583             for _ in range(2 * self.num_workers):
--> 584                 self._put_indices()
    585 
    586     def __len__(self):

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/dataloader.pyc in _put_indices(self)
    644     def _put_indices(self):
    645         assert self.batches_outstanding < 2 * self.num_workers
--> 646         indices = next(self.sample_iter, None)
    647         if indices is None:
    648             return

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/sampler.pyc in __iter__(self)
    158     def __iter__(self):
    159         batch = []
--> 160         for idx in self.sampler:
    161             batch.append(idx)
    162             if len(batch) == self.batch_size:

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/sampler.pyc in __iter__(self)
     32 
     33     def __iter__(self):
---> 34         return iter(range(len(self.data_source)))
     35 
     36     def __len__(self):

/home/gul/anaconda2/lib/python2.7/site-packages/torch/utils/data/dataset.pyc in __len__(self)
     18 
     19     def __len__(self):
---> 20         raise NotImplementedError
     21 
     22     def __add__(self, other):

NotImplementedError: 

Okay. So all the problems above mentioned are solved by implementing len(self) function in the dataloader. My final dataloader is this.

class HRFDataset(Dataset):
    def __init__(self, image_paths, target_paths, train=True):
        self.image_paths = image_paths
        self.target_paths = target_paths

    def transform(self, image, mask):
    # Resize

        imageTransform= transforms.Compose([
        transforms.Resize(size=(256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # imagenet
        ])

        maskTransform= transforms.Compose([
        transforms.Resize(size=(256, 256)),
        transforms.ToTensor()
        ])

        image= imageTransform(image)
        mask= maskTransform(mask)

        return image, mask

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        x, y = self.transform(image, mask)
        return x, y
    def __len__(self):
    
        return len(self.image_paths)


@Gul_Zain It was nice reading your soliloquy, but I really loved it. And above anything, it solved the same issue i had. Though I also found out that this tutorial on DataLoader class says about the len function. OMG, if only pytorch had good documentation and tutorials which explicitly mentions this. Even if it does, I somehow can never find one when I need one. Its like these important things are hidden somewhere deep inside a broad “intro to pytorch” tutorial…

Thank you. Yes searching for bugs can be really difficult. I hope what you said wasn’t sarcasm.

The document is quite clear…
version: torch1.0.1
In:
print(torch.utils.data.Dataset.doc)
Out:

An abstract class representing a Dataset.

All other datasets should subclass it. All subclasses should override
``__len__``, that provides the size of the dataset, and ``__getitem__``,
supporting integer indexing in range from 0 to len(self) exclusive.

One can also use predefined subclasses of torch.utils.data.Dataset, e.g. tensorDataset, IterableDataset, all available after 1.0.1