import glob
import os

from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms.functional as T


class RDataset(Dataset):
    def __init__(self, data_path, data_name, data_type, patch_size=None, length=None):
        super().__init__()
        self.data_name, self.data_type, self.patch_size = data_name, data_type, patch_size
        self.A_images = sorted(glob.glob('{}/{}/{}/rain/*.png'.format(data_path, data_name, data_type)))
        self.B_images = sorted(glob.glob('{}/{}/{}/norain/*.png'.format(data_path, data_name, data_type)))
        # the dataset length differs between training and testing:
        # training draws `length` samples, testing uses every image exactly once
        self.num = len(self.A_images)
        self.sample_num = length if data_type == 'train' else self.num

    def __len__(self):
        return self.sample_num

    def __getitem__(self, idx):
        # idx % self.num wraps indices beyond the number of real images
        # (possible during training, when sample_num > num) back onto them
        image_name = os.path.basename(self.A_images[idx % self.num])
        A = T.to_tensor(Image.open(self.A_images[idx % self.num]))
        B = T.to_tensor(Image.open(self.B_images[idx % self.num]))
        h, w = A.shape[1:]  # image height/width (currently unused)
        return A, B
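For context, this is roughly how I wrap it; the data_path, data_name, patch_size, and length values here are just made-up placeholders, not anything from the real code:

from torch.utils.data import DataLoader

# hypothetical arguments, only to show how the class is typically used;
# assumes all images share the same spatial size so default collation can stack a batch
train_set = RDataset(data_path='data', data_name='rain_dataset', data_type='train',
                     patch_size=64, length=2000)
train_loader = DataLoader(train_set, batch_size=10, shuffle=True)

for rain, norain in train_loader:
    # rain, norain: float tensors of shape (batch, 3, H, W)
    break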
I went through the above code and it works fine, but my confusion is: after wrapping the dataset class in DataLoader(RDataset, batch_size=10), how is the value of idx generated? My understanding is that idx will take values between 0 and self.num, so idx % self.num would always be zero, which would mean it always refers to the data sample at index 0. Is that right?
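To frame the question, here is a tiny stand-in dataset (purely illustrative, not part of the code above) that just returns idx, so I can see which indices the default DataLoader sampler passes to __getitem__:

from torch.utils.data import Dataset, DataLoader

class IndexProbe(Dataset):
    # returns the index itself, so each batch shows exactly what the sampler produced
    def __init__(self, n):
        self.n = n
    def __len__(self):
        return self.n
    def __getitem__(self, idx):
        return idx

loader = DataLoader(IndexProbe(25), batch_size=10, shuffle=False)
for batch in loader:
    print(batch)
# prints tensor([0, ..., 9]), tensor([10, ..., 19]), tensor([20, ..., 24]),
# i.e. idx ranges over 0 .. len(dataset) - 1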