This is my first time trying to implement a custom data loader. I was wondering: are there any issues with applying transforms to the input data lazily, i.e. only when PyTorch’s data-loading machinery actually requests an image? Or do all of the transformations have to be applied up front, with the results stored? Or are the transforms applied only to each batch, so that memory doesn’t become an issue?
class CustomDataLoader(Dataset):
    """Map-style Dataset that loads one image from disk per __getitem__ call.

    Transforms are applied lazily, per sample, at fetch time — this is the
    standard PyTorch pattern: only the samples of the current batch are ever
    in memory, so there is no need to pre-transform and store the whole set.

    Args:
        img_paths: sequence of filesystem paths to image files.
        transforms: optional callable applied to each loaded image
            (e.g. a torchvision transform pipeline). If None, the raw
            cv2 image array is returned.
    """

    def __init__(self, img_paths, transforms=None):
        self.img_paths = img_paths
        # Bug fix: the original never stored the transform, then referenced
        # the bare name `transforms` in __getitem__ (out of scope there).
        self.transforms = transforms

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        # Use a local variable, not an instance attribute: storing the
        # current image on `self` is shared mutable state that breaks with
        # num_workers > 0 and serves no purpose.
        img = cv2.imread(self.img_paths[idx])
        if img is None:
            # cv2.imread returns None (no exception) for missing/corrupt
            # files — fail loudly instead of passing None to the transform.
            raise FileNotFoundError(
                f"cv2.imread failed for: {self.img_paths[idx]}"
            )
        if self.transforms is not None:
            img = self.transforms(img)
        return img