class Mydata(data.Dataset):
    """Paired image / segmentation-mask dataset.

    Loads image filenames from ``root_dir`` and mask filenames from
    ``seg_dir`` and pairs them by sorted index, so the two directories
    must contain matching, identically-ordered file sets.

    Args:
        root_dir: directory containing the training images.
        seg_dir: directory containing the segmentation masks.
        transforms: optional callable applied to BOTH the image and the
            mask (e.g. a torchvision ``Compose``).
    """

    def __init__(self, root_dir, seg_dir, transforms=None):
        # Bug fix: the original hardcoded 'training path'/'label' and
        # silently ignored the constructor arguments.
        self.root_dir = root_dir
        self.seg_dir = seg_dir
        self.transforms = transforms
        # Sort both listings: os.listdir order is arbitrary, and the
        # image/mask pairing below relies on matching indices.
        self.files = sorted(os.listdir(self.root_dir))
        self.labels = sorted(os.listdir(self.seg_dir))
        # Backward-compatible alias for the original (misspelled) attribute.
        self.lables = self.labels

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        img = Image.open(os.path.join(self.root_dir, self.files[idx]))
        label = Image.open(os.path.join(self.seg_dir, self.labels[idx]))
        # NOTE(review): applying the same pipeline to image and mask is only
        # safe for deterministic, geometry-preserving transforms; random
        # augmentations will desynchronize the pair, and an interpolating
        # Resize / ToTensor rescaling can corrupt integer mask labels —
        # confirm against the transform pipeline actually used.
        if self.transforms:
            img = self.transforms(img)
            label = self.transforms(label)
        return img, label
# Build the full dataset, then split it 80/20 into train/validation subsets.
# NOTE(review): the trailing space in '/training ' looks accidental — confirm
# the real path. Also note the Mydata constructor shown above ignores these
# two path arguments and uses its own hardcoded strings — verify intent.
# NOTE(review): this pipeline (Resize + ToTensor) is applied to both images
# and masks; ToTensor rescales pixel values to [0, 1] and Resize interpolates,
# which is usually wrong for integer-valued segmentation masks — confirm.
full_dataset = Mydata('/training ',
'label',
transforms=tfms.Compose([tfms.Resize((128,128)),tfms.ToTensor(),
]))
# 80% of the samples go to training; the remainder to validation.
train_size = int(0.8 * len(full_dataset))
val_size = len(full_dataset) - train_size
# random_split shuffles indices once and returns two Subset views of
# full_dataset. Because both subsets share the SAME underlying dataset, any
# transform set here runs on validation samples too — random augmentation
# should instead be configured per-split (e.g. two dataset instances with
# different transforms) AFTER deciding the split.
train_dataset, val_dataset = torch.utils.data.random_split(full_dataset, [train_size, val_size])
In the code above I am trying to do data augmentation / affine transforms.
I do not know whether they are the same thing or not.
How can I do it? Is it supposed to be done after dividing the data into validation and training sets, or before?