The problem is solved by feeding the same seed
value before applying each Compose
of transforms.
def __getitem__(self, index):
    """Return the (image, mask) pair at *index* with paired random transforms.

    A single seed is drawn and re-applied immediately before the image
    pipeline and again before the target pipeline, so any random
    augmentation (flip, crop, rotation, ...) inside the two Compose
    chains makes identical choices for the image and its mask.

    NOTE(review): seeding only Python's `random` module synchronizes just
    the transforms that draw from it; torchvision transforms (>= 0.8)
    draw from torch's RNG instead, so `torch.manual_seed` is applied as
    well to keep those in sync too.
    """
    img = Image.open(self.data[index]).convert('RGB')
    target = Image.open(self.data_labels[index])

    # One seed for both pipelines; 2147483647 == 2**31 - 1 keeps it in
    # the positive int32 range accepted everywhere.
    seed = np.random.randint(2147483647)

    random.seed(seed)        # sync transforms built on Python's `random`
    torch.manual_seed(seed)  # sync transforms built on torch's RNG
    if self.transform is not None:
        img = self.transform(img)

    # Re-seed with the *same* value so the target pipeline repeats the
    # exact random decisions made for the image.
    random.seed(seed)
    torch.manual_seed(seed)
    if self.target_transform is not None:
        target = self.target_transform(target)

    # Segmentation masks are class-index maps: keep them as raw uint8
    # labels (ByteTensor), not normalized float images.
    target = torch.ByteTensor(np.array(target))
    return img, target
By the way, this approach works completely fine, but only for a subset of transforms.