The output of torchvision.transforms is a batch of identical images turned grey, even though it is supposed to turn the image into a tensor. I have attached the generated output (left) and the reference image taken from the dataset (right). I tried looking on the forums but haven't found anything; I also tested it on Google Colab and in a Jupyter notebook with the same result. Is this how every image in PyTorch is transformed?
class Cityscapes(Dataset):
    """Dataset pairing images from ``train/`` with labels from ``val/``.

    Files are expected to be named ``1.jpg``, ``2.jpg``, ... (1-based index).
    ``__getitem__`` returns ``(transformed_image, original_image)``.
    """

    def __init__(self, path, transform=None):
        self.root_dir = path
        self.images = self.root_dir + r"train/"
        # NOTE(review): labels are loaded from "val/" — confirm this is intended;
        # segmentation labels usually live alongside the training images.
        self.labels = self.root_dir + r"val/"
        print(self.labels, self.images)
        self.transform = transform

    def __len__(self):
        # Bug fix: the original returned len(self.labels), which is the length
        # of the *path string*, not the dataset size. Count the label files.
        import os
        return len(os.listdir(self.labels))

    def __getitem__(self, index):
        # Dataset indices are 0-based, file names are 1-based.
        image = np.array(Image.open(self.images + str(index + 1) + '.jpg').convert('RGB'))
        # Bug fix: this line was missing two closing parentheses in the original
        # (a syntax error). The label is loaded but not returned — kept as-is.
        label = np.array(Image.open(self.labels + str(index + 1) + '.jpg'))
        temp = image  # keep the untransformed array to return as a reference
        if self.transform is not None:
            image = self.transform(image)
        return image, temp
# Bug fix: the Compose call was never closed (missing "])") — a syntax error.
# ToTensor converts a PIL Image / HxWxC uint8 ndarray to a CxHxW float tensor
# scaled to [0, 1].
t = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
])