The following is the demo code I used, but it raises the error "TypeError: img should be PIL Image. Got <class 'torch.Tensor'>". How can I solve this problem?
class MyDataset(torch.utils.data.Dataset):
    """Class-imbalanced subset of MNIST.

    Keeps only the first `imbal_class_counts[c]` samples of each digit class c
    and serves the *raw* `uint8` image tensors (28x28, values 0-255) from
    `dataset.data` — NOT the `ToTensor()`-normalized images, since `__getitem__`
    bypasses the wrapped dataset's transform pipeline.

    Each item is a 4-tuple: (raw_image, transformed_image, label, index).
    """

    def __init__(self, transform=None):
        """Download MNIST and build the imbalanced index subset.

        Args:
            transform: optional callable applied to the raw image tensor in
                `__getitem__`. NOTE: it receives a `uint8` Tensor, so classic
                PIL-based transforms need a `ToPILImage()` step first.
        """
        data_dir = ''
        # The Compose/ToTensor here only affects dataset[i] access, which this
        # class never uses; we read dataset.data / dataset.targets directly.
        dataset = datasets.MNIST(data_dir, train=True, download=True,
                                 transform=transforms.Compose([
                                     transforms.ToTensor(),
                                 ]))

        targets = dataset.targets
        classes, _class_counts = np.unique(targets, return_counts=True)
        nb_classes = len(classes)

        # Desired number of samples to keep per digit class 0..9.
        imbal_class_counts = [1182, 391, 324, 344, 1000, 466, 935, 673, 272, 369]

        # For each class, take the first `count` occurrences, then flatten.
        class_indices = [np.where(targets == i)[0] for i in range(nb_classes)]
        imbal_class_indices = np.hstack([
            class_idx[:count]
            for class_idx, count in zip(class_indices, imbal_class_counts)
        ])

        # Fancy indexing returns new tensors, so no deepcopy of the dataset
        # is needed (the original code deep-copied it twice, redundantly).
        self.target = dataset.targets[imbal_class_indices]
        self.data = dataset.data[imbal_class_indices]
        self.transform = transform

    def __getitem__(self, index):
        """Return (raw_image, transformed_image, label, index).

        Uses locals only: the original code assigned `self.target =
        self.target[index]`, which destroyed the label tensor after the first
        access, and left `self.data2` undefined when transform was None.
        """
        img = self.data[index]
        label = self.target[index]
        transformed = self.transform(img) if self.transform is not None else img
        return img, transformed, label, index

    def __len__(self):
        return len(self.data)
# Fix for the reported TypeError: MyDataset hands the transform a raw uint8
# torch.Tensor (from dataset.data), but RandomRotation expects a PIL Image.
# Convert to PIL first, then back to a float tensor so the DataLoader can
# collate the batch.
transform2 = transforms.Compose([
    transforms.ToPILImage(),       # tensor (H, W) uint8 -> PIL Image
    transforms.RandomRotation(30),
    transforms.ToTensor(),         # PIL Image -> float tensor (1, H, W)
])

new_dataset = MyDataset(transform=transform2)

dataloader = torch.utils.data.DataLoader(
    new_dataset,
    num_workers=1,
    batch_size=60,
    shuffle=True,
)

# `iter(...).next()` was removed; use the builtin next() instead.
data1, data2, target, index = next(iter(dataloader))