How can I fix the following error?
TypeError Traceback (most recent call last)
in ()
86 ])
87
—> 88 train_dataset = data_transforms(train_dataset)
89 class_names = train_dataset.classes
90 print(class_names)
/home/test/.local/lib/python3.6/site-packages/torchvision/transforms/transforms.py in call(self, img)
59 def call(self, img):
60 for t in self.transforms:
—> 61 img = t(img)
62 return img
63
/home/test/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/home/test/.local/lib/python3.6/site-packages/torchvision/transforms/transforms.py in forward(self, img)
302 PIL Image or Tensor: Rescaled image.
303 “”"
→ 304 return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
305
306 def repr(self):
/home/test/.local/lib/python3.6/site-packages/torchvision/transforms/functional.py in resize(img, size, interpolation, max_size, antialias)
417 )
418 pil_interpolation = pil_modes_mapping[interpolation]
→ 419 return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)
420
421 return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size, antialias=antialias)
/home/test/.local/lib/python3.6/site-packages/torchvision/transforms/functional_pil.py in resize(img, size, interpolation, max_size)
231
232 if not _is_pil_image(img):
→ 233 raise TypeError(‘img should be PIL Image. Got {}’.format(type(img)))
234 if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))):
235 raise TypeError(‘Got inappropriate size arg: {}’.format(size))
TypeError: img should be PIL Image. Got main.Data
Code
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import torch
import time
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic(‘matplotlib inline’)
from torchvision.io import read_image
from torchvision import datasets, transforms
from torchvision import datasets
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from glob import glob
torch.cuda.is_available()
if torch.cuda.is_available():
dev = “cuda:0”
else:
dev = “cpu”
device = torch.device(dev)
a = torch.zeros(4,3)
a = a.to(device)
class Data(Dataset):
def init(self, data=‘Train’ ):
self.data=data
self.CsvPath=’/home/test/PHOENIX-2014-T-release-v3/PHOENIX-2014-T/annotations/manual/PHOENIX-2014-T.’+(self.data).lower()+’.corpus.csv’
self.Csv=pd.read_csv(self.CsvPath,sep=’|’)
self.Datapath=’/home/test/PHOENIX-2014-T-release-v3/PHOENIX-2014-T/features/fullFrame-210x260px/’+(self.data).lower()+’/’
def __len__(self):
return len(self.Csv)
def getVideo(self,idx):
paths=glob(self.Datapath+self.Csv.video.values[idx].split('/')[0]+'/*.png', recursive = True)
Images = [torch.unsqueeze(read_image(i),-1) for i in paths[::2]]
out=torch.concat(Images,-1).transpose(0, -1)[: ,::2,::2,:]
return out
def __getitem__(self, idx,maxi=100):
video= self.getVideo(idx)
return video
train_dataset = Data(‘Train’)
test_dataset = Data(‘Test’)
#Load the dataset in batches
#create dataset
batch_size = 100000
img_height = 180
img_width = 180
def imshow(inp, title=None):
“”“Imshow for Tensor.”""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=(15,15))
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
data_transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
train_dataset = data_transforms(train_dataset)
class_names = train_dataset.classes
print(class_names)
dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=5,shuffle=True)
for i in range(1):
inputs, classes = next(iter(dataloader))
out = torchvision.utils.make_grid(inputs) # inputs: 5, 3, 224, 224 ---> out: 3, 228, 1132
imshow(out, title=[class_names[x] for x in classes])
test_dataset = data_transforms(test_dataset)
class_names = test_dataset.classes
print(class_names)
dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=5,shuffle=True)