Resizing dataset

Hi
I am trying to resize my images through transforms, but I get the error: “AttributeError: ‘Resize’ object has no attribute ‘resize’”

Here is my code:

class swe_dataset(Dataset):
def init(self, train, label,transform):
self.x_data=train
self.y_data=label
self.len = len(self.x_data)
self.transform=transform
#self.transform= transforms.Resize((120,120))

def __getitem__(self,index):
    sample= self.x_data[index], self.y_data[index]
    if self.transform is not None:
        sample=self.transform(sample)
        
    return sample

def __len__(self):
    return self.len

#######################################
class resize:
def call(self, sample):
inputs,targets = sample
inputs =transform.resize(inputs, (120, 120))
return inputs,targets
#######################################
train_set=swe_dataset(ts[0],tls[0],transform=resize())
#######################################
when I try to access the sample image by: im,_=next(iter(train_set)), I get the mentioned error. Could you help me to understand what is the problem in resizing? (because when I am not using transform everything works well)

I’m not sure, if you are passing the custom resize class as the transformation or torchvision.transforms.Resize.
However, transform.resize(inputs, (120, 120)) won’t work.
You could either create an instance of transforms.Resize or use the functional API:

torchvision.transforms.functional.resize(img, size, interpolation)
2 Likes

I solved the problem, I noticed that I was not transforming to tensor after resizing…so after changing my resize class, the problem was solved:

class resize:
    def __call__(self, sample):
        inputs,targets = sample
        inputs=transforms.ToPILImage()(inputs)
        inputs =torchvision.transforms.functional.resize(inputs, (120,120), interpolation=2)
        inputs=transforms.ToTensor()(inputs)
        return inputs,targets

but now I have another question…
why I got error when I was using transforms in my dataset class?

class swe_dataset(Dataset):
    def __init__(self, train, label,transform=None):
        self.x_data=train 
        self.y_data=label
        self.len = len(self.x_data)
        #self.transform=transform
        
        self.transform= transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((120,120)),
            transforms.ToTensor()
        ])

while I was doing the same thing when I used my resize class, it worked well…
why it didnt work when I used transforms in dataset class?

It should work using your self.transform definition.
Could you post an executable code snippet to reproduce the error you are seeing so that we can have a look, please?

Sure, here is the code and error:

class swe_dataset(Dataset):
    def __init__(self, train, label,transform=None):
        self.x_data=train 
        self.y_data=label
        self.len = len(self.x_data)
        #self.transform=transform
        
        self.transform= transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((120,120)),
            transforms.ToTensor()
        ])
        
    def __getitem__(self,index):
        sample= self.x_data[index], self.y_data[index]
        if self.transform is not None:
            sample=self.transform(sample)
            
        return sample
    
    def __len__(self):
        return self.len
TypeError                                 Traceback (most recent call last)
<ipython-input-335-3765493ebbad> in <module>
----> 1 im,_=next(iter(train_set))

<ipython-input-331-2ded7f573365> in __getitem__(self, index)
     15         sample= self.x_data[index], self.y_data[index]
     16         if self.transform is not None:
---> 17             sample=self.transform(sample)
     18 
     19         return sample

~/anaconda3/envs/gpu-env/lib/python3.8/site-packages/torchvision/transforms/transforms.py in __call__(self, img)
     68     def __call__(self, img):
     69         for t in self.transforms:
---> 70             img = t(img)
     71         return img
     72 

~/anaconda3/envs/gpu-env/lib/python3.8/site-packages/torchvision/transforms/transforms.py in __call__(self, pic)
    134 
    135         """
--> 136         return F.to_pil_image(pic, self.mode)
    137 
    138     def __repr__(self):

~/anaconda3/envs/gpu-env/lib/python3.8/site-packages/torchvision/transforms/functional.py in to_pil_image(pic, mode)
    118     """
    119     if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
--> 120         raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
    121 
    122     elif isinstance(pic, torch.Tensor):

TypeError: pic should be Tensor or ndarray. Got <class 'tuple'>.
1 Like

In your functional approach you are unpacking sample to inputs and target, thus passing a tensor to resize, while you are trying to apply self.transform on the tuple directly, which won’t work.
Unpack sample or pass self.x_data[index] directly to self.transform.

1 Like