Make ImageFolder output the same image twice with different transforms

Hi!
I know how to use ImageFolder to get my training batch from folders using this code

# Single-transform pipeline: resize, random flip, convert to tensor.
transform = transforms.Compose([
    transforms.Resize((224, 224), interpolation=3),  # 3 == PIL.Image.BICUBIC
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
])

image_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'), transform)
# BUG FIX: the DataLoader was constructed from `image_datasets`, which is
# never defined in this snippet — use the `image_dataset` created above.
train_dataset = torch.utils.data.DataLoader(
        image_dataset, batch_size=32,
        shuffle=True, num_workers=16
    )

But I want to apply two transforms for the same image and get something like

for img1, img2, label in train_dataset:

where img1 and img2 are the same image at two different sizes. An example is below:

# NOTE(review): this is the asker's broken attempt, kept verbatim to
# illustrate the question — cv2.imread returns an (H, W, 3) array, which
# cannot be reshaped to (1, 1, 3), and the transpose/reshape pairs below
# do not actually resize anything.
im = cv2.imread('img.jpg').reshape((1, 1, 3))
im2 = cv2.resize(im, (448, 448))
im = im.transpose((1, 2, 0)).reshape((1, 3, 224, 224))
im2 = im2.transpose((1, 2, 0)).reshape((1, 3, 448, 448))

How can I do this?
Thank you

2 Likes

For this you would have to create/modify a data loader class.

You can have a look at this tutorial - Data Loading and Processing Tutorial

I’ve modified the code from the tutorial to reflect something like you have asked below

class ImageDataset(Dataset):
    """Dataset yielding the same image at two sizes plus its label.

    Each item is a tuple ``(im, im2, label)`` where ``im`` is the image
    resized to 224x224, ``im2`` is the same image resized to 448x448,
    and ``label`` is the annotation row from the CSV file.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.imgs_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        # BUG FIX: was len(self.landmarks_frame) — an attribute that does
        # not exist on this class; the annotation table is imgs_frame.
        return len(self.imgs_frame)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir,
                                self.imgs_frame.iloc[idx, 0])
        # BUG FIX: read the indexed file (was hard-coded 'img.jpg') and
        # resize to the two target sizes — the original reshape/transpose
        # calls could not produce 224x224 / 448x448 images.
        im = cv2.imread(img_name)
        im2 = cv2.resize(im, (448, 448))
        im = cv2.resize(im, (224, 224))
        # .as_matrix() was removed in pandas 1.0; .to_numpy() is the
        # supported equivalent.
        label = self.imgs_frame.iloc[idx, 1:].to_numpy()
        if self.transform:
            im = self.transform(im)
            im2 = self.transform(im2)

        # BUG FIX: the original returned the undefined name `im1`.
        return im, im2, label
2 Likes

Thanks, problem solved using your link and the ImageFolder implementation.
I added one more transform argument to my custom ImageFolder and return img1, img2, labels.
Thanks!

1 Like

Can you share the custom ImageFolder implementation please.

@CCV

import torch.utils.data as data

from PIL import Image
import os
import os.path

# Extensions (both lower and upper case) accepted as image files.
IMG_EXTENSIONS = [
   '.jpg', '.JPG', '.jpeg', '.JPEG',
   '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """Return True if *filename* ends with a recognised image extension.

    The check is case-sensitive, matching the entries in IMG_EXTENSIONS.
    """
    # str.endswith accepts a tuple of suffixes — one call instead of a
    # Python-level any() loop over the list.
    return filename.endswith(tuple(IMG_EXTENSIONS))

def find_classes(dir):
    """Map each class subdirectory of *dir* to an integer index.

    Returns:
        (classes, class_to_idx): the sorted class names and a dict from
        class name to index.
    """
    # BUG FIX: keep only subdirectories — a stray file in the root
    # (e.g. a README) would otherwise become a phantom class and shift
    # every index after it, while make_dataset never yields samples
    # for it.
    classes = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: i for i, name in enumerate(classes)}
    return classes, class_to_idx


def make_dataset(dir, class_to_idx):
    """Collect (relative_path, class_index) pairs for images under *dir*.

    Each immediate subdirectory of *dir* is treated as one class; files
    inside it that pass is_image_file become samples.
    """
    images = []
    # Sort both directory levels so dataset order is deterministic across
    # runs and file systems (os.listdir order is arbitrary).
    for target in sorted(os.listdir(dir)):
        d = os.path.join(dir, target)
        if not os.path.isdir(d):
            continue

        for filename in sorted(os.listdir(d)):
            if is_image_file(filename):
                # BUG FIX: build the relative path with os.path.join
                # instead of '{0}/{1}'.format so it is also correct on
                # Windows.
                path = os.path.join(target, filename)
                images.append((path, class_to_idx[target]))

    return images


def default_loader(path):
    """Open the file at *path* with PIL and return it as an RGB image."""
    image = Image.open(path)
    return image.convert('RGB')


class ImageFolderLoader(data.Dataset):
    """ImageFolder variant that returns each image twice, once per transform.

    Items are ``(img1, img2, target)`` where img1/img2 are the same loaded
    image passed through transform_1/transform_2 respectively, or the raw
    image when the corresponding transform is None.
    """

    def __init__(self, root, transform_1=None,
                 transform_2=None, target_transform=None,
                 loader=default_loader):
        classes, class_to_idx = find_classes(root)
        imgs = make_dataset(root, class_to_idx)

        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform_1 = transform_1
        self.transform_2 = transform_2
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """Return the (img1, img2, target) triple for the sample at *index*."""
        path, target = self.imgs[index]
        img = self.loader(os.path.join(self.root, path))
        # BUG FIX: the original left img1/img2 unassigned when the
        # corresponding transform was None, raising UnboundLocalError at
        # the return statement; fall back to the untransformed image.
        img1 = self.transform_1(img) if self.transform_1 is not None else img
        img2 = self.transform_2(img) if self.transform_2 is not None else img
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img1, img2, target

    def __len__(self):
        return len(self.imgs)

And to use it;

# Two parallel pipelines: identical normalisation (ImageNet statistics),
# different target sizes — 224 for transform_1, 448 for transform_2.
# interpolation=3 is PIL.Image.BICUBIC.
data_transforms_1 = transforms.Compose([
        transforms.Resize((224, 224), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
])
data_transforms_2 = transforms.Compose([
        transforms.Resize((448, 448), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
])
# One dataset instance yields (img1, img2, label) triples.
image_datasets = ImageFolderLoader(
        os.path.join(data_dir, 'train_folders'),
        transform_1=data_transforms_1,
        transform_2=data_transforms_2
    )

trainloader = torch.utils.data.DataLoader(
        image_datasets, batch_size=32,
        shuffle=True, num_workers=16
  )

Inside your train function

for i, data in enumerate(trainloader):
          imgs1, imgs2, labels = data
           ..........

Hope it helped

2 Likes

Thanks for the prompt reply. It worked for a single dataset. I have two datasets in two different folders, and I'm trying to modify your code to handle this, but still with no luck. Do you think it is a good idea to modify your code to do that? Or is there an alternate way?

You can follow this example Simultaneously train on two dataset

1 Like