I wrote this toy dataset example for the MPII dataset, but the images in the dataset actually have different sizes. As a result, the DataLoader fails when it tries to concatenate the images into one batch. I'm sure my model can handle variable input sizes, since I'm using deeplabv3_resnet; the problem is just how to collate images of different sizes into a single batch.
class ToyDataset(data.Dataset):
    """Dataset of image pairs (`<idx>.png`, `<idx>_f.png`) with pose and id targets.

    Expects a directory containing numbered PNG images plus two tensors
    saved with ``torch.save``: ``poses.pt`` and ``ids.pt``, indexed in
    step with the images.

    Because the MPII images have varying spatial sizes, the default
    DataLoader collate (which stacks tensors) fails on this dataset.
    Construct the loader with ``collate_fn=ToyDataset.collate_fn`` so
    images are batched as Python lists instead of stacked tensors.
    """

    def __init__(self, root_dir):
        """Load the per-sample targets from *root_dir*.

        Args:
            root_dir: Directory holding the PNGs, ``poses.pt`` and ``ids.pt``.
        """
        self.datadir = root_dir
        self.poses = torch.load(os.path.join(self.datadir, 'poses.pt'))
        self.ids = torch.load(os.path.join(self.datadir, 'ids.pt'))

    def __len__(self):
        """Number of samples (one per entry in ``poses``)."""
        return len(self.poses)

    def __getitem__(self, idx):
        """Return one sample as a dict.

        Images are loaded from disk, converted to CHW float tensors and
        scaled to [0, 1]. Returns ``{'img', 'f_img', 'pose', 'id'}``.
        """
        img = torch.from_numpy(skio.imread(os.path.join(self.datadir, str(idx) + '.png'))).permute(2, 0, 1).float() / 255
        f_img = torch.from_numpy(skio.imread(os.path.join(self.datadir, str(idx) + '_f.png'))).permute(2, 0, 1).float() / 255
        pose = self.poses[idx]
        # Renamed from `id` to avoid shadowing the builtin; the returned
        # dict key stays 'id' so callers are unaffected.
        sample_id = self.ids[idx]
        return {'img': img, 'f_img': f_img, 'pose': pose, 'id': sample_id}

    @staticmethod
    def collate_fn(batch):
        """Collate samples whose images have different spatial sizes.

        Instead of stacking (which requires equal shapes), each field is
        gathered into a plain list, preserving per-sample tensors as-is.
        Use as ``DataLoader(ds, ..., collate_fn=ToyDataset.collate_fn)``.

        Args:
            batch: List of sample dicts as produced by ``__getitem__``.

        Returns:
            Dict with the same keys, each mapping to a list of values.
        """
        return {key: [sample[key] for sample in batch]
                for key in ('img', 'f_img', 'pose', 'id')}