transforms.ColorJitter. TypeError: '>' not supported between instances of 'tuple' and 'int'

Hello,
when I try to transform my image using ColorJitter, I get the following error:
TypeError: '>' not supported between instances of 'tuple' and 'int'
My transforms operation looks something like this:

transformations=transforms.Compose([
    transforms.ColorJitter(brightness=0.2,saturation=(0.8,1.62),contrast=(1,1.3),hue=(-0.02,0.02)),
    transforms.RandomAffine(degrees=180,translate=(0.05,0.05),scale=(0.9,1.1),shear=25),
    transforms.CenterCrop((64,64)),
    transforms.ToTensor(),
    transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
])

I am confused because I thought tuples were allowed as input.
Hopefully someone can help me!

Could you post the code which throws this error?
Are you trying to pass a tuple of images to this transformation?

import pandas as pd
from Modules import *
import torch.nn as nn
import re
import torch.nn.functional as F
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Dataset,DataLoader
import openslide
import numpy as np
from PIL import Image
from torchvision import transforms
import torch.multiprocessing
import time
import random
from torch.autograd import grad_mode
import matplotlib.pyplot as plt

start_time=time.time()

device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class LoadDataset(Dataset):
    def __init__(self,csv_file,slide_path,img_size=150,transform=None):

        self.csv=pd.read_csv(csv_file,header=None)
        self.transform=transform
        self.slide_path=slide_path
        self.img_size=img_size
        self.pre_file=re.search(r"(.*)(.xml)",self.csv.iloc[0,0]).group(1)
        self.Slide=openslide.open_slide(self.slide_path + self.pre_file+ ".ndpi")

    def __len__(self):
        return len(self.csv)

    def __getitem__(self,idx):
        filename=re.search(r"(.*)(.xml)",self.csv.iloc[idx,0]).group(1)
        X_Coordinate=self.csv.iloc[idx,1]
        Y_Coordinate=self.csv.iloc[idx,2]
        label=torch.Tensor((1,0)) if self.csv.iloc[idx,3] == 1 else torch.Tensor((0,1))
        if self.pre_file != filename:
            self.Slide=openslide.open_slide(self.slide_path + filename + ".ndpi")

        img=self.Slide.read_region((X_Coordinate - int(self.img_size / 2),Y_Coordinate - int(self.img_size / 2)),
                              0, (self.img_size,self.img_size)).convert("RGB")
        self.pre_file=filename

        if self.transform:
            trans_img=self.transform(img)
        else:
            trans_img=transforms.ToTensor()(img)

        return (trans_img,label)


class LoadValset(Dataset):
    # TODO: still to be implemented
    pass


if __name__ == '__main__':
    transformations=transforms.Compose([
        transforms.ColorJitter(brightness=(0.8,1.1),saturation=(0.8,1.62),contrast=(1,1.3),hue=(-0.02,0.02)),
        transforms.RandomAffine(degrees=180,translate=(0.05,0.05),scale=(0.9,1.1),shear=25),
        transforms.CenterCrop((64,64)),
        transforms.ToTensor(),
        transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
    ])

    Trainset=LoadDataset(csv_file=r"E:\Output_csv\Output.csv", slide_path=r"E:\Image_files\\",img_size=150,
                         transform=transformations)
    batch_size=64
    TrainLoader=DataLoader(Trainset, batch_size=batch_size, shuffle=True)


    model=simple_CNN()
    model.to(device)

    optimizer=optim.Adam(model.parameters(),lr=0.001)
    criterion=F.binary_cross_entropy
    scheduler=StepLR(optimizer,50)


    def train(epoch):
        model.train()
        running_loss=0.0
        for batch_id,(data,label) in enumerate(TrainLoader):
            data,label=data.to(device),label.to(device)
            optimizer.zero_grad()
            out=model(data)
            loss=criterion(out,label)
            running_loss+=loss
            loss.backward()
            optimizer.step()
            scheduler.step()

        print("Training Epoch %d: running loss: %f" % (epoch,running_loss * batch_size / len(Trainset)))

    for epoch in range(1,20):
        train(epoch)

    print((time.time() - start_time) / 60)

Here is the code. I don't think I pass a tuple of images.
If I just delete the ColorJitter line, the code works.
I know my code is a mess, I have just started programming ^^
Any help is appreciated!

That’s strange, as your code seems to work for this dummy input image:

import torch
import torchvision.transforms.functional as TF
from torchvision import transforms

img = TF.to_pil_image(torch.randn(3, 224, 224))

transformations=transforms.Compose([
    transforms.ColorJitter(brightness=0.2,saturation=(0.8,1.62),contrast=(1,1.3),hue=(-0.02,0.02)),
    transforms.RandomAffine(degrees=180,translate=(0.05,0.05),scale=(0.9,1.1),shear=25),
    transforms.CenterCrop((64,64)),
    transforms.ToTensor(),
    transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
])

x = transformations(img)

Could you check the shape and type of img before passing it to self.transform?
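E.g. with some debug prints like these (just an illustration) inside LoadDataset.__getitem__, right before the self.transform(img) call:

print(img)          # PIL Image repr
print(img.size)     # (width, height)
print(type(img))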

Thanks for the quick reply

<PIL.Image.Image image mode=RGB size=150x150 at 0x2B64A9BA5C18>
(150, 150)
<class 'PIL.Image.Image'>
Traceback (most recent call last):
  File "DataLoader_outline.py", line 122, in <module>
    train(epoch)
  File "DataLoader_outline.py", line 96, in train
    for batch_id,(data,label) in enumerate(TrainLoader):
  File "/home/hd/hd_hd/hd_qo450/miniconda3/envs/bachelorarbeit/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 264, in __next__
    batch = self.collate_fn([self.dataset[i] for i in indices])
  File "/home/hd/hd_hd/hd_qo450/miniconda3/envs/bachelorarbeit/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 264, in <listcomp>
    batch = self.collate_fn([self.dataset[i] for i in indices])
  File "DataLoader_outline.py", line 53, in __getitem__
    trans_img=self.transform(img)
  File "/home/hd/hd_hd/hd_qo450/miniconda3/envs/bachelorarbeit/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 49, in __call__
    img = t(img)
  File "/home/hd/hd_hd/hd_qo450/miniconda3/envs/bachelorarbeit/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 755, in __call__
    self.saturation, self.hue)
  File "/home/hd/hd_hd/hd_qo450/miniconda3/envs/bachelorarbeit/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 725, in get_params
    if brightness > 0:
TypeError: '>' not supported between instances of 'tuple' and 'int'

That's the whole error.

I installed pytorch / torchvision via

conda install pytorch torchvision cudatoolkit=9.0 -c pytorch

That install command is outdated. Anyway, since brightness is a single float value in the code you posted, I'm not sure why it throws this error.
Could you check your torchvision.__version__ so that we can have a look?
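For example:

import torchvision
print(torchvision.__version__)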

The error was generated while I tried brightness=(0.8,1.1), sorry for the confusion.
I'm using version 0.2.1.

Thanks for the information.
Tuple arguments are only supported in newer torchvision versions, so you should update the library.
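For reference, ColorJitter.get_params in 0.2.1 assumes scalar arguments, so a tuple breaks the comparison in that if brightness > 0 check from your traceback. A rough paraphrase (not the exact source) of the old check versus how newer versions normalize the input:

import random

# Rough paraphrase of the failing check in torchvision 0.2.1's
# ColorJitter.get_params (not the exact source):
def brightness_factor_old(brightness):
    if brightness > 0:  # brightness=(0.8, 1.1) makes this tuple > int -> TypeError on Python 3
        return random.uniform(max(0, 1 - brightness), 1 + brightness)
    return 1.0

# Newer torchvision normalizes both call styles to a (min, max) range
# before sampling, roughly like this:
def brightness_range_new(brightness):
    if isinstance(brightness, (int, float)):
        return (max(0.0, 1.0 - brightness), 1.0 + brightness)
    return (float(brightness[0]), float(brightness[1]))  # a tuple is used directly as the range

Updating with pip install --upgrade torchvision (or the current conda command from pytorch.org) should give you a version that accepts (min, max) tuples.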

Thank you very much!!! Works like a charm.
I thought I'd have the latest version because I just downloaded it 3 days ago.