Error in Training and Testing

Hi, I've been trying to write a custom dataset class plus training and testing classes for landmark recognition. What I did is the following:

    import os
    import PIL.Image
    import numpy as np
    import pandas as pd
    import torch
    from torch.utils.data import Dataset

    class FaceLandmarksDataset(Dataset):
        """Face Landmarks dataset."""

        def __init__(self, csv_file, root_dir, transform=None):
            """
            Args:
                csv_file (string): Path to the csv file with annotations.
                root_dir (string): Directory with all the images.
                transform (callable, optional): Optional transform to be applied on a sample.
            """
            csv_data = pd.read_csv(csv_file, header=0, engine='python', index_col=0)
            self.landmarks_frame = pd.read_csv(csv_file)
            self.root_dir = root_dir
            self.transform = transform
            # first column is the gender label, the remaining columns are landmark coordinates
            self.gender = torch.as_tensor(csv_data.iloc[:, 0].values)
            self.landmarks = torch.as_tensor(csv_data.iloc[:, 1:].values)
            self.filename = csv_data.index.values

        def __len__(self):
            return len(self.landmarks_frame)

        def __getitem__(self, idx):
            if torch.is_tensor(idx):
                idx = idx.tolist()
            img = PIL.Image.open(os.path.join(self.root_dir, self.filename[idx]))
            if self.transform is not None:
                input_transform = {'image': np.array(img),
                                   'landmarks': self.landmarks[idx, :].numpy().reshape(-1, 2)}
                output_transform = self.transform(input_transform)
                X = output_transform['image']
                # flatten the landmarks and normalise them by the image size
                T = (self.gender[idx], output_transform['landmarks'].reshape(-1) / X.size(1))
                return X, T

Note: everything from input_transform down to return is meant to be indented inside the if self.transform is not None: block, but the indentation wasn't coming out right when I pasted the code here, I don't know why.

Then I have some classes that resize and crop the images and convert them to tensors (Rescale, RandomCrop, ToTensor), since the CNN expects inputs of a fixed size.
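Roughly, they look like this (a simplified sketch adapted from the PyTorch data loading tutorial; each transform takes and returns the {'image': ..., 'landmarks': ...} dict so they can be chained with transforms.Compose, and RandomCrop follows the same pattern as Rescale):

    import numpy as np
    import torch
    from skimage import transform as sk_transform

    class Rescale(object):
        """Rescale the image so that its smaller side equals output_size,
        and scale the landmark coordinates accordingly."""
        def __init__(self, output_size):
            self.output_size = output_size

        def __call__(self, sample):
            image, landmarks = sample['image'], sample['landmarks']
            h, w = image.shape[:2]
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
            new_h, new_w = int(new_h), int(new_w)
            image = sk_transform.resize(image, (new_h, new_w))
            # landmarks are (x, y) pairs, so x scales with the width and y with the height
            landmarks = landmarks * [new_w / w, new_h / h]
            return {'image': image, 'landmarks': landmarks}

    class ToTensor(object):
        """Convert the numpy image (H x W x C) to a torch tensor (C x H x W)."""
        def __call__(self, sample):
            image, landmarks = sample['image'], sample['landmarks']
            image = image.transpose((2, 0, 1))
            return {'image': torch.from_numpy(image),
                    'landmarks': torch.from_numpy(np.asarray(landmarks))}

After that I have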

    transformed_dataset = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transforms.Compose([
            Rescale(256),
            RandomCrop(224),
            ToTensor()]))
    train = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transformed_dataset)
    test = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transformed_dataset)

    from torch.utils.data import random_split

    dataset = CelebAMini(os.getcwd(), transform=transform)
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    train, test = random_split(dataset, [train_size, test_size])
    print(len(train), len(test))

My class for training:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    num_epochs = 10
    batch_size = 16
    lr = 1e-4

    class Training_Gender(object):
        def __init__(self, train, test, batch_size, num_epochs, lr, model, loss_0, device, optimizer_0):
            self.train_dataloader = DataLoader(train, batch_size=batch_size, shuffle=True)
            self.test_loader = DataLoader(test, batch_size=batch_size, shuffle=True)

            # Model
            self.model = model

            # Optimizer
            self.optimizer_0 = optimizer_0
            if self.optimizer_0 == 'Adam':
                self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
            if self.optimizer_0 == 'SGD':
                self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr)

            # Criterion
            self.loss_0 = loss_0
            if self.loss_0 == 'BCE':
                self.loss = nn.BCELoss()
            if self.loss_0 == 'CE':
                self.loss = nn.CrossEntropyLoss()

            # Hyperparameters
            self.n_epochs = num_epochs

            # Using GPU
            self.model.to(device)

        def training_task1(self, epoch):
            self.model.train()

            for image, target, _ in self.train_dataloader:
                image = image.to(device).float()

                # target[0] is the gender label; one-hot encode it for the loss
                target = F.one_hot(target[0], num_classes=2).to(device).to(torch.float32)

                self.optimizer.zero_grad()
                output = self.model(image)
                loss = self.loss(output, target)

                loss.backward()
                self.optimizer.step()

        def save_model(self, path):
            torch.save(self.model.state_dict(), path)


    train_data = Training_Gender(train, test, batch_size, num_epochs, lr, model, 'CE', device, 'Adam')
    for epoch in tqdm(range(num_epochs)):
        train_data.training_task1(epoch)

and I'm getting this error:
    TypeError                                 Traceback (most recent call last)
    in ()
          2
          3 for epoch in tqdm(range(num_epochs)):
    ----> 4 train_data.training_task1(epoch)
          5

    /content/gdrive/MyDrive/celebamini.py in __getitem__(self, index)
         69
         70 if self.transform is not None:
    ---> 71     X = self.transform(X)
         72
         73 T = (self.gender[index], self.landmarks[index,:])

    TypeError: 'module' object is not callable

I don't understand why this is happening. If you need any more details, just say so, please. I'm also getting the same error with the test class, but I believe the cause is the same.
Thanks in advance!

I'm not sure if I'm misunderstanding your code, but this part looks wrong:

    transformed_dataset = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transforms.Compose([
            Rescale(256),
            RandomCrop(224),
            ToTensor()]))
    train = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transformed_dataset)
    test = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transformed_dataset)

transformed_dataset is initially an object of FaceLandmarksDataset, and it is then passed as the transform argument to FaceLandmarksDataset again to create train and test.

Thanks, the error was related to that; it was supposed to be:

    transformed_dataset = transforms.Compose([Rescale(256), RandomCrop(224), ToTensor()])
    train = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transformed_dataset)
    test = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transformed_dataset)
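
(Side note: with this, the random_split snippet from before can also work off a single dataset instead of two separate ones, e.g.:)

    dataset = FaceLandmarksDataset(
        csv_file='/content/gdrive/MyDrive/celeba-mini/celeba-mini.csv',
        root_dir='/content/gdrive/MyDrive/celeba-mini/images',
        transform=transformed_dataset)
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    train, test = random_split(dataset, [train_size, test_size])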

Now I'm just getting:

    ValueError                                Traceback (most recent call last)
    in ()
          2
          3 for epoch in tqdm(range(num_epochs)):
    ----> 4 train_data.training_task1(epoch)
          5
          6 #training.graph_losses()

    in training_task1(self, epoch)
         35 self.model.train()
         36
    ---> 37 for image, target, _ in self.train_dataloader:
         38     image = image.to(device).float()
         39

    ValueError: not enough values to unpack (expected 3, got 2)

Your Dataset is returning a tuple of 2 objects but you are requesting 3.

Removing the _ from for image, target, _ in self.train_dataloader: should fix the issue.
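
I.e. something like this (just a sketch, assuming your __getitem__ returns (image, (gender, landmarks)) as posted):

    for image, target in self.train_dataloader:
        image = image.to(device).float()

        # target is the (gender, landmarks) tuple from the Dataset,
        # so target[0] is the batch of gender labels
        target = F.one_hot(target[0], num_classes=2).to(device).to(torch.float32)

        self.optimizer.zero_grad()
        output = self.model(image)
        loss = self.loss(output, target)
        loss.backward()
        self.optimizer.step()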
