enumerate(dataloader) - unsolvable error

Hi,
I’ve been working on a CT scan classification model with a custom image dataset, and while training the model, the line "for batch, (X_train, y_train) in enumerate(train_data):" raised an error:

"ValueError: too many values to unpack (expected 3)"

The thing is, the previous 18 or so batches went through without any problems. I also checked whether the shape of the loaded data differs between samples, and I used *data to try to find what doesn't fit; unpacking into more or fewer variables didn't work either.
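
Here's roughly how I checked the shapes, a minimal sketch that iterates the dataset directly (it assumes train_dataset as constructed below; nothing here beyond plain PyTorch):

# Iterate the dataset sample by sample (no DataLoader batching) to find
# any sample whose shape differs, or that raises during loading/transform.
shapes = {}
for i in range(len(train_dataset)):
    try:
        image, label = train_dataset[i]
        shapes.setdefault(tuple(image.shape), []).append(i)
    except ValueError as e:
        print(f"sample {i} failed: {e}")  # a likely culprit

for shape, indices in shapes.items():
    print(shape, "->", len(indices), "samples")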

I’ve been trying to resolve this issue for about 3 hours now; any help would be greatly appreciated. :smile:

PS: If you see ways to improve my code further, please let me know.

import os
import time

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision.io import read_image
from torchvision.transforms import v2

class ScansDS(Dataset):
    def __init__(self, annotations, root_dir, transform = None):
        self.annotations = annotations
        self.root_dir = root_dir
        self.transform = transform
    
    def __len__(self):
        return len(self.annotations)
    
    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        image = read_image(img_path)  # read_image already returns a uint8 tensor; no torch.tensor() wrapping needed
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))

        if self.transform:
            image = self.transform(image)

        return (image, y_label)
    
transforms = v2.Compose([
    v2.Grayscale(num_output_channels=1),
    v2.Resize((244, 244)),
    v2.ToDtype(torch.float32, scale=True),  # v2.ToTensor() is deprecated; this scales uint8 [0, 255] to float [0, 1], which matches the Normalize stats below
    v2.Normalize(mean=[0.49], std=[0.225]),
])

train_dataset = ScansDS(annotations=train_df, root_dir="Data/mix_train", transform=transforms)

train_data = DataLoader(dataset=train_dataset, batch_size=8, shuffle=True, num_workers=0,)
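
As a quick sanity check (a small sketch using only what's defined above), pulling a single batch straight from the loader shows the shapes I expect when it works:

# Grab one batch without running the training loop.
X, y = next(iter(train_data))
print(X.shape, y.shape)  # expected: torch.Size([8, 1, 244, 244]) torch.Size([8])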

class ScanModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1,64,stride=1,kernel_size=3, bias=False)
        self.conv2 = nn.Conv2d(64,64,stride=1,kernel_size=3, bias=False)
        self.conv3 = nn.Conv2d(64,128,stride=1,kernel_size=3, bias=False)
        self.conv4 = nn.Conv2d(128,256,stride=1,kernel_size=3, bias=False)
        self.conv5 = nn.Conv2d(256,512, stride=1,kernel_size=3, bias=False)

        self.dropout = nn.Dropout(0.22)
       
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        self.bn4 = nn.BatchNorm2d(256)
        self.bn5 = nn.BatchNorm2d(512)
        self.bn6 = nn.BatchNorm2d(512)  # note: defined but never used in forward
         
        self.fc1 = nn.Linear(512*5*5, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256,3)

    def forward(self, x):     
        x = F.relu(self.conv1(x))
        x = self.bn1(x)
        x = self.dropout(x)
        x = F.max_pool2d(x,2,2)

        x = F.relu(self.conv2(x))
        x = self.bn2(x)
        x = self.dropout(x)
        x = F.max_pool2d(x,2,2)

        x = F.relu(self.conv3(x))
        x = self.bn3(x)
        x = self.dropout(x)
        x = F.max_pool2d(x,2,2)

        x = F.relu(self.conv4(x))
        x = self.bn4(x)
        x = self.dropout(x)
        x = F.max_pool2d(x,2,2)


        x = F.relu(self.conv5(x))
        x = self.bn5(x)
        x = self.dropout(x)
        x = F.max_pool2d(x,2,2)
        
        x = x.view(-1, 512*5*5)

        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = self.fc3(x)  # return raw logits; nn.CrossEntropyLoss applies log_softmax itself
                   
        return x
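
For reference, the 512*5*5 flatten size comes from the 244x244 input shrinking through five conv (kernel 3, no padding) + max-pool stages: 244 -> 121 -> 59 -> 28 -> 13 -> 5. A dummy forward pass (a minimal sketch on CPU) confirms the output shape:

# Verify the architecture with a dummy grayscale batch of 2.
dummy = torch.randn(2, 1, 244, 244)
out = ScanModel()(dummy)
print(out.shape)  # expected: torch.Size([2, 3])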

torch.manual_seed(41)
torch.cuda.manual_seed(41)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = ScanModel().to(device)  # .to(device) falls back to CPU instead of crashing like .cuda()

if torch.cuda.is_available():
    print(torch.cuda.current_device())
    print(torch.cuda.device_count())
    print(torch.cuda.get_device_name(0))

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()

EPOCHS = 5

train_losses = []
train_correct = []

start_time = time.time()

for epoch in range(EPOCHS):

    epoch_train_correct = 0
    model.train()

    for batch, (X_train,y_train) in enumerate(train_data):
      X_train, y_train = X_train.to(device), y_train.to(device)  # images are already float32 from the transform
      print(batch)
      y_pred = model(X_train)  # call the model directly instead of model.forward() so hooks run
      loss = criterion(y_pred,y_train)

      predicted = torch.max(y_pred.detach(), 1)[1]  # .detach() strips the tensor from the autograd graph and gives just the values; the 1 in max() is the dimension along which to take the max (here, across the class columns)
      batch_correct = (predicted == y_train).sum().item()  # .item() so we accumulate a plain number, not a CUDA tensor
      epoch_train_correct += batch_correct

      optimizer.zero_grad()
      loss.backward()  # computes the gradients for our weights etc.
      optimizer.step()  # updates the weights using the gradients

      if batch % 10 == 0:
        print(f"Epoch: {epoch+1}   Batch: {batch+1}   Loss: {loss.item()}")

    train_losses.append((f"Trl{epoch+1}", loss.item()))  # .item() stores a plain number instead of a graph-attached tensor
    train_correct.append((f"Trc{epoch+1}", epoch_train_correct))


current_time = time.time()
total_time = current_time - start_time
print(f"Training took {total_time/60} minutes.")