Evaluation - ValueError: too many values to unpack (expected 2)

Hello,

I am trying to write my own data loader, train a ResNet-18 for several epochs, and evaluate the model. This is my code so far.

import os

import torch
import torch.nn as nn
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from torchvision import models, transforms

# define hyper-parameters
num_epochs = 10
learning_rate = 0.001
batchsize = 8
nb_classes = 3

# device configuration, use cuda if possible
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# pass num_classes as a keyword argument; the first positional
# argument of models.resnet18 is pretrained, not the class count
resnet18 = models.resnet18(num_classes=nb_classes)
resnet18.to(device)

dir_train = 'C:/Users/.../costum_DPDataset/train/'
dir_test = 'C:/Users/.../costum_DPDataset/test/'

imgs = os.listdir(dir_train)


# define image transformation (resize, normalize and convert to tensor)
normalize = transforms.Compose([transforms.Resize(350), transforms.ToTensor(), 
                                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])])

class DPDataset(Dataset):
    def __init__(self, imgs, mode, transforms=None):
        self.imgs = imgs
        self.mode = mode
        self.transforms = transforms
        
    def __getitem__(self, idx):
        image_name = self.imgs[idx]
        
        if self.mode in ('train', 'val'):
            label = int(image_name.split('_')[2])
            label = torch.tensor(label, dtype=torch.long)
            
            img = Image.open(dir_train + image_name)
            img = self.transforms(img)            
            return img, label
        
        elif self.mode == 'test':
            img = Image.open(dir_test + image_name)
            img = self.transforms(img)
            
            return img
        
    def __len__(self):
        return len(self.imgs)
    
train_imgs, val_imgs = train_test_split(imgs, test_size=0.2)
test_imgs = os.listdir(dir_test)

train_dataset = DPDataset(train_imgs, mode='train', transforms=normalize)
val_dataset = DPDataset(val_imgs, mode='val', transforms=normalize)
test_dataset = DPDataset(test_imgs, mode='test', transforms=normalize)

train_data_loader = DataLoader(dataset=train_dataset, num_workers=0,
                               batch_size=batchsize, shuffle=True)
val_data_loader = DataLoader(dataset=val_dataset, num_workers=0,
                             batch_size=batchsize, shuffle=True)
test_data_loader = DataLoader(dataset=test_dataset, num_workers=0,
                              batch_size=batchsize, shuffle=True)

# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(resnet18.parameters(), lr=learning_rate)
# optimizer = torch.optim.SGD(resnet18.parameters(), lr=learning_rate)


# updating learning rate
def update_lr(optimizer, lr):    
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

total_step = len(train_data_loader)
curr_lr = learning_rate
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_data_loader):
        images = images.to(device)
        labels = labels.to(device)

        # forward pass
        outputs = resnet18(images)
        loss = criterion(outputs, labels)

        # backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # if (i+1) % 100 == 0:
        if (i+1) % 10 == 0:
            print ("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, (i+1)*batchsize, 
                           total_step*batchsize, loss.item()))

    # decay learning rate (note: this never triggers with num_epochs = 10)
    if (epoch+1) % 20 == 0:
        curr_lr /= 3
        update_lr(optimizer, curr_lr)

resnet18.eval()
with torch.no_grad():
    correct = 0
    total = 0
    # for i, (images, labels) in enumerate(test_data_loader):
    #### ValueError ####
    for images, labels in test_data_loader:
        images = images.to(device)
        labels = labels.to(device)
        
        outputs = resnet18(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))

Unfortunately, I get a ValueError ("too many values to unpack (expected 2)") on this line:

for images, labels in test_data_loader:

I do not know why, because I thought I had done it the same way as in the training part, so it should work. The commented-out line above it throws the same error. Does anyone have an idea? Thanks in advance.

If you look at the test branch of __getitem__, you return only one element (the image), but in the for loop you unpack two elements, hence the error.
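One possible fix, as a minimal sketch: assuming your test filenames encode the label in the same position as the training filenames (your code does not show this, so treat it as an assumption), you could return a label in test mode as well, and the evaluation loop works unchanged:

        elif self.mode == 'test':
            # assumes the test filenames follow the same pattern as the
            # training ones, with the label at split('_')[2]; adjust if not
            label = int(image_name.split('_')[2])
            label = torch.tensor(label, dtype=torch.long)

            img = Image.open(dir_test + image_name)
            img = self.transforms(img)

            return img, label

If the test images genuinely have no labels, the alternative is to return only the image and drop the accuracy computation, since there is no ground truth to compare against.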

Thanks, I will try this out. So this means if I change the loop to:

for images in test_data_loader:

this should work out?
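Iterating without unpacking does avoid the ValueError, yes. Note, though, that the loop body then has no labels, so the accuracy computation has to go as well. A minimal sketch of a prediction-only evaluation loop, assuming the test set carries no ground truth:

resnet18.eval()
with torch.no_grad():
    all_preds = []
    for images in test_data_loader:
        images = images.to(device)

        # forward pass, then take the index of the highest logit per image
        outputs = resnet18(images)
        _, predicted = torch.max(outputs, 1)
        all_preds.append(predicted.cpu())

# one tensor with a predicted class index per test image
all_preds = torch.cat(all_preds)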