I’m having an issue: `TypeError: cannot unpack non-iterable float object`
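(For reference, Python raises this whenever a single float is unpacked into multiple names:

```python
# minimal reproduction of the error, independent of PyTorch
a, b = 3.14  # TypeError: cannot unpack non-iterable float object
```
)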

```python
def train_fx(data_loader, model, optimizer, scheduler, device):
    train_loss = 0
    for bi, d in tqdm(enumerate(data_loader)):
        ids = d['ids']
        mask = d['mask']
        token_type_ids = d['token_type_ids']
        target = d['target']

        # move the batch tensors onto the device
        ids = ids.to(device, dtype=torch.long)
        mask = mask.to(device, dtype=torch.long)
        token_type_ids = token_type_ids.to(device, dtype=torch.long)
        target = target.to(device, dtype=torch.long)

        optimizer.zero_grad()
        outputs = model(
            ids=ids,
            mask=mask,
            token_type_ids=token_type_ids
        )

        loss = loss_fn(outputs, target)
        loss.backward()
        optimizer.step()
        scheduler.step()
        train_loss += loss.item()
    train_loss = train_loss / len(data_loader)

    print(f'Train loss: {train_loss:.3f}')
```

```python
def eval_fx(data_loader, model, device):
    model.eval()
    valid_loss = 0

    # final_target = []
    # final_outputs = []
    with torch.no_grad():
        for bi, d in tqdm(enumerate(data_loader)):
            ids = d['ids']
            mask = d['mask']
            token_type_ids = d['token_type_ids']
            target = d['target']

            # move the batch tensors onto the device
            ids = ids.to(device, dtype=torch.long)
            mask = mask.to(device, dtype=torch.long)
            token_type_ids = token_type_ids.to(device, dtype=torch.long)
            target = target.to(device, dtype=torch.long)

            outputs = model(
                ids=ids,
                mask=mask,
                token_type_ids=token_type_ids
            )
            loss = loss_fn(outputs, target)

            valid_loss += loss.item()
        valid_loss = valid_loss / len(data_loader)

    print(f'valid loss: {valid_loss:.3f}')
```

```python
def accuracy_metrics(self, outputs, target):
    # argmax due to the linear layer in our feedforward function
    outputs = torch.argmax(outputs).cpu().detach().numpy()
    target = target.cpu().detach().numpy()
    return {'Accuracy': metrics.accuracy_score(outputs, target)}
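```

(As an aside, separate from the unpacking error: with outputs of shape `[batch, n_classes]`, as in the `torch.Size([4, 3])` shown below, `torch.argmax` is usually taken over `dim=1` so the predictions line up with a target of shape `[batch]`. A quick illustration:

```python
import torch

outputs = torch.randn(4, 3)                # [batch, n_classes], matching the shapes below
print(torch.argmax(outputs).shape)         # torch.Size([]) - one index into the flattened tensor
print(torch.argmax(outputs, dim=1).shape)  # torch.Size([4]) - one prediction per sample
```
)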

```python
from tqdm import tqdm

best_accuracy = 0
for epoch in range(config.epochs):
    train_fx(train_data_loader, model, optimizer, scheduler, device)
    outputs, target = eval_fx(valid_data_loader, model, device)
    accuracy = accuracy_metrics(outputs, target)
    print(f'Accuracy score ----- {accuracy}')
    if accuracy > best_accuracy:
        torch.save(model.state_dict(), config.model_path)
        best_accuracy = accuracy_metrics
```

I’ve checked the shapes of my outputs and target in both the training and validation loops, and this is what I got:

Training:
Outputs - `torch.Size([4, 3])`
Target - `torch.Size([4])`

Validation:
Outputs - `torch.Size([1, 3])`
Target - `torch.Size([1])`

So I don’t know why it’s showing me errors in my validation loop.

It would help if you mentioned the specific line number where the error occurs; that makes it much easier for people here to help.

On a quick look, `eval_fx` does not seem to return any values, but the line below seems to expect some return values.

P.S. you can use ``` to format the code neatly.

```python
outputs, target = eval_fx(valid_data_loader, model, device)
```

This is where the problem lies.

Even when I returned `valid_loss` in the code itself, it still said it cannot unpack a float object.

I am not sure if I understand your question correctly.
From your code, I understand that `eval_fx` is supposed to return two values (`outputs`, `target`).

Yes, but it’s getting just a single float value.

Then maybe you could change the code to receive a "single float value"?

```python
loss = eval_fx(valid_data_loader, model, device)
```
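Alternatively, if you want `eval_fx` to hand back the `(outputs, target)` pair your training script unpacks, it needs an explicit `return`. Here is a minimal sketch building on the commented-out `final_outputs` / `final_target` lists in your code (concatenating with `torch.cat` and this exact return signature are my assumptions, not something your snippet confirms):

```python
def eval_fx(data_loader, model, device):
    model.eval()
    valid_loss = 0
    final_outputs = []
    final_target = []
    with torch.no_grad():
        for bi, d in tqdm(enumerate(data_loader)):
            ids = d['ids'].to(device, dtype=torch.long)
            mask = d['mask'].to(device, dtype=torch.long)
            token_type_ids = d['token_type_ids'].to(device, dtype=torch.long)
            target = d['target'].to(device, dtype=torch.long)

            outputs = model(ids=ids, mask=mask, token_type_ids=token_type_ids)
            loss = loss_fn(outputs, target)
            valid_loss += loss.item()

            # collect per-batch results so the caller can compute accuracy later
            final_outputs.append(outputs)
            final_target.append(target)
    valid_loss = valid_loss / len(data_loader)
    print(f'valid loss: {valid_loss:.3f}')

    # return the two values that `outputs, target = eval_fx(...)` unpacks
    return torch.cat(final_outputs), torch.cat(final_target)
```

With that `return` in place, `outputs, target = eval_fx(valid_data_loader, model, device)` has two values to unpack and the error should go away.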

I think it’s a general Python error, not something PyTorch-specific.

I know the problem is in my code and it’s coming from the eval function, but I don’t know how to tweak it.
