Sample-averaged F1 for multi-label classification
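For reference, scikit-learn's average="samples" computes an F1 score per sample over its labels and then averages across samples. A minimal illustration, with invented values just for the example:

    from sklearn.metrics import f1_score
    import numpy as np

    # Two samples, three labels each (hypothetical values)
    y_true = np.array([[1, 0, 1],
                       [0, 1, 0]])
    y_pred = np.array([[1, 0, 0],
                       [0, 1, 0]])

    # Per-sample F1: sample 1 -> 2/3, sample 2 -> 1.0; mean ~ 0.833
    print(f1_score(y_true, y_pred, average="samples"))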

    from sklearn.metrics import f1_score
    import torch


    def train(net, data_loader, optimizer, cost_function, device=device):  # assumes a module-level `device`
        samples = 0
        cumulative_loss = 0.0
        all_targets, all_predictions = [], []

        # Strictly needed only if the network contains layers (e.g. dropout,
        # batch norm) that behave differently in train and eval mode
        net.train()

        for inputs, targets in data_loader:
            # Move the batch to the target device (e.g. the GPU)
            inputs = inputs.to(device)
            targets = targets.to(device)

            # Reset the gradients from the previous iteration
            optimizer.zero_grad()

            # Forward pass
            outputs = net(inputs)

            # Compute the loss
            loss = cost_function(outputs, targets)

            # Backward pass
            loss.backward()

            # Update the parameters
            optimizer.step()

            # loss.item() is the batch mean (assuming the default
            # reduction='mean'), so weight it by the batch size
            samples += inputs.shape[0]
            cumulative_loss += loss.item() * inputs.shape[0]

            # Collect targets and binarized predictions for an epoch-level F1.
            # Assuming cost_function is BCEWithLogitsLoss, the outputs are
            # logits, so apply a sigmoid before thresholding at 0.5
            all_predictions.append((torch.sigmoid(outputs) > 0.5).cpu())
            all_targets.append(targets.cpu())

        # Compute the sample-averaged F1 once over the whole epoch instead of
        # returning only the last batch's score
        f1 = f1_score(torch.cat(all_targets).numpy(),
                      torch.cat(all_predictions).numpy(),
                      average="samples")

        return cumulative_loss / samples, f1
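For context, here is a minimal sketch of how this train function could be driven; the model, data, and hyperparameters below are placeholder assumptions, not part of the original code:

    import torch
    from torch import nn
    from torch.utils.data import DataLoader, TensorDataset

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Hypothetical setup: 20 features, 5 independent labels per sample
    net = nn.Linear(20, 5).to(device)
    data = TensorDataset(torch.randn(100, 20),
                         torch.randint(0, 2, (100, 5)).float())
    data_loader = DataLoader(data, batch_size=16, shuffle=True)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    cost_function = nn.BCEWithLogitsLoss()  # outputs are logits, matching the sigmoid in train()

    train_loss, train_f1 = train(net, data_loader, optimizer, cost_function, device=device)
    print(f"loss: {train_loss:.4f}, sample-averaged F1: {train_f1:.4f}")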

Does this make sense?