train_losses = []
val_losses = []
test_losses =[]
train_auc = []
val_auc = []
train_auc_epoch = []
val_auc_epoch = []
best_auc = 0.0
min_loss = np.Inf
since = time.time()
for e in range(num_epochs):
train_loss = 0.0
val_loss = 0.0
# Train the model
model.train()
for i, (images, labels) in enumerate(tqdm(train_dataloader, total=int(len(train_dataloader)))):
images = images.to(device)
labels = labels.to(device)
images, labels = images.cuda(), labels.cuda()
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Loss and accuracy
train_loss += loss.item()
y_actual = labels.data.cpu().numpy()
y_pred = outputs[:,-1].detach().cpu().numpy()
train_auc.append(roc_auc_score(y_actual, y_pred))
# Evaluate the model
model.eval()
for i, (images, labels) in enumerate(tqdm(val_dataloader, total=int(len(val_dataloader)))):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Loss and AUC
val_loss += loss.item()
y_actual = labels.data.cpu().numpy()
y_pred = outputs[:,-1].detach().cpu().numpy()
val_auc.append(roc_auc_score(y_actual, y_pred))
# Average losses and accuracies
train_loss = train_loss/len(train_dataloader)
val_loss = val_loss/len(val_dataloader)
train_losses.append(train_loss)
val_losses.append(val_loss)
training_auc = np.mean(train_auc)
validation_auc = np.mean(val_auc)
train_auc_epoch.append(training_auc)
val_auc_epoch.append(validation_auc)
# Updating best validation AUC
if best_auc < val_auc:
best_acc = val_auc
# Saving best model
if min_loss >= val_loss:
torch.save(model.state_dict(), 'best_model.pt')
min_loss = val_loss
print('EPOCH {}/{}'.format(e+1, num_epochs))
print('-' * 10)
print("Train loss: {:.6f}, Train AUC: {:.4f}".format(train_loss, train_auc))
print("Validation loss: {:.6f}, Validation AUC: {:.4f}\n".format(val_loss, val_auc))
time_elapsed = time.time() - since
print(‘Training completed in {:.0f}m {:.0f}s’.format(time_elapsed // 60, time_elapsed % 60))
print(‘Best validation AUC: {:4f}’.format(best_auc))
100%
2048/2048 [19:19<00:00, 1.77it/s]
6%
16/256 [00:08<01:52, 2.13it/s]
ValueError Traceback (most recent call last)
in
52 y_actual = labels.data.cpu().numpy()
53 y_pred = outputs[:,-1].detach().cpu().numpy()
—> 54 val_auc.append(roc_auc_score(y_actual, y_pred))
55
56
2 frames
/usr/local/lib/python3.7/dist-packages/sklearn/metrics/_ranking.py in _binary_roc_auc_score(y_true, y_score, sample_weight, max_fpr)
336 if len(np.unique(y_true)) != 2:
337 raise ValueError(
→ 338 "Only one class present in y_true. ROC AUC score "
339 “is not defined in that case.”
340 )
ValueError: Only one class present in y_true. ROC AUC score is not defined in that case.