ROC curve for multiple classes in PyTorch

Hi,
I'm trying to plot the ROC curve for a multi-class classification problem. There is a bug in my testing code; I tried two different approaches but get the same error. I'm copying the testing part of my code here:

###### track test#####
...
...
NUM_CLASSES = 8
...

test_loss = 0.0
class_correct = list(0. for i in range(NUM_CLASSES))
class_total = list(0. for i in range(NUM_CLASSES))

model.eval()
output_list = [] 
predict_list = [] 
label_list = [] 

# per-class score and label lists (one list per class)
predict_list_each_class = [[] for _ in range(NUM_CLASSES)]
label_list_each_class = [[] for _ in range(NUM_CLASSES)]

# iterate over test data
for data, target in test_loader:
    # move tensors to GPU if CUDA is available
    if train_on_gpu:
        data, target = data.cuda(), target.cuda()
    # forward pass: compute predicted outputs by passing inputs to the model
    output = model(data)
    output_softmax = torch.nn.functional.softmax(output, dim=1)  # softmax over the class dimension
    output_list.extend(output_softmax.data.cpu().squeeze().tolist())
    label_list.extend(target.data.cpu().squeeze().tolist())
    # calculate the batch loss
    loss = criterion(output, target)
    # update test loss
    test_loss += loss.item() * data.size(0)
    # convert output probabilities to predicted class
    _, pred = torch.max(output, 1)
    predict_list.extend(pred.data.cpu().squeeze().tolist()) 
    for i in range(target.shape[0]):
        for emotion in range(len(classes)):
            # per-class score plus a binary label encoded as 2 (positive) / 1 (negative),
            # matching pos_label=2 in metrics.roc_curve below
            predict_list_each_class[emotion].append(output_softmax[i, emotion].item())
            label_list_each_class[emotion].append(1 + int(target[i] == emotion))
    correct_tensor = pred.eq(target.data.view_as(pred))
    correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
    # calculate test accuracy for each object class

    for i in range(target.shape[0]):
        label = target.data[i]
        class_correct[label] += correct[i].item()
        class_total[label] += 1


# average test loss
test_loss = test_loss / len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))

for i in range(NUM_CLASSES):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))

print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))

# visualize the sample results
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
images.numpy()

# move model inputs to cuda, if GPU available
if train_on_gpu:
    images = images.cuda()

# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy())

# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(12, 4))
for idx in np.arange(10):
    ax = fig.add_subplot(2, 5, idx + 1, xticks=[], yticks=[])
    #    plt.imshow(images.cpu()[idx])
    #    imshow(images.cpu()[idx])
    imshow(images.cpu()[idx])
    ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]),
                 color=("green" if preds[idx] == labels[idx].item() else "red"))
plt.savefig("examples_prediction1.png")  # save before show, otherwise the saved figure can be blank
plt.show()

####################Plotting the confusion matrix##########################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
nb_classes = len(classes)

confusion_matrix = torch.zeros(nb_classes, nb_classes)
with torch.no_grad():
    for i, (images, labels) in enumerate(test_loader):
        if train_on_gpu:
            images = images.cuda()
            labels = labels.cuda()
        outputs = model(images)
        _, preds = torch.max(outputs, 1)
        for t, p in zip(labels.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1


# print(confusion_matrix)
# #to get per class accuracy
# print(confusion_matrix.diag()/confusion_matrix.sum(1))


# For Ploting Confusion Matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # print("Normalized confusion matrix")
    else:
        # print('Confusion matrix, without normalization')
        pass

    im = plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    # plt.colorbar()
    plt.colorbar(im, fraction=0.046, pad=0.04)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylim(-0.5, len(classes) - 0.5)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig("confusion_matrix.png")  # save before show so the written file isn't blank
    plt.show()


# Confusion matrix
names = tuple(classes)
plt.figure(figsize=(8, 7))
plot_confusion_matrix(confusion_matrix.int(), names)

print('==============================')
print('========sklearn metric========')
print('accuracy: \t', metrics.accuracy_score(label_list, predict_list))
print('classification report: \n', metrics.classification_report(label_list, predict_list))
print('f1 score: \t', metrics.f1_score(label_list, predict_list, average='macro'))
print('confusion matrix: \t', metrics.confusion_matrix(label_list, predict_list))
print('---')


fpr0, tpr0, thresholds = metrics.roc_curve(np.array(label_list_each_class[0]), np.array(predict_list_each_class[0]),
                                           pos_label=2)
print('0 auc: \t', metrics.auc(fpr0, tpr0))
fpr1, tpr1, thresholds = metrics.roc_curve(label_list_each_class[1], predict_list_each_class[1], pos_label=2)
print('1 auc: \t', metrics.auc(fpr1, tpr1))
fpr2, tpr2, thresholds = metrics.roc_curve(label_list_each_class[2], predict_list_each_class[2], pos_label=2)
print('2 auc: \t', metrics.auc(fpr2, tpr2))
fpr3, tpr3, thresholds = metrics.roc_curve(label_list_each_class[3], predict_list_each_class[3], pos_label=2)
print('3 auc: \t', metrics.auc(fpr3, tpr3))
fpr4, tpr4, thresholds = metrics.roc_curve(label_list_each_class[4], predict_list_each_class[4], pos_label=2)
print('4 auc: \t', metrics.auc(fpr4, tpr4))
fpr5, tpr5, thresholds = metrics.roc_curve(label_list_each_class[5], predict_list_each_class[5], pos_label=2)
print('5 auc: \t', metrics.auc(fpr5, tpr5))
fpr6, tpr6, thresholds = metrics.roc_curve(label_list_each_class[6], predict_list_each_class[6], pos_label=2)
print('6 auc: \t', metrics.auc(fpr6, tpr6))
fpr7, tpr7, thresholds = metrics.roc_curve(label_list_each_class[7], predict_list_each_class[7], pos_label=2)
print('7 auc: \t', metrics.auc(fpr7, tpr7))

lw = 2
plt.figure(figsize=(4, 4))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.plot(fpr0, tpr0,
         lw=lw, label=classes[0])  
plt.plot(fpr1, tpr1, lw=lw, label=classes[1])  
plt.plot(fpr2, tpr2, lw=lw, label=classes[2])
plt.plot(fpr3, tpr3, lw=lw, label=classes[3])
plt.plot(fpr4, tpr4, lw=lw, label=classes[4])
plt.plot(fpr5, tpr5, lw=lw, label=classes[5])
plt.plot(fpr6, tpr6, lw=lw, label=classes[6])
plt.plot(fpr7, tpr7, lw=lw, label=classes[7])

# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
plt.xlim([0.0, 0.1])
plt.ylim([0.8, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC)')
plt.legend(loc="lower right")
plt.savefig("ROC.png")

The error I got for the first method is:

line 669, in <module>
    predict_list_each_class[emotion].append(output_softmax[i, emotion].item())
IndexError: index 7 is out of bounds for dimension 1 with size 7
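
From the message it looks like the model only returns 7 values per sample, while NUM_CLASSES and len(classes) are 8, so the loop over emotions runs one index past the last column. A quick sanity check like the one below (just a sketch, reusing model, test_loader and train_on_gpu from above) should confirm whether the output width really disagrees with the class list:

# sketch: compare the model's output width with NUM_CLASSES / len(classes)
data, target = next(iter(test_loader))
if train_on_gpu:
    data = data.cuda()
with torch.no_grad():
    out = model(data)
print('model output shape:', out.shape)   # expected (batch_size, NUM_CLASSES)
print('NUM_CLASSES:', NUM_CLASSES, 'len(classes):', len(classes))
# if out.shape[1] != len(classes), the final layer was built for a different number of classes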

And the second method:

#SECOND METHOD

def test_class_probabilities(model, device, test_loader, which_class):
    model.eval()
    actuals = []
    probabilities = []
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            prediction = output.argmax(dim=1, keepdim=True)
            actuals.extend(target.view_as(prediction) == which_class)
            probabilities.extend(np.exp(output[:, which_class].cpu()))
    return [i.item() for i in actuals], [i.item() for i in probabilities]


actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 0)
fpr0, tpr0, _ = metrics.roc_curve(actuals, class_probabilities)
actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 1)
fpr1, tpr1, _ = metrics.roc_curve(actuals, class_probabilities)
actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 2)
fpr2, tpr2, _ = metrics.roc_curve(actuals, class_probabilities)
actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 3)
fpr3, tpr3, _ = metrics.roc_curve(actuals, class_probabilities)
actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 4)
fpr4, tpr4, _ = metrics.roc_curve(actuals, class_probabilities)
actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 5)
fpr5, tpr5, _ = metrics.roc_curve(actuals, class_probabilities)
actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 6)
fpr6, tpr6, _ = metrics.roc_curve(actuals, class_probabilities)
actuals, class_probabilities = test_class_probabilities(model, device, test_loader, 7)
fpr7, tpr7, _ = metrics.roc_curve(actuals, class_probabilities)
#roc_auc = metrics.auc(fpr, tpr)
lw = 2
plt.figure(figsize=(7, 7))
plt.plot(fpr0, tpr0,  lw=lw, label=classes[0])
plt.plot(fpr1, tpr1,  lw=lw, label=classes[1])
plt.plot(fpr2, tpr2,  lw=lw, label=classes[2])
plt.plot(fpr3, tpr3,  lw=lw, label=classes[3])
plt.plot(fpr4, tpr4,  lw=lw, label=classes[4])
plt.plot(fpr5, tpr5,  lw=lw, label=classes[5])
plt.plot(fpr6, tpr6,  lw=lw, label=classes[6])
plt.plot(fpr7, tpr7,  lw=lw, label=classes[7])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
plt.xlim([0.0, 0.1])
plt.ylim([0.8, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC)')
plt.legend(loc="lower right")
plt.savefig("ROC.png")
plt.show()

The error I got for the second method is:

    probabilities.extend(np.exp(output[:, which_class].cpu()))
IndexError: index 7 is out of bounds for dimension 1 with size 7
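
It is the same out-of-range index as in the first method, so I suspect the network really only produces 7 outputs. If that is the case, one way to keep the loop from going past the last column would be to size it from the output itself, something like this sketch (not what I ran; it reuses test_class_probabilities, model, device, test_loader and metrics from above):

# sketch: derive the number of classes from the model's actual output width
with torch.no_grad():
    sample_data, _ = next(iter(test_loader))
    n_outputs = model(sample_data.to(device)).shape[1]

fprs, tprs = [], []
for c in range(n_outputs):
    actuals, class_probabilities = test_class_probabilities(model, device, test_loader, c)
    fpr, tpr, _ = metrics.roc_curve(actuals, class_probabilities)
    fprs.append(fpr)
    tprs.append(tpr)

But I would still like to understand why the output size doesn't match NUM_CLASSES = 8 in the first place.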