AUC stays at 0.5, does not change as iterations increase

I'm new to PyTorch. I have a sparse dataset in libsvm format, and I'm using a simple NN model for binary classification. During training, the AUC of my model stays at 0.5, which means it is no better than random guessing. I don't know what the problem is. I'm not sure if it is because the network is too simple.

My features are already scaled to [0, 1].
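(The scaling step itself is not shown in the code below. For reference, a minimal sketch of how it could be done on the sparse matrix, assuming sklearn's MaxAbsScaler, which preserves sparsity and, for non-negative features, maps them into [0, 1]:

import sklearn.datasets
from sklearn.preprocessing import MaxAbsScaler

# Load the sparse libsvm data (X is a scipy sparse matrix)
X, y = sklearn.datasets.load_svmlight_file('train.libsvm')

# Divide each feature by its maximum absolute value; sparsity is preserved
scaler = MaxAbsScaler()
X_scaled = scaler.fit_transform(X)
)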

My code:

import torch
import sklearn.datasets
import numpy as np

import torch.nn as nn
import torch.nn.functional as F

# Load the sparse libsvm data (a scipy sparse matrix plus a label array)
data = sklearn.datasets.load_svmlight_file('train.libsvm')
coo = data[0].tocoo()
y = data[1]

# Build a torch sparse tensor from the COO indices and values
indices = np.vstack((coo.row, coo.col))
values = coo.data

i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = coo.shape
x_data = torch.sparse.FloatTensor(i, v, torch.Size(shape))
# CrossEntropyLoss expects the targets as a LongTensor
y_data = torch.from_numpy(y).type(torch.LongTensor)

# our class must extend nn.Module
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(393216, 1024)
        self.fc2 = nn.Linear(1024, 2)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.tanh(x)
        x = self.fc2(x)
        return x

    # This function takes an input and predicts the class (0 or 1)
    def predict(self, x):
        # Apply softmax to the output
        pred = F.softmax(self.forward(x), dim=1)
        ans = []
        # Pick the class with the maximum probability
        for t in pred:
            if t[0] > t[1]:
                ans.append(0)
            else:
                ans.append(1)
        return torch.tensor(ans)

model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-6, momentum=0.9, nesterov=True)

# Number of epochs
epochs = 100
# List to store losses
losses = []
for i in range(epochs):
    y_pred = model.forward(x_data)
    print(y_pred)
    loss = criterion(y_pred, y_data)
    losses.append(loss.item())

    # Clear the previous gradients
    optimizer.zero_grad()
    # Compute gradients
    loss.backward()
    # Adjust weights
    optimizer.step()

from sklearn.metrics import roc_auc_score
print(roc_auc_score(y_data, model.predict(x_data)))
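One thing I was not sure about: roc_auc_score expects a continuous score rather than the hard 0/1 labels that predict() returns, so maybe I should score with the positive-class probability instead. A minimal sketch (using the model and x_data from above):

with torch.no_grad():
    # The probability of class 1 serves as the ranking score for AUC
    probs = F.softmax(model(x_data), dim=1)[:, 1]
print(roc_auc_score(y_data.numpy(), probs.numpy()))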