Loss is always 0 and not changing

I am training a simple feed-forward network for binary sentiment classification in PyTorch, but the loss printed after every epoch is 0.0000 from the very first epoch and never changes. Here is my code and the full output:
CODE:
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from Sentiment_Analysis import create_feature_sets_and_labels
import pickle
import numpy as np

train_x, train_y, test_x, test_y = create_feature_sets_and_labels('D:/PycharmProjects/Pytorch/pos.txt', 'D:/PycharmProjects/Pytorch/neg.txt')

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

hm_epochs = 23
batch_size = 563

print("Input Tensor: " + str(train_x[0]))
print("Label Tensor: " + str(train_y[0]))
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(423, 500)
        self.fc2 = nn.Linear(500, 500)
        self.fc3 = nn.Linear(500, 500)
        self.fc4 = nn.Linear(500, 2)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x

net = Net().to(device)
print(net)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

total_step = len(train_x)

for epoch in range(hm_epochs):
    epoch_loss = 0
    i = 0

    while i < len(train_x):
        start = i
        end = i + batch_size
        batch_x = torch.FloatTensor(train_x[start:end][0])
        # batch_x = batch_x.reshape(-1, 423)
        batch_y = torch.LongTensor(train_y[start:end][1])

        # Move tensors to the configured device
        inputs = batch_x.to(device)
        inputs = Variable(inputs, requires_grad=True)
        labels = batch_y.to(device)
        labels = Variable(labels, requires_grad=False)
        # Forward pass
        outputs = net(inputs)
        outputs = outputs.reshape(2, 1)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        i += batch_size
        epoch_loss += loss.item()

    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, hm_epochs, loss.data[0]))

OUTPUT:
C:\Users\Reza\Anaconda3\python.exe D:/PycharmProjects/Pytorch/Neural_Network.py
Input Tensor: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Label Tensor: [1, 0]
Net(
  (fc1): Linear(in_features=423, out_features=500, bias=True)
  (fc2): Linear(in_features=500, out_features=500, bias=True)
  (fc3): Linear(in_features=500, out_features=500, bias=True)
  (fc4): Linear(in_features=500, out_features=2, bias=True)
)
Epoch [1/23], Loss: 0.0000
D:/PycharmProjects/Pytorch/Neural_Network.py:72: UserWarning: invalid index of a 0-dim tensor. This will be an error in PyTorch 0.5. Use tensor.item() to convert a 0-dim tensor to a Python number
print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, hm_epochs, loss.data[0]))
Epoch [2/23], Loss: 0.0000
Epoch [3/23], Loss: 0.0000
Epoch [4/23], Loss: 0.0000
Epoch [5/23], Loss: 0.0000
Epoch [6/23], Loss: 0.0000
Epoch [7/23], Loss: 0.0000
Epoch [8/23], Loss: 0.0000
Epoch [9/23], Loss: 0.0000
Epoch [10/23], Loss: 0.0000
Epoch [11/23], Loss: 0.0000
Epoch [12/23], Loss: 0.0000
Epoch [13/23], Loss: 0.0000
Epoch [14/23], Loss: 0.0000
Epoch [15/23], Loss: 0.0000
Epoch [16/23], Loss: 0.0000
Epoch [17/23], Loss: 0.0000
Epoch [18/23], Loss: 0.0000
Epoch [19/23], Loss: 0.0000
Epoch [20/23], Loss: 0.0000
Epoch [21/23], Loss: 0.0000
Epoch [22/23], Loss: 0.0000
Epoch [23/23], Loss: 0.0000

Process finished with exit code 0
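
I suspect the problem is in how I build the batches (train_x[start:end][0] only picks out a single sample) and in the shapes I pass to nn.CrossEntropyLoss. For comparison, here is a minimal sketch of the batch construction I think I actually want: whole slices of shape (batch, 423), and labels converted from one-hot vectors to class indices, so that the loss sees inputs of shape (N, 2) and targets of shape (N,). The random tensors are just stand-ins for my real feature sets and a plain nn.Linear stands in for my Net:

import torch
import torch.nn as nn

# Stand-in data: my real train_x is a list of 423-dim bag-of-words vectors
# and train_y is a list of one-hot labels like [1, 0] / [0, 1].
train_x = torch.rand(2000, 423)
train_y = torch.eye(2)[torch.randint(0, 2, (2000,))]   # one-hot, shape (2000, 2)

net = nn.Linear(423, 2)            # stand-in for my Net
criterion = nn.CrossEntropyLoss()

batch_size = 563
start, end = 0, batch_size

# Take the whole slice, not a single element of the slice.
batch_x = train_x[start:end]                 # shape (batch, 423)
# CrossEntropyLoss expects class indices, not one-hot vectors.
batch_y = train_y[start:end].argmax(dim=1)   # shape (batch,)

outputs = net(batch_x)                       # shape (batch, 2), no reshape(2, 1)
loss = criterion(outputs, batch_y)
print(loss.item())                           # non-zero, roughly ln(2) for random data

In my real code I would slice train_x/train_y the same way inside the while loop and drop the outputs.reshape(2, 1) line. Is this the right direction, or is the constant zero loss caused by something else (the learning rate of 0.1, the requires_grad=True on the inputs, ...)?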