Problem with updating the loss function in a logistic regression

Why does the loss function not update? Is there something wrong with the code?
Thank you

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn import datasets, model_selection

iris_dataset = datasets.load_iris()
X = iris_dataset['data']
Y = iris_dataset['target']

#create test and train sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, test_size=0.2, random_state=42)

feat_x_train = torch.from_numpy(X_train)
feat_y_train = torch.from_numpy(y_train)

feat_x_test = torch.from_numpy(X_test)
feat_y_test = torch.from_numpy(y_test)

x_data_train = Variable(torch.Tensor(X_train)).float()
y_data_train = Variable(torch.Tensor(y_train)).type(torch.LongTensor)

#logistic regression

class LogisticRegress(nn.Module):
    def __init__(self, inp, out):
        super(LogisticRegress, self).__init__()
        self.linear = nn.Linear(inp, out)

    def forward(self, x):
        out = F.sigmoid(self.linear(x))
        return out

#logistic model
#4 features and 3 outputs
model = LogisticRegress(4,3)

#Loss Function
loss = nn.CrossEntropyLoss()

#have a look at model parameters
for param in model.parameters():
    print(param)

#optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

#Train the model

loss_list = []
count_list = []
count = 0
num_epochs = 2000

for epoch in range(num_epochs):
    y_pred = model(x_data_train)
    _loss = loss(y_pred, y_data_train)

    optimizer.zero_grad()
    _loss.backward()
    optimizer.step()

    loss_list.append(_loss.item())
    count_list.append(count)
    count += 1

    if epoch % 100 == 0:
        print('Epoch [%d/%d] Loss: %.4f' % (epoch + 1, num_epochs, _loss.item()))

Not sure, I can’t see anything wrong there. I have almost identical code here (https://github.com/rasbt/deep-learning-book/blob/master/code/model_zoo/pytorch_ipynb/logistic-regression.ipynb, at the bottom under “High-level implementation using the nn.Module API”) and here (https://github.com/rasbt/deep-learning-book/blob/master/code/model_zoo/pytorch_ipynb/softmax-regression.ipynb), and it works fine.
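For reference, the general pattern those notebooks follow looks roughly like this (a minimal sketch, not their exact code; the features/targets here are placeholders). Note that nn.CrossEntropyLoss applies log-softmax internally, so the model returns raw logits:

import torch
import torch.nn as nn

# Minimal training-loop sketch (assumed pattern, not the exact notebook code)
model = nn.Linear(4, 3)                  # raw logits; CrossEntropyLoss adds log-softmax
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

features = torch.randn(120, 4)           # placeholder inputs
targets = torch.randint(0, 3, (120,))    # placeholder class indices (int64)

for epoch in range(100):
    logits = model(features)
    cost = loss_fn(logits, targets)
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()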

Probably not the root of the problem, but you can replace

x_data_train = Variable(torch.Tensor(X_train)).float()
y_data_train = Variable(torch.Tensor(y_train)).type(torch.LongTensor)

with

x_data_train = torch.tensor(X_train, dtype=torch.float32)
y_data_train = torch.tensor(y_train, dtype=torch.int32)
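A quick way to confirm the resulting dtypes (a small check; assumes X_train and y_train are the NumPy arrays from above):

x_data_train = torch.tensor(X_train, dtype=torch.float32)
y_data_train = torch.tensor(y_train, dtype=torch.int32)
print(x_data_train.dtype, y_data_train.dtype)  # torch.float32 torch.int32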

Thank you Sebastian for your time.
I followed your advice and replaced the lines as you suggested, and got this error: Expected object of type torch.LongTensor but found type torch.IntTensor for argument #2 ‘target’.

Oh, I just saw that “torch.LongTensor” is actually a 64-bit tensor (I always confuse these). To get the exact equivalent in terms of types, you would need to do

y_data_train = torch.tensor(y_train, dtype=torch.long)

or

y_data_train = torch.tensor(y_train, dtype=torch.int64)

(here’s a reference of the different Tensor types: https://pytorch.org/docs/stable/tensors.html#torch-tensor)
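A quick sanity check of the aliases and the target dtype requirement (a minimal sketch):

import torch
import torch.nn as nn

print(torch.long == torch.int64)  # True: 'long' is an alias for 64-bit integers

loss_fn = nn.CrossEntropyLoss()
logits = torch.randn(4, 3)                               # 4 samples, 3 classes
targets = torch.tensor([0, 2, 1, 0], dtype=torch.long)
print(loss_fn(logits, targets))                          # works; int32 targets raise a type error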