I wanted to perform CrossEntropyLoss() with my custom dataset, for an experiment, but I am not being able to perform the loss operation. My code goes as follows:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.layer1 = nn.Linear(2, 10)
self.layer2 = nn.Linear(10, 1)
def forward(self, x):
x = F.relu(self.layer1(x))
x = (self.layer2(x))
return x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device=device)
loss_fn = nn.CrossEntropyLoss()
learning_rate = 1e-3
epochs = 20
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
inputs = torch.tensor([
[0.,0.],
[0.,1.],
[1.,0.],
[1.,1.]
]).to(device=device)
targets = torch.tensor([
[0],
[1],
[1],
[0]
], dtype=torch.float32).to(device=device)
model.train()
for epoch in range(epochs):
pred_output = model(inputs)
print(pred_output)
print(targets.dtype)
print(pred_output.dtype)
loss = loss_fn(targets, pred_output)
print(loss)
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
print()
break
with this code snippet, I am getting the following error-
tensor([[0.1445],
[0.3038],
[0.1030],
[0.2709]], device='cuda:0', grad_fn=<AddmmBackward>)
torch.float32
torch.float32
Traceback (most recent call last):
File ".\main.py", line 58, in <module>
loss = loss_fn(targets, pred_output)
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\modules\loss.py", line 1047, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\functional.py", line 2693, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\functional.py", line 2388, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target' in call to _thnn_nll_loss_forward
As soon as I change the dtype of targets to torch.long by:
targets = torch.tensor([
[0],
[1],
[1],
[0]
], dtype=torch.long).to(device=device)
I get the following error:
tensor([[0.1445],
[0.3038],
[0.1030],
[0.2709]], device='cuda:0', grad_fn=<AddmmBackward>)
torch.float32
torch.float32
Traceback (most recent call last):
File ".\main.py", line 58, in <module>
loss = loss_fn(targets, pred_output)
File "C:\Users\bagga\anaconda3\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\Users\bagga\anaconda3\lib\site-packages\torch\nn\modules\loss.py", line 1047, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "C:\Users\bagga\anaconda3\lib\site-packages\torch\nn\functional.py", line 2693, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "C:\Users\bagga\anaconda3\lib\site-packages\torch\nn\functional.py", line 2388, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target' in call to _thnn_nll_loss_forward
bagga@harjyot-bagga ~\Documents\GitHub\AI-Playground\Pattern Detection Experiment main base 3.8.10 ERROR python .\main.py
tensor([[0.4545],
[0.3657],
[0.3480],
[0.2857]], device='cuda:0', grad_fn=<AddmmBackward>)
torch.int64
torch.float32
Traceback (most recent call last):
File ".\main.py", line 57, in <module>
loss = loss_fn(targets, pred_output)
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\modules\loss.py", line 1047, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\functional.py", line 2693, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "C:\Users\user\anaconda3\lib\site-packages\torch\nn\functional.py", line 1672, in log_softmax
ret = input.log_softmax(dim)
RuntimeError: "host_softmax" not implemented for 'Long'
What am I doing wrong? And what should be done?