This is my loss function
def loss(output, target, loss_function=nn.MSELoss(), scale=1, *args, **kwargs):
output = output.t()
target = Variable(torch.LongTensor(target[0]), requires_grad=False)
pprint(locals())
if Config().cuda: target = target.cuda()
log.debug('i, o sizes: {} {}'.format(output.size(), target.size()))
return loss_function(output, target.float())
When I execute this function I get the following error
2018-03-23 16:49:35,103:main:DEBUG : loss:: i, o sizes: torch.Size([32, 56]) torch.Size([32, 56])
2018-03-23 16:49:35,104:main:ERROR : experiment:: ####################
Traceback (most recent call last):
return loss_function(output, target.float())
File "/home/paarulakan/environments/python/pytorch-py35/lib/python3.5/site-packages/torch/nn/modules/module.py", line 325, in __call__
result = self.forward(*input, **kwargs)
File "/home/paarulakan/environments/python/pytorch-py35/lib/python3.5/site-packages/torch/nn/modules/loss.py", line 329, in forward
return F.mse_loss(input, target, size_average=self.size_average, reduce=self.reduce)
RuntimeError: Expected object of type Variable[torch.cuda.LongTensor] but found type Variable[torch.cuda.FloatTensor] for argument #1 'target'
(pytorch-py35) paarulakan@karunthuLai:~/projects/saama/ner-extended$
But when I don’t convert the target into float tensor I get the following error.
2018-03-23 16:52:45,875:main:DEBUG : loss:: i, o sizes: torch.Size([32, 56]) torch.Size([32, 56])
2018-03-23 16:52:45,875:main:ERROR : experiment:: ####################
Traceback (most recent call last):
return loss_function(output, target)
File "/home/paarulakan/environments/python/pytorch-py35/lib/python3.5/site-packages/torch/nn/modules/module.py", line 325, in __call__
result = self.forward(*input, **kwargs)
File "/home/paarulakan/environments/python/pytorch-py35/lib/python3.5/site-packages/torch/nn/modules/loss.py", line 329, in forward
return F.mse_loss(input, target, size_average=self.size_average, reduce=self.reduce)
RuntimeError: mse_loss_forward is not implemented for type torch.cuda.LongTensor
Same thing is also happening with L1Loss too