Negative Log Likelihood Ratio Loss

Thanks a lot first! There is a loss function:

[image: screenshot of the loss formula; from the code below it is L = -(1/N) * Σ log(p_t / (1 - p_t)), where p_t is the softmax probability of the true class]

I wrote some code:

# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F

def one_hot(index, classes):
    # Build a (batch, classes) one-hot matrix on the CPU from a
    # (batch,) tensor of class indices.
    index = index.cpu()
    size = index.size() + (classes,)
    view = index.size() + (1,)
    mask = torch.zeros(*size)
    index = index.view(*view)
    return mask.scatter_(1, index, 1.0)

class focal_revised_ce(nn.Module):
    def __init__(self, eps=1e-7):
        super(focal_revised_ce, self).__init__()
        self.eps = eps

    def forward(self, input, target):
        y = one_hot(target, input.size(-1))
        # Softmax over the class dimension (dim should be given explicitly).
        p_soft = F.softmax(input, dim=-1).cpu()
        loss = 0.0
        # range(target.size(0)) visits every sample; the original
        # range(0, target.size(0) - 1) skipped the last one.
        for i in range(target.size(0)):
            # Probability assigned to the true class of sample i.
            p_t = torch.sum(y[i] * p_soft[i])
            # self.eps is never used; clamping p_t to
            # [self.eps, 1 - self.eps] would keep this log finite.
            loss += torch.log(p_t / (1.0 - p_t))
        # Negate so minimizing the loss maximizes the likelihood ratio,
        # matching the name "negative log likelihood ratio".
        loss = -loss / target.size(0)
        return loss

Then sometimes the loss is inf. What happened? Thanks a lot!
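
Here is a minimal way to reproduce the inf (the logits are made up; they just model one very confident prediction):

import torch

logits = torch.tensor([20.0, -20.0])
p = torch.softmax(logits, dim=-1)[0]  # probability of class 0
print(p)                              # tensor(1.): 1 - p underflows in float32
print(torch.log(p / (1.0 - p)))       # tensor(inf), since p / 0.0 is inf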

I had a problem like this and solved it by changing the tensors to double type.

torch.Tensor([...]).double()
or
torch.Tensor([...]).type(torch.DoubleTensor)

Or you can set the default tensor type:

torch.set_default_tensor_type('torch.DoubleTensor')

Your implementation will be slower, but, I hope, it will work :slight_smile:
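
For example, here is a minimal sketch of applying this to the loss above (it assumes the focal_revised_ce class and one_hot helper from the question; in float64, 1 - p_t no longer underflows to zero for moderately confident predictions):

import torch

torch.set_default_tensor_type('torch.DoubleTensor')

criterion = focal_revised_ce()
logits = torch.randn(4, 10)          # created as float64 now
target = torch.randint(0, 10, (4,))  # class indices stay int64
print(criterion(logits, target))     # finite as long as p_t does not saturate

An alternative without the float64 slowdown would be to clamp p_t to [eps, 1 - eps] using the eps that focal_revised_ce already defines, which keeps the log finite in float32 as well.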