Custom Loss Function: Clarification

I have a custom loss function defined like this:

import torch

class Quaternion_Multiplicative_Error(torch.nn.Module):
    def __init__(self):
        super(Quaternion_Multiplicative_Error, self).__init__()
        print("QME optimized")
        # Plain tensor attribute: it is not registered with the module,
        # so .to(device) / .cuda() will not move it (the cause of the error below).
        self.conj = torch.tensor([1, -1, -1, -1], requires_grad=False)

    def qme(self, pred, true):
        true = torch.mul(true, self.conj)
        pro = self.hamilton_product(pred, true)  # defined elsewhere
        img_part = pro[1:]
        # torch.norm keeps the computation in the autograd graph;
        # np.linalg.norm would detach it and fail on CUDA tensors.
        norm = torch.norm(img_part, p=1)
        return 2 * norm

    def forward(self, pred, true):
        batch_size = pred.shape[0]
        return sum(self.qme(x, y) for x, y in zip(pred, true)) / batch_size
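The code calls self.hamilton_product, which isn't shown above. A minimal sketch of the standard Hamilton product, assuming the [w, x, y, z] component ordering implied by pro[1:], could look like this:

    def hamilton_product(self, q1, q2):
        # Standard Hamilton product of two quaternions in [w, x, y, z] order.
        w1, x1, y1, z1 = q1
        w2, x2, y2, z2 = q2
        return torch.stack([
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,  # real part
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,  # i
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,  # j
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,  # k
        ])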

I need to use this custom loss function in another main class, which looks something like this:

class FusionCriterion_LearnParms(torch.nn.Module):
    def __init__(self, loss_pos="L1Loss", loss_ori="QMELoss", alpha=0.0, beta=-3.0):
        super(FusionCriterion_LearnParms, self).__init__()
        self.loss_pos = self.select_loss(loss_pos)
        self.loss_ori = self.select_loss(loss_ori)
        self.alpha = torch.nn.Parameter(torch.tensor([alpha], dtype=torch.double), requires_grad=True)
        self.beta = torch.nn.Parameter(torch.tensor([beta], dtype=torch.double), requires_grad=True)
        
    def select_loss(self, loss):
        if loss == "L1Loss":
            return torch.nn.L1Loss()    
        elif loss == "MSELoss":
            return torch.nn.MSELoss()
        else:
            return Quaternion_Multiplicative_Error()

    def forward(self, predicted, actual):
        position_loss = (torch.exp(-self.alpha) * self.loss_pos(predicted[:, :3], actual[:, :3])) + self.alpha
        orientation_loss = (torch.exp(-self.beta) * self.loss_ori(predicted[:, 3:], actual[:, 3:])) + self.beta
        total_loss = position_loss + orientation_loss
        return total_loss
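As a side note, this forward implements the learned loss weighting L = L_pos * exp(-alpha) + alpha + L_ori * exp(-beta) + beta. Because alpha and beta are nn.Parameters of the criterion itself, they have to be passed to the optimizer along with the model's weights; a hypothetical training loop (model, loader, and learning rate are placeholders) might look like:

model = PoseNet().to("cuda")                      # hypothetical model
criterion = FusionCriterion_LearnParms().to("cuda")
# The criterion owns learnable parameters (alpha, beta), so include them:
optimizer = torch.optim.Adam(
    list(model.parameters()) + list(criterion.parameters()), lr=1e-4
)

for inputs, targets in loader:                    # hypothetical DataLoader
    inputs, targets = inputs.to("cuda"), targets.to("cuda")
    optimizer.zero_grad()
    loss = criterion(model(inputs), targets)
    loss.backward()
    optimizer.step()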

I get this error:

File "fusion.py", line 62, in qme
    true = torch.mul(true, self.conj)
RuntimeError: expected device cuda:0 but got device cpu

I have moved all of the classes above (nn.Module subclasses) to torch.device("cuda"),
and I was hoping that all of their member tensors would also be moved to "cuda".

Instead of:

self.conj = torch.tensor([1, -1, -1, -1], requires_grad=False)

perhaps:

self.register_buffer('conj', torch.tensor([1, -1, -1, -1]))

so that it is transferred to the GPU as well?
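For reference, .to(device) only moves a module's registered parameters and buffers, not plain tensor attributes. A minimal sketch of the buffered version, plus a quick device check:

class Quaternion_Multiplicative_Error(torch.nn.Module):
    def __init__(self):
        super(Quaternion_Multiplicative_Error, self).__init__()
        # A buffer follows .to()/.cuda() and is saved in the state_dict,
        # but is excluded from model.parameters().
        self.register_buffer('conj', torch.tensor([1, -1, -1, -1]))

qme = Quaternion_Multiplicative_Error().to("cuda")
print(qme.conj.device)  # cuda:0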


So this means we can keep it as a constant, and this tensor will also not be optimized, right?
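A quick check (sketch) that a registered buffer is a constant from the optimizer's point of view:

m = Quaternion_Multiplicative_Error()
print(list(m.parameters()))               # [] -- buffers are never optimized
print([n for n, _ in m.named_buffers()])  # ['conj']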

Yes, it works!

Thanks