Implementing loss function for bivariate Gaussian distribution

Hi,

I’m trying to implement a negative log likelihood loss function for a bivariate Gaussian distribution using torch MultivariateNormal.
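
Concretely, I want to minimize loss = -(1/N) * sum_i log N(target_i | mu_i, Sigma_i), where each row of the network output parametrizes the mean vector mu_i and covariance matrix Sigma_i for that sample.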

My current implementation is as follows:

import torch
from torch.distributions.multivariate_normal import MultivariateNormal as MVNormal

def Gaussian2DLikelihood(outputs, targets):
    # mux is the mean of x
    # muy is the mean of y
    # sx, sy are the standard deviations, must be > 0
    # corr is the correlation, -1 < corr < 1


    batch_size = targets.shape[0]
    mux, muy, sx, sy, corr = outputs[:,0], outputs[:,1], outputs[:,2], outputs[:,3], outputs[:,4]
    sx = torch.exp(sx)       # exp keeps the standard deviations strictly positive
    sy = torch.exp(sy)
    corr = torch.tanh(corr)  # tanh keeps the correlation in (-1, 1)
    mu_xy = torch.stack([mux, muy], dim=1)  # per-sample mean vectors
    print('mu_xy.shape:', mu_xy.shape)  # mu_xy.shape: torch.Size([8, 2])

    cov_xy = torch.zeros(batch_size, 2, 2, device=outputs.device)  # covariance matrices
    cov_xy[:, 0, 0] = sx * sx
    cov_xy[:, 0, 1] = corr * sx * sy
    cov_xy[:, 1, 0] = corr * sx * sy
    cov_xy[:, 1, 1] = sy * sy
    print('cov_xy.shape:', cov_xy.shape)  # cov_xy.shape: torch.Size([8, 2, 2])

    
    normal = MVNormal(loc=mu_xy, covariance_matrix=cov_xy)
    # log_prob scores each target row under its matching distribution -> shape [batch_size]
    loglik = normal.log_prob(targets)

    return -loglik.mean()
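
While writing this up I also tried an alternative that hands MultivariateNormal the lower-triangular Cholesky factor of the covariance via scale_tril instead of the full matrix, which I believe avoids the factorization it would otherwise do internally. A sketch, assuming the same outputs layout as above (the function name is just mine):

def Gaussian2DLikelihoodCholesky(outputs, targets):
    # Same parametrization, but pass the Cholesky factor L of the covariance
    mux, muy = outputs[:, 0], outputs[:, 1]
    sx, sy = torch.exp(outputs[:, 2]), torch.exp(outputs[:, 3])
    corr = torch.tanh(outputs[:, 4])

    mu_xy = torch.stack([mux, muy], dim=1)

    L = torch.zeros(outputs.shape[0], 2, 2, device=outputs.device)
    L[:, 0, 0] = sx                              # L @ L.T reproduces
    L[:, 1, 0] = corr * sy                       # [[sx^2,       corr*sx*sy],
    L[:, 1, 1] = sy * torch.sqrt(1 - corr ** 2)  #  [corr*sx*sy, sy^2      ]]

    normal = MVNormal(loc=mu_xy, scale_tril=L)
    return -normal.log_prob(targets).mean()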

A test:
Gaussian2DLikelihood(torch.rand(8, 5), torch.rand(8, 2))
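
To convince myself the log_prob values are right, I also compared them against the closed-form bivariate Gaussian log-density (bivariate_logpdf is just a helper name I made up); it should agree with normal.log_prob up to floating-point error:

import math

def bivariate_logpdf(x, y, mux, muy, sx, sy, corr):
    # Closed-form log-density of N([x, y] | mu, Sigma) with
    # Sigma = [[sx^2, corr*sx*sy], [corr*sx*sy, sy^2]]
    zx = (x - mux) / sx
    zy = (y - muy) / sy
    one_minus_r2 = 1 - corr ** 2
    quad = (zx ** 2 - 2 * corr * zx * zy + zy ** 2) / (2 * one_minus_r2)
    return -quad - torch.log(2 * math.pi * sx * sy * torch.sqrt(one_minus_r2))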

I’m not quite sure it’s correct; I’ve also seen examples where people work in log space and then use the log-sum-exp trick, and I’m not sure whether I need that here.
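
My current understanding (please correct me if I’m wrong) is that log-sum-exp only comes in when the model predicts a mixture of Gaussians, e.g. a mixture density network, where the per-sample likelihood is a weighted sum over K components. A sketch of what I mean, with hypothetical [batch, K] tensors pi_logits and component_log_probs:

def mixture_nll(pi_logits, component_log_probs):
    # pi_logits:           [batch, K] unnormalized mixture weights
    # component_log_probs: [batch, K] log N(target | mu_k, Sigma_k) per component
    log_pi = torch.log_softmax(pi_logits, dim=-1)
    # log sum_k pi_k * p_k, computed stably as logsumexp(log_pi + log_p)
    loglik = torch.logsumexp(log_pi + component_log_probs, dim=-1)
    return -loglik.mean()

With a single Gaussian per sample, I think log_prob already gives the log-density directly, so there is nothing to sum-exp over.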

Any ideas?