How to define a custom loss with pure numpy operations?

I want to define a custom loss function that is a small modification of RMSE.

Specifically, I want to compute the RMSE only over the head and tail of the predictions (values above the 90th percentile or below the 10th percentile of each column).

Here is the code:

from util import *
import numpy as np
import torch.optim as opt
import torch.nn as nn
import torch

def getxy(seed=100):
  np.random.seed(seed)
  x = np.random.rand(100, 2)
  w = np.array([3, 7]) 
  b = 5 
  y = (x * w).sum(axis=1) + b  # remember to keep the shape the same as the network output!
  # y = 3 * x1 + 7 * x2 + 5
  return x, y.reshape(-1,1)

class MLP(nn.Module):
  def __init__(self):
    super(MLP, self).__init__()
    self.l1 = nn.Linear(2, 1, bias=True)

  def forward(self, x): 
    return self.l1(torch.tensor(x).double())

class myloss(nn.Module):
  def __init__(self):
    super().__init__()

  #TODO: HOW CAN I MAKE THIS WORK??
  def forward(self, x, y): 
    xarray = x.detach().numpy()
    mask = (xarray > np.quantile(xarray, 0.9, axis=0)) + (xarray < np.quantile(xarray, 0.1, axis=0))
    xarray = np.where(mask, xarray, y)
    l = mask.sum()
    #return ((xarray - y) ** 2).mean(axis=1)
    #return torch.tensor(torch.sum(torch.square(torch.tensor(xarray) - torch.tensor(y))) / l)
    return torch.mean(torch.square(torch.tensor(xarray) - torch.tensor(y)))

  #def backward(grad_output):
    #return grad_output

mlp = MLP().double()
loss = myloss()
o = opt.Adam(mlp.parameters(), lr=0.1)
#o = opt.SGD(mlp.parameters(), lr=0.05)

def train(n):
  x, y = getxy()
  for i in range(n):
    pred = mlp(x)
    o.zero_grad()
    los = loss(pred, y)
    los.backward()
    o.step()
    print('%dth: loss = %.3f'%(i, los.item()), mlp.l1.weight.detach().numpy(), mlp.l1.bias.detach().numpy())

if __name__ == '__main__':
  train(10000)

Can anyone help with this, or provide a demo that uses numpy freely in a custom loss function?

You can implement your own loss function by subclassing nn.Module (or overriding an existing loss class, as is done in the loss module), as long as you use only torch operations that are differentiable. The problem with your current version is that detach() and the round trip through numpy cut the computation graph, so no gradient can flow back to the model. I suspect all of the numpy operations your custom loss needs are already implemented in PyTorch.
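
For example, the quantile masking in your forward can be written with torch.quantile and torch.where so that gradients flow through pred. Here is a minimal sketch; the class name, the per-column thresholds, and averaging over only the masked elements are my assumptions, so adjust it to your exact definition of the head/tail RMSE:

import torch
import torch.nn as nn

class MaskedQuantileLoss(nn.Module):
  # Hypothetical example: mean squared error computed only over predictions
  # that fall above the 90th percentile or below the 10th percentile of each column.
  def forward(self, pred, target):
    # Detach only the thresholds: they act as a selection rule,
    # not something we want gradients for.
    hi = torch.quantile(pred.detach(), 0.9, dim=0)
    lo = torch.quantile(pred.detach(), 0.1, dim=0)
    mask = (pred > hi) | (pred < lo)
    # Zero out the error outside the head/tail while keeping pred in the graph.
    diff = torch.where(mask, pred - target, torch.zeros_like(pred))
    # Average the squared error over the selected elements only.
    return diff.pow(2).sum() / mask.sum().clamp(min=1)

With this, keep the target as a tensor (e.g. los = loss(pred, torch.as_tensor(y))) and los.backward() will update the weights of mlp as usual; if you want a true RMSE rather than the MSE shown here, take torch.sqrt of the returned value.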