Hello,

I’m trying to train a PINN and I’m computing the physics loss using FEniCS so I am having to detach a torch tensor and then I have to provide an implementation of the gradient. When I train the loss stays constant after each epoch, it doesnt increase or decrease. This probably means that the gradient of the loss that torch is obtaining is zero. I would appreciate some help figuring out what I’m doing wrong as well as any suggestions on the best practice for training PINNs this way.

Here is the implementation that I’m using:

This is the autograd subclass that Im using:

```
class PDE_Loss(torch.autograd.Function):
def __init__(self):
super(PDE_Loss, self).__init__()
#p_dofs: this is the output of the neural net
#A_matrix_params: this is the input to the neural net
#formulation is a class that computes the necessary vectors and matrices using
#FEniCS
@staticmethod
def forward(
ctx,
p_dofs: torch.tensor,
A_matrix_params: torch.tensor,
lossfun: torch.nn.Module,
formulation: Dg.Darcy_primal_formulation,
f: torch.tensor,
) -> torch.tensor:
result = lossfun(
torch.matmul(
torch.from_numpy(
formulation.assemble_linear_system(A_matrix_params.detach().numpy())
),
p_dofs,
),
f,
)
ctx.formulation = formulation
ctx.lossfun = lossfun
ctx.save_for_backward(
p_dofs,
A_matrix_params,
f,
result,
)
# print(f"{result=}")
return result
#Gradient is estimated using finite differences
@staticmethod
def backward(ctx, grad_output):
eps = 1e-8
(
p_dofs,
A_matrix_params,
f,
forward_res,
) = ctx.saved_tensors
formulation = ctx.formulation
lossfun = ctx.lossfun
grad = []
for i in range(3):
A_matrix_params_eps_away = A_matrix_params.clone()
A_matrix_params_eps_away[i] += eps
result_eps_away = lossfun(
torch.matmul(
torch.from_numpy(
formulation.assemble_linear_system(
A_matrix_params_eps_away.numpy()
)
),
p_dofs,
),
f,
)
grad.append(((result_eps_away - forward_res) / eps) * grad_output)
return None, torch.tensor(np.array(grad)), None, None, None
```