import time
from typing import List, Optional

import torch

# Anomaly mode makes autograd report which forward op produced the failing backward node.
torch.autograd.set_detect_anomaly(True)

t1 = time.time()
def p_fun(n: int, rij: torch.Tensor, r_inner: float, r_outer: float) -> torch.Tensor:
    # Chebyshev polynomials T_0..T_n of rij rescaled onto (-1, 1);
    # entries outside (r_inner, r_outer) stay zero.
    fx = torch.zeros(1, 2, 10, 8, n + 1, dtype=rij.dtype, device=rij.device)
    mask = (rij > r_inner) & (rij < r_outer)
    x = 2 * (rij[mask] - r_inner) / (r_outer - r_inner) - 1
    fx[..., 0:1][mask] = 1          # T_0 = 1
    for i in range(1, n + 1):
        if i == 1:
            fx[..., 1:2][mask] = x  # T_1 = x
        if i >= 2:
            # T_i = 2*x*T_{i-1} - T_{i-2}: the multiply saves the slices
            # fx[..., 1] and fx[..., i-1] for backward, and the assignment
            # below then mutates fx in place, bumping its version counter.
            fx[..., i] = 2 * fx[..., 1] * fx[..., i - 1] - fx[..., i - 2]
    return fx
rij = torch.rand(1, 2, 10, 8, 1)
rij.requires_grad_(True)
Tk = p_fun(6, rij, 0, 6)
# Vector-Jacobian product of Tk w.r.t. rij with an all-ones cotangent.
mask: List[Optional[torch.Tensor]] = [torch.ones_like(Tk)]
dgn = torch.autograd.grad([Tk], [rij], grad_outputs=mask,
                          retain_graph=True, create_graph=True)[0]
t3 = time.time()
print('t3', t3 - t1)
Running this fails inside the torch.autograd.grad call:

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [1, 2, 10, 8]], which is output 0 of AsStridedBackward0, is at version 7; expected version 6 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
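The message is consistent with the recurrence: the multiplication in fx[..., i] = 2 * fx[..., 1] * fx[..., i-1] - fx[..., i-2] saves the slices it reads for the backward pass, and the assignment into fx[..., i] then mutates fx in place. Counting the writes (one for T_0, one for T_1, then five recurrence steps for i = 2..5), fx is at version 6 when the i = 6 multiply records its inputs, and the final write bumps it to version 7, which is exactly the mismatch reported.

One way to avoid the error is to never write into a tensor that autograd has already recorded: build each T_i as a fresh tensor and concatenate at the end. The sketch below assumes the intent is the Chebyshev recurrence T_i = 2x*T_{i-1} - T_{i-2} with zeros outside the (r_inner, r_outer) cutoff, as in the original; the name p_fun_oop is made up here, and the output shape is derived from rij rather than hard-coded.

import torch

def p_fun_oop(n: int, rij: torch.Tensor, r_inner: float, r_outer: float) -> torch.Tensor:
    # Out-of-place variant (hypothetical name): every T_i is a new tensor,
    # so nothing autograd has saved for backward is mutated later. Assumes n >= 1.
    inside = (rij > r_inner) & (rij < r_outer)
    # Rescale rij from (r_inner, r_outer) onto (-1, 1); zero outside the cutoff.
    x = torch.where(inside,
                    2 * (rij - r_inner) / (r_outer - r_inner) - 1,
                    torch.zeros_like(rij))
    terms = [inside.to(rij.dtype), x]                # T_0 = 1 inside, T_1 = x
    for i in range(2, n + 1):
        terms.append(2 * x * terms[-1] - terms[-2])  # T_i = 2x*T_{i-1} - T_{i-2}
    return torch.cat(terms, dim=-1)                  # (..., n+1), matching fx

rij = torch.rand(1, 2, 10, 8, 1, requires_grad=True)
Tk = p_fun_oop(6, rij, 0.0, 6.0)
dgn = torch.autograd.grad(Tk, rij, grad_outputs=torch.ones_like(Tk),
                          retain_graph=True, create_graph=True)[0]
print(dgn.shape)  # torch.Size([1, 2, 10, 8, 1])

Cloning the slices the right-hand side reads (e.g. 2 * fx[..., 1].clone() * fx[..., i-1].clone()) should also work, since the multiply then saves the clones rather than views of fx, but the list-and-cat version avoids the extra copies and keeps the whole graph out-of-place. Once the error is gone, the set_detect_anomaly(True) line can be dropped; it slows every run noticeably.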