Is this code snippet compatible with backward?

Hi, it seems that my model fails to learn, i.e. the loss stays constant after a few epochs. I'm wondering whether there is a bug in my code, but I can't find any. Just to make sure: is this code snippet compatible with autograd?
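For context, the quantity DEL below is (roughly) the forced discrete Euler-Lagrange residual, in the usual variational-integrator notation (my conventions for the forcing terms may differ slightly):

$$D_2 L_d(q_{k-1}, q_k) + D_1 L_d(q_k, q_{k+1}) + \text{(discrete forcing terms)} = 0,$$

where $L_d$ is the discrete Lagrangian represented by the network and $D_i$ denotes the partial derivative with respect to the $i$-th argument; those derivatives are the torch.autograd.grad calls with respect to qk in the snippet.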

import torch
from torch.autograd import grad


def forward(self, x):
    # Force gradient tracking so torch.autograd.grad works even if the caller
    # runs the model under torch.no_grad()
    with torch.set_grad_enabled(True):
        self.n = x.shape[0]
        if self.forced:
            # x holds (q_past, qk, q_next, u_past, u, u_next) along dim 1
            self.d = x.shape[1] // (3 * 2)
            q_past, qk, q_next, u_past, u, u_next = torch.split(x, self.d, 1)
        else:
            # x holds (q_past, qk, q_next) along dim 1
            self.d = x.shape[1] // 3
            q_past, qk, q_next = torch.split(x, self.d, 1)

        # qk (and q_next for single samples) must require grad so that
        # torch.autograd.grad can differentiate w.r.t. them below
        qk = qk.requires_grad_(True)
        if len(x) == 1:
            q_next = q_next.requires_grad_(True)
        
        # Position/velocity pairs for the two adjacent intervals
        q1, qd1 = self.input_scaling(qk, q_next)
        q2, qd2 = self.input_scaling(q_past, qk)

        # Discrete Lagrangian evaluated on each interval
        L1 = self.lagrangian(q1, qd1)
        L2 = self.lagrangian(q2, qd2)
        
        # Discrete Euler-Lagrange residual: derivative of the summed discrete
        # Lagrangians w.r.t. qk, plus the discrete forcing terms when forced
        f = L1 + L2
        if self.forced:
            DEL = grad(f.sum(), qk, create_graph=True)[0] + (u_past + u) + 0.25*(u + u_next)
        else:
            DEL = grad(f.sum(), qk, create_graph=True)[0]
        # The loss layer will be applied outside the Network class
        
        if len(x) == 1:
            # Single-sample case: also return the Jacobian of DEL w.r.t. q_next,
            # assembled one coordinate of DEL at a time
            J = []
            for i in range(self.d):
                DEL_i = DEL[:, i][:, None]
                J_i = grad(DEL_i.sum(), q_next, create_graph=True)[0]
                J.append(J_i)
            Jac = torch.stack(J, 0).permute(1, 0, 2)
            return DEL, Jac
        else:
            return DEL
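
In case it helps with the diagnosis, this is roughly how I check whether gradients actually reach the network parameters after a backward pass. Here model, x_batch, and target are placeholders for my actual network, input batch, and labels, and I use a plain MSE loss just for the check:

import torch
import torch.nn.functional as F

# Placeholder objects: substitute the real Network instance, inputs and targets.
model.zero_grad()
DEL = model(x_batch)                 # batch size > 1, so forward returns only DEL
loss = F.mse_loss(DEL, target)
loss.backward()

# If all of these norms stay (near) zero, the graph is broken somewhere upstream.
for name, p in model.named_parameters():
    grad_norm = p.grad.norm().item() if p.grad is not None else float('nan')
    print(f"{name}: grad norm = {grad_norm:.3e}")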