Iteratively updating tensor values in a dynamical system

Hi,

I’m having difficulty declaring the forward pass of a function that computes the steps of a dynamical system one at a time, where each step depends on the previous value (here, z_{i+1} = z_i + 0.5 * A z_i for a 2x2 matrix A). At the moment I’m declaring an empty tensor and then using a loop to update its values, but autograd cannot compute the gradients because it detects in-place operations.

I have created a code snippet that shows the function and reproduces the error when trying to compute gradients with torch.autograd.grad.

Any help with making this work or suggesting alternatives would be greatly appreciated.

Thanks,
Pablo

import torch
from torch import nn

class Dyn(nn.Module):
    def __init__(self):
        super(Dyn, self).__init__()
        
    def forward(self, z0, params):
        # Preallocate the trajectory. Creating the tensor under no_grad and
        # then writing into it in-place is what autograd rejects below.
        with torch.no_grad():
            out = torch.empty(10, 2, requires_grad=True)

        out[0] = z0  # in-place assignment into a leaf tensor that requires grad
        for i in range(9):
            out[i+1] = out[i] + 0.5*torch.mv(params.view(2,2), out[i])
        
        return out
        #return (z0+0.5*torch.mv(params.view(2,2), z0)).repeat(10,1)
    
z1 = torch.Tensor([-1., -1.])
z0 = torch.tensor([0.6, 0.3], requires_grad=True)

params = torch.randn(4, requires_grad=True)/2.
dyn = Dyn()

sol = torch.tensor([[-0.3600,  0.5700],
                    [-0.6270,  0.3615],
                    [-0.7764,  0.0299],
                    [-0.7525, -0.3598],
                    [-0.5350, -0.7181],
                    [-0.1493, -0.9497],
                    [ 0.3330, -0.9768],
                    [ 0.8048, -0.7614],
                    [ 1.1453, -0.3210],
                    [ 1.2485,  0.2677]])

pred = dyn(z0, params)  # raises a RuntimeError: in-place operation on a leaf tensor that requires grad

torch.autograd.grad(pred-sol, params, grad_outputs=torch.ones(10,2))

Hi,

I think the simplest solution here would be to store your “out” as a list of Tensors of size 2, appending one entry per step.
Then, just before returning, you can call torch.stack(out, dim=0) to turn the list into a single tensor.
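
Something like this minimal sketch shows the pattern (the matrix A, the initial state and the step count are placeholder values, not the ones from your snippet):

import torch

A = torch.randn(2, 2, requires_grad=True)
z0 = torch.randn(2)

out = [z0]                                 # start from the initial state
for i in range(9):
    out.append(out[i] + 0.5 * A @ out[i])  # out-of-place op, stays in the graph
traj = torch.stack(out, dim=0)             # shape (10, 2), fully differentiable
traj.sum().backward()                      # gradients flow back to A

Since every step creates a new tensor instead of mutating an existing one, autograd can track the whole chain.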

Thank you for your help @albanD. Your solution makes sense and allows backpropagating through the tensor values.

Here is the modified working code, in case it is useful for someone else in the future:

import torch
from torch import nn

class Dyn(nn.Module):
    def __init__(self):
        super(Dyn, self).__init__()
        
    def forward(self, z0, params):
        # Accumulate the states in a Python list; every append is an
        # out-of-place operation, so each entry stays in the autograd graph.
        out = [z0]
        for i in range(9):
            out.append(out[i] + 0.5*torch.mv(params.view(2,2), out[i]))
        # Stack the list into a single (10, 2) tensor.
        return torch.stack(out, dim=0)
    
z1 = torch.Tensor([-1., -1.])
z0 = torch.tensor([0.6, 0.3], requires_grad=True)

params = torch.randn(4, requires_grad=True)/2.
dyn = Dyn()

sol = torch.tensor([[-0.3600,  0.5700],
                    [-0.6270,  0.3615],
                    [-0.7764,  0.0299],
                    [-0.7525, -0.3598],
                    [-0.5350, -0.7181],
                    [-0.1493, -0.9497],
                    [ 0.3330, -0.9768],
                    [ 0.8048, -0.7614],
                    [ 1.1453, -0.3210],
                    [ 1.2485,  0.2677]])

pred = dyn(z0, params)

# Gradients now flow back through all ten steps to params.
torch.autograd.grad(pred-sol, params, grad_outputs=torch.ones(10,2))
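
If you want to double-check the result, torch.autograd.gradcheck can compare these gradients against numerical ones. A quick sketch (gradcheck needs double-precision inputs, so everything is cast to float64 first):

# Numerically verify the gradients of the forward pass.
z0_d = z0.detach().double().requires_grad_()
params_d = params.detach().double().requires_grad_()
print(torch.autograd.gradcheck(dyn, (z0_d, params_d)))  # prints True if they match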