Backward gives grad of None

Hello, I am using backward() to compute gradients. My demo code is as follows:

import torch


def angle2matrix(angles, device=torch.device('cpu')):
    angles = angles/180*3.1415926
    x = angles[0]
    y = angles[1]
    z = angles[2]

    # x
    Rx = torch.tensor([[1, 0, 0],
                       [0, torch.cos(x), -torch.sin(x)],
                       [0, torch.sin(x), torch.cos(x)]]).to(device)
    # y
    Ry = torch.tensor([[torch.cos(y), 0, torch.sin(y)],
                       [0, 1, 0],
                       [-torch.sin(y), 0, torch.cos(y)]]).to(device)
    # z
    Rz = torch.tensor([[torch.cos(z), -torch.sin(z), 0],
                       [torch.sin(z), torch.cos(z), 0],
                       [0, 0, 1]]).to(device)

    R = Rz.mm(Ry.mm(Rx))
    return R

x = torch.ones(3, requires_grad=True)
y = torch.pow(angle2matrix(x), 2)
y.sum().backward()

print(x.grad)

Then I got the following error:

Traceback (most recent call last):
  File "app/transform.py", line 110, in <module>
    y.sum().backward()
  File "/root/anaconda3/lib/python3.7/site-packages/torch/tensor.py", line 198, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "/root/anaconda3/lib/python3.7/site-packages/torch/autograd/__init__.py", line 100, in backward
    allow_unreachable=True)  # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

Then I changed angle2matrix as follows:

def angle2matrix(angles, device=torch.device('cpu')):

    angles = angles/180*3.1415926
    x = angles[0]
    y = angles[1]
    z = angles[2]

    # x
    Rx = torch.tensor([[1, 0, 0],
                       [0, torch.cos(x), -torch.sin(x)],
                       [0, torch.sin(x), torch.cos(x)]]).to(device)
    # y
    Ry = torch.tensor([[torch.cos(y), 0, torch.sin(y)],
                       [0, 1, 0],
                       [-torch.sin(y), 0, torch.cos(y)]]).to(device)
    # z
    Rz = torch.tensor([[torch.cos(z), -torch.sin(z), 0],
                       [torch.sin(z), torch.cos(z), 0],
                       [0, 0, 1]]).to(device)

    Rx.requires_grad=True
    Ry.requires_grad=True
    Rz.requires_grad=True

    R = Rz.mm(Ry.mm(Rx))
    return R

x = torch.ones(3, requires_grad=True)
y = torch.pow(angle2matrix(x), 2)
y.sum().backward()

print(x.grad)

But this time the grad of x is None.

Looking forward to your help!

Your problem is this:

x = torch.tensor([1., 2.], requires_grad=True)
y = torch.tensor(x, requires_grad=True)  # torch.tensor copies the data and detaches it from x
y.sum().backward()

print(x.grad)  # None

Now when you backprop through y, y is a leaf (torch.tensor creates a new leaf tensor), so there is no path for the gradients to propagate back to x. That is why x.grad is None.
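You can verify this by inspecting the tensor attributes (a quick check; is_leaf and grad_fn are standard tensor attributes):

import torch

x = torch.tensor([1., 2.], requires_grad=True)
y = torch.tensor(x, requires_grad=True)  # recent PyTorch versions warn here, suggesting clone().detach()

print(y.is_leaf)  # True: y is a fresh leaf tensor
print(y.grad_fn)  # None: no operation connects y back to x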

One way to solve this problem is to index into x instead; indexing is a differentiable operation, so the graph is preserved:

x = torch.tensor([1., 2.], requires_grad=True)
y = [x[0], x[1]]
(y[0]+y[1]).backward()

# Now gradients will be propagated to x

I don’t know if there is an easier way, but you need to assemble Rx, Ry, Rz from the individual differentiable entries (rather than via torch.tensor) for your code to work; see the sketch below.
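For reference, here is a minimal sketch of angle2matrix rebuilt with torch.stack, so every matrix entry stays connected to angles. The device argument from your version is dropped for brevity, and angle2matrix_stack is just an illustrative name:

import math
import torch

def angle2matrix_stack(angles):
    # Assemble each rotation matrix with torch.stack so autograd can
    # trace every entry back to `angles` (torch.tensor would detach them).
    angles = angles / 180 * math.pi
    x, y, z = angles[0], angles[1], angles[2]
    one = torch.ones_like(x)
    zero = torch.zeros_like(x)

    Rx = torch.stack([torch.stack([one, zero, zero]),
                      torch.stack([zero, torch.cos(x), -torch.sin(x)]),
                      torch.stack([zero, torch.sin(x), torch.cos(x)])])
    Ry = torch.stack([torch.stack([torch.cos(y), zero, torch.sin(y)]),
                      torch.stack([zero, one, zero]),
                      torch.stack([-torch.sin(y), zero, torch.cos(y)])])
    Rz = torch.stack([torch.stack([torch.cos(z), -torch.sin(z), zero]),
                      torch.stack([torch.sin(z), torch.cos(z), zero]),
                      torch.stack([zero, zero, one])])
    return Rz.mm(Ry.mm(Rx))

x = torch.ones(3, requires_grad=True)
y = torch.pow(angle2matrix_stack(x), 2)
y.sum().backward()
print(x.grad)  # no longer None (analytically near zero: sum(R**2) is always 3 for a rotation matrix)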