Hi everyone,

I’ve been trying to calculate the jacobian matrix or Jacobian times a vector when the explicit formula for the gradient is not available, and I calculate it by autograd. In this case, the jacobian of PyTorch returns a zero matrix. In the matrix-vector product, it returns the same value as the gradient of the objective function with respect to parameter theta. Please find a toy example below. I appreciate your guidance.

#First version

#Forming the Jacobian matrix

def objective(x,theta):

return torch.exp(theta[0])*x[0]*x[1] +torch.exp(theta[1])*x[2]

def gradient(x,theta):

funval = objective(x,theta)

funval.backward()

return x.grad

x = torch.tensor([1.0,2.0,3.0], requires_grad=True)

theta = torch.tensor([-1.0,-3.0], requires_grad=True)

jac = jacobian(gradient,(x,theta))

print(jac)

#Second version

#Forming the Jacobian matrix-vector product with, for example, the vector of ones

def objective(x,theta):

return torch.exp(theta[0])*x[0]*x[1] +torch.exp(theta[1])*x[2]

def gradient(x,theta):

funval = objective(x,theta)

funval.backward()

print(theta.grad)

return x.grad

x = torch.tensor([1.0,2.0,3.0], requires_grad=True)

theta = torch.tensor([-1.0,-3.0], requires_grad=True)

grad = Variable(gradient(x,theta),requires_grad=True)

temp = torch.dot(grad,torch.ones(3))

temp.backward()

print(theta.grad)

#which is the same as the gradient of the objective function with respect to theta