Derivative of model outputs w.r.t. input features

Hi Frank,

I just did a very minor change and it seems to work. Thanks for your help! Appreciated…

# Compute the Jacobian d(preds)/d(input) one column at a time, with one
# backward() pass per output unit. Shapes assume an MNIST-style setup:
# a 1x1x28x28 input (784 features) mapped to 10 output scores.
# NOTE(review): the original paste had the loop body unindented, which is
# an IndentationError — restored here.
my_input = test_data[num][0].view(1, 1, 28, 28)

# J[j, i] = d preds[0, i] / d input[j]; filled one column per loop pass.
# torch.zeros already returns float32, so no explicit .float() is needed.
J = torch.zeros((784, 10))

# Record operations on the input so gradients w.r.t. it are tracked.
my_input.requires_grad_()

preds = model(my_input)
print("preds shape is: ", preds.shape)

for i in range(10):
    grd = torch.zeros((1, 10))  # same shape as preds
    grd[0, i] = 1  # select output i: the column of the Jacobian to compute
    # retain_graph=True keeps the autograd graph alive for the next pass.
    preds.backward(gradient=grd, retain_graph=True)
    J[:, i] = my_input.grad.view(784)  # fill in one column of the Jacobian
    my_input.grad.zero_()  # .backward() accumulates gradients, so reset to zero

print(J.shape)
print(J)