Is there a way to calculate the Hessian with respect to activations for a small network like this?


I need to calculate the Hessian with respect to the activations (intermediate values) of a small network like the one below. Is there a way to do that using PyTorch?

class Net(nn.Module):
    """A 2-2-2 fully connected network with sigmoid activations.

    The weights and biases are hard-coded to the values of Matt Mazur's
    classic step-by-step backpropagation worked example, so the forward
    pass produces known, reproducible numbers.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(2, 2)
        self.s1 = nn.Sigmoid()
        self.fc2 = nn.Linear(2, 2)
        self.s2 = nn.Sigmoid()
        # Overwrite the randomly initialized parameters with fixed values.
        # NOTE: the biases have shape (1,) rather than (2,); they broadcast
        # across both output units, i.e. both units share the same bias.
        self.fc1.weight = torch.nn.Parameter(torch.Tensor([[0.15, 0.2], [0.250, 0.30]]))
        self.fc1.bias = torch.nn.Parameter(torch.Tensor([0.35]))
        self.fc2.weight = torch.nn.Parameter(torch.Tensor([[0.4, 0.45], [0.5, 0.55]]))
        self.fc2.bias = torch.nn.Parameter(torch.Tensor([0.6]))

    def forward(self, x):
        """Apply linear -> sigmoid -> linear -> sigmoid and return the result."""
        x = self.fc1(x)
        x = self.s1(x)
        x = self.fc2(x)
        x = self.s2(x)
        return x

# Run one forward pass on the worked-example input and compute the MSE loss.
net = Net()

data = torch.Tensor([0.05, 0.1])
out = net(data)
target = torch.Tensor([0.01, 0.99])  # a dummy target, for example
criterion = nn.MSELoss()
# The trailing "; loss" in the original was a REPL artifact (a bare
# expression is a no-op in a script), so it has been removed.
# Expected value for this fixed network: ~0.2983711 (Mazur's total error).
loss = criterion(out, target)