Save gradient of middle layer of detection network

I want to save the gradient of an intermediate FPN layer. I tried the following, but the gradient was not saved:

  1. I used register_hook on the intermediate activation (c3).
  2. I set requires_grad of the input to True.
class _FPN(nn.Module):
    ...
        self.gradients = []

    def save_gradient(self, grad):
        self.gradients.append(grad)
    ...
    def forward(self, im_data, im_info, gt_boxes, num_boxes):
        ...
        c1 = self.RCNN_layer0(im_data)
        c2 = self.RCNN_layer1(c1)
        c3 = self.RCNN_layer2(c2)
        c3.register_hook(self.save_gradient)
        ...

im_data = Variable(im_data, requires_grad=True)

Thanks!

I’ve created a small example that stores the grad_output in a list inside your model.

import torch
import torch.nn as nn
import torch.nn.functional as F

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = nn.Linear(10, 1)
        
        self.gradients = []
        self.fc1.register_backward_hook(self.save_gradients)
        
    def forward(self, x):
        x = self.fc1(x)
        return F.sigmoid(x)

    def save_gradients(self, module, grad_input, grad_output):
        # grad_output[0] is the gradient w.r.t. fc1's output
        self.gradients.append(grad_output[0].item())

x = torch.randn(1, 10)
model = MyModel()
output = model(x)
output.backward()

print(model.gradients)
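
After output.backward() runs, model.gradients should contain a single value: the gradient of the sigmoid output with respect to fc1's output.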

Would this work for you or do you want to register the hook with a specific tensor?
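
If it’s the tensor version you’re after, here is a minimal sketch using Tensor.register_hook inside forward (the layer sizes and names fc1/fc2 are just placeholders); the hook receives the gradient of the hooked tensor once backward() reaches it:

import torch
import torch.nn as nn

class TensorHookModel(nn.Module):
    def __init__(self):
        super(TensorHookModel, self).__init__()
        self.fc1 = nn.Linear(10, 5)
        self.fc2 = nn.Linear(5, 1)
        self.gradients = []

    def save_gradient(self, grad):
        # grad is the gradient of the hooked intermediate tensor
        self.gradients.append(grad)

    def forward(self, x):
        x = self.fc1(x)
        # hook the intermediate activation itself, as in your c3 example
        x.register_hook(self.save_gradient)
        x = self.fc2(x)
        return torch.sigmoid(x)

x = torch.randn(1, 10)
model = TensorHookModel()
output = model(x)
output.backward()

print(model.gradients)  # one tensor of shape (1, 5): grad w.r.t. fc1's output

Note that the hook has to be registered inside forward, since the intermediate tensor only exists there.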
