Printing gradients of functional layers

Hi,

Is there a way to print the gradient flow across functional layers like ReLU and softmax? For example, in the program below:

import torch
import torch.nn as nn

# Network
class LeNet5(nn.Module):
    def __init__(self):
        super(LeNet5, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=(5, 5)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
            nn.Conv2d(6, 16, kernel_size=(5, 5)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
            nn.Conv2d(16, 120, kernel_size=(5, 5)),
        )

        self.fc = nn.Sequential(
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10),
            nn.LogSoftmax(dim=-1),
        )

    def forward(self, data):
        x = self.conv(data)
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x

net = LeNet5()
input = torch.rand(1, 1, 32, 32)
output = net(input)

grad = torch.rand(output.shape)
output.backward(grad) # Can I print gradient flow of this operation?

Is there a way to print the gradient flow (gradient input and output) of the self.fc[0] and self.fc[3] layers?
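
In case it helps frame the question, here is a minimal sketch of the kind of output I am after, using register_full_backward_hook on the individual submodules (I am assuming module hooks are the intended mechanism here; the printed labels are just for illustration):

# Sketch: attach backward hooks to the layers of interest and print
# the gradients flowing into and out of them during backward().
def make_hook(name):
    def hook(module, grad_input, grad_output):
        # grad_input / grad_output are tuples of tensors (entries may be None)
        print(f"{name} grad_input shapes: "
              f"{[g.shape if g is not None else None for g in grad_input]}")
        print(f"{name} grad_output shapes: "
              f"{[g.shape if g is not None else None for g in grad_output]}")
    return hook

net.fc[0].register_full_backward_hook(make_hook("fc[0] (Linear)"))
net.fc[3].register_full_backward_hook(make_hook("fc[3] (LogSoftmax)"))

output = net(input)
output.backward(torch.rand(output.shape))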