How to calculate the gradient for each layer input?

I am working on the following model, and I want to calculate the gradient of the loss with respect to the input of each layer; pseudo-code:

class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = ...  # define the layers here

    def forward(self, x):
        layer_input = []
        layer_input.append(x)
        for layer in self.layers:
            x = layer(x)
            layer_input.append(x)

        return x, layer_input

and I want to do the following. Also, how do I freeze the weights but not the input to each layer?

for epoch in epochs:
    #train model
    for images, labels in data:
        out, layer_input = model(images)
        ...

    # validate model
    for images, labels in data:
        out, layer_input = model(images)
        ...

    # save the gradient for each layer input
    # also freeze the layers but not the input to each layer (??)
    for images, labels in data:
        out, layer_input = model(images)
        loss = criterion(out, labels)
        for l_input in layer_input:
            grad = calculate_grad(loss, l_input)
            save(grad)

Thank you in advance.

Hi,

You can get the gradient for a given tensor by doing x.register_hook(hook_fn). Your hook_fn will be called with the gradient of x when it is computed. You can then save it wherever you want.
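For example, something along these lines (a minimal sketch; the small Sequential model and the saved_grads dict are just placeholders for your own code):

import torch
import torch.nn as nn

# a small illustrative model; replace with your own
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

saved_grads = {}

def make_hook(name):
    # the hook receives the gradient of the tensor it was registered on
    def hook_fn(grad):
        saved_grads[name] = grad.detach().clone()
    return hook_fn

x = torch.randn(1, 4, requires_grad=True)
x.register_hook(make_hook("input"))

# register a hook on every intermediate activation as well
out = x
for i, layer in enumerate(model):
    out = layer(out)
    out.register_hook(make_hook(f"layer_{i}_output"))

loss = out.sum()
loss.backward()
print(saved_grads.keys())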

how do I freeze the weights but not the input to each layer?

I am not sure what you mean by that. Could you describe in more detail what you’re trying to accomplish?

Hi @albanD,

Thank you for your reply.

I am trying to save the input to each layer and calculate the gradient w.r.t. the input to each layer. Suppose we have three layers in the network.

def model(x):
    a = w1 * x # layer 1
    b = w2 * a # layer 2
    c = w3 * b # layer 3
    return c

for epoch in epochs:
    # now I will train the model and update the weights w1, w2, w3, i.e.
    for x in train_loader:
        optimizer.zero_grad()
        out = model(x)
        loss = (10 - out).sum()  # toy loss
        loss.backward()
        optimizer.step()

    # validate model
    ....
    ...

    # In this step I don't want to update w1, w2, w3; I only want to
    # calculate the gradient with respect to the input of each layer
    # (which is the output of the previous layer), i.e. dl/dx, dl/da, dl/db, dl/dc.

    # Save these gradients and average them after all epochs are complete.
    # The gradient w.r.t. the inputs has to be calculated from a single input (batch_size == 1).
    for batch in train_loader:
        for image in batch:
            calculate_gradient for inputs to each layer; 
            save(gradient)

I hope you understand my problem; sorry for the poor explanation. Currently I am doing this, but I don’t know if I am doing it right.

import torch
import torch.nn as nn
from torch.autograd import Variable

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.layers = nn.ModuleList([
            nn.Linear(4, 8),
            nn.Linear(8, 16),
            nn.Linear(16, 2),
        ])

    def forward(self, x):
        self.output = []
        self.input = []
        for layer in self.layers:
            # detach from previous history
            x = Variable(x.data, requires_grad=True)
            self.input.append(x)

            # compute output
            x = layer(x)
            self.output.append(x)
        return x

    def backward(self, p, q):
        # print(p, q)
        self.input.append(q)
        self.output.append(p)
        for i, output in reversed(list(enumerate(self.output))):
            
            if i == (len(self.output) - 1):
                # for the last node (the loss), call backward directly
                output.backward()
            else:
                output.backward(self.input[i+1].grad.data)
                print(i + 1, self.input[i+1].grad.data)     
        print(i, self.input[i].grad.data)

model = Net()
criterion = torch.nn.MSELoss()

inp = torch.ones(1, 4)
output = model(inp) # predicted output

output = Variable(output.data, requires_grad=True)
loss = (10 - output).sum()
model.backward(loss, output)

How do I use x.register_hook(hook_fn)? Can you help me with a code snippet?

Thanks!

Hi,

output = Variable(output.data, requires_grad=True): you should never do that, both because Variable doesn’t exist anymore (you can just use Tensors) and because you should never use .data, as it has many bad side effects (including preventing the gradients from flowing).
If you want to detach a Tensor, use .detach().

If you already have a list of all the inputs to the layers, you can simply do grads = autograd.grad(loss, inputs), which will return the gradient w.r.t. each input.
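
For example, something like this (a minimal sketch; the Net, shapes, and variable names are just illustrative and assume your forward returns the list of layer inputs as in your pseudo-code):

import torch
import torch.nn as nn
from torch import autograd

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.Linear(4, 8),
            nn.Linear(8, 16),
            nn.Linear(16, 2),
        ])

    def forward(self, x):
        layer_inputs = []
        for layer in self.layers:
            layer_inputs.append(x)  # input to this layer
            x = layer(x)
        return x, layer_inputs

model = Net()
criterion = nn.MSELoss()

images = torch.randn(1, 4, requires_grad=True)  # batch_size == 1
labels = torch.randn(1, 2)

out, layer_inputs = model(images)
loss = criterion(out, labels)

# gradients of the loss w.r.t. each layer input;
# the weights' .grad fields are not touched, so nothing gets "updated"
grads = autograd.grad(loss, layer_inputs)
for i, g in enumerate(grads):
    print(f"grad w.r.t. input of layer {i}:", g.shape)

Since autograd.grad returns the gradients instead of accumulating them into .grad, you don’t need to freeze anything for this step; just don’t call optimizer.step() there.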


Hi @albanD,

I am using the following implementation, but the gradient w.r.t. the inputs is None.

import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        pass

    def forward(self, x):
        self.input = []
        x = x.clone().detach().requires_grad_(True)
        for i in range(2):          
            x = x * 2
            # detach from previous history
            x = x.clone().detach().requires_grad_(True)
            self.input.append(x)
        return x

    def backward(self, p):
        
        for input in self.input:
            print(input)
            print(torch.autograd.grad(p, input, allow_unused=True))

model = Net()
criterion = torch.nn.MSELoss()

"""
x = 2 # dl/dx == -4
a = 2 * x # dl/da == dl/db * db/da == -2
b = 2 * a # dl/db == -1
l = 10 - b # dl/dl == 1
"""

inp = torch.tensor(2.0)
output = model(inp) # predicted output

output = output.clone().detach().requires_grad_(True)
loss = (10 - output).sum()
model.backward(loss)

The output should be like this, right?

"""
x = 2 # dl/dx == -4
a = 2 * x # dl/da == dl/db * db/da == -2
b = 2 * a # dl/db == -1
l = 10 - b # dl/dl == 1
"""

I need some help. Thanks in advance.

If you detach from the model, I don’t expect any gradient to be propagated there, right?
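
For reference, a minimal sketch of the same toy computation without any detaching (retain_grad() here is just one way to keep gradients for the intermediate tensors); it gives the values you listed:

import torch

# same toy computation, but nothing is detached from the graph
x = torch.tensor(2.0, requires_grad=True)
a = 2 * x
b = 2 * a
a.retain_grad()  # keep gradients for these non-leaf tensors
b.retain_grad()

loss = 10 - b
loss.backward()

print(x.grad)  # tensor(-4.)
print(a.grad)  # tensor(-2.)
print(b.grad)  # tensor(-1.)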
