Gradients of model output layer and intermediate layer wrt inputs

I’m trying to visualize model layer outputs using the saliency core package on a simple conv net. This requires computing the gradients of the model’s output layer and of an intermediate convolutional layer’s output w.r.t. the input. I’ve attempted this in the last code block, but I run into the error RuntimeError: grad can be implicitly created only for scalar outputs. How do I retrieve these gradients, and what is this error telling me?

import torch
import torch.nn as nn
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import numpy as np
from tqdm.notebook import tqdm
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
from google.colab import drive
drive.mount('/content/drive')
root = '/content/drive/MyDrive/Work/ExplainMNIST'
import os
# !pip install saliency
import saliency.core as saliency

# Hyperparameters
batch_size = 32
num_classes = 10

data_dir = os.path.join(root, "data")
mnist_train = torchvision.datasets.MNIST(data_dir, train=True, transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
]), download=True)
mnist_test = torchvision.datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
]), download=True)
train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(mnist_test, batch_size=batch_size, shuffle=True)
checkpoint_dir = os.path.join(root, 'checkpoints')
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)

images_, labels_ = next(iter(train_loader))
images_ = images_.to(device)
labels_ = labels_.to(device)

class ConvNet(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=10, kernel_size=3)
        self.pool1 = nn.MaxPool2d(3)
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=10, kernel_size=3)
        self.activation = nn.ReLU()
        self.fc = nn.Linear(360, num_classes)
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.activation(x)
        conv = x
        x = self.fc(x.reshape(x.shape[0], -1))
        return {'output': x, 'conv': conv}

model = ConvNet(1, num_classes)
model = model.to(device)
model.eval()

# HELP! Here is my attempt
with torch.set_grad_enabled(True):
    output, conv = model(images_).values()
    output_class = output[:, 0]
    # Here is how I tried to compute the gradients
    grads = torch.autograd.grad(output, images_)
    grads2 = torch.autograd.grad(conv, images_)
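
For reference, the error means exactly what it says: torch.autograd.grad can only fill in grad_outputs implicitly when the output is a scalar. For a non-scalar output you must either reduce it to a scalar first or pass an explicit grad_outputs tensor, and the input must be marked with requires_grad=True before the forward pass. Here is a minimal sketch of both fixes, reusing model and images_ from the snippet above (untested against the saliency package itself):

# The input must track gradients before the forward pass
images_.requires_grad_(True)
out = model(images_)
output, conv = out['output'], out['conv']

# Fix 1: reduce the output to a scalar. Summing the per-sample class-0
# scores gives each row of grads the gradient of its own sample's score.
grads = torch.autograd.grad(output[:, 0].sum(), images_,
                            retain_graph=True)[0]

# Fix 2: pass grad_outputs explicitly (equivalent to summing conv).
grads2 = torch.autograd.grad(conv, images_,
                             grad_outputs=torch.ones_like(conv))[0]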

Hi,
You can use forward hooks to access intermediate activations. Here is a snippet borrowed from this forum:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.cl1 = nn.Linear(25, 60)
        self.cl2 = nn.Linear(60, 16)
        self.fc1 = nn.Linear(16, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        
    def forward(self, x):
        x = F.relu(self.cl1(x))
        x = F.relu(self.cl2(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.log_softmax(self.fc3(x), dim=1)
        return x


activation = {}
def get_activation(name):
    # Forward hook: stores the layer's output under `name`.
    # .detach() removes the stored tensor from the autograd graph, so use
    # this when you only need the values, not gradients through them.
    def hook(model, input, output):
        activation[name] = output.detach()
    return hook


model = MyModel()
model.fc2.register_forward_hook(get_activation('fc2'))
x = torch.randn(1, 25)
output = model(x)
print(activation['fc2'])
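
One caveat if you adapt this to the question: output.detach() inside the hook cuts the stored activation out of the autograd graph, so you cannot differentiate it w.r.t. the input afterwards. Below is a sketch of a hook that keeps the graph intact, applied to the ConvNet from the question; note it captures conv2's pre-ReLU output, whereas the question's forward returns the post-ReLU tensor, and save_activation is an illustrative helper name, not part of any API:

# Hook that keeps the activation attached to the autograd graph
activation = {}
def save_activation(name):  # illustrative helper, not from the saliency API
    def hook(module, inp, out):
        activation[name] = out  # no .detach() here
    return hook

model = ConvNet(1, num_classes).to(device)  # the model from the question
model.conv2.register_forward_hook(save_activation('conv2'))

images_.requires_grad_(True)
output = model(images_)['output']
conv = activation['conv2']

# Gradient of the summed conv2 activation w.r.t. the input batch
grads2 = torch.autograd.grad(conv.sum(), images_)[0]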

thank you so much, @mhnazeri 🙂
