Visualize feature maps of the 1st layer

class Net(nn.Module):
    """LeNet-style CNN: 1-channel input, two conv+pool stages, three FC layers, 10 outputs.

    The 16 * 4 * 4 fc1 in-features assume a 28x28 input (e.g. MNIST):
    28 -> conv1(5) -> 24 -> pool -> 12 -> conv2(5) -> 8 -> pool -> 4.
    """

    def __init__(self):
        # Fixed from the original paste: `init` must be `__init__` (both here
        # and in the super() call), and the class-body indentation was lost.
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)   # 1 in-channel, 6 out-channels, 5x5 kernel
        self.pool = nn.MaxPool2d(2, 2)    # shared 2x2 max-pool, stride 2
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)      # 10 class scores

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)        # flatten conv features per sample
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)                   # raw logits; no softmax here
        return x

I want to visualize the 6 output feature maps of self.conv1, the first layer in the network. Could anybody please help?

You could directly return the desired activation from the forward method or you could use forward hooks as described here.

OK, I will try it. Thank you.

# Example: register a forward hook on fc2 and print its captured activation.
# Fixed from the original paste: the curly quotes around ‘fc2’ are a
# SyntaxError in Python — they must be straight ASCII quotes.
model = MyModel()
model.fc2.register_forward_hook(get_activation('fc2'))
x = torch.randn(1, 25)  # example input; sized for this particular MyModel
output = model(x)       # forward pass triggers the hook
print(activation['fc2'])

What is this `x = torch.randn(1, 25)` for?

x is just a random input which is used as an example to show how hooks are used.

class Net(nn.Module):
    """CNN for 1-channel 128x128 inputs producing a single output value.

    fc1's in-features (13456 = 16 * 29 * 29) fix the expected input size:
    128 -> conv1(5) -> 124 -> pool -> 62 -> conv2(5) -> 58 -> pool -> 29.
    (Fixed from the original paste, which had lost the class-body indentation.)
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        # 13456 = 16 * 29 * 29, the flattened conv2 output for a 128x128 input
        self.fc1 = nn.Linear(13456, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 1)

    def forward(self, x):
        # Max pooling over a (2, 2) window; prints kept for the size debugging
        # this thread is about.
        print('x', str(x.size()))
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        print('x', str(x.size()))
        # If the size is a square, you can specify it with a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = torch.flatten(x, 1)  # flatten all dimensions except the batch dimension
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Captured activations, keyed by the name passed to get_activation.
activation = {}

def get_activation(name):
    """Return a forward hook that stores the module's detached output under `name`.

    Fixed from the original paste, which had lost the nested-function
    indentation (hook body and `return hook` must be indented).
    """
    def hook(model, input, output):
        # detach() so the stored tensor is not part of the autograd graph
        activation[name] = output.detach()
    return hook

# Dict holding hooked outputs by name (repeated paste of the helper above).
activation = {}

def get_activation(name):
    """Build a forward hook closing over `name`; it saves the layer output
    (detached from autograd) into the module-level `activation` dict.

    Indentation restored — the original paste had flattened the nested
    function, which is invalid Python.
    """
    def hook(model, input, output):
        activation[name] = output.detach()
    return hook

# Capture conv1's feature maps via a forward hook.
model = Net()
# Fixed from the original paste: the hook was registered on model.fc2 even
# though the key said 'conv1', and the curly quotes were a SyntaxError.
model.conv1.register_forward_hook(get_activation('conv1'))
# Net's fc1 (13456 = 16 * 29 * 29 in-features) implies a 1x128x128 input.
# torch.randn(1, 6) / randn(1, 6, 5, 5) caused the RuntimeErrors reported
# below: conv2d needs (batch, channels=1, H, W).
x = torch.randn(1, 1, 128, 128)
output = model(x)
print(activation['conv1'])  # shape (1, 6, 124, 124): the 6 conv1 feature maps

For the above code, since I want the conv1 features, I gave x = torch.randn(1, 6),
but got this error: RuntimeError: Expected 4-dimensional input for 4-dimensional weight [6, 1, 5, 5], but got 2-dimensional input of size [1, 6] instead.

Does this x depend on our model?

Yes, the model input (in my example x) depends on the model architecture. The error is raised since you are passing a wrong input to your model and you should use your real samples instead of my example input.

I gave x as x = torch.randn(1, 6, 5, 5), because self.conv1 = nn.Conv2d(1, 6, 5) is defined like that, and got this error:
RuntimeError: Given groups=1, weight of size [6, 1, 5, 5], expected input[1, 6, 5, 5] to have 1 channels, but got 6 channels instead.

How do I directly return the desired activation from the forward method? Can you please give an example?

How do I directly return the desired activation from the forward method? Can you please give an example?

This should work:

def forward(self, x):
    """Run the network; return both the final output and fc2's pre-ReLU activation."""
    # Two conv -> ReLU -> max-pool stages, then flatten per sample.
    pooled1 = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
    pooled2 = F.max_pool2d(F.relu(self.conv2(pooled1)), 2)
    flat = torch.flatten(pooled2, 1)
    hidden = F.relu(self.fc1(flat))
    # Keep fc2's raw output so the caller can inspect it directly.
    act = self.fc2(hidden)
    out = self.fc3(F.relu(act))
    return out, act

I will try with your sample code. Thank you.