I’ve written the following code to visualise the feature maps (four from each layer) of a fully trained CNN, by passing images forward through the network. I’ve seen examples where layer activations are captured by registering a forward hook on the network — how do I do that?
def feature_map_visualisation(images, image_index):
    """Plot four feature maps from each of the first two conv layers.

    The batch is pushed manually through the sub-modules of
    ``model_gpu.first_layer`` (conv, relu, pool, dropout) and the first
    sub-module of ``model_gpu.second_layer`` so the intermediate
    activations are available for plotting.

    Parameters
    ----------
    images : torch.Tensor
        Batch of input images; moved to the global ``device`` before the
        forward pass.
    image_index : int
        Index within the batch of the image whose activations are shown.

    Returns
    -------
    matplotlib.figure.Figure
        Figure with a 2x4 grid: row 1 = layer-1 maps, row 2 = layer-2 maps.
    """
    images = images.to(device)

    # Replay the layers step by step so the intermediate tensors are kept.
    conv1_activation = model_gpu.first_layer[0](images)
    conv1_active_relu = model_gpu.first_layer[1](conv1_activation)
    conv1_active_pooling = model_gpu.first_layer[2](conv1_active_relu)
    conv1_active_drop = model_gpu.first_layer[3](conv1_active_pooling)
    conv2_activation = model_gpu.second_layer[0](conv1_active_drop)

    figure = plt.figure(figsize=(16, 16))

    # BUG FIX: the original used add_subplot(1, 4, k) for the first four
    # maps and add_subplot(2, 4, j) with j = 1..4 for the next four, so
    # both sets were drawn into overlapping slots. Use one 2x4 grid:
    # slots 1-4 for layer 1, slots 5-8 for layer 2.
    for channel, activation in ((0, conv1_activation), (1, conv2_activation)):
        for i in range(4):
            figure.add_subplot(2, 4, channel * 4 + i + 1)
            fmap = activation[image_index][i].cpu().detach().clone().numpy()
            # NOTE(review): a single feature-map channel is already 2-D;
            # rgb2gray is kept for parity with the original, but skimage's
            # rgb2gray expects an RGB image — confirm this helper accepts
            # 2-D input, otherwise drop the call.
            fmap = rgb2gray(fmap)
            plt.imshow(fmap, cmap="gray")

    return figure