def forward_pass_on_convolutions(self, x):
“”"
Does a forward pass on convolutions, hooks the function at given layer
“”"
l1 = ['conv0','norm0','relu0','pool0','norm5']
l2 = ['denseblock1','denseblock2', 'denseblock3', 'denseblock4']
l3 = ['transition1', 'transition2', 'transition3']
l_all = l1+l2+l3
conv_output = None
for module_pos, module in self.model.features._modules.items():
print ('m', module_pos)
if module_pos in l1:
x = module(x) # Forward
if module_pos == self.target_layer:
x.register_hook(self.save_gradient)
conv_output = x # Save the convolution output on that layer
if module_pos in l2:
for smodule_pos, smodule in module._modules.items():
print ('sm', smodule_pos)
x = smodule(x) # Forward
if smodule_pos == self.target_layer:
x.register_hook(self.save_gradient)
conv_output = x # Save the convolution output on that layer
if module_pos in l3:
for smodule_pos, smodule in module._modules.items():
print ('sm', smodule_pos)
x = smodule(x) # Forward
if smodule_pos == self.target_layer:
x.register_hook(self.save_gradient)
conv_output = x # Save the convolution output on that layer
return conv_output, x
it gives the error:
Traceback (most recent call last):
File “main.py”, line 651, in
main(args)
File “main.py”, line 635, in main
visualize(args)
File “main.py”, line 526, in visualize
cam = grad_cam.generate_cam(prep_img, S, target_class)
File “./pytorch-cnn-visualizations/src\gradcam_.py”, line 99, in generate_cam
conv_output, model_output = self.extractor.forward_pass(input_image, cell_type)
File “./pytorch-cnn-visualizations/src\gradcam_.py”, line 71, in forward_pass
conv_output, x = self.forward_pass_on_convolutions(x)
File “./pytorch-cnn-visualizations/src\gradcam_.py”, line 49, in forward_pass_on_convolutions
x = smodule(x) # Forward
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torch\nn\modules\module.py”, line 547, in call
result = self.forward(*input, **kwargs)
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torchvision\models\densenet.py”, line 48, in forward
bottleneck_output = cp.checkpoint(bn_function, *prev_features)
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torch\utils\checkpoint.py”, line 155, in checkpoint
return CheckpointFunction.apply(function, preserve, *args)
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torch\utils\checkpoint.py”, line 74, in forward
outputs = run_function(*args)
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torchvision\models\densenet.py”, line 23, in bn_function
bottleneck_output = conv(relu(norm(concated_features)))
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torch\nn\modules\module.py”, line 547, in call
result = self.forward(*input, **kwargs)
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torch\nn\modules\batchnorm.py”, line 81, in forward
exponential_average_factor, self.eps)
File “C:\Users\shmak\AppData\Local\Continuum\anaconda3\lib\site-packages\torch\nn\functional.py”, line 1656, in batch_norm
training, momentum, eps, torch.backends.cudnn.enabled
RuntimeError: running_mean should contain 48 elements not 144
But if i run:
def forward_pass_on_convolutions(self, x):
"""
Does a forward pass on convolutions, hooks the function at given layer
"""
conv_output = None
for module_pos, module in self.model.features._modules.items():
print ('m', module_pos)
x = module(x) # Forward
if module_pos == self.target_layer:
x.register_hook(self.save_gradient)
conv_output = x # Save the convolution output on that layer
return conv_output, x
It works fine
Also i might add, my motive is to access the denselayers modules and if possible conv1 and conv2 filters of the denselayer.