I have checked the post here and tried to fix the warning, but I am still getting it.
Custom Autograd Function Backward pass not Called
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:1204: UserWarning: Output 0 of BackwardHookFunctionBackward is a view and is being modified inplace. This view was created inside a custom Function (or because an input was returned as-is) and the autograd logic to handle view+inplace would override the custom backward associated with the custom Function, leading to incorrect gradients. This behavior is deprecated and will be forbidden starting version 1.6. You can remove this warning by cloning the output of the custom Function. (Triggered internally at /pytorch/torch/csrc/autograd/variable.cpp:547.) result = torch.relu_(input)
Here is my code:
class _Grad():
def __init__(self,model,target_layer=None,input_size = [3,224,224]):
if not isinstance(model,torch.nn.Module):
raise ValueError("Provide a valid model")
self.model = model
self.model_dic = dict(model.named_modules())
if target_layer is None or target_layer not in self.model_dic.keys():
raise ValueError("Provide a valid layer")
self.target_layer = self.model_dic[target_layer]
self.activations = None
self.grads = None
self.hooks = []
back_hook = 'register_full_backward_hook' if torch.__version__ >= '1.8.0' else 'register_backward_hook'
self.hooks.append(self.target_layer.register_forward_hook(self._extract_activations))
self.hooks.append(getattr(self.target_layer,back_hook)(self._extract_grads))
def _extract_activations(self,module,input,output):
self.activations = output.data
def _extract_grads(self,module,input,output):
self.grads = output[0].data
def _backpropagate(self,class_indx,scores):
if self.activations is None:
raise TypeError("Input needs to be passed before Backpropagation")
loss = scores[:,class_indx].sum()
self.model.zero_grad()
loss.backward(retain_graph=True)
def _get_weights(self,class_indx,scores):
self._backpropagate(class_indx,scores)
b,c,h,w = self.grads.size()
weights = self.grads.view(b, c, -1).mean(2)
weights = weights.view(b,c,1,1)
return weights.clone()
def get_cam_map(self,class_indx,scores,normalized):
weights = self._get_weights(class_indx,scores)
cams = torch.nansum((weights*self.activations).squeeze(0),0)
cams = F.relu(cams)
if normalized :
cam_map_min, cam_map_max = cams.min(), cams.max()
cams = (cams - cam_map_min).div(cam_map_max)
return cams
def __call__(self,class_indx,scores,normalized=True):
return self.get_cam_map(class_indx,scores,normalized)
from torchvision.models import densenet121
# Load an ImageNet-pretrained DenseNet-121 and switch to inference mode.
model = densenet121(pretrained=True).eval()
# NOTE(review): read_image / resize / normalize are presumably the
# torchvision.io / torchvision.transforms.functional helpers -- their
# imports are not shown in this snippet; confirm before running.
img = read_image("image.png")
# Scale uint8 pixels to [0, 1], then apply the standard ImageNet
# per-channel mean/std normalization.
input_tensor = normalize(resize(img, (224, 224)) / 255., [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# Forward pass; unsqueeze(0) adds the batch dimension the model expects.
out = model(input_tensor.unsqueeze(0))