Hi,
I want to visualize convolutional features, but when I call backward(),
the input variable’s grad is still None.
I have checked the input variable, and its is_leaf attribute is True.
class SaveFeatures():
    """Forward hook that captures a module's output for use as an
    optimization target.

    The captured output must stay connected to the autograd graph.
    Calling ``output.clone().detach()`` here (as the original code did)
    cuts the graph, so ``loss.backward()`` on the saved features never
    reaches the input image and ``input.grad`` remains ``None`` — this
    was the bug.
    """

    def __init__(self, module):
        # Keep the hook handle so the hook can be removed later.
        self.hook = module.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, input, output):
        # Store the live, graph-connected output. Do NOT detach/clone:
        # detaching would block gradients from flowing back to the input.
        self.features = output

    def close(self):
        # Unregister the hook from the module.
        self.hook.remove()
class FilterVisualizer():
    """Activation maximization: optimize a random input image so it
    maximally activates one filter of one layer of a frozen model."""

    def __init__(self, model):
        self.model = model
        self.model.eval()
        # Freeze all weights — only the input image is optimized.
        for param in self.model.parameters():
            param.requires_grad = False

    def visualize(self, layer, filter, lr=0.1, opt_steps=100):
        """Run gradient ascent on a random image to maximize the mean
        activation of filter ``filter`` in layer ``layer``.

        Parameters
        ----------
        layer : int
            Index into the model's first child container.
        filter : int
            Channel index of the feature map to maximize.
        lr : float
            Adam learning rate for the pixel optimization.
        opt_steps : int
            Number of optimization steps.

        Returns
        -------
        torch.Tensor
            The optimized input image, shape (1, C, H, W), detached.
        """
        # Random start image; transform_train is expected to normalize and
        # crop it (48x48x3 -> 3x44x44 here — confirm against the transform).
        img = np.uint8(np.random.uniform(150, 180, (48, 48, 3)))
        img = transform_train(Image.fromarray(img))  # 3, 44, 44
        img = torch.unsqueeze(img, 0)  # 1, 3, 44, 44

        # Register the forward hook on the requested layer.
        # NOTE(review): assumes the model's first child is an indexable
        # container (e.g. nn.Sequential) — confirm for your architecture.
        activations = SaveFeatures(list(self.model.children())[0][layer])
        try:
            # torch.autograd.Variable is deprecated: a plain tensor with
            # requires_grad enabled is the leaf that Adam optimizes.
            img_var = img.clone().requires_grad_(True)
            optimizer = torch.optim.Adam([img_var], lr=lr, weight_decay=1e-6)
            for n in range(opt_steps):
                optimizer.zero_grad()
                self.model(img_var)
                # Maximize the mean activation of the chosen filter by
                # minimizing its negative.
                loss = -activations.features[0, filter].mean()
                loss.backward()
                optimizer.step()
            return img_var.detach()
        finally:
            # Always remove the hook so repeated calls don't leak hooks
            # onto the module.
            activations.close()