import torch

# t_model, s_model, your_model and input_tensors are assumed to be defined elsewhere
# (your_model should contain both backbones; otherwise apply/forward each model separately)
t_FM, s_FM, hooks = [], [], []  # captured feature maps and registered hook handles

def add_hook(module):
    def hook1(module, input, output):
        print("teacher hook fired")
        t_FM.append(output)
    # register only on the specific teacher layer whose output we want
    if module is t_model.tbackbone.layer2[1].conv1:
        hooks.append(module.register_forward_hook(hook1))

    def hook2(module, input, output):
        print("student hook fired")
        s_FM.append(output)
    # register only on the matching student layer
    if module is s_model.sbackbone.layer2[1].conv1:
        hooks.append(module.register_forward_hook(hook2))

your_model.eval()
your_model.apply(add_hook)  # apply() visits every sub-module, so the target layers get hooked
with torch.no_grad():
    your_model(input_tensors)  # forward pass fires all the registered hooks
for hook_handle in hooks:
    hook_handle.remove()  # detach the hooks so later forwards are unaffected
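
After the forward pass, the captured activations sit in the two lists. A quick sanity check (assuming a single forward pass, so each list holds one tensor) could look like this:

print(len(t_FM), len(s_FM))          # expected: 1 and 1 after one forward pass
print(t_FM[0].shape, s_FM[0].shape)  # feature maps from the hooked conv layers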