Retrieve neural network name inside registered hook

How do I get the neural network's name inside the hook function, so that I can see the output of each neural network under a different name in TensorBoard?

writer = SummaryWriter()

def hook(module, input, output):
    # Visualize the first channel of the first sample in the batch.
    feature_grid = torchvision.utils.make_grid(output[0][0])
    # NOTE: the tag is hardcoded, so every module this hook is attached to
    # logs under the same name and the images overwrite each other.
    writer.add_image('conv_features', feature_grid, 0)

a = model.modelone.encoder.register_forward_hook(hook)
b = model.modeltwo.encoder.register_forward_hook(hook)

`module.__class__`, `module.__class__.__name__`, and `module._get_name` do not give the neural network's name.

Have a look at this example where you can pass additional names to the get_activation function.

I did something like this to get the output of the encoder of each neural network:

writer = SummaryWriter()
activations = {}

def get_activation(name):
  """Build a forward hook that records and logs a module's output under *name*."""
  def hook(module, input, output):
      # Keep a detached copy for later inspection, keyed by the module's name.
      activations[name] = output.detach()
      img_grid = torchvision.utils.make_grid(output)
      writer.add_image(name, img_grid, 0)
  return hook

for module_name, submodule in model.named_modules():
  # Match the encoder modules themselves, not their children ('encoder.0', ...).
  if 'encoder' in module_name and 'encoder.' not in module_name:
    print(module_name, submodule)
    submodule.register_forward_hook(get_activation(module_name))

@ptrblck, this way I am able to visualize the patterns learnt by the base neural networks. However, their output is passed in the form of 10 scores, which are then concatenated in the higher-level neural network. How do I visualize what the higher-level neural networks are learning? Currently I do something like this: I add the image output from the base networks and plot them.

def forward(self, x):
    """Combine both base models' features, log the merged feature map, and
    return (encoded feature map, concatenated score vector).

    Fix: the original called self.modelone(x) and self.modeltwo(x) twice each
    (once for the feature map, once for the scores), doubling the forward
    compute and — in train mode — updating BatchNorm running statistics twice
    per step. Each submodel is now run exactly once and both tuple elements
    are reused.
    """
    feat_one, score_one = self.modelone(x)
    feat_two, score_two = self.modeltwo(x)
    img = feat_one + feat_two
    # Log the merged base-network feature map for TensorBoard inspection.
    grid = torchvision.utils.make_grid(img)
    writer.add_image('img_combine', grid)
    img_2d = self.encoder(img)
    img = self.lin_two(self.lin_one(img_2d.view(img_2d.size(0), -1)))
    out = torch.cat((score_one, score_two, img), dim=-1)
    out = self.linear(out)
    return img_2d, out

where ModelOne's forward looks like:

def forward(self, x):
    # Return both the raw encoder feature map and the classifier scores
    # computed from its flattened form.
    encoded = self.encoder(x)
    flat = encoded.view(encoded.size(0), -1)
    scores = self.lin_two(self.lin_one(flat))
    return (encoded, scores)

The encoder is a conv → batchnorm → relu stack.