Intermediate activation values keep changing when I execute the hook

I am extracting the features of an image from an intermediate layer of my network with a forward hook, using this function:

def get_layer_activation(model, name, layer, transform, path):
    # Capture the given layer's output in a dict via a forward hook.
    activation = {}

    def get_activation(name):
        def hook(model, input, output):
            activation[name] = output.detach()

        return hook

    handle = layer.register_forward_hook(get_activation(name))
    img = tensorify_img(path, transform)
    output = model(img[None, ...].float())  # .to(device)
    handle.remove()  # avoid stacking hooks on repeated calls

    return activation[name].reshape(-1)

When I run the function above, it returns different values each time.

_test_transform = transforms.Compose(
    [
        transforms.ToTensor(),
    ]
)
act = get_layer_activation(
    network_saved, "latent", network_saved.fc1, _test_transform, single_image)
network_saved is the pretrained network loaded from this architecture:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 3)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
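
For reference, here is a quick check of the symptom (a minimal sketch, assuming the same names as above):

import torch

act1 = get_layer_activation(
    network_saved, "latent", network_saved.fc1, _test_transform, single_image)
act2 = get_layer_activation(
    network_saved, "latent", network_saved.fc1, _test_transform, single_image)
# compares the two extracted feature vectors from consecutive runs
print(torch.allclose(act1, act2))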

Is this expected behavior? If yes, why?

You are using dropout layers (nn.Dropout2d and the functional F.dropout) in your model, which will randomly zero activations during training, so the different outputs are expected.
Call model.eval() and it should work (unless you are using other random operations).
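
For example (a minimal sketch, assuming the names from your snippet):

network_saved.eval()  # disables nn.Dropout2d and F.dropout(..., training=self.training)
act = get_layer_activation(
    network_saved, "latent", network_saved.fc1, _test_transform, single_image)
# call network_saved.train() again if you continue training afterwards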


Yes, I realized that later. This solution works perfectly.