Outputs differ when using a forward hook vs a normal forward pass

import torch
import torch.nn as nn
import torch.nn.functional as F

from src.autoencoders.base_autoencoder import BaseAutoencoder


class AutoEncoder(BaseAutoencoder):
    def __init__(self, **kwargs):
        # A single super().__init__() walks the full MRO
        # (AutoEncoder -> BaseAutoencoder -> nn.Module), so one call is enough.
        super().__init__()

        # Encoder
        self.enc1 = nn.Linear(in_features=kwargs["input_shape"], out_features=1024)
        self.enc2 = nn.Linear(in_features=1024, out_features=256)
        self.enc3 = nn.Linear(in_features=256, out_features=128)
        self.enc4 = nn.Linear(in_features=128, out_features=64)
        self.enc5 = nn.Linear(in_features=64, out_features=32)
        self.enc6 = nn.Linear(in_features=32, out_features=kwargs["output_shape"])

        # Decoder
        self.dec1 = nn.Linear(in_features=kwargs["output_shape"], out_features=32)
        self.dec2 = nn.Linear(in_features=32, out_features=64)
        self.dec3 = nn.Linear(in_features=64, out_features=128)
        self.dec4 = nn.Linear(in_features=128, out_features=256)
        self.dec5 = nn.Linear(in_features=256, out_features=1024)
        self.dec6 = nn.Linear(in_features=1024, out_features=kwargs["input_shape"])

    def encode(self, x):
        x = F.relu(self.enc1(x))
        x = F.relu(self.enc2(x))
        x = F.relu(self.enc3(x))
        x = F.relu(self.enc4(x))
        x = F.relu(self.enc5(x))
        x = F.relu(self.enc6(x))

        return x

    def decode(self, x):
        x = F.relu(self.dec1(x))
        x = F.relu(self.dec2(x))
        x = F.relu(self.dec3(x))
        x = F.relu(self.dec4(x))
        x = F.relu(self.dec5(x))
        x = F.relu(self.dec6(x))

        return x

    def forward(self, x):

        x = self.encode(x)
        x = self.decode(x)

        return x

This is a simple autoencoder which takes an input vector and encodes it into a lower-dimensional vector.
To extract the representation after training I am trying two ways: 1) directly performing a forward pass through the encoder,
2) using 'register_forward_hook()' (sketched below).
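
For completeness, get_activation comes from BaseAutoencoder, which is not shown above. Judging from how it is used below, it follows the standard PyTorch activation-capture pattern; a minimal sketch, with the dict name activation and the .detach() inferred from the printed hook output further down (which carries no grad_fn):

class BaseAutoencoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.activation = {}

    def get_activation(self, name):
        # Returns a hook that stores the hooked module's own output under
        # `name`. A forward hook sees the module's raw output, before any
        # activation function applied outside the module.
        def hook(module, input, output):
            self.activation[name] = output.detach()
        return hook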

random_data = torch.rand((1, 1, 28, 28))
flat_data = torch.flatten(random_data)

my_nn = AutoEncoder(input_shape=784, output_shape=8)
my_nn.enc6.register_forward_hook(my_nn.get_activation('enc6'))
result1 = my_nn(flat_data)
print(my_nn.activation['enc6'])
# Answer : tensor([-0.1071,  0.1887, -0.0942,  0.1025,  0.1118,  0.0955, -0.2018, -0.0909])

result2 = my_nn.encode(flat_data)
print(result2)
# Answer : tensor([0.0000, 0.1887, 0.0000, 0.1025, 0.1118, 0.0955, 0.0000, 0.0000],grad_fn=<ReluBackward0>)

The results of the two approaches are different: in result2 the negative values have been converted to zeros. Why is this happening?

I think the relu operation is not part of the enc6 layer itself (it is applied functionally inside encode()), so the hook captures enc6's raw output before relu, which is why it still contains negative values.
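
A quick check of this hypothesis, reusing my_nn, result2, and the captured activation from the snippets above: if the hook really records enc6's pre-relu output, then applying relu to it must reproduce result2 exactly.

print(torch.allclose(F.relu(my_nn.activation['enc6']), result2))
# Expected: True, since encode() applies F.relu to enc6's output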