Resnet50 accuracy when calling its modules individually


I have a pre-trained resnet50(imported from pytorch),on a two-class dataset. consider the model is resnet50
When I push an image through the model like resnet50(x), the model classifies the image correctly. However, I re-implemented the forward functions manually to track the activations of the relu layer. When I pass the image through the second way I explained, the model classifies the image wrongly.
Here is my code. I appreciate any help.

    def forward_resnet50_(self,x):
        initial_layers = ['conv1', 'bn1', 'relu', 'maxpool']
        classifier_layers = ['avgpool', 'fc']
        block_layers = ['layer1', 'layer2', 'layer3', 'layer4']
        block_layers_dic={'layer1':{'0':['conv1', 'bn1', 'relu', 'conv2', 'bn2', 'relu', 'conv3', 'bn3', 'downsample', 'relu'],
                                    '1':['conv1', 'bn1', 'relu','conv2', 'bn2','relu', 'conv3', 'bn3', 'relu'],
                                    '2':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu']},
                          'layer2':{'0':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'downsample', 'relu'],
                                    '1':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu'],
                                    '2':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu'],
                                    '3':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu']},
                          'layer3':{'0':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'downsample', 'relu'],
                                    '1':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu'],
                                    '2':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu'],
                                    '3':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu'],
                                    '4':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu']},
                          'layer4':{'0':['conv1', 'bn1', 'relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'downsample', 'relu'],
                                    '1':['conv1', 'bn1', 'relu','conv2', 'bn2','relu', 'conv3', 'bn3', 'relu'],
                                    '2':['conv1', 'bn1','relu', 'conv2', 'bn2','relu', 'conv3', 'bn3', 'relu']}
        for layer_name in initial_layers:
            x = self.model._modules[layer_name](x)
        for blck_layer_name in block_layers:
            x = self.forward_blck_resnet50(x,self.model._modules[blck_layer_name],blck_layer_name,block_layers_dic)
        x = self.model._modules[classifier_layers[0]](x)
        #x = torch.flatten(x, 1)
        x = x.reshape(x.shape[0], -1)
        x = self.model._modules[classifier_layers[1]](x)
        return x

    def forward_blck_resnet50(self,x,blck_module,blck_layer_name,block_layers_dic):
        identity = torch.clone(x)
        for layer_index in block_layers_dic[blck_layer_name].keys():
            for operaion_name in block_layers_dic[blck_layer_name][layer_index]:
                if operaion_name == 'downsample':
                    identity = blck_module._modules[layer_index]._modules[operaion_name](identity)
                    x +=identity
                    x = blck_module._modules[layer_index]._modules[operaion_name](x)
        return x

I would recommend to print the stats of the intermediate activations in the original resnet and compare these to the activations in your custom model to check where the models diverge.
My first guess would be that you might be missing functional API calls from the forward method of the original ResNet.