Manual calculation of convolution operation not matching

Hi,

I am manually trying to verify the convolution operation. I extracted the weights of the first filter of the first conv layer from my saved model and applied them to the input image, then added the bias term. However, the output that the model produces during inference does not match my manually calculated result.

Link to my input image - https://drive.google.com/file/d/15Beff598UDT-fcT8WMF3w_0LW5LnpTV-/view?usp=sharing

Link to my saved model - https://drive.google.com/file/d/1tnbwb-YG-BPWAEy72h4J2ElBb-z2XNz6/view?usp=sharing
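
By "applied it to the input image" I mean the usual sliding-window sum of elementwise products plus the bias. Just to show the arithmetic for a single output pixel (dummy tensors here, not my actual data):

import torch

# Dummy values purely to illustrate the arithmetic for one output location:
patch = torch.randn(3, 3, 3)   # a 3x3 window of the (padded) image, over all 3 channels
w = torch.randn(3, 3, 3)       # the first filter of the first conv layer
b = torch.randn(1)             # its bias

# One output value = elementwise product, summed over channels and the 3x3 window, plus bias.
# (PyTorch's Conv2d computes a cross-correlation, so the kernel is not flipped.)
out_pixel = (patch * w).sum() + b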

This is the code of my CNN:

class Cifar10CnnModel(ImageClassificationBase):
    def __init__(self):
        super().__init__()
        self.network = nn.Sequential(
            
            # Conv-1
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Dropout(0.25),
            
            # Conv-2
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.AvgPool2d(2, 2), # output: 64 x 16 x 16
            
            # Conv-3
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Dropout(0.25),

            # Conv-4
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.AvgPool2d(2, 2), # output: 128 x 8 x 8

            # Conv-5
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Dropout(0.25),
            
            # Conv-6
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),           
            nn.ReLU(),
            nn.AvgPool2d(2, 2), # output: 256 x 4 x 4

            nn.Flatten(), 
            nn.Linear(256*4*4, 32),
            nn.Dropout(0.25),            
            nn.ReLU(),
            nn.Linear(32, 10))
        
    def forward(self, xb):
        return self.network(xb)
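
For reference, this is roughly how I load the saved model, pull out the first filter, and do the manual computation. It is only a sketch: the file name is a placeholder, I am assuming the checkpoint stores a state_dict, and resized_input_image is my input resized to a (1, 3, 32, 32) float tensor.

import torch
import torch.nn.functional as F

saved_model = Cifar10CnnModel()
saved_model.load_state_dict(torch.load('cifar10_model.pth', map_location='cpu'))  # placeholder path
saved_model.eval()

first_conv = saved_model.network[0]     # nn.Conv2d(3, 32, kernel_size=3, padding=1)
w = first_conv.weight[0].detach()       # first filter, shape (3, 3, 3)
b = first_conv.bias[0].item()           # its bias term

img = resized_input_image[0]            # drop the batch dimension -> (3, 32, 32)
padded = F.pad(img, (1, 1, 1, 1))       # zero padding of 1, matching padding=1 in the layer

manual = torch.zeros(32, 32)
for i in range(32):                     # output row
    for j in range(32):                 # output column
        window = padded[:, i:i+3, j:j+3]          # 3x3 window over all 3 input channels
        manual[i, j] = (window * w).sum() + b     # sum of products plus bias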

This is the code I am using to get the output of each layer:

# Define a dictionary to store the outputs of each layer
outputs = {}

# Separate lists to collect the outputs of each layer type
conv = []
batch_norm = []
relu = []
drop = []
pool = []
flatten = []
linear = []

# Forward hook that captures the output of each layer
def hook(module, input, output):
    # Key by the module object itself so that layers with identical reprs
    # (e.g. the repeated ReLU/Dropout instances) do not overwrite each other
    outputs[module] = output

    # Classify by layer type instead of the first character of str(module),
    # so the container modules (Cifar10CnnModel, nn.Sequential) are not
    # accidentally matched as well
    if isinstance(module, nn.Conv2d):
        conv.append("################## LAYER ##################")
        conv.append(output)
    elif isinstance(module, nn.BatchNorm2d):
        batch_norm.append("################## LAYER ##################")
        batch_norm.append(output)
    elif isinstance(module, nn.AvgPool2d):
        pool.append("################## LAYER ##################")
        pool.append(output)
    elif isinstance(module, nn.ReLU):
        relu.append("################## LAYER ##################")
        relu.append(output)
    elif isinstance(module, nn.Dropout):
        drop.append("################## LAYER ##################")
        drop.append(output)
    elif isinstance(module, nn.Flatten):
        flatten.append("################## LAYER ##################")
        flatten.append(output)
    elif isinstance(module, nn.Linear):
        linear.append("################## LAYER ##################")
        linear.append(output)

# Register the hook only on leaf modules; the top-level model and the
# nn.Sequential container would otherwise fire it with duplicate outputs
for name, module in saved_model.named_modules():
    if len(list(module.children())) == 0:
        module.register_forward_hook(hook)

# Perform inference on an input image
saved_model(resized_input_image)

# Print the output of each layer
# for name, output in outputs.items():
#     print(name, output.shape)
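
And this is how I compare the two, again only as a sketch: conv[1] should be the tensor appended right after the first banner string (shape 1 x 32 x 32 x 32), with channel 0 corresponding to the first filter. As a cross-check I also let F.conv2d apply just that one filter, using the w, b, first_conv and manual defined earlier.

# Output of the first Conv2d layer as captured by the hook; channel 0 is the first filter
hooked = conv[1][0, 0].detach()            # shape (32, 32)

# Cross-check: apply only the first filter with PyTorch's own conv2d
ref = F.conv2d(resized_input_image,
               first_conv.weight[:1],      # just the first filter, shape (1, 3, 3, 3)
               first_conv.bias[:1],
               padding=1)[0, 0].detach()    # shape (32, 32)

print(torch.allclose(manual, hooked, atol=1e-5))
print(torch.allclose(manual, ref, atol=1e-5))
print((manual - hooked).abs().max())       # largest absolute difference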