PyTorch using Keras VGG weights produces different results

I’m trying to load pre-trained Keras weights into a PyTorch model with the same structure. However, the PyTorch model produces different results than the Keras one. I’m not sure if I made a mistake somewhere. Any help would be much appreciated.

This is my code:

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.applications.vgg16 import preprocess_input
import numpy as np
import os
import cv2
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
from keras import backend as K
import torch
import torch.nn as nn
import torch.nn.functional as F

dummy_arr = np.ones([32,32,3]).astype(np.uint16)

vgg = VGG16(weights='imagenet', input_shape=(32, 32, 3), include_top=False)
k_model = Model(inputs=vgg.inputs, outputs=vgg.get_layer('block1_conv1').output)

img = np.expand_dims(dummy_arr, axis=0)
img = preprocess_input(img)
res1 = k_model.predict(img)[0, :, :, 0]
print(res1)


class PytorchNet:
    def __init__(self):
        vgg = VGG16(weights='imagenet', input_shape=(32, 32, 3), include_top=False)
        k_model = Model(inputs=vgg.inputs, outputs=vgg.get_layer('block1_conv2').output)
        weights = k_model.get_weights()
        self.model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg16', pretrained=True).features[:2].eval()
        self.model[0].weight.data=torch.from_numpy(np.transpose(weights[0]))
        self.model[0].bias.data=torch.from_numpy(weights[1])
        
    def __call__(self, x):
        x = (x / np.iinfo(x.dtype).max).astype(np.float32)
        x_tensor = torch.from_numpy(x).unsqueeze(0).permute(0,3,1,2)
        res = self.model(x_tensor)
        res = res[0, 0, :, :]
        return res

p_model = PytorchNet()
res2 = p_model(dummy_arr)
print(res2)

And the Keras result looks like this:

[[ 12.10889   0.        0.      ...   0.        0.        0.     ]
 [189.03055  10.17958  10.17958 ...  10.17958  10.17958   0.     ]
 [189.03055  10.17958  10.17958 ...  10.17958  10.17958   0.     ]
 ...
 [189.03055  10.17958  10.17958 ...  10.17958  10.17958   0.     ]
 [189.03055  10.17958  10.17958 ...  10.17958  10.17958   0.     ]
 [280.07498 200.40985 200.40985 ... 200.40985 200.40985  46.49285]]

which is different from the PyTorch result:

tensor([[0.0000, 0.6139, 0.6139,  ..., 0.6139, 0.6134, 0.9878],
        [0.0000, 0.5057, 0.5104,  ..., 0.5104, 0.5091, 1.2110],
        [0.0000, 0.4962, 0.5027,  ..., 0.5027, 0.5026, 1.2092],
        ...,
        [0.0000, 0.4962, 0.5027,  ..., 0.5027, 0.5026, 1.2092],
        [0.0000, 0.5053, 0.5138,  ..., 0.5138, 0.5132, 1.2184],
        [0.0000, 1.1331, 1.1401,  ..., 1.1401, 1.1405, 1.5501]],
       grad_fn=<SliceBackward0>)

Could you check the input ranges of the tensors? It seems the input used in Keras might not be normalized.
If I understand your code correctly, you are only using a conv + relu layer, so for a normalized input I would expect to see “small” numbers, not values in the 100+ range.
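
To make the comparison concrete, here is a minimal sketch of the check I have in mind (reusing your dummy_arr; the printed ranges are approximate and assume VGG16’s caffe-style preprocess_input, i.e. RGB to BGR plus ImageNet mean subtraction without any scaling to [0, 1]):

import numpy as np
from keras.applications.vgg16 import preprocess_input

dummy_arr = np.ones([32, 32, 3]).astype(np.uint16)

# Input as the Keras model sees it: caffe-style preprocessing
# (RGB -> BGR, ImageNet mean subtraction, no scaling to [0, 1]).
keras_in = preprocess_input(np.expand_dims(dummy_arr, axis=0).astype(np.float32))
print('Keras input range:  ', keras_in.min(), keras_in.max())    # roughly -123 ... -103

# Input as your PyTorch wrapper sees it: divided by the uint16 max.
torch_in = (dummy_arr / np.iinfo(dummy_arr.dtype).max).astype(np.float32)
print('PyTorch input range:', torch_in.min(), torch_in.max())    # ~1.5e-5

Since the copied Keras weights were trained on that caffe-preprocessed (BGR, mean-subtracted) input, the two models are effectively seeing very different tensors, which would explain the large activations on the Keras side and the tiny ones on the PyTorch side. Feeding both models the same preprocessed array should bring the outputs much closer.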