How to replicate PyTorch normalization in NumPy?

I need to replicate PyTorch image normalization in OpenCV or NumPy.

Quick backstory: I’m training in PyTorch but will have to run inference with OpenCV, since I’m deploying to an embedded device that won’t have the storage space to install PyTorch. Therefore, I need to use NumPy to do the normalization before running inference on the device.

I’m getting different accuracy results when normalizing and test-inferencing in PyTorch vs. normalizing in NumPy and inferencing in OpenCV, and I suspect the difference is due to the normalization producing slightly different results between the two pipelines.

Here is a quick script I put together to show the difference on a single image. Note that I’m using grayscale (single-channel) and I’m normalizing into the -1.0 to +1.0 range:

# scratchpad.py

import torch
import torchvision

import cv2
import numpy as np
import PIL
from PIL import Image

TRANSFORM = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.5], [0.5])
])

def main():
    # 1st show PyTorch normalization

    # open the image as an OpenCV image
    openCvImage = cv2.imread('image.jpg', cv2.IMREAD_GRAYSCALE)
    # convert OpenCV image to PIL image
    pilImage = PIL.Image.fromarray(openCvImage)
    # convert PIL image to a PyTorch tensor
    ptImage = TRANSFORM(pilImage).unsqueeze(0)
    # show the PyTorch tensor info
    print('\nptImage.shape = ' + str(ptImage.shape))
    print('ptImage max = ' + str(torch.max(ptImage)))
    print('ptImage min = ' + str(torch.min(ptImage)))
    print('ptImage avg = ' + str(torch.mean(ptImage)))
    print('ptImage: ')
    print(str(ptImage))

    # 2nd show NumPy normalization

    # resize the image
    openCvImage = cv2.resize(openCvImage, (224, 224))
    # convert to float32 (necessary for passing into cv2.dnn.blobFromImage,
    # which is not shown here; see the sketch after this script)
    openCvImage = openCvImage.astype('float32')
    # standardize using this image's own mean and standard deviation
    mean = np.mean(openCvImage)
    stdDev = np.std(openCvImage)
    openCvImage = (openCvImage - mean) / stdDev
    # show results
    print('\nopenCvImage.shape = ' + str(openCvImage.shape))
    print('openCvImage max = ' + str(np.max(openCvImage)))
    print('openCvImage min = ' + str(np.min(openCvImage)))
    print('openCvImage avg = ' + str(np.mean(openCvImage)))
    print('openCvImage: ')
    print(str(openCvImage))

    print('\ndone !!\n')
# end function

if __name__ == '__main__':
    main()
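
Side note, since the comment in the script mentions cv2.dnn.blobFromImage: as I read the OpenCV docs, blobFromImage subtracts mean first and then multiplies by scalefactor, so the same -1.0 to +1.0 scaling could be folded into that one call. A rough sketch of what I mean (not my actual device code, and note that blobFromImage also does its own OpenCV-side resize to size):

import cv2

# blobFromImage subtracts `mean`, then multiplies by `scalefactor`, so
# (pixel - 127.5) * (1 / 127.5) maps [0, 255] onto [-1.0, +1.0],
# where 127.5 = 255 * 0.5
img = cv2.imread('image.jpg', cv2.IMREAD_GRAYSCALE)
blob = cv2.dnn.blobFromImage(img, scalefactor=1.0 / 127.5, size=(224, 224), mean=127.5)
print(blob.shape)  # (1, 1, 224, 224), float32, NCHW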

Here is the test image that I’m using:

and here are the results I’m getting currently:

$ python3 scratchpad.py 

ptImage.shape = torch.Size([1, 1, 224, 224])
ptImage max = tensor(0.9608)
ptImage min = tensor(-0.9686)
ptImage avg = tensor(0.1096)
ptImage: 
tensor([[[[ 0.0431, -0.0431,  0.1294,  ...,  0.8510,  0.8588,  0.8588],
          [ 0.0510, -0.0510,  0.0980,  ...,  0.8353,  0.8510,  0.8431],
          [ 0.0588, -0.0431,  0.0745,  ...,  0.8510,  0.8588,  0.8588],
          ...,
          [ 0.6157,  0.6471,  0.5608,  ...,  0.6941,  0.6627,  0.6392],
          [ 0.4902,  0.3961,  0.3882,  ...,  0.6627,  0.6471,  0.6706],
          [ 0.3725,  0.4039,  0.5451,  ...,  0.6549,  0.6863,  0.6549]]]])

openCvImage.shape = (224, 224)
openCvImage max = 2.1724665
openCvImage min = -2.6999729
openCvImage avg = 7.298528e-09
openCvImage: 
[[ 0.07062991 -0.42616782  0.22349077 ...  1.809422    1.8476373
   1.809422  ]
 [ 0.10884511 -0.42616782 -0.04401573 ...  1.7520993   1.7903144
   1.7520993 ]
 [ 0.0324147  -0.42616782 -0.21598418 ...  1.809422    1.8285296
   1.8285296 ]
 ...
 [ 1.1597633   1.5419154   1.0642253  ...  1.7712069   1.178871
   1.1406558 ]
 [ 1.4081622   0.56742764  0.70118093 ...  1.3890547   1.3699471
   1.3126242 ]
 [ 0.56742764  0.7393961   1.0069026  ...  1.1024406   1.5419154
   1.178871  ]]

So I’m getting results somewhat close to the PyTorch normalization, but definitely not the same: even the value ranges differ (roughly [-0.97, 0.96] from PyTorch vs. [-2.70, 2.17] from NumPy).

How can I do the normalization in NumPy and have it come out exactly or almost exactly the same as the PyTorch normalization?

Admittedly, this is a semi-repost of my Stack Overflow question “How to replicate PyTorch normalization in OpenCV or NumPy?”, but I did not get a working answer there, so I figured I’d ask here.

Run the following code:

import torch
import torchvision

import cv2
import numpy as np
import PIL
from PIL import Image

TRANSFORM = torchvision.transforms.Compose([
    # torchvision.transforms.Resize((224, 224), Image.NEAREST),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.5], [0.5])
])

def main():
    openCvImage = (np.random.rand(908, 1210) * 255).astype(np.uint8)
    pilImage = PIL.Image.fromarray(openCvImage)
    ptImage = TRANSFORM(pilImage)
    print('\nptImage.shape = ' + str(ptImage.shape))
    print('ptImage max = ' + str(torch.max(ptImage)))
    print('ptImage min = ' + str(torch.min(ptImage)))
    print('ptImage avg = ' + str(torch.mean(ptImage)))
    print('ptImage: ')
    print(ptImage)

    # note: interpolation must be passed as a keyword; the third positional
    # argument of cv2.resize is dst, not the interpolation flag
    # openCvImage = cv2.resize(openCvImage, (224, 224), interpolation=cv2.INTER_NEAREST)
    openCvImage = openCvImage.astype('float32')[None, ...]  # add a channel dim: (1, H, W)
    # ToTensor (x / 255) followed by Normalize([0.5], [0.5]) is the same as
    # (x - 255 * 0.5) / (255 * 0.5) applied to the raw 0-255 values
    mean = 255 * 0.5
    stdDev = 255 * 0.5
    openCvImage = (openCvImage - mean) / stdDev
    print('\nopenCvImage.shape = ' + str(openCvImage.shape))
    print('openCvImage max = ' + str(np.max(openCvImage)))
    print('openCvImage min = ' + str(np.min(openCvImage)))
    print('openCvImage avg = ' + str(np.mean(openCvImage)))
    print('openCvImage: ')
    print(openCvImage)

    print('\ndone !!\n')

if __name__ == '__main__':
    main()

and you will find that the two outputs now match: once the mean and std are expressed on the 0-255 scale (mean = std = 255 * 0.5), the normalization itself is identical. The difference you saw is actually from the resize method, not the normalization; PIL and OpenCV do not resample identically. (Your original script also standardized by the image’s own mean and std rather than the fixed 0.5 / 0.5, which was a second source of difference.)
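
If you need the full NumPy-side preprocessing to line up with torchvision, the safest route is to do the resize with PIL itself, since that is what torchvision.transforms.Resize calls for PIL inputs. A minimal sketch, assuming the default bilinear interpolation (pytorch_like_preprocess is just an illustrative name):

import numpy as np
from PIL import Image

def pytorch_like_preprocess(gray_u8, size=(224, 224)):
    # resize with PIL's bilinear filter, the same call torchvision's
    # Resize makes for PIL inputs; note PIL's size is (width, height)
    resized = Image.fromarray(gray_u8).resize(size, Image.BILINEAR)
    x = np.asarray(resized, dtype=np.float32) / 255.0  # ToTensor: [0, 255] -> [0, 1]
    x = (x - 0.5) / 0.5                                # Normalize([0.5], [0.5]) -> [-1, 1]
    return x[None, None, ...]                          # (1, 1, H, W), like unsqueeze(0)

If PIL is not available on the device, cv2.resize with interpolation=cv2.INTER_LINEAR gets close but is not bit-identical to PIL's resampling; see the references below.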

References:
- Stack Overflow: Torch transform.resize() vs cv2.resize()
- Stack Overflow: What is the difference between PIL's and OpenCV's resize?
- PyTorch Forums: Difference between PIL resize and OpenCV resize