How to replicate PyTorch normalization in NumPy?

Run the following code:

# Preprocessing pipeline: PIL image -> float tensor in [0, 1] -> normalized
# per-channel with mean 0.5 and std 0.5, i.e. mapped to [-1, 1].
_PIPELINE = [
    # torchvision.transforms.Resize((224, 224), Image.NEAREST),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.5], [0.5]),
]
TRANSFORM = torchvision.transforms.Compose(_PIPELINE)

def _report(label, image, max_val, min_val, avg_val):
    # Print shape, extrema, mean, and the full contents of `image` under
    # the given label. Statistics are passed in pre-computed so that the
    # printed representation matches the library that produced them
    # (torch scalars vs. NumPy scalars).
    print('\n' + label + '.shape = ' + str(image.shape))
    print(label + ' max = ' + str(max_val))
    print(label + ' min = ' + str(min_val))
    print(label + ' avg = ' + str(avg_val))
    print(label + ': ')
    print(image)

def main():
    """Compare torchvision's ToTensor+Normalize against the equivalent NumPy math.

    Builds a random uint8 image, runs it through TRANSFORM, then replicates
    the same scaling/normalization by hand on the NumPy array and prints the
    statistics of both so they can be compared side by side.
    """
    openCvImage = (np.random.rand(908, 1210) * 255).astype(np.uint8)
    pilImage = PIL.Image.fromarray(openCvImage)
    ptImage = TRANSFORM(pilImage)
    _report('ptImage', ptImage,
            torch.max(ptImage), torch.min(ptImage), torch.mean(ptImage))

    # NOTE: cv2.resize's third positional parameter is `dst`, not the
    # interpolation flag — it must be passed by keyword:
    # openCvImage = cv2.resize(openCvImage, (224, 224), interpolation=cv2.INTER_NEAREST)
    # Add a leading channel axis to match ToTensor's (C, H, W) layout.
    openCvImage = openCvImage.astype('float32')[None, ...]
    # ToTensor divides by 255, then Normalize subtracts 0.5 and divides by
    # 0.5 — equivalent to (x - 255*0.5) / (255*0.5) on the raw uint8 values.
    mean = 255 * 0.5
    stdDev = 255 * 0.5
    openCvImage = (openCvImage - mean) / stdDev
    _report('openCvImage', openCvImage,
            np.max(openCvImage), np.min(openCvImage), np.mean(openCvImage))

    print('\ndone !!\n')

# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

and you will find that the normalization itself matches exactly — the actual difference comes from the resize method (PIL and OpenCV use different interpolation implementations).

References:
stackoverflow::torch-transform-resize-vs-cv2-resize
stackoverflow::what-is-the-difference-between-pils-and-opencvs-resize
pytorchforums::difference-between-pil-resize-and-opencv-resize