Normalization does not work as expected!

I am trying to normalize CUB_200 images by ImageNet stats. This is my code:

def _read_images_from_list(imagefile_list):

    imgs = []
    mean=[0.485, 0.456, 0.406]
    std= [0.229, 0.224, 0.225]

 
    
    Transformations = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
    
    for imagefile in imagefile_list:
        
        print("Reading img: ", imagefile)
        img = cv2.imread(imagefile).astype(np.float32) # cv2 returns images with channels in BGR order. This is reverse to the order that torchvision pretrained channels accept - (500, 357, 3) np.ndarray
         
        img = cv2.resize(img, (224, 224))
        # Convert BGR to RGB
        img_r, img_g, img_b = np.split(img, 3, axis=2) # the output should in fact be b, g, r 
        img = np.concatenate((img_b, img_g, img_r), axis=2) # the images are in BGR # here we can use Image.merge("RGB", (r, g, b)) (from PIL import Image)
        img = Transformations(img)
        imgs += [img]
    return imgs

This way I have images with very large max and very small min. What am I doing wrong?