Help me understand transform normalize

I am trying to normalize image(mean 0 std 1) before giving it to neural network.
This is my code.

def stats(image):
    #expects image in [H,W,C] format
    print("channel 1 min {} max {} mean {} std {}".format(image[:,:,0].min(), image[:,:,0].max(), image[:,:,0].mean(), image[:,:,0].std()))
    print("channel 2 min {} max {} mean {} std {}".format(image[:,:,1].min(), image[:,:,1].max(), image[:,:,1].mean(), image[:,:,1].std()))
    print("channel 3 min {} max {} mean {} std {}".format(image[:,:,2].min(), image[:,:,2].max(), image[:,:,2].mean(), image[:,:,2].std()))

image = cv2.imread(train.loc[0, "path"]); print("original shape", image.shape)
print("original image stats")
stats(image)
image_tensor = transforms.ToTensor()(image); print("\nimage tensor shape ", image_tensor.shape)  #scales in [0,1] but also changes dimension[c, H, W]

#now lets normalize it per channel and get mean 0 and std 1
#for which I hope it will use following formula
"""
image_n[:,:,0] = (image_n[:,:,0] - image_n[:,:,0].mean()) / image_n[:,:,0].std()
image_n[:,:,1] = (image_n[:,:,1] - image_n[:,:,1].mean()) / image_n[:,:,1].std()
image_n[:,:,2] = (image_n[:,:,2] - image_n[:,:,2].mean()) / image_n[:,:,2].std()
"""

image_n = transforms.Normalize(mean = [image[:,:,0].mean(), image[:,:,1].mean(), image[:,:,2].mean()],
                               std = [image[:,:,0].std(), image[:,:,1].std(), image[:,:,2].std()])(image_tensor)

print("normalized image shape", image_n.shape) #lets change to [H,W,C] and check stats
image_n = np.transpose(image_n.detach().numpy(), [1,2,0]); print("changing dims", image_n.shape)
stats(image_n)

I get following output

original shape (2136, 3216, 3)
original image stats
channel 1 min 0 max 63 mean 16.429443081875267 std 9.372180776313941
channel 2 min 0 max 202 mean 43.16988136913746 std 26.907299737201175
channel 3 min 0 max 255 mean 80.74282933413457 std 49.58311051825643

image tensor shape  torch.Size([3, 2136, 3216])


normalized image shape torch.Size([3, 2136, 3216])
changing dims (2136, 3216, 3)
channel 1 min -1.753001093864441 max -1.726640224456787 mean -1.7461274862289429 std 0.003921598196029663
channel 2 min -1.6043928861618042 max -1.5749527215957642 mean -1.5981013774871826 std 0.0039215791039168835
channel 3 min -1.6284340620040894 max -1.608265995979309 mean -1.622046947479248 std 0.003921573981642723

Here image is not normalized.
What am I doing wrong ?

Hi @mayur_newase

ToTensor already “normalizes” the [0,255] uint input image into [0.0, 1.0] floats. Hence, the mean and std args expected by transforms.Normalize should be also scaled down to the [0.0, 1.0] range.

1 Like