Discrepancy between Gaussian blur implemented with PyTorch conv and OpenCV

from PIL import Image
from torchvision import transforms

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import requests
from io import BytesIO

class GaussianBlur(object):
    """Blur a single PIL image with OpenCV's GaussianBlur."""
    def __call__(self, img):
        sigma = 2
        after = cv2.GaussianBlur(np.asarray(img), (23, 23), sigma)
        return after

class GaussianBlur2(object):
    """blur a single image on CPU with two depthwise 1-D convolutions"""
    def __init__(self, kernel_size=23):
        radius = kernel_size // 2
        kernel_size = radius * 2 + 1  # force an odd kernel size
        self.pad = nn.ReflectionPad2d(radius)
        # depthwise 1-D convolutions: a (k, 1) kernel filters along the height
        # axis, a (1, k) kernel along the width axis
        self.blur_h = nn.Conv2d(3, 3, kernel_size=(kernel_size, 1),
                                stride=1, padding=0, bias=False, groups=3)
        self.blur_v = nn.Conv2d(3, 3, kernel_size=(1, kernel_size),
                                stride=1, padding=0, bias=False, groups=3)
        self.k = kernel_size
        self.r = radius

        self.tensor_to_pil = transforms.ToPILImage()

    def __call__(self, img):
        # PIL image (H, W, C) -> float tensor of shape (1, C, H, W)
        img = torch.tensor(np.asarray(img)).unsqueeze(0).float()
        img = img.permute(0, 3, 1, 2)

        # build a normalized 1-D Gaussian kernel and copy it into both convs
        sigma = 2
        x = np.arange(-self.r, self.r + 1)
        x = np.exp(-np.power(x, 2) / (2 * sigma * sigma))
        x = x / x.sum()
        x = torch.from_numpy(x).view(1, -1).repeat(3, 1)

        self.blur_h.weight.data.copy_(x.view(3, 1, self.k, 1))
        self.blur_v.weight.data.copy_(x.view(3, 1, 1, self.k))

        with torch.no_grad():
            img = self.pad(img)
            img = self.blur_h(img)
            img = self.blur_v(img)
            # round to integer pixel values so the comparison with
            # OpenCV's uint8 output is fair
            img = img.round()
            img = img.squeeze()

        # ToPILImage rescales a float tensor in [0, 1] back to uint8
        img = self.tensor_to_pil((img / 255).float().cpu())

        return img

response = requests.get('https://img.sunset02.com/sites/default/files/styles/marquee_large_2x/public/image/2017/04/main/el-capitan-getty-0517.jpg?itok=vuJS4hXh')
img = Image.open(BytesIO(response.content)).convert('RGB')
cv2img = Image.fromarray(GaussianBlur()(img))
cv2img_tensor = transforms.ToTensor()(cv2img)
torchimg = GaussianBlur2()(img)
torchimg_tensor = transforms.ToTensor()(torchimg)

# fraction of exactly matching values between the two outputs
match = (cv2img_tensor == torchimg_tensor).float()
print(match.sum() / (match.shape[0] * match.shape[1] * match.shape[2]))
# values from each output at the positions where they disagree
print(np.asarray(cv2img)[np.asarray(cv2img) != np.asarray(torchimg)])
print(np.asarray(torchimg)[np.asarray(cv2img) != np.asarray(torchimg)])

>>> tensor(0.9617)
>>> [226 161 173 ...  22  18   3]
>>> [227 162 172 ...  23  19   2]

As far as I know, OpenCV's GaussianBlur wraps sepFilter2D, which should be exactly the same as the PyTorch implementation above.
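One quick sanity check is to compare the 1-D kernel built in GaussianBlur2 against cv2.getGaussianKernel directly. This is a minimal, self-contained sketch reusing kernel_size=23 and sigma=2 from above:

import cv2
import numpy as np

sigma, ksize = 2, 23
r = ksize // 2
x = np.arange(-r, r + 1)
k_np = np.exp(-np.power(x, 2) / (2 * sigma * sigma))
k_np = k_np / k_np.sum()
k_cv = cv2.getGaussianKernel(ksize, sigma).ravel()
print(np.abs(k_np - k_cv).max())  # near zero if the coefficients agree

If the printed difference is at machine precision, the kernel coefficients themselves are not the source of the mismatch, and whatever differs lies in how the filtering is applied.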

However, the two implementations disagree on ~4% of the pixel values for the (image, kernel_size, sigma) above.
The difference becomes much worse when the rounding (img = img.round()) is removed: the mismatch reaches ~50%.
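The mismatching values printed above hint that the per-pixel errors are small. A quick way to check their magnitude, reusing cv2img and torchimg from the script above, could be:

cv2_arr = np.asarray(cv2img).astype(np.int16)    # signed type so the subtraction can't wrap around
torch_arr = np.asarray(torchimg).astype(np.int16)
diff = np.abs(cv2_arr - torch_arr)
print(diff.max(), (diff > 0).mean())             # largest per-pixel error and overall mismatch fraction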

  • pytorch==1.5.1, opencv-python==4.3.0.36

Interesting. I just stumbled on a discrepancy between the output of cv2.GaussianBlur() and the output of cv2.getGaussianKernel() combined with cv2.sepFilter2D().

Might this be related to your discrepancy?
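For reference, a minimal sketch of that comparison (assuming the same img, kernel_size=23 and sigma=2 as in the script above) could look like this:

src = np.asarray(img)
k = cv2.getGaussianKernel(23, 2)
out_blur = cv2.GaussianBlur(src, (23, 23), 2)
out_sep = cv2.sepFilter2D(src, -1, k, k)  # the same separable kernel, applied explicitly
diff = np.abs(out_blur.astype(np.int16) - out_sep.astype(np.int16))
print(diff.max(), (diff > 0).mean())

Both calls use the same default border handling, so any remaining difference presumably comes from how each code path evaluates the filter on 8-bit input.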