How is the Kervolution Neural Network given below different from a Convolutional Neural Network?

Dear friends, I was wondering whether there is any convolution operation in the following PyTorch code of a Kervolution Neural Network, or whether there is only a Gaussian, polynomial, or Euclidean operation between the image and the kernel. Could anyone please clarify this for me? I would also be grateful if anyone could suggest a method for testing the operation on a single image, so that I can see what operation is taking place.

import torch
import torch.nn as nn
import torch.nn.functional as F

class Kerv2d(nn.Conv2d):
    '''
    Kervolution with the following options:
    kernel_type: [linear, polynomial, gaussian, etc.]
    The default is convolution:
             kernel_type --> linear
    balance, power, and gamma are valid only when kernel_type is specified.
    If learnable_kernel = True, they are just the initial values of the learnable parameters.
    If learnable_kernel = False, they are the fixed values of the kernel's parameters.
    The parameter [power] cannot be learned because it is restricted to integer values.
    '''
    def __init__(self, in_channels, out_channels, kernel_size, 
            stride=1, padding=0, dilation=1, groups=1, bias=True,
            kernel_type='linear', learnable_kernel=False, kernel_regularizer=False,
            balance=1, power=3, gamma=1):

        super(Kerv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        self.kernel_type = kernel_type
        self.learnable_kernel, self.kernel_regularizer = learnable_kernel, kernel_regularizer
        self.balance, self.power, self.gamma = balance, power, gamma

        # kernel hyper-parameters, one value per output channel; shape (out_channels, 1)
        # so they broadcast over the unfolded spatial positions in forward().
        # torch.full keeps the module device-agnostic (move it with .to()/.cuda()).
        if learnable_kernel:
            self.balance = nn.Parameter(torch.full((out_channels, 1), float(balance)))
            self.gamma   = nn.Parameter(torch.full((out_channels, 1), float(gamma)))

    def forward(self, input):

        minibatch, in_channels, input_height, input_width = input.size()
        assert in_channels == self.in_channels

        # im2col: each sliding window becomes a column of length
        # in_channels * kernel_h * kernel_w, exactly the patch a convolution would see
        input_unfold = F.unfold(input, kernel_size=self.kernel_size, dilation=self.dilation, padding=self.padding, stride=self.stride)
        input_unfold = input_unfold.view(minibatch, 1, self.kernel_size[0]*self.kernel_size[1]*self.in_channels, -1)
        weight_flat  = self.weight.view(self.out_channels, -1, 1)
        # standard convolution output size, with the dilation term included so the
        # computed shape always agrees with what F.unfold produced above
        output_height = (input_height + 2*self.padding[0] - self.dilation[0]*(self.kernel_size[0]-1) - 1) // self.stride[0] + 1
        output_width  = (input_width  + 2*self.padding[1] - self.dilation[1]*(self.kernel_size[1]-1) - 1) // self.stride[1] + 1

        if self.kernel_type == 'linear':
            # dot product between each patch and each filter: exactly a convolution
            output = (input_unfold * weight_flat).sum(dim=2)

        elif self.kernel_type == 'manhattan':
            # negative L1 distance between patch and filter
            output = -((input_unfold - weight_flat).abs().sum(dim=2))

        elif self.kernel_type == 'euclidean':
            # negative squared L2 distance between patch and filter
            output = -(((input_unfold - weight_flat)**2).sum(dim=2))

        elif self.kernel_type == 'polynomial':
            # polynomial kernel applied to the convolution (dot-product) response
            output = ((input_unfold * weight_flat).sum(dim=2) + self.balance)**self.power

        elif self.kernel_type == 'gaussian':
            # Gaussian (RBF) kernel on the squared patch-filter distance
            output = (-self.gamma*((input_unfold - weight_flat)**2).sum(dim=2)).exp()

        else:
            raise NotImplementedError(self.kernel_type + ' kervolution not implemented')

        if self.bias is not None:
            # bias has shape (out_channels,); the view makes it broadcast over positions
            output += self.bias.view(self.out_channels, -1)

        return output.view(minibatch, self.out_channels, output_height, output_width)
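
For reference, the kind of single-image check I have been trying is sketched below (assuming the Kerv2d class above is in scope; the shapes and seed are arbitrary choices of mine). My understanding is that with kernel_type='linear' the output should match F.conv2d on the same weights, while the other kernel types should not:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
image = torch.randn(1, 3, 8, 8)   # a single 3-channel 8x8 "image"

kerv = Kerv2d(in_channels=3, out_channels=4, kernel_size=3, padding=1,
              bias=False, kernel_type='linear')
conv_out = F.conv2d(image, kerv.weight, padding=1)

# 'linear' kervolution is the plain patch-filter dot product, i.e. convolution
print(torch.allclose(kerv(image), conv_out, atol=1e-6))   # expected: True

# switching to the Gaussian kernel replaces the dot product with an RBF response
kerv.kernel_type = 'gaussian'
print(torch.allclose(kerv(image), conv_out, atol=1e-6))   # expected: False

Is this a reasonable way to verify which operation is actually taking place?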