class KernelConv3d(nn.Conv3d):
    '''
    Kervolution (kernel convolution) over volumetric (5D) input.

    kernel_type selects the similarity between each input patch and each
    filter (default 'linear' == ordinary convolution):
        linear     -- dot product  x . w
        manhattan  -- negative L1 distance  -|x - w|_1
        euclidean  -- negative squared L2 distance  -||x - w||^2
        polynomial -- (x . w + balance) ** power
        gaussian   -- RBF kernel  exp(-gamma * ||x - w||^2)

    balance, power, gamma are only meaningful for the kernels that use them.
    If learnable_kernel=True, balance and gamma become per-output-channel
    nn.Parameters initialised from the given scalars; power stays a fixed
    integer (it cannot be learned due to the integer-exponent limitation).
    NOTE: only groups == 1 is supported by this forward pass.
    '''
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True,
                 kernel_type='linear', learnable_kernel=False, kernel_regularizer=False,
                 balance=1, power=3, gamma=1):
        super(KernelConv3d, self).__init__(in_channels, out_channels, kernel_size,
                                           stride, padding, dilation, groups, bias)
        self.kernel_type = kernel_type
        self.learnable_kernel, self.kernel_regularizer = learnable_kernel, kernel_regularizer
        self.balance, self.power, self.gamma = balance, power, gamma
        # parameters for the kernel type
        if learnable_kernel:
            # Assign the nn.Parameter itself: `nn.Parameter(...).view(-1, 1)`
            # returns a plain Tensor, so the parameter would never be
            # registered with the module (and would never be trained).
            # Also build device-agnostically instead of torch.cuda.FloatTensor,
            # so the module works on CPU and moves with .to(device).
            self.balance = nn.Parameter(torch.full((out_channels, 1), float(balance)))
            self.gamma = nn.Parameter(torch.full((out_channels, 1), float(gamma)))

    def _unfold3d(self, input):
        '''im2col for 5D input -> (B, 1, C*kD*kH*kW, L) in F.unfold ordering.

        F.unfold only supports 4D (2D-conv) input, so the volumetric case is
        emulated with Tensor.unfold; dilation is realised by unfolding an
        enlarged window and keeping every dilation-th element of it.
        '''
        k, s, d, p = self.kernel_size, self.stride, self.dilation, self.padding
        # F.pad takes pairs for the LAST dims first: (W, W, H, H, D, D).
        x = F.pad(input, (p[2], p[2], p[1], p[1], p[0], p[0]))
        for i in range(3):
            span = d[i] * (k[i] - 1) + 1  # receptive extent of a dilated window
            x = x.unfold(2 + i, span, s[i])  # appends the window as last dim
            if d[i] > 1:
                x = x[..., ::d[i]]
        # x: (B, C, Do, Ho, Wo, kD, kH, kW) -> (B, C, kD, kH, kW, Do, Ho, Wo);
        # flattening (C, kD, kH, kW) C-major matches weight.view(O, -1, 1),
        # since Conv3d weight is (O, C, kD, kH, kW).
        x = x.permute(0, 1, 5, 6, 7, 2, 3, 4).contiguous()
        return x.view(x.size(0), 1, self.in_channels * k[0] * k[1] * k[2], -1)

    def forward(self, input):
        '''Apply the chosen kernel between every input patch and every filter.

        input:  (B, in_channels, D, H, W)
        return: (B, out_channels, Do, Ho, Wo) with the standard conv3d
                output sizes (stride/padding/dilation respected).
        '''
        minibatch, in_channels, in_d, in_h, in_w = input.size()
        assert(in_channels == self.in_channels)
        input_unfold = self._unfold3d(input)                       # (B, 1, K, L)
        weight_flat = self.weight.view(self.out_channels, -1, 1)   # (O, K, 1)
        # Broadcasting gives (B, O, K, L); every kernel must reduce over the
        # patch dimension K (dim=2) -- reducing dim=3 would incorrectly sum
        # over spatial locations instead of patch elements.
        if self.kernel_type == 'linear':
            output = (input_unfold * weight_flat).sum(dim=2)
        elif self.kernel_type == 'manhattan':
            output = -((input_unfold - weight_flat).abs().sum(dim=2))
        elif self.kernel_type == 'euclidean':
            output = -(((input_unfold - weight_flat) ** 2).sum(dim=2))
        elif self.kernel_type == 'polynomial':
            output = ((input_unfold * weight_flat).sum(dim=2) + self.balance) ** self.power
        elif self.kernel_type == 'gaussian':
            # This IS the RBF kernel: exp(-gamma * ||x - w||^2).
            # (gamma may be a scalar or a learnable (O, 1) parameter; both
            # broadcast against the (B, O, L) reduction result.)
            output = (-self.gamma * ((input_unfold - weight_flat) ** 2).sum(dim=2)).exp()
        else:
            raise NotImplementedError(self.kernel_type+' kervolution not implemented')
        if self.bias is not None:
            output = output + self.bias.view(self.out_channels, 1)
        # Standard convolution output arithmetic, including dilation.
        out_d = (in_d + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1
        out_h = (in_h + 2 * self.padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) // self.stride[1] + 1
        out_w = (in_w + 2 * self.padding[2] - self.dilation[2] * (self.kernel_size[2] - 1) - 1) // self.stride[2] + 1
        return output.view(minibatch, self.out_channels, out_d, out_h, out_w)
Hello friends, I want to implement an RBF kernel in my CNN, similar to the Gaussian and polynomial kernels shown in the code above. Can anyone suggest how to implement it in this code? (Note: the 'gaussian' branch, exp(-gamma * ||x - w||^2), is already the RBF kernel.)