Thank you for reply.

This is my code.

class MyModel(nn.Module):

```
def __init__(self, in_channels, out_channels, kernel_size, padding=(0, 2, 2), dilation=1, bias=True, groups=1):
super(MyModel, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.padding = padding
self.relu = nn.ReLU()
self.bn = nn.BatchNorm3d(out_channels, eps=1e-5, momentum=0.1)
# Register them as parameters
self.a = self.get_param(self.in_channels, self.out_channels, self.groups)
self.b = self.get_param(self.in_channels, self.out_channels, self.groups)
self.c = self.get_param(self.in_channels, self.out_channels, self.groups)
# Generate the custom kernel
self.weight = self.get_weight(self.a, self.b, self.c)
def get_param(self, in_channels, out_channels, groups):
param = torch.zeros([out_channels, in_channels//groups, 1, 1, 1], dtype=torch.float)
param = param.cuda()
nn.init.xavier_normal_(param, gain=nn.init.calculate_gain('sigmoid'))
return nn.Parmeter(param)
def get_weight(self, a, b, c):
one = torch.ones([self.out_channels, self.in_channels // self.groups, 1, 1, 1], dtype=torch.float).cuda()
bias = torch.sigmoid(c) + one
kernel_x = torch.cat([bias - (torch.sigmoid(a)),
bias - 1 / 2 *(torch.sigmoid(a)),
bias,
bias - 1 / 2 * (torch.sigmoid(a)),
bias - (torch.sigmoid(a))], dim=3)
kernel_x = kernel_x.repeat((1, 1, 1, 1, 5))
kernel_y = torch.cat([bias - (torch.sigmoid(b)),
bias - 1 / 2 * (torch.sigmoid(b)),
bias,
bias - 1 / 2 * (torch.sigmoid(b)),
bias - (torch.sigmoid(b))], dim=4)
kernel_y = kernel_y.repeat((1, 1, 1, 5, 1))
kernel = kernel_x + kernel_y
# kernel has grad_fn(add_backward)
return kernel
def forward(self, x):
# Error occured
x = F.conv3d(x, self.weight, padding=self.padding)
x = self.bn(x)
x = self.relu(x)
```

I have to train **a, b, c** parameters of **kernel**.

My error is

**RuntimeError: Cannot insert a Tensor that requires grad as a constant. Consider making it a parameter or input, or detaching the gradient**

When I register **kernel** as nn.Parameter, there is no error. But **kernel** has no grad_fn, so I cannot train the **a, b, c** parameters.

And I found that this error comes from https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/frontend/tracer.cpp#L147

But I have trouble finding the solution.

Best Regards