I use torch.cuda.amp.autocast around the model forward, but I get an error like the one described in the title.
The code is :
class SuperConv2d(nn.Conv2d):
    """Conv2d whose effective channel widths are chosen per forward call.

    Holds the full-size weight/bias of an ordinary ``nn.Conv2d`` and, at
    forward time, slices out the first ``config['channel']`` output filters
    and the first ``x.size(1)`` input channels, so a single super-network
    weight can serve sub-networks of many widths.

    NOTE(review): slicing the input-channel dimension assumes ``groups == 1``.
    For grouped convolution the weight's dim 1 is ``in_channels // groups``,
    so passing ``self.groups`` together with a channel-sliced weight would be
    inconsistent — confirm callers never use groups > 1.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super(SuperConv2d, self).__init__(in_channels, out_channels,
                                          kernel_size, stride, padding,
                                          dilation, groups, bias,
                                          padding_mode)

    def forward(self, x, config):
        """Convolve ``x`` with the active sub-network's slice of the weight.

        Args:
            x: input tensor of shape (N, in_nc, H, W); ``in_nc`` may be
               smaller than ``self.in_channels``.
            config: dict with key ``'channel'`` giving the number of output
               channels to use (must be <= ``self.out_channels``).

        Returns:
            Output tensor of shape (N, config['channel'], H_out, W_out).
        """
        in_nc = x.size(1)
        out_nc = config['channel']
        # Slice the super weight down to the active sub-network size.
        # .contiguous() is the amp fix: the narrowed tensor is a
        # non-contiguous view, and cuDNN's fp16 kernels under
        # torch.cuda.amp.autocast can reject non-contiguous weights —
        # materializing the slice avoids that error at negligible cost.
        weight = self.weight[:out_nc, :in_nc].contiguous()  # [oc, ic, kH, kW]
        bias = self.bias[:out_nc] if self.bias is not None else None
        # F.conv2d is on autocast's fp16 cast list, so x/weight/bias are
        # cast automatically inside an autocast region — no manual .half()
        # is needed here.
        return F.conv2d(x, weight, bias, self.stride, self.padding,
                        self.dilation, self.groups)
What can I change to make this module work with amp?
Does F.conv2d support amp (autocast)?
My environment is:
gpu: rtx2080ti
torch: py3.7_cuda10.1.243_cudnn7.6.3_0
cuda: 10.1