torch.nn.MaxPool2d does not need an "input" argument to be constructed, but the quantized function torch.nn.quantized.functional.max_pool2d must have one

I am trying to quantize a salient object detection model.
Originally, my ResNet class looked like this:

import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.functional as F
import torch.nn.quantized.functional as qF

affine_par = True  # defined elsewhere in the original file; shown here so the snippet is self-contained

class ResNet(nn.Module):
    def __init__(self, block, layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)  # to be changed to the quantized version

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation__=2)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation__=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation__ == 2 or dilation__ == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, affine=affine_par),
            )
            # freeze the downsample BatchNorm parameters (guarded so it is skipped when downsample is None)
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation_=dilation__, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation_=dilation__))
        return nn.Sequential(*layers)
    def forward(self, x):
        tmp_x = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        tmp_x.append(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        tmp_x.append(x)
        x = self.layer2(x)
        tmp_x.append(x)
        x = self.layer3(x)
        tmp_x.append(x)
        x = self.layer4(x)
        tmp_x.append(x)
        return tmp_x

And it works just fine. But if I replace everything with the quantized counterparts, like this:

class ResNet(nn.Module):
    def __init__(self, block, layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nnq.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nnq.BatchNorm2d(64)  # affine=affine_par dropped; nnq.BatchNorm2d takes no affine argument
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nnq.ReLU(inplace=False)

        # self.maxpool = F.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

        self.maxpool = qF.max_pool2d(x=???, kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation__=2)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation__=1):

        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation__ == 2 or dilation__ == 4:
            downsample = nn.Sequential(
                nnq.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nnq.BatchNorm2d(planes * block.expansion),  # no affine argument on the quantized module
            )
            # freeze the downsample BatchNorm parameters (guarded so it is skipped when downsample is None)
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation_=dilation__, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation_=dilation__))
        return nn.Sequential(*layers)
    def forward(self, x):
        tmp_x = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        tmp_x.append(x)

        x = qF.max_pool2d(x, kernel_size=3, stride=2, padding=1, ceil_mode=True)  # this certainly will not create a MaxPool layer

        # x = self.maxpool(x)

        x = self.layer1(x)
        tmp_x.append(x)
        x = self.layer2(x)
        tmp_x.append(x)
        x = self.layer3(x)
        tmp_x.append(x)
        x = self.layer4(x)
        tmp_x.append(x)
        return tmp_x

Error:
TypeError: max_pool2d() missing 1 required positional argument: 'input'
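
For reference, the mismatch reproduces in isolation; a minimal sketch (the tensor shape and quantization parameters are arbitrary):

import torch
import torch.nn.quantized.functional as qF

x = torch.randn(1, 64, 56, 56)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)

# Works: the (quantized) input tensor is the first positional argument.
out = qF.max_pool2d(qx, kernel_size=3, stride=2, padding=1, ceil_mode=True)

# Raises the TypeError above: no input tensor is supplied.
# qF.max_pool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)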

So I think the problem is that torch.nn.MaxPool2d does not need an input argument when it is constructed, but torch.nn.quantized.functional.max_pool2d does. Does anyone know a way around this? How can I successfully quantize other custom classes like this one?

Quantized maxpool2d and adaptiveavgpool2d should not be defined in the quantizable version of the model; the regular float nn.MaxPool2d and nn.AdaptiveAvgPool2d modules already operate on quantized tensors.
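
That is, the float module definition can stay in place; a minimal sketch (the shape and quantization parameters are made up):

import torch
import torch.nn as nn

pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

x = torch.randn(1, 64, 56, 56)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)

out_float = pool(x)   # float path
out_quant = pool(qx)  # quantized path; the output stays quantized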

I don’t think there is such a thing as F.nn.MaxPool2d. F, which is an alias to functional in your case, does not have stateful layers; however, you are treating it as if it did. In both models you need to keep the max pooling definition as nn.MaxPool2d.

Also, in the second case, you cannot call qF.max_pool2d in the constructor. As mentioned before, the F and qF namespaces are stateless, meaning they operate directly on the input and don’t require instance construction. The way to use it is:

  1. Remove self.maxpool = F.max_pool2d(...) and self.maxpool = qF.max_pool2d(...) from the constructors.
  2. Call them in the forward method instead, without the self.... = part, like this: max_pool_output = qF.max_pool2d(some_input, *some_arguments) (see the sketch after this list).
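
Putting both points together, a minimal sketch of the corrected pattern (PoolOnly is a hypothetical stripped-down stand-in, not the full ResNet):

import torch.nn as nn
import torch.nn.quantized.functional as qF

class PoolOnly(nn.Module):  # hypothetical stand-in for the full ResNet
    def __init__(self):
        super(PoolOnly, self).__init__()
        # Stateful module: constructed once here; the input arrives at call time.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

    def forward(self, x):
        x = self.maxpool(x)
        # Equivalent stateless call, valid only here in forward where x exists:
        # x = qF.max_pool2d(x, kernel_size=3, stride=2, padding=1, ceil_mode=True)
        return x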