Issue with convolutional layer customization: positional arguments in __init__()

I am developing a custom convolutional layer in PyTorch to be used inside a graph neural network. The layer myConv2D receives the input, applies a function to the output (not implemented yet), and passes the result to a GraphConv layer inside a classifier. However, there seems to be some sort of issue with kernel_size inside the myConv2D layer. Here’s my code:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.common_types import _size_2_t
from torch.nn.modules.utils import _pair
from typing import Optional
from dgl.nn import GraphConv  # assuming DGL's GraphConv

class myConv2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t = 3,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        kernel_size_ = _pair(kernel_size)
        stride_ = _pair(stride)
        padding_ = _pair(padding)
        dilation_ = _pair(dilation)
        super(myConv2D, self).__init__(
            in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
            False, _pair(0), groups, bias, padding_mode)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
        if self.padding_mode != 'zeros':
            return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            weight, bias, self.stride,
                            _pair(0), self.dilation, self.groups)
        return F.conv2d(input, weight, bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias)    

class Classifier(nn.Module):
    def __init__(self, in_feats, hidden_size, num_classes):
        super(Classifier, self).__init__()
        self.conv1 = myConv2D(in_feats, hidden_size)
        self.conv2 = GraphConv(hidden_size, num_classes)

    def forward(self, g, inputs):
        h = self.conv1(g, inputs)
        h = torch.relu(h)
        h = self.conv2(g, h)
        return h

model = Classifier(768, 20, 2)

I get the following error:

>>> model = Classifier(768, 20, 2)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "<stdin>", line 4, in __init__
  File "<stdin>", line 20, in __init__
TypeError: __init__() takes 1 positional argument but 12 were given

The error seems to refer to the kernel size setup, but when I fix that, issues with the other parameters of the custom layer show up. Any enlightenment is welcome.

The super().__init__ call is raising this error:

super(myConv2D, self).__init__(
    in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
    False, _pair(0), groups, bias, padding_mode)

since you are passing all of these arguments to it, while the parent class is nn.Module, whose __init__() takes no positional arguments besides self.
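For illustration, here is a minimal standalone snippet (with a hypothetical Demo class) that reproduces the same TypeError:

import torch.nn as nn

class Demo(nn.Module):
    def __init__(self):
        # nn.Module.__init__ accepts no positional arguments besides self,
        # so forwarding extra positional arguments fails at construction time
        super().__init__(1, 2, 3)

Demo()  # TypeError: __init__() takes 1 positional argument but 4 were given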
If you want to pass these conv layer arguments to the parent class, you need to derive myConv2D from nn.Conv2d instead, as sketched below.
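A minimal sketch of that fix (the custom post-processing is left as the same TODO): deriving from nn.Conv2d lets its __init__ accept and validate all of the conv arguments, including converting scalar sizes to pairs, so the _pair(...) calls and the hand-written _conv_forward are no longer needed.

import torch.nn as nn
from torch import Tensor
from torch.nn.common_types import _size_2_t

class myConv2D(nn.Conv2d):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t = 3,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
    ):
        # nn.Conv2d is the parent here, so forwarding the conv
        # arguments to it is valid; it converts scalars to pairs itself
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups,
            bias=bias, padding_mode=padding_mode)

    def forward(self, input: Tensor) -> Tensor:
        out = super().forward(input)  # standard Conv2d forward
        # TODO: apply the custom function to `out` here
        return out

With this change, myConv2D(768, 20) (and hence Classifier(768, 20, 2)) constructs without the TypeError, keeping the default kernel_size=3. Note that Classifier.forward still calls self.conv1(g, inputs) with two arguments, while a conv layer's forward takes only the input tensor, so that call will need adjusting separately.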