TypeError: __init__() got multiple values for argument 'kernel_size'

When I run my model code, I get this error message:

TypeError: __init__() got multiple values for argument 'kernel_size'

How can I solve this error? I searched for how to use the conv2d function and found some information, but I don't think it works.

import torch

a = torch.randn(1, 19, 304, 304)
print(a.shape)  # torch.Size([1, 19, 304, 304])
b = a.permute(1, 0, 2, 3)
print(b.shape)  # torch.Size([19, 1, 304, 304])

For the input: batch_size = 1, channels = 19, height = 304, width = 304.

Model:

import torch
import torch.nn as nn


class Flatten(nn.Module):
    def forward(self, input):
        return input.view(input.size(0), -1)


class Unflatten(nn.Module):
    def __init__(self, channel, height, width):
        super(Unflatten, self).__init__()
        self.channel = channel
        self.height = height
        self.width = width

    def forward(self, input):
        return input.view(input.size(0), self.channel, self.height, self.width)


class ConvVAE(nn.Module):

    def __init__(self, latent_size):
        super(ConvVAE, self).__init__()

        self.latent_size = latent_size

        self.encoder = nn.Sequential(
            nn.Conv2d(1, 19, 304, 304, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(1, 19, 76, 76, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            Flatten(),
            nn.Linear(1,109744),
            nn.ReLU()
        )

        # hidden => mu
        self.fc1 = nn.Linear(109744, self.latent_size)

        # hidden => logvar
        self.fc2 = nn.Linear(109744, self.latent_size)

        self.decoder = nn.Sequential(
            nn.Linear(self.latent_size, 109744),
            nn.ReLU(),
            nn.Linear(1,109744),
            nn.ReLU(),
            Unflatten(19, 76, 76),
            nn.ReLU(),
            nn.ConvTranspose2d(1, 19, 76, 76, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(1, 19, 304, 304, kernel_size=3, stride=2, padding=1),
            nn.Sigmoid()
        )

    def encode(self, x):
        h = self.encoder(x)
        mu, logvar = self.fc1(h), self.fc2(h)
        return mu, logvar

    def decode(self, z):
        z = self.decoder(z)
        return z

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def forward(self, x):
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar

The error is raised by a wrong initialization of the nn.Conv2d and nn.ConvTranspose2d layers, e.g.:

nn.Conv2d(1, 19, 304, 304, kernel_size=3, stride=2, padding=1)

The expected arguments are given as:

Init signature:
nn.Conv2d(
    in_channels: int,
    out_channels: int,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    bias: bool = True,
    padding_mode: str = 'zeros',
)

so the 3rd value (304 in the posted line of code) would already specify the kernel size. Passing kernel_size=3 in addition to that gives the argument two values, which raises the TypeError.
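As a minimal sketch of a corrected encoder, assuming the input really is [batch_size=1, channels=19, height=304, width=304] as described in the question (the out_channels values 32 and 64 below are placeholders I picked for illustration, not values from the original model):

import torch
import torch.nn as nn

# Sketch only: out_channels of 32 and 64 are assumed values.
encoder = nn.Sequential(
    nn.Conv2d(in_channels=19, out_channels=32, kernel_size=3, stride=2, padding=1),  # [1, 19, 304, 304] -> [1, 32, 152, 152]
    nn.ReLU(),
    nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),  # [1, 32, 152, 152] -> [1, 64, 76, 76]
    nn.ReLU(),
)

x = torch.randn(1, 19, 304, 304)
print(encoder(x).shape)  # torch.Size([1, 64, 76, 76])

The same pattern applies to the nn.ConvTranspose2d layers in the decoder: pass in_channels and out_channels first (positionally or by keyword) and specify kernel_size only once.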