Invalid combination of arguments in Conv1d()

Hi! I am trying to run a simple autoencoder with convolutional layers in both the encoder and the decoder. I cross-checked the input to the forward function and it prints as a tensor, but I am still getting an error saying conv1d() received an invalid combination of arguments. The error message is the following:

TypeError: conv1d() received an invalid combination of arguments - got (tuple, Parameter, Parameter, tuple, tuple, tuple, int), but expected one of:
 * (Tensor input, Tensor weight, Tensor bias, tuple of ints stride, tuple of ints padding, tuple of ints dilation, int groups)
      didn't match because some of the arguments have invalid types: (tuple of (Tensor, Tensor), Parameter, Parameter, tuple of (int,), tuple of (int,), tuple of (int,), int)
 * (Tensor input, Tensor weight, Tensor bias, tuple of ints stride, str padding, tuple of ints dilation, int groups)
      didn't match because some of the arguments have invalid types: (tuple of (Tensor, Tensor), Parameter, Parameter, tuple of (int,), tuple of (int,), tuple of (int,), int)

My model is defined as follows:

import torch
import torch.nn as nn
import torch.optim as optim

# Defining the autoencoder model
class Autoencoder(nn.Module):
    def __init__(self, input_size, encoding_dim):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv1d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True),
            nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)
        )
        
        self.linear_1enc = nn.Linear(int(input_size / 4) * 128, 128)
        self.linear_2enc = nn.Linear(128, 64)
        self.linear_3enc = nn.Linear(64, 32)
        self.linear_4enc = nn.Linear(32, 16)
        self.linear_5enc = nn.Linear(16, encoding_dim)
        
        self.relu = nn.ReLU()
        
        self.linear_1dec = nn.Linear(encoding_dim, 16)
        self.linear_2dec = nn.Linear(16, 32)
        self.linear_3dec = nn.Linear(32, 64)
        self.linear_4dec = nn.Linear(64, 128)
        self.linear_5dec = nn.Linear(128, int(input_size / 4) * 128)
        
        self.decoder = nn.Sequential(
            nn.ConvTranspose1d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxUnpool1d(kernel_size=2, stride=2),
            nn.ConvTranspose1d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxUnpool1d(kernel_size=2, stride=2),
            nn.Sigmoid()
        )

    def forward(self, x):
        print(type(x))
        x = self.encoder(x)
        
        original_shape = x.size()  # store the original shape
        
        x = x.view(x.size(0), -1)  # flatten the tensor for the linear layers
        print('shape : ', x.shape)
        x = self.linear_1enc(x)
        x = self.relu(x)
        x = self.linear_2enc(x)
        x = self.relu(x)
        x = self.linear_3enc(x)
        x = self.relu(x)
        x = self.linear_4enc(x)
        x = self.relu(x)
        x = self.linear_5enc(x)
        x = self.relu(x)
        
        x = self.linear_1dec(x)
        x = self.relu(x)
        x = self.linear_2dec(x)
        x = self.relu(x)
        x = self.linear_3dec(x)
        x = self.relu(x)
        x = self.linear_4dec(x)
        x = self.relu(x)
        x = self.linear_5dec(x)
        
        x = x.view(original_shape)  # unflatten the tensor
        
        x = self.decoder(x)
        return x

And I am running it as follows:

# Setting random seed for reproducibility
torch.manual_seed(42)

input_size = X_scaled.shape[1]  # Number of input features
encoding_dim = 2  # Desired number of output dimensions
model = Autoencoder(input_size, encoding_dim)

#torch.save(model, 'autoencoder_full.pth')


# Loss function and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)

# Training the autoencoder
num_epochs = 20
for epoch in range(num_epochs):
    # Forward pass
    X_train = X_tensor.unsqueeze(1)
    outputs = model(X_train)
    loss = criterion(outputs, X_tensor)

    # Backward pass and optimization
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Loss for each epoch
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

# Encoding the data using the trained autoencoder
encoded_data = model.encoder(X_tensor).detach().numpy()

Any help on this is greatly appreciated. I am really a newbie in neural networks, so please excuse any stupid mistakes.

It seems the input to the layer is a tuple of tensors while a single input tensor is expected. The tuple is created by the nn.MaxPool1d(..., return_indices=True) layers inside your encoder's nn.Sequential: with return_indices=True, max pooling returns an (output, indices) tuple, and nn.Sequential passes that tuple unchanged to the next nn.Conv1d, which raises exactly the posted TypeError.
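Here is a minimal sketch reproducing the issue (the layer sizes and input shape are made up for illustration):

import torch
import torch.nn as nn

# With return_indices=True, MaxPool1d returns an (output, indices) tuple,
# and nn.Sequential forwards that tuple verbatim to the next layer.
encoder = nn.Sequential(
    nn.Conv1d(1, 64, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True),
    nn.Conv1d(64, 128, kernel_size=3, padding=1),  # receives a tuple, not a Tensor
)

x = torch.randn(8, 1, 16)  # dummy (batch, channels, length) input
encoder(x)  # raises: conv1d() received an invalid combination of arguments - got (tuple, ...)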
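One way to fix it is to keep return_indices=True but apply the pooling explicitly in forward, so the indices and pre-pooling sizes can be passed to the matching nn.MaxUnpool1d layers. The sketch below illustrates this idea only; the class name, layer names, and shapes are made up, and your linear bottleneck is omitted for brevity:

import torch
import torch.nn as nn

# Sketch: pooling/unpooling handled manually in forward() instead of
# inside nn.Sequential, so the (output, indices) tuples can be unpacked.
class ConvAutoencoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv1d(1, 64, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)
        self.unpool = nn.MaxUnpool1d(kernel_size=2, stride=2)
        self.deconv1 = nn.ConvTranspose1d(128, 64, kernel_size=3, stride=1, padding=1)
        self.deconv2 = nn.ConvTranspose1d(64, 1, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Encoder: unpack the (output, indices) tuples explicitly.
        x = self.relu(self.conv1(x))
        size1 = x.size()          # pre-pooling size for the matching unpool
        x, idx1 = self.pool(x)
        x = self.relu(self.conv2(x))
        size2 = x.size()
        x, idx2 = self.pool(x)

        # Decoder: reuse the stored indices and sizes in reverse order.
        x = self.unpool(x, idx2, output_size=size2)
        x = self.relu(self.deconv1(x))
        x = self.unpool(x, idx1, output_size=size1)
        x = self.deconv2(x)
        return torch.sigmoid(x)


model = ConvAutoencoder()
out = model(torch.randn(8, 1, 16))  # dummy input, shapes are assumptions
print(out.shape)  # torch.Size([8, 1, 16])

Alternatively, you could drop return_indices=True entirely and upsample with strided nn.ConvTranspose1d layers instead of nn.MaxUnpool1d, which lets everything stay inside nn.Sequential. If you are still stuck after that, post a minimal and executable code snippet reproducing the issue so we can debug it.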