RuntimeError: Expected hidden[0] size (2, 1, 15), got [2, 16, 15]

I’m building a seq2seq model for time series data, and I am getting this error:
RuntimeError: Expected hidden[0] size (2, 1, 15), got [2, 16, 15]

I can tell that the error has something to do with the LSTM in my decoder, but I am unable to resolve this.
Any help with this would be greatly appreciated.

Here are the encoder and decoder classes:

import random

import torch
import torch.nn as nn


class Encoder(nn.Module):
    def __init__(self, input_dim, hid_dim, n_layers, dropout):
        super().__init__()
        
        self.hid_dim = hid_dim
        self.n_layers = n_layers
        self.rnn = nn.LSTM(input_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
    
    def forward(self, x):
        # hidden and cell have shape [n_layers, batch_size, hid_dim]
        outputs, (hidden, cell) = self.rnn(x)
        return hidden, cell

class Decoder(nn.Module):
    def __init__(self, output_dim, input_dim, hid_dim, n_layers, dropout):
        super().__init__()
        
        self.output_dim = output_dim
        self.hid_dim = hid_dim
        self.n_layers = n_layers
        self.rnn = nn.LSTM(output_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
        self.fc_out = nn.Linear(hid_dim, output_dim)
        
    def forward(self, input, hidden, cell):
        input = input.unsqueeze(0).view(1, -1, 3)
        output, (hidden, cell) = self.rnn(input, (hidden, cell))
        prediction = self.fc_out(output.squeeze(0))
        return prediction, hidden, cell

and here is the Seq2Seq class, in case it's needed:

class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
    
    def forward(self, X, y, teacher_forcing_ratio = 0.5):
        batch_size = 3
        y_len = y.shape[0]
        y_size = self.decoder.output_dim
        
        outputs = torch.zeros(y_len, batch_size, y_size).to(self.device)
        hidden, cell = self.encoder(X)
        input = y[0, :]
        
        for t in range(1, y_len):
            output, hidden, cell = self.decoder(input, hidden, cell)
            outputs[t] = output
            teacher_force = random.random() < teacher_forcing_ratio
            input = y[t] if teacher_force else output
        return outputs

@ptrblck would you be able to help me out with this?

I don’t know which input shape you are passing to the model, but I would guess that hidden or cell is creating the shape mismatch.
Here is a small example showing the hidden and cell outputs in the expected shape:

import torch
import torch.nn as nn

model = nn.LSTM(input_size=3, hidden_size=15, num_layers=2, batch_first=True)
x = torch.randn(1, 100, 3)
out, (hidden, cell) = model(x)
# hidden and cell both have shape [num_layers, batch_size, hidden_size] = [2, 1, 15]

so check the shapes of hidden and cell and make sure they match the expected ones.
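A quick way to do that is to print the shapes right before the LSTM call in your decoder. Here is a minimal debugging sketch based on the Decoder.forward you posted (the print statement is only for inspection):

    def forward(self, input, hidden, cell):
        input = input.unsqueeze(0).view(1, -1, 3)
        # With batch_first=True the LSTM expects:
        #   input:  [batch_size, seq_len, input_size]
        #   hidden: [n_layers, batch_size, hid_dim]
        #   cell:   [n_layers, batch_size, hid_dim]
        # The batch dimension of hidden and cell has to match the batch dimension of input.
        print(input.shape, hidden.shape, cell.shape)
        output, (hidden, cell) = self.rnn(input, (hidden, cell))
        prediction = self.fc_out(output.squeeze(0))
        return prediction, hidden, cell

If the printed batch dimensions disagree (your error message suggests the decoder input has a batch size of 1 while hidden and cell come from the encoder with a batch size of 16), that mismatch would explain the error.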