Problem with Autoencoder definition

I would like to build a 3-layer autoencoder with a recurrent hidden layer and a feedback connection that comes from another AE and feeds into the input of the hidden layer. Is my implementation correct?

import torch
import torch.nn as nn

class LPU(nn.Module):
    def __init__(self, Encoder_size, Hidden_size, Decoder_size): # simple autoencoder structure
        super(LPU, self).__init__()

        self.encoder = nn.Linear(Encoder_size, Encoder_size) # PREVIOUS : self.encoder = nn.Linear(Encoder_size, Hidden_size)
        self.act_encoder = nn.Sigmoid()

        self.hidden = nn.Linear(Hidden_size, Hidden_size)
        self.act_hidden = nn.Sigmoid()

        self.decoder = nn.Linear(Decoder_size, Decoder_size) # PREVIOUS : self.decoder = nn.Linear(Hidden_size, Decoder_size)
        self.act_decoder = nn.Sigmoid()

    def forward(self, Xt, last_H, last_H_superior):

        input_encoder = self.encoder(Xt)
        out_encoder = self.act_encoder(input_encoder)

        input_hidden = self.hidden(torch.cat((out_encoder, last_H, last_H_superior), 3)) # ????????
        representation = self.act_hidden(input_hidden) # hidden compressed representation

        input_decoder = self.decoder(representation)
        out_decoder = self.act_decoder(input_decoder)

        return out_encoder, out_decoder, representation

In the line with torch.cat((out_encoder, last_H, last_H_superior), 3), it seems you are expecting 4D inputs? Concatenating along dimension 3 requires tensors with at least four dimensions, but a Linear layer works with 2D inputs (batch_size x input_dim), so the concatenation should happen along dimension 1 instead.
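For example (a minimal sketch with arbitrary sizes, not values from your model): with 2D activations, the three tensors are concatenated along dimension 1, the feature dimension:

import torch

batch_size, hidden = 4, 8
out_encoder = torch.rand(batch_size, hidden)
last_H = torch.rand(batch_size, hidden)
last_H_superior = torch.rand(batch_size, hidden)

# dim 1 is the feature dimension of (batch_size x input_dim) tensors
combined = torch.cat((out_encoder, last_H, last_H_superior), 1)
print(combined.shape)  # torch.Size([4, 24])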

I changed my implementation to a simpler one:

class LPU(nn.Module):
    def __init__(self, Encoder_size, Hidden_size, Decoder_size):
        # simple autoencoder structure
        super(LPU, self).__init__()
        # encoder input: the main sensory state (Encoder_size), the last hidden state,
        # and the last hidden state from a superior LPU
        self.encoder = nn.Linear((Encoder_size + 2*Hidden_size), Hidden_size)
        self.act_encoder = nn.Sigmoid()

        self.decoder = nn.Linear(Hidden_size, Decoder_size)
        self.act_decoder = nn.Sigmoid()

    def forward(self, Xt, last_Hidden, last_Hidden_sup):

        input_encoder = torch.cat((Xt, last_Hidden, last_Hidden_sup), 1)
        encoder_process = self.encoder(input_encoder)
        representation = self.act_encoder(encoder_process) # hidden compressed representation

        decoder_process = self.decoder(representation)
        out_decoder = self.act_decoder(decoder_process)

        return out_decoder, representation
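
For a quick shape check of the revised module (the sizes below are arbitrary placeholders, not values from the original post):

lpu = LPU(Encoder_size=16, Hidden_size=8, Decoder_size=16)

Xt = torch.rand(4, 16)              # batch of 4 sensory states
last_Hidden = torch.rand(4, 8)      # previous hidden state of this LPU
last_Hidden_sup = torch.rand(4, 8)  # previous hidden state of the superior LPU

out_decoder, representation = lpu(Xt, last_Hidden, last_Hidden_sup)
print(out_decoder.shape, representation.shape)  # torch.Size([4, 16]) torch.Size([4, 8])

Here the encoder expects Encoder_size + 2*Hidden_size = 32 input features, which matches the concatenation of (4, 16), (4, 8) and (4, 8) along dimension 1.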