I would like to build a 3-layer autoencoder with a recurrent hidden layer and a feedback connection that comes from another AE, fed into the input of the hidden layer. Is my implementation correct?
class LPU(nn.Module):
    """Autoencoder unit with a recurrent hidden layer and a top-down feedback input.

    The hidden layer receives the concatenation of:
      * the encoded current input ``Xt``,
      * this unit's previous hidden state ``last_H`` (recurrence),
      * the previous hidden state of a superior AE ``last_H_superior`` (feedback).

    forward() returns ``(out_encoder, out_decoder, representation)``.
    """

    def __init__(self, Encoder_size, Hidden_size, Decoder_size):
        super(LPU, self).__init__()
        # Encoder projects the input down to the hidden dimensionality.
        # (The original Linear(Encoder_size, Encoder_size) never reached
        # Hidden_size, so the hidden layer's input dim could not match.)
        self.encoder = nn.Linear(Encoder_size, Hidden_size)
        self.act_encoder = nn.Sigmoid()
        # Hidden layer consumes [encoded input ; last_H ; last_H_superior],
        # i.e. three Hidden_size-wide tensors, hence 3 * Hidden_size inputs.
        # NOTE(review): assumes last_H and last_H_superior both have
        # Hidden_size features — confirm the superior AE's hidden size.
        self.hidden = nn.Linear(3 * Hidden_size, Hidden_size)
        self.act_hidden = nn.Sigmoid()
        # Decoder reconstructs the input space from the hidden representation.
        self.decoder = nn.Linear(Hidden_size, Decoder_size)
        self.act_decoder = nn.Sigmoid()

    def forward(self, Xt, last_H, last_H_superior):
        """One step: encode Xt, mix with recurrent/feedback states, decode.

        Args:
            Xt: current input, shape (batch, Encoder_size).
            last_H: previous hidden state of this unit, (batch, Hidden_size).
            last_H_superior: previous hidden state of the superior AE,
                (batch, Hidden_size).

        Returns:
            (out_encoder, out_decoder, representation)
        """
        out_encoder = self.act_encoder(self.encoder(Xt))
        # Concatenate along the feature (last) dimension — dim=3 only worked
        # for 4-D tensors and broke plain (batch, features) inputs.
        input_hidden = self.hidden(
            torch.cat((out_encoder, last_H, last_H_superior), dim=-1)
        )
        representation = self.act_hidden(input_hidden)  # compressed hidden state
        out_decoder = self.act_decoder(self.decoder(representation))
        return out_encoder, out_decoder, representation