Autoencoder embeddings always converge to zero

Hi guys, I am trying to build a simple VAE, and I am already struggling to set up a plain autoencoder by itself.
For some reason, when I train the model, the latent space / embeddings always converge to zero.
Below I have posted my model and my training code.

I am trying to encode graphs, so my autoencoder has to encode a modified adjacency matrix and decode the original adjacency matrix (a matrix of 0s and 1s indicating whether a connection between two nodes of the graph exists).
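To illustrate what I mean, here is a toy example of the kind of target I am reconstructing. The names (adj_target, adj_norm) and the normalization are just made up for illustration; adding self-loops and normalizing symmetrically (D^-1/2 (A + I) D^-1/2) is one common choice for the "modified" input, not necessarily exactly what I do:

import torch

# toy target: 4 nodes, edges 0-1, 1-2, 2-3 (symmetric, 0/1 entries)
adj_target = torch.tensor([[0., 1., 0., 0.],
                           [1., 0., 1., 0.],
                           [0., 1., 0., 1.],
                           [0., 0., 1., 0.]])

# one possible "modified" input: add self-loops, then normalize symmetrically
adj_hat = adj_target + torch.eye(4)
deg_inv_sqrt = adj_hat.sum(dim=1).pow(-0.5)
adj_norm = deg_inv_sqrt.view(-1, 1) * adj_hat * deg_inv_sqrt.view(1, -1)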

After the graph convolution I am currently not adding an activation function, but it does not change the behaviour of my model anyway.
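For reference, if I did add one, it would go right after the graph convolution in the forward pass of the model below, something like this (ReLU just as an example):

    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))  # optional non-linearity after the GCN layer
        latent = x
        return self.dc(x), latent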

It could be that I overlooked something in my data, but I used the same data in a different model and it worked, so maybe I am overlooking a stupidly simple mistake in my code. Or, as the Germans say: I cannot see the forest for the trees :slight_smile:

Model

import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import Module, Parameter


class GraphConvolution(Module):
    """Simple GCN layer: output = adj @ (input @ weight) + bias."""

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        # project node features, then aggregate over the (sparse) adjacency
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'


class InnerProductDecoder(nn.Module):
    """Decoder for using inner product for prediction."""

    def __init__(self, dropout, act=torch.sigmoid):
        super(InnerProductDecoder, self).__init__()
        self.dropout = dropout
        self.act = act

    def forward(self, z):
        z = F.dropout(z, self.dropout, training=self.training)
        adj = self.act(torch.mm(z, z.t()))
        return adj
    
    

class GCNModelVAE(nn.Module):
    def __init__(self, input_feat_dim, hidden_dim1, dropout):
        super(GCNModelVAE, self).__init__()

        self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1)
        # act=identity: the decoder returns raw logits for BCEWithLogitsLoss
        self.dc = InnerProductDecoder(dropout, act=lambda x: x)

    def forward(self, x, adj):
        x = self.gc1(x, adj)
        latent = x
        return self.dc(x), latent
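
For completeness, this is roughly how I sanity-check the model on a random toy graph before training. All sizes and names here are made up for illustration, and torch.eye is just a stand-in adjacency (made sparse because of torch.spmm):

num_nodes, feat_dim, hidden = 4, 8, 64
toy_feat = torch.randn(num_nodes, feat_dim)
toy_adj = torch.eye(num_nodes).to_sparse()  # stand-in adjacency for the sketch

toy_model = GCNModelVAE(feat_dim, hidden, dropout=0.1)
recon, emb = toy_model(toy_feat, toy_adj)
print(recon.shape, emb.shape)  # torch.Size([4, 4]) torch.Size([4, 64])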

Training

model = GCNModelVAE(feat[0].shape[1], 64, 0.1)
model.cuda()
model.train()

loss_function = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
epoch_losses = []

for i in range(100):
    epoch_loss = 0
    for k in range(len(train_feat)):

        optimizer.zero_grad()

        ## 1. forward propagation
        output, embeddings = model(train_feat[k].cuda(), train_adjs[k].cuda())

        print(torch.sigmoid(output))

        ## 2. loss calculation
        loss = loss_function(output, train_target_encoder[k].cuda())

        ## 3. backward propagation
        loss.backward()

        ## 4. weight optimization
        optimizer.step()

        # save epoch loss
        epoch_loss += loss.detach().item()

    # postprocess loss
    epoch_loss /= len(train_feat)
    epoch_losses.append(epoch_loss)
    print(epoch_loss)