How to concatenate an embedding layer with other features

I am trying to concatenate an embedding layer with my other features. The model doesn't raise any error, but it doesn't seem to train at all either. Is there anything wrong with this model definition, and how can I debug it?

Note: The last column of my X is a word2ix index (a single word); the remaining columns are ordinary numeric features.
Note: The net trains fine without the embedding feature/layer.
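
For context, here is a minimal sketch of how I think of the input layout (shapes and values below are made up purely for illustration; only the column ordering matters): the numeric features come first and the word2ix index sits in the last column.

import torch

batch_size, n_numeric, num_words = 8, 5, 100

numeric_feats = torch.randn(batch_size, n_numeric)               # ordinary float features
word_idx = torch.randint(0, num_words, (batch_size, 1)).float()  # word2ix index stored in the last column

X = torch.cat((numeric_feats, word_idx), dim=1)                  # shape: (batch_size, n_numeric + 1)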

import torch
import torch.nn.functional as F  # F.relu is used in forward

class Net(torch.nn.Module):
    def __init__(self, n_features, h_sizes, num_words, embed_dim, out_size, dropout=None):
        super().__init__()

        self.num_layers = len(h_sizes)  # number of Linear blocks (input layer + hidden layers)

        # embedding for the word2ix feature in the last input column
        self.embedding = torch.nn.Embedding(num_words, embed_dim)
        self.hidden = torch.nn.ModuleList()
        self.bnorm = torch.nn.ModuleList()
        if dropout is not None:
            self.dropout = torch.nn.ModuleList()
        else:
            self.dropout = None
        for k in range(len(h_sizes)):
            if k == 0:
                self.hidden.append(torch.nn.Linear(n_features, h_sizes[0]))
                self.bnorm.append(torch.nn.BatchNorm1d(h_sizes[0]))
                if self.dropout is not None:
                    self.dropout.append(torch.nn.Dropout(p=dropout))
                    
            else:
                if k == 1:
                    # the embedding is concatenated to the first layer's output,
                    # so the second Linear sees h_sizes[0] + embed_dim inputs
                    input_dim = h_sizes[0] + embed_dim
                else:
                    input_dim = h_sizes[k-1]
                
                self.hidden.append(torch.nn.Linear(input_dim, h_sizes[k]))
                self.bnorm.append(torch.nn.BatchNorm1d(h_sizes[k]))
                if self.dropout is not None:
                    self.dropout.append(torch.nn.Dropout(p=dropout))

        # Output layer
        self.out = torch.nn.Linear(h_sizes[-1], out_size)
        
    def forward(self, inputs):

        # Feedforward
        
        for l in range(self.num_layers):
            if l == 0:
                # all columns except the last are numeric features
                x = self.hidden[l](inputs[:, :-1])
                x = self.bnorm[l](x)
                if self.dropout is not None:
                    x = self.dropout[l](x)

                # the last column holds the word2ix index; Embedding expects integer (long) indices
                embeds = self.embedding(inputs[:, -1].long())
                x = torch.cat((embeds, x), dim=1)
                
            else:
                x = self.hidden[l](x)
                x = self.bnorm[l](x)
                if self.dropout is not None:
                    x = self.dropout[l](x)
            x = F.relu(x)
        output = self.out(x)

        return output
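
One way I can think of to debug this is to run a single forward/backward pass on random data and check whether gradients actually reach the embedding and the hidden layers. All sizes below are made up and F.mse_loss is just a stand-in for my real objective:

import torch
import torch.nn.functional as F

net = Net(n_features=5, h_sizes=[16, 8], num_words=100, embed_dim=4, out_size=1, dropout=0.2)

numeric = torch.randn(32, 5)
words = torch.randint(0, 100, (32, 1)).float()  # word2ix index as the last column
X = torch.cat((numeric, words), dim=1)
y = torch.randn(32, 1)

loss = F.mse_loss(net(X), y)
loss.backward()

# if the embedding takes part in training, its gradient should be non-zero
print(net.embedding.weight.grad.abs().sum())
print(net.hidden[0].weight.grad.abs().sum())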

Also posted on Stack Overflow: https://stackoverflow.com/questions/57029817/how-to-concatenate-embedding-layer-in-pytorch