Hi, everyone!
My question is plain and simple: how can I add a new hidden layer to an LSTM?
I'm working my way through this tutorial:
I want to add an extra hidden layer to the LSTM model, but when I try to set the number of layers higher than 1 I run into trouble:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd

EMBEDDING_DIM = 6
HIDDEN_DIM = 9
hidden_layers = 3

class LSTMTagger(nn.Module):

    def __init__(self, embedding_dim, hidden_dim, hidden_layers, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim

        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)

        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim. The third positional argument is num_layers.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, hidden_layers)

        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim)),
                autograd.Variable(torch.zeros(1, 1, self.hidden_dim)))

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
IndexError: list index out of range
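From the nn.LSTM docs it looks like the initial hidden and cell states should have shape (num_layers, batch, hidden_dim), so my guess is that the hard-coded 1 in init_hidden is what breaks once num_layers is 3. Here is roughly what I think init_hidden would have to look like (assuming I also store self.hidden_layers = hidden_layers in __init__; I'm not sure this is the right fix):

    def init_hidden(self):
        # Guess: the first dimension has to match the LSTM's num_layers,
        # i.e. (num_layers, batch, hidden_dim) rather than (1, 1, hidden_dim).
        return (autograd.Variable(torch.zeros(self.hidden_layers, 1, self.hidden_dim)),
                autograd.Variable(torch.zeros(self.hidden_layers, 1, self.hidden_dim)))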
Then I tried adding an extra LSTM layer on top of the existing one, like this:
    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
        lstm_out, self.hidden = self.lstm(embeds.view(len(lstm_out), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
It somehow runs without errors, but I still can't see the extra layer in model.modules(). The output looks like this:
LSTMTagger(
  (word_embeddings): Embedding(9, 6)
  (lstm): LSTM(6, 9)
  (hidden2tag): Linear(in_features=9, out_features=3)
)
Embedding(9, 6)
LSTM(6, 9)
Linear(in_features=9, out_features=3)
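As far as I understand (and I may be wrong), calling self.lstm twice just reuses the same module, so nothing new can ever show up in model.modules(). If I really wanted a second layer registered as its own module, I guess I would have to make it a separate attribute. Here is a stripped-down sketch of what I mean (the class name and sizes are made up for illustration, not the tutorial model):

import torch
import torch.nn as nn

class TwoLayerSketch(nn.Module):
    # Hypothetical example: two stacked LSTMs registered as separate
    # attributes, so both of them appear in model.modules().
    def __init__(self, embedding_dim=6, hidden_dim=9):
        super(TwoLayerSketch, self).__init__()
        self.lstm1 = nn.LSTM(embedding_dim, hidden_dim)
        # The second layer's input size has to match the first layer's
        # output size (hidden_dim) -- this is my assumption.
        self.lstm2 = nn.LSTM(hidden_dim, hidden_dim)

    def forward(self, x):
        out, _ = self.lstm1(x)
        out, _ = self.lstm2(out)
        return out

model = TwoLayerSketch()
print(model)                  # lstm1 and lstm2 both show up here
print(list(model.modules()))  # ...and here

Is that the way to go, or is passing num_layers to a single nn.LSTM the intended way?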
I hope someone can help me with that. Thanks.