Thank you very much for your answer, jpeg729!
I didn't post my code in my first message because it is a bit long. You answered my main question anyway, and I will try to track down the cause of this error myself. But in case you want to look at my code, here it is:
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F


class LSTMTaggerCharLevelFeature(nn.Module):

    def __init__(self, w_emb_dim, t_hidden_dim, c_emb_dim,
                 c_hidden_dim, n_word, n_char, tagset_size):
        super(LSTMTaggerCharLevelFeature, self).__init__()
        self.w_emb_dim = w_emb_dim
        self.t_hidden_dim = t_hidden_dim
        self.c_emb_dim = c_emb_dim
        self.c_hidden_dim = c_hidden_dim
        self.w_embeddings = nn.Embedding(n_word, w_emb_dim)
        self.c_embeddings = nn.Embedding(n_char, c_emb_dim)
        # The LSTM over the embedded characters of each word.
        self.c_lstm = nn.LSTM(c_emb_dim, c_hidden_dim)
        # The LSTM over the concatenation of each embedded word and
        # the output of the last (real) step of the first LSTM.
        self.t_lstm = nn.LSTM(w_emb_dim + c_hidden_dim, t_hidden_dim)
        # The linear layer that maps the output of the second LSTM
        # to the tag space.
        self.hidden2tag = nn.Linear(t_hidden_dim, tagset_size)
        self.c_hidden = self.init_hidden(1, c_hidden_dim)
        self.t_hidden = self.init_hidden(1, t_hidden_dim)

    def init_hidden(self, batch, hidden_dim):
        # Fresh (h_0, c_0) state for an LSTM.
        return (autograd.Variable(torch.zeros(1, batch, hidden_dim)),
                autograd.Variable(torch.zeros(1, batch, hidden_dim)))

    def forward(self, list_len, w_ix, c_ix):
        # list_len holds the real (unpadded) length of each word.
        self.c_hidden = self.init_hidden(len(list_len), self.c_hidden_dim)
        w_embeds = self.w_embeddings(w_ix)
        c_embeds = self.c_embeddings(c_ix)
        # First LSTM, over the embedded characters.
        out, self.c_hidden = self.c_lstm(c_embeds.permute(1, 0, 2),
                                         self.c_hidden)
        # Select, for each word, the output at its last real character.
        last_char_ix = [list_len[i] - 1 for i in range(len(list_len))]
        kept_lstm_output_ix = [[[last_char_ix[i]
                                 for j in range(self.c_hidden_dim)]
                                for i in range(len(list_len))]]
        kept_lstm_output_ix = autograd.Variable(
            torch.LongTensor(kept_lstm_output_ix))
        out_c_lstm = out.gather(dim=0, index=kept_lstm_output_ix).view(
            len(list_len), self.c_hidden_dim)
        # Input of the second LSTM: each word embedding concatenated
        # with the character-level features of that word.
        in_t_lstm = torch.cat((w_embeds, out_c_lstm), 1)
        in_t_lstm = in_t_lstm.view(in_t_lstm.size(0), 1, in_t_lstm.size(1))
        # Second LSTM, over the concatenated features.
        out_t_lstm, self.t_hidden = self.t_lstm(in_t_lstm, self.t_hidden)
        tag_space = self.hidden2tag()
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
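For context, I build and call the model roughly like this (a minimal sketch with made-up dimensions and indices, not my real data; w_ix holds one index per word of the sentence, and c_ix holds the zero-padded character indices of each word):

model = LSTMTaggerCharLevelFeature(w_emb_dim=6, t_hidden_dim=8,
                                   c_emb_dim=3, c_hidden_dim=4,
                                   n_word=100, n_char=30, tagset_size=5)

# One sentence of two words, each padded to 7 characters.
list_len = [5, 3]                                    # real word lengths
w_ix = autograd.Variable(torch.LongTensor([1, 2]))   # word indices
c_ix = autograd.Variable(torch.LongTensor(
    [[3, 4, 5, 6, 7, 0, 0],                          # chars of word 1
     [8, 9, 10, 0, 0, 0, 0]]))                       # chars of word 2

tag_scores = model(list_len, w_ix, c_ix)  # this call raises the error below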
And here is the full stack trace of the error:
TypeError Traceback (most recent call last)
<ipython-input-29-287341512899> in <module>()
16
17 # Step 3. Run our forward pass.
---> 18 tag_scores = model(list_len, w_ix, c_ix)
19
20 # Step 4. Compute the loss, gradients, and update the parameters by
~/anaconda3/envs/mypy36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
355 result = self._slow_forward(*input, **kwargs)
356 else:
--> 357 result = self.forward(*input, **kwargs)
358 for hook in self._forward_hooks.values():
359 hook_result = hook(self, input, result)
<ipython-input-25-afab1327140d> in forward(self, list_len, w_ix, c_ix)
50 ## and embeddings of the words
51 out_t_lstm, self.t_hidden = self.t_lstm(in_t_lstm, self.t_hidden)
---> 52 tag_space = self.hidden2tag()
53 tag_scores = F.log_softmax(tag_space, dim=1)
54 return(tag_scores)
~/anaconda3/envs/mypy36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
355 result = self._slow_forward(*input, **kwargs)
356 else:
--> 357 result = self.forward(*input, **kwargs)
358 for hook in self._forward_hooks.values():
359 hook_result = hook(self, input, result)
TypeError: forward() missing 1 required positional argument: 'input'
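As far as I can tell, this message is what PyTorch produces whenever a module is called with fewer arguments than its forward expects, independently of my model; a minimal sketch that reproduces the same TypeError:

import torch
import torch.autograd as autograd
import torch.nn as nn

layer = nn.Linear(8, 5)
x = autograd.Variable(torch.zeros(1, 8))
out = layer(x)  # OK: __call__ passes x through to Linear.forward(input)
layer()         # TypeError: forward() missing 1 required positional
                # argument: 'input'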