Embedding(): argument 'indices' (position 2) must be Tensor, not tuple

I’m getting an error from my embedding layer that I don’t understand. It seems to have something to do with the embedding dimension.

import torch.nn as nn

class SimpleLSTM(nn.Module):
  def __init__(self, hidden_dim, emb_dim, num_linear=1):
    super().__init__()
    self.embedding = nn.Embedding(vocab_size, emb_dim)  # vocab_size is defined elsewhere
    self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=1)
    self.linear_layers = []
    for _ in range(num_linear - 1):
      self.linear_layers.append(nn.Linear(hidden_dim, hidden_dim))
    # Wrap once, after the loop, so every layer is registered as a submodule.
    self.linear_layers = nn.ModuleList(self.linear_layers)
    self.predictor = nn.Linear(hidden_dim, 3)

  def forward(self, seq):
    # seq: LongTensor of token indices, shape (seq_len, batch)
    hdn, _ = self.encoder(self.embedding(seq))
    feature = hdn[-1, :, :]  # hidden states at the last time step
    for layer in self.linear_layers:
      feature = layer(feature)
    preds = self.predictor(feature)
    return preds

And the error:

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

<ipython-input-76-35e2652714dd> in forward(self, seq)
     11 
     12   def forward(self,seq):
---> 13     hdn, _ = self.encoder(self.embedding(seq))
     14     feature = hdn[-1, :, :]
     15     for layer in self.linear_layers:

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    487             result = self._slow_forward(*input, **kwargs)
    488         else:
--> 489             result = self.forward(*input, **kwargs)
    490         for hook in self._forward_hooks.values():
    491             hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/sparse.py in forward(self, input)
    116         return F.embedding(
    117             input, self.weight, self.padding_idx, self.max_norm,
--> 118             self.norm_type, self.scale_grad_by_freq, self.sparse)
    119 
    120     def extra_repr(self):

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
   1452         # remove once script supports set_grad_enabled
   1453         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1454     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
   1455 
   1456 

TypeError: embedding(): argument 'indices' (position 2) must be Tensor, not tuple

Could you print seq? Based on the error message, it seems you are passing a tuple instead of a tensor.
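For reference, here is a minimal sketch that reproduces the error. The tuple stands in for a whole (data, label) batch coming straight from a data loader; that pairing is an assumption for illustration, not something from your code:

import torch
import torch.nn as nn

emb = nn.Embedding(10, 4)

# A LongTensor of indices is the expected input and works fine:
idx = torch.tensor([1, 2, 3])
print(emb(idx).shape)  # torch.Size([3, 4])

# Passing a tuple instead, e.g. a whole (data, label) batch, raises the error:
batch = (idx, torch.tensor([0, 1, 0]))
emb(batch)  # TypeError: embedding(): argument 'indices' (position 2) must be Tensor, not tuple

If that is what is happening, the fix is to unpack the batch and pass only the index tensor into the model.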


I got the same error. Did you manage to resolve the problem?

Can you tell me what you mean by seq? I got the same error; here is my colab notebook.
What mistake have I made?

seq is the input passed to self.embedding in the previous code snippet. I think it is arriving as a tuple rather than a tensor, which would raise this type mismatch error.
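In other words, check what your iterator is actually yielding before it reaches the model. A minimal, hypothetical training-loop fragment (train_iter and the variable names are illustrative, not from the notebook above):

vocab_size = 10000  # assumed; must be defined before the model is constructed
model = SimpleLSTM(hidden_dim=128, emb_dim=100)

for seq, labels in train_iter:  # hypothetical iterator yielding (indices, labels) pairs
    # seq must be a LongTensor of token indices, shape (seq_len, batch).
    # If it is still a tuple (e.g. (text, lengths) from torchtext's
    # include_lengths=True), unpack it first: seq, lengths = seq
    preds = model(seq)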