How can I concatenate sentence-level embeddings with word-level embeddings for padded sequences?
embedded_seqs has to be a PackedSequence before I feed it to the RNN.
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence

class Model(nn.Module):
    def __init__(self, vocab_size, word_embed_dim,
                 num_labels, labels_embed_dim,
                 hidd_dim, padding_idx=0):
        super(Model, self).__init__()
        self.w_embedding = nn.Embedding(vocab_size, word_embed_dim,
                                        padding_idx=padding_idx)
        self.l_embedding = nn.Embedding(num_labels, labels_embed_dim)
        # The GRU expects word and label embeddings concatenated per timestep.
        self.rnn = nn.GRU(word_embed_dim + labels_embed_dim,
                          hidd_dim, batch_first=True, bidirectional=True)

    def forward(self, input_seqs, input_lengths, input_labels):
        embedded_seqs = self.w_embedding(input_seqs)      # (batch, seq_len, word_embed_dim)
        embedded_labels = self.l_embedding(input_labels)  # (batch, labels_embed_dim)
        # How can I concatenate embedded_seqs with embedded_labels?
        packed_seqs = pack_padded_sequence(embedded_seqs, input_lengths,
                                           batch_first=True)
        output, h_t = self.rnn(packed_seqs)
        ......
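
One way to do this (a minimal sketch, not from the original post): since the label embedding is a single vector per sequence, repeat it along the time axis and concatenate it onto each word embedding before packing. The tensor names match the snippet above; the unsqueeze/expand/cat pattern, and the assumption that input_labels holds one label id per sequence with shape (batch,), are mine.

    # Sketch of forward with the concatenation filled in.
    # Assumes input_labels has shape (batch,), i.e. one label id per sequence.
    def forward(self, input_seqs, input_lengths, input_labels):
        embedded_seqs = self.w_embedding(input_seqs)      # (batch, seq_len, word_embed_dim)
        embedded_labels = self.l_embedding(input_labels)  # (batch, labels_embed_dim)

        # Add a time axis and broadcast the sentence-level embedding
        # to every position: (batch, 1, d) -> (batch, seq_len, d).
        seq_len = embedded_seqs.size(1)
        expanded_labels = embedded_labels.unsqueeze(1).expand(-1, seq_len, -1)

        # Concatenate along the feature dimension so each timestep carries
        # its word embedding plus the sequence-level label embedding.
        combined = torch.cat([embedded_seqs, expanded_labels], dim=2)
        # combined: (batch, seq_len, word_embed_dim + labels_embed_dim)

        packed_seqs = pack_padded_sequence(combined, input_lengths,
                                           batch_first=True)
        output, h_t = self.rnn(packed_seqs)
        return output, h_t

Note that expand creates a broadcast view rather than copying the label vector seq_len times; torch.cat then materializes the combined tensor once. The padded positions also receive the label vector, but pack_padded_sequence drops them, so the RNN never sees those timesteps.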