class Network(nn.Module):
def __init__(self,weight_matrix=embedding_matrix,hidden_dim=12):
super().__init__()
vocab_size = weight_matrix.shape[0]
vector_dim = weight_matrix.shape[1]
#text data
self.hidden_dim = 12
self.embedding = nn.Embedding(vocab_size,vector_dim)
self.embedding.weight.data.copy_(torch.from_numpy(weight_matrix))
self.embedding.weight.requires_grad = False
self.lstm = nn.LSTM(input_size = vector_dim, hidden_size = hidden_dim,num_layers=1,batch_first=True)
# categorical inputs
# make a 4d vector for each school_state input
# 51 is the size of dictionary we keep
self.state_embedding = nn.Embedding(51,2)
self.prefix_embedding = nn.Embedding(5,3)
self.cat_embedding = nn.Embedding(50,26)
self.sub_cat_embedding = nn.Embedding(401,20)
self.grade_embedding = nn.Embedding(4,2)
#numerical inputs
self.numeric = nn.Linear(3,12)
**self.linear1 = nn.Linear(hidden_dim , 128)**
self.linear2 = nn.Linear(128,32)
self.linear3 = nn.Linear(32,2)
def forward(self,text,state,prefix,cat,sub_cat,grade,num):
x1 = self.embedding(text)
lstm_out, (h,c) = self.lstm(x1) #lstm_out #[128, 250, 12]
out = lstm_out.contiguous().view(-1,self.hidden_dim)
x2 = self.state_embedding(state) #[128,2]
x3 = self.prefix_embedding(prefix) #[128,3]
x4 = self.cat_embedding(cat) ##[128,26]
x5 = self.sub_cat_embedding(sub_cat) #[128,20]
x6 = self.grade_embedding(grade) ##[128,2]
x7 = self.numeric(num)
print(x7.shape)
combined = torch.cat((out,x2,x3,x4,x5,x6,x7),axis=1)
x = F.relu(self.linear1(out))
x = F.relu(self.linear2(x))
x = F.sigmoid(self.linear3(x))[:,-1]
return x
In the bolded code portion I would like to flatten the LSTM output to dim (batch_size, -1).
How to do that. Please anyone help. Actually Iam trying to concatinate different inputs.