Hi,
I'm trying to convert an LSTM model from Keras to PyTorch.
# training input: 10000 samples, each a sequence of 48 integer token ids
X_train.shape = (10000, 48)
# training target: 16 binary labels per sample (multi-label)
y_train.shape = (10000, 16)
Here is the original keras model:
model = Sequential()
model.add(Embedding(16, 10, input_length=48))
model.add(CuDNNLSTM(50))
model.add(Dropout(0.1))
model.add(Dense(16, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
And this is my PyTorch model:
class LSTM(nn.Module):
def __init__(self, embedding_size = 10, hidden_size = 50, vocab_size = 16, tagset_size = 16, dropout_rate = 0.1):
super().__init__()
self.embedding_size = embedding_size # 10
self.hidden_size = hidden_size # 50
self.vocab_size = vocab_size # 16
self.tagset_size = tagset_size # 16
self.dropout_rate = dropout_rate # 0.1
self.embedding = nn.Embedding(vocab_size, embedding_size) # (16, 10)
self.lstm = nn.LSTM(embedding_size, hidden_size) # (10, 50)
self.dropout = nn.Dropout(dropout_rate) # 0.1
self.hidden2tag = nn.Linear(hidden_size, tagset_size) # (50, 16)
def forward(self, x):
embed = self.embedding(x)
lstm_out, lstm_hidden = self.lstm(embed, None)
lstm_out = lstm_out[:,-1,:]
drop_out = self.dropout(lstm_out)
output = self.hidden2tag(drop_out)
return output
model = LSTMTagger(10, 50, 16, 16, 0.1)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters())
I'm not sure whether I'm doing this right. Any suggestions would be helpful.