I am currently working on a regression classification model. But my scores always keep steady, doesn’t decrease. Is there any problem with my model?
class LSTM(nn.Module):
def __init__(self):
super(LSTM, self).__init__()
embedding_dimension = 40
pre_trained_embedding = torch.FloatTensor(TEXT.vocab.vectors)
self.target_dimension = 1 # label: 1 or 0
self.embedding_layer = nn.Embedding.from_pretrained(pre_trained_embedding)
self.hidden_dimension = 200
self.lstm = nn.LSTM(embedding_dimension, self.hidden_dimension, num_layers=1, batch_first=True)
n_lstm_out = 200
self.dense_layer = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(n_lstm_out, 100),
nn.Dropout(0.5),
nn.Linear(100, self.target_dimension, ),
nn.Sigmoid(),
)
self.optimizer = optim.SGD(self.parameters(), lr=0.000001)
self.loss = nn.MSELoss()
def learn(self, x_train, y_train):
self.optimizer.zero_grad()
embedding_out = self.embedding_layer(x_train)
lstm_out, _ = self.lstm(embedding_out)
prediction_out = self.dense_layer(lstm_out)
mse_input = prediction_out[:, 0, :]
mse_input = mse_input.type(torch.float)
mse_target = y_train
mse_target = mse_target.type(torch.float)
loss = self.loss(mse_input, mse_target)
loss.backward()
self.optimizer.step()
return loss