LSTM always predicts the same value for different inputs

My LSTM model always predicts the same value after training, even when I feed in random numbers as input. Can anyone help me with this? Thanks!

import torch
import torch.nn as nn
import torch.utils.data as Data
import pandas as pd

class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers):
        super(LSTM, self).__init__()

        self.output_size = output_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        batch_size = x.size(0)

        # zero initial hidden and cell states for every forward pass
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size)

        r_out, (hn, cn) = self.lstm(x, (h0, c0))

        # map the hidden output of the last time step to the prediction
        output = self.fc(r_out[:, -1, :])

        return output
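
For reference: with batch_first=True the LSTM expects input of shape (batch, seq_len, input_size), r_out has shape (batch, seq_len, hidden_size), and r_out[:, -1, :] takes the hidden output of the last time step. A minimal shape check along these lines confirms the output shape (m and dummy are just placeholder names, assuming the class defined above):

m = LSTM(input_size=1, hidden_size=50, output_size=1, num_layers=1)
dummy = torch.randn(4, 20, 1)   # (batch=4, seq_len=20, input_size=1)
print(m(dummy).shape)           # torch.Size([4, 1])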
    
class ToSequence(Data.Dataset):
    """Slice a flat series into sliding windows of length `window`."""

    def __init__(self, features, response, window):
        self.features = features
        self.response = response
        self.window = window

    def __getitem__(self, index):
        # window of consecutive feature rows, paired with the response
        # at the last position of that window
        x = self.features[index:index+self.window]
        y = self.response[index+self.window-1]
        return x, y

    def __len__(self):
        return len(self.response) - self.window + 1
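
To make the windowing concrete, here is what ToSequence yields on a tiny series (a minimal sketch with made-up numbers, assuming a window of 3): each item is a window of consecutive feature rows paired with the response at the last position of that window.

feats = torch.arange(1, 7, dtype=torch.float32).reshape(6, 1)
resp = torch.arange(1, 7, dtype=torch.float32).reshape(6, 1)
ds = ToSequence(feats, resp, window=3)
x0, y0 = ds[0]
print(x0.squeeze().tolist(), y0.tolist())   # [1.0, 2.0, 3.0] [3.0]
print(len(ds))                              # 4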

seq_length = 20
batch_size = 50
num_epochs = 200
learning_rate = 0.1

# Just test on a simple sequence of data:
# use a window of 20 time steps to predict the next data point
df = pd.DataFrame({'A':range(1,40001,1)})
train_y = df.iloc[0:30001,0]
test_y = df.iloc[30001:,0]

train_X = df.iloc[0:30001,0]
test_X = df.iloc[30001:,0]

test_input_features = torch.tensor(test_X.values).resize_((test_X.shape[0],1))  
test_input_response = torch.tensor(test_y.values).resize_((test_y.shape[0],1)) 
test_data_sequence = ToSequence(test_input_features,test_input_response,seq_length)
test_data_loader = Data.DataLoader(test_data_sequence, batch_size)

train_input_features = torch.tensor(train_X.values).resize_((train_X.shape[0],1))  
train_input_response = torch.tensor(train_y.values).resize_((train_y.shape[0],1)) 
train_data_sequence = ToSequence(train_input_features,train_input_response,seq_length)
train_data_loader = Data.DataLoader(train_data_sequence, batch_size)
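
As a quick sanity check on shapes (a hypothetical snippet; xb and yb are just placeholder names): the first batch from the loader should be (batch, seq_len, features) = (50, 20, 1), which is what nn.LSTM with batch_first=True expects, and the targets should be (50, 1).

xb, yb = next(iter(train_data_loader))
print(xb.shape, yb.shape)   # torch.Size([50, 20, 1]) torch.Size([50, 1])
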
params = {'input_size': 1,
          'hidden_size': 50,
          'output_size': 1,
          'num_layers': 1}

model = LSTM(**params)

loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) 

for epoch in range(1, num_epochs+1):

    for train_set_X, train_set_Y in train_data_loader: 

        optimizer.zero_grad()
        outputs= model(train_set_X.float())
        loss = loss_function(outputs, train_set_Y.float())
        loss.backward()
        optimizer.step()
    
    if epoch%10 == 0:
        
        print('Epoch: {}/{}.............'.format(epoch, num_epochs), end=' ')
        print("Training MSE: {:.4f}...........".format(loss.item()))

# collect predictions on the test set
prediction = torch.Tensor()
model.eval()

for X, Y in test_data_loader:

    test_outputs = model(X.float())
    prediction = torch.cat((prediction, test_outputs),0)
    
prediction

The prediction is the same value for every one of the ~10,000 test data points.
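
This is roughly what I mean by feeding in random numbers (a hypothetical sketch reusing the model trained above): every row of the output comes back as the same value.

with torch.no_grad():
    rand_out = model(torch.randn(5, seq_length, 1))
print(rand_out)   # all 5 rows are (nearly) identical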

I wish I could help you, but I'm facing the same issue…
Have you made any progress on this?