Time series LSTM model predictions are strange

Hi everyone, I've created an LSTM model to predict time series data. I've trained it on historical gold prices, and when I test it, it almost matches the original graph. The only problem is when I predict: I append the last prediction to my data and use the last window-size elements of the array to predict the next value. One thing I've noticed is that when I predict like this, the time series goes all the way up or all the way down, as if it were converging to zero…
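To be clear about the difference: the test curve that matches well is produced one step at a time from the true history, while the bad forecast feeds my own predictions back in. Roughly (a sketch using the methods from the code below; `series` stands for my float32 price array and `tw` for the window size):

```python
# One-step-ahead test: every window comes from the TRUE series,
# so each prediction only needs to be locally accurate.
test_preds = [model.one_step_forecast(series[i - tw:i].reshape(1, -1))
              for i in range(tw, len(series))]

# Recursive forecast: windows progressively fill with my own predictions,
# so small errors compound and the curve drifts off in one direction.
half = len(series) // 2
recursive = model.n_step_forecast(series[:half], n=len(series) - half, tw=tw)
```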
Here I predict starting from the halfway point of the time series:

[forecast plot: the prediction drifts steadily in one direction after the halfway point]

This is the model code:

```python
import os

import numpy as np
import torch
from torch import nn

# assuming `device` is defined like this near the top of the script
device = "cuda" if torch.cuda.is_available() else "cpu"

class Model(nn.Module):
  def __init__(self, input_size, hidden_size, output_size, n_lstm_layers=1, n_deep_layers=10, dropout=0.2):
    super().__init__()
    self.hidden_size = hidden_size
    self.n_lstm_layers = n_lstm_layers

    self.lstm = nn.LSTM(input_size, hidden_size, n_lstm_layers, batch_first=True)
    self.linear = nn.Linear(hidden_size, hidden_size)

    # feed-forward head: ReLU + Linear blocks with dropout, ending in a
    # final ReLU + Linear that maps hidden_size down to output_size
    dnn_layers = []
    for i in range(n_deep_layers):
      if i == n_deep_layers - 1:
        dnn_layers.append(nn.ReLU())
        dnn_layers.append(nn.Linear(hidden_size, output_size))
      else:
        dnn_layers.append(nn.ReLU())
        dnn_layers.append(nn.Linear(hidden_size, hidden_size))
        if dropout:
          dnn_layers.append(nn.Dropout(p=dropout))

    self.dnn = nn.Sequential(*dnn_layers)

  def forward(self, x):
    x, _ = self.lstm(x)  # keep the LSTM outputs, drop the (h_n, c_n) states
    x = self.linear(x)
    return self.dnn(x)

  def fit(self, datamodule, epochs, criterion, optimizer, path_checkpoint, lr=None, save=True):
    # resume from an existing checkpoint, optionally overriding the learning rate
    if os.path.isfile(path_checkpoint):
      optimizer_state_dict = self.load_checkpoint(path_checkpoint)
      optimizer.load_state_dict(optimizer_state_dict)
      if lr is not None:
        optimizer.param_groups[0]['lr'] = lr

    train_dataloader = datamodule.train_dataloader()
    validation_dataloader = datamodule.validation_dataloader()

    for epoch in range(epochs):
      self.train()
      for X, y in train_dataloader:
        X, y = X.to(device), y.unsqueeze(dim=1).to(device)
        y_pred = self(X)
        loss = criterion(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

      # validation pass (no gradient tracking)
      self.eval()
      with torch.no_grad():
        for X, y in validation_dataloader:
          X, y = X.to(device), y.unsqueeze(dim=1).to(device)
          y_pred = self(X)
          validation_loss = criterion(y_pred, y)
      
      if epoch % 50 == 0:
        print(f"Epoch: {epoch}, Loss: {loss.item():.4f}, Validation Loss: {validation_loss.item():.4f}")
        if save:
          self.save_checkpoint(path=path_checkpoint, optimizer=optimizer)

  def save_checkpoint(self, path, optimizer):
    torch.save({
            'model_state_dict': self.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            }, path)
    
  def load_checkpoint(self, path):
    checkpoint = torch.load(path, map_location=device)
    self.load_state_dict(checkpoint["model_state_dict"])
    return checkpoint['optimizer_state_dict']

  def one_step_forecast(self, history):
    self.eval()
    with torch.no_grad():
      prediction = self(torch.from_numpy(history).float().to(device))
    return prediction.item()

  def n_step_forecast(self, data, n, tw):
    # recursive forecast: feed the last `tw` values (including my own
    # previous predictions) back in to predict the next step, n times
    data = list(data)

    for step in range(n):
      data_ = np.array(data[-tw:], dtype=np.float32).reshape(1, -1)
      forecast = self.one_step_forecast(data_)
      data.append(np.float32(forecast))

    return data
```
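
For completeness, this is roughly how I set everything up and call it (a sketch; the hyperparameters are illustrative, and `datamodule` / `gold.csv` are placeholders for my actual data pipeline):

```python
tw = 30  # window size (illustrative)
prices = np.loadtxt("gold.csv", dtype=np.float32)  # placeholder for my gold price series

# input_size equals the window size, since each window is fed to the LSTM
# as one feature vector (matching the reshape in one_step_forecast)
model = Model(input_size=tw, hidden_size=64, output_size=1).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()

model.fit(datamodule, epochs=500, criterion=criterion,
          optimizer=optimizer, path_checkpoint="model.pt")

# forecast the second half of the series recursively from the first half
half = len(prices) // 2
forecast = model.n_step_forecast(prices[:half], n=len(prices) - half, tw=tw)
```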