I get the same values in LSTM time series prediction

I'm trying to train an LSTM model on short time series collected from many individual persons.
Before training on the actual data, I tried it with dummy data.
The structure of the dummy data looks normal and the values are unique per patient,
but the final prediction comes out identical for all 3 samples in the batch.
What did I do wrong?

# tensor([[[0.0496, 0.6693, 0.6575],
#          [0.3207, 0.6082, 0.0967],
#          [0.3982, 0.5984, 0.0644],
#          [0.4094, 0.5971, 0.0608],
#          [0.4109, 0.5969, 0.0603],
#          [0.4111, 0.5969, 0.0603]],

#         [[0.0496, 0.6693, 0.6575],
#          [0.3207, 0.6082, 0.0967],
#          [0.3982, 0.5984, 0.0644],
#          [0.4094, 0.5971, 0.0608],
#          [0.4109, 0.5969, 0.0603],
#          [0.4111, 0.5969, 0.0603]],

#         [[0.0496, 0.6693, 0.6575],
#          [0.3207, 0.6082, 0.0967],
#          [0.3982, 0.5984, 0.0644],
#          [0.4094, 0.5971, 0.0608],
#          [0.4109, 0.5969, 0.0603],
#          [0.4111, 0.5969, 0.0603]]], grad_fn=<SigmoidBackward>)
import torch
import torch.nn as nn
import numpy as np

torch.manual_seed(777)  # reproducibility

# ================================================================================
# Input features per year: age, sex, max blood pressure, min blood pressure, glucose
x_one_hot = [[[50, 0, 120, 70, 90],    # patient A, 2008
              [50, 0, 124, 74, 91],    # patient A, 2009
              [51, 0, 122, 60, 92],    # patient A, 2010
              [52, 0, 110, 76, 91],    # patient A, 2011
              [53, 0, 140, 83, 68],    # patient A, 2012
              [54, 0, 130, 81, 85]],   # patient A, 2013
             [[60, 1, 151, 70, 100],   # patient B, 2008
              [60, 1, 152, 94, 101],   # patient B, 2009
              [61, 1, 151, 90, 102],   # patient B, 2010
              [62, 1, 133, 96, 101],   # patient B, 2011
              [63, 1, 162, 83, 108],   # patient B, 2012
              [64, 1, 151, 71, 105]],  # patient B, 2013
             [[30, 0, 111, 60, 60],    # patient C, 2008
              [30, 0, 112, 64, 61],    # patient C, 2009
              [31, 0, 111, 60, 62],    # patient C, 2010
              [32, 0, 113, 66, 61],    # patient C, 2011
              [33, 0, 112, 63, 68],    # patient C, 2012
              [34, 0, 111, 61, 65]]]   # patient C, 2013
# print("x_one_hot",np.array(x_one_hot).shape)
# x_one_hot (3, 6, 5)
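# Aside (not part of the original question's code): the raw features sit on
# very different scales (sex is 0/1 while blood pressures run 60 to 160), and
# LSTM gates can saturate on inputs this large. A minimal per-feature
# min-max scaling sketch, purely illustrative:
x_demo = torch.Tensor(x_one_hot)                       # (3, 6, 5)
flat = x_demo.reshape(-1, x_demo.size(-1))             # (18, 5): all patient-years stacked
mins, maxs = flat.min(dim=0).values, flat.max(dim=0).values
x_scaled = (x_demo - mins) / (maxs - mins + 1e-8)      # every feature mapped into [0, 1]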

# Targets per year (multi-hot): high blood pressure, diabetes, obesity
y_data = [[[0,1,1],   # patient A, 2008
           [1,1,0],   # patient A, 2009
           [0,0,0],   # patient A, 2010
           [0,0,0],   # patient A, 2011
           [0,0,0],   # patient A, 2012
           [1,1,0]],  # patient A, 2013
          [[0,1,1],   # patient B, 2008
           [1,1,0],   # patient B, 2009
           [1,1,1],   # patient B, 2010
           [0,1,0],   # patient B, 2011
           [0,1,0],   # patient B, 2012
           [1,1,0]],  # patient B, 2013
          [[0,0,0],   # patient C, 2008
           [0,0,0],   # patient C, 2009
           [0,0,0],   # patient C, 2010
           [0,0,0],   # patient C, 2011
           [0,1,0],   # patient C, 2012
           [1,1,0]]]  # patient C, 2013
# print("y_data",np.array(y_data).shape)
# y_data (3, 6, 3)

# ================================================================================
inputs = torch.Tensor(x_one_hot)   # (3, 6, 5)
labels = torch.Tensor(y_data)      # (3, 6, 3)

# ================================================================================
num_classes = 5          # stored by the model but otherwise unused
input_size = 5
hidden_size = 20
sequence_length = 6
num_layers = 1
final_output_dim = 3

# ================================================================================
class RNN(nn.Module):

    def __init__(self, num_classes, input_size, hidden_size, num_layers, final_output_dim):
        super(RNN, self).__init__()

        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sequence_length = sequence_length  # NOTE: reads the module-level global, not a constructor argument
        self.final_output_dim = final_output_dim

        self.rnn = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                           num_layers=num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, final_output_dim)

    def forward(self, x):
        h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        c_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)

        x = x.view(x.size(0), self.sequence_length, self.input_size)  # view() is not in-place; assign the result

        out, _ = self.rnn(x, (h_0, c_0))

        out = self.linear(out)     # (batch, seq, hidden_size)
        out = self.linear2(out)    # (batch, seq, final_output_dim)

        out = torch.sigmoid(out)

        return out

# ================================================================================
rnn = RNN(num_classes, input_size, hidden_size, num_layers, final_output_dim)
print(rnn)
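
# Quick shape sanity check for the forward pass above (a sketch on random data):
with torch.no_grad():
    print(rnn(torch.randn(3, sequence_length, input_size)).shape)
    # expected: torch.Size([3, 6, 3]) = (batch, seq, final_output_dim)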

criterion = nn.BCELoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=0.1)
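
# For reference, the numerically stabler pairing in PyTorch is
# nn.BCEWithLogitsLoss, which fuses the sigmoid into the loss; forward()
# would then return raw logits and sigmoid would only be applied when
# reading off predictions. A sketch with stand-in logits:
criterion_logits = nn.BCEWithLogitsLoss()
demo_logits = torch.randn(3, 6, 3)                 # stand-in for a no-sigmoid forward()
demo_loss = criterion_logits(demo_logits, labels)  # same targets as above
demo_probs = torch.sigmoid(demo_logits)            # probabilities, for inspection only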

# ================================================================================
for epoch in range(5000):
    outputs = rnn(inputs)
    optimizer.zero_grad()
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 500 == 0:  # log occasionally instead of every epoch
        print("epoch: %d, loss: %1.3f" % (epoch + 1, loss.item()))

print("Learning finished!")

# ================================================================================
# Test
outputs = rnn(inputs)
# print("outputs",outputs)

# All three patients get identical predictions: the same 6x3 block repeated,
# exactly as in the tensor shown at the top of the post.
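
# One way to quantify the collapse (a quick check, nothing more): if the
# model ignored the per-patient differences, these maxima will be ~0.
with torch.no_grad():
    out = rnn(inputs)
print((out[0] - out[1]).abs().max().item())  # patient A vs. patient B
print((out[0] - out[2]).abs().max().item())  # patient A vs. patient C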