When I use the ReLU function, my model gives as many outputs as the hidden layer size: with a hidden_layer_size of 6 or 7 it gives 6 or 7 outputs, but my output_size is 1. Why is relu() not giving a single output?
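
A minimal shape check of relu() on its own shows the behaviour I mean (the tensor here is random, only the shapes matter):

import torch
import torch.nn as nn

relu = nn.ReLU()
x = torch.randn(5, 6)     # stand-in for (seq_len, hidden_layer_size)
print(relu(x).shape)      # torch.Size([5, 6]) -- same shape as the input
print(relu(x)[-1].shape)  # torch.Size([6]) -- the last step has 6 values, not 1

My code: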

import torch
import torch.nn as nn

class LSTM(nn.Module):
    def __init__(self, input_size=1, hidden_layer_size=6, output_size=1):
        super().__init__()

        self.hidden_layer_size = hidden_layer_size
        # note: the third positional argument of nn.LSTM is num_layers, not an output size
        self.lstm = nn.LSTM(input_size, hidden_layer_size, output_size)
        self.relu = nn.ReLU()  # activation unit; trying relu here instead of a linear layer

        self.hidden_cell = (torch.zeros(1, 1, self.hidden_layer_size),
                            torch.zeros(1, 1, self.hidden_layer_size))

    def forward(self, input_seq):
        # lstm_out: (seq_len, batch=1, hidden_layer_size)
        lstm_out, self.hidden_cell = self.lstm(input_seq.view(len(input_seq), 1, -1), self.hidden_cell)
        # after the view: (seq_len, hidden_layer_size); relu does not change the shape
        predictions = self.relu(lstm_out.view(len(input_seq), -1))
        return predictions[-1]

model = LSTM()
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.0001)

print(model)

epochs = 300

for i in range(epochs):
    for seq, labels in train_inout_seq:
        optimizer.zero_grad()
        model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                             torch.zeros(1, 1, model.hidden_layer_size))
        y_pred = model(seq)

        single_loss = loss_function(y_pred, labels)
        single_loss.backward()
        optimizer.step()

    if i % 25 == 1:
        print(f'epoch: {i:3} loss: {single_loss.item():10.8f}')

model.eval()

for i in range(fut_period):
    seq = torch.FloatTensor(train_inputs[-train_window:])
    #print(seq)
    with torch.no_grad():
        # reset the state the same way as in training (forward() reads hidden_cell)
        model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                             torch.zeros(1, 1, model.hidden_layer_size))
        print(model(seq))
        train_inputs.append(model(seq).item())
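
For reference, this is how I checked the output size with the class above (the 10-step dummy sequence is just for illustration, not my real data):

check_model = LSTM(hidden_layer_size=6)
dummy_seq = torch.randn(10)           # stand-in for one training window
print(check_model(dummy_seq).shape)   # torch.Size([6]) -- I expected torch.Size([1])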
