Always similar outputs within a batch when batch_size is greater than 1

What I get is:

epoch 99 output = tensor([[23.8904],
[28.7085],
[20.6641]], grad_fn=)
epoch 99 output = tensor([[30.4899],
[22.9630],
[27.6583]], grad_fn=)
epoch 99 running loss = 9.1643
epoch 199 output = tensor([[13.0002],
[11.1635],
[11.0956]], grad_fn=)
epoch 199 output = tensor([[41.1716],
[40.6116],
[41.1826]], grad_fn=)
epoch 199 running loss = 4.7491
epoch 299 output = tensor([[ 9.9920],
[10.1905],
[10.2471]], grad_fn=)
epoch 299 output = tensor([[40.6160],
[40.2038],
[40.5667]], grad_fn=)
epoch 299 running loss = 4.5123
epoch 399 output = tensor([[10.2249],
[10.2265],
[10.1220]], grad_fn=)
epoch 399 output = tensor([[39.8608],
[40.2258],
[40.0353]], grad_fn=)
epoch 399 running loss = 4.4805
epoch 499 output = tensor([[8.8487],
[8.5307],
[8.9612]], grad_fn=)
epoch 499 output = tensor([[35.7484],
[36.2212],
[36.3786]], grad_fn=)
epoch 499 running loss = 5.0115
epoch 599 output = tensor([[8.9839],
[8.8247],
[9.0632]], grad_fn=)
epoch 599 output = tensor([[39.2804],
[38.3255],
[38.4358]], grad_fn=)
epoch 599 running loss = 4.7069
epoch 699 output = tensor([[9.7793],
[9.4635],
[9.6627]], grad_fn=)
epoch 699 output = tensor([[38.4204],
[38.4527],
[38.7851]], grad_fn=)
epoch 699 running loss = 4.6458
epoch 799 output = tensor([[9.9077],
[9.6871],
[9.7271]], grad_fn=)
epoch 799 output = tensor([[38.8115],
[39.2431],
[39.2638]], grad_fn=)
epoch 799 running loss = 4.5689
epoch 899 output = tensor([[10.0550],
[ 9.9876],
[10.0641]], grad_fn=)
epoch 899 output = tensor([[41.8334],
[41.6715],
[41.7007]], grad_fn=)
epoch 899 running loss = 4.6421
epoch 999 output = tensor([[9.9938],
[9.9625],
[9.9120]], grad_fn=)
epoch 999 output = tensor([[40.6939],
[40.7212],
[40.7685]], grad_fn=)
epoch 999 running loss = 4.5302