Network is not learning

I got a simple feedforward net that its not learning, i get the same loss with every epoch.

Book= pd.ExcelFile(“path”)
Sheet= Book.parse(“R_MWL_N”)
Input_Data= Sheet.R
Target_Data= Sheet.MWL

Input_Data= torch.FloatTensor(Input_Data)
Input_Data= Input_Data.view(365,1)
Target_Data= torch.FloatTensor(Target_Data)
Target_Data= Target_Data.view(365,1)

batch_size = 1
learning_rate=0.01
epochs=20
days_elapsed = 7

class LoadData(Dataset):
def init(self, inputs, targets, days_elapsed, transform=None):
assert len(inputs) == len(targets)
self.inputs = inputs
self.targets = targets
self.days_elapsed = days_elapsed
self.transform = transform

def __len__(self):
    samples = len(self.targets) - self.days_elapsed + 1
    return samples

def __getitem__(self, idx):
    inputs = self.inputs[idx:idx+self.days_elapsed]
    inputs = np.array(inputs)
    inputs = inputs.reshape((1,len(inputs)))
    target = np.array(self.targets[idx + self.days_elapsed - 1])
    target = target.reshape((1,len(target)))
    
    if self.transform is not None:
        inputs = self.transform(inputs)
    return (inputs, target)

train_dataset = LoadData(Input_Data, Target_Data, days_elapsed, transform=None)

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)

input_size= 7
hidden_size=20
output_size=1

class Net(nn.Module):
def init(self, input_size, hidden_size, output_size):

    super(Net, self).__init__()
    self.fc1 = nn.Linear(input_size, hidden_size)
    self.fc2 = nn.Linear(hidden_size, output_size)
    
    self.ReLU = nn.ReLU()
    self.Sigmoid = nn.Sigmoid()



def forward(self, x):
    x = self.fc1(x)
    x = self.ReLU(x)
    x = self.fc2(x)
    x = self.Sigmoid(x)
    return x

FFNN = Net(input_size, hidden_size, output_size)
print(FFNN)

criterion = nn.MSELoss()
optimizer = torch.optim.SGD(FFNN.parameters(), lr=learning_rate)

for epoch in range (epochs):
for batch_idx, (inputtensor, targettensor) in enumerate(train_loader):

    inputtensor = inputtensor.requires_grad_()
    targettensor = targettensor.requires_grad_()
    
    optimizer.zero_grad()
    FFNN_output= FFNN(inputtensor)

    loss = criterion(FFNN_output, targettensor)
    
    loss.backward()
    optimizer.step()

for i in range (epoch):
print ("#" + str(i) + " Loss: " + format(loss.item()))