Simple feedforward model not updating - loss is constant

Here is my network:

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F

# a fully connected network with 4 linear layers and BatchNorm before each one
class NeuralNet(nn.Module):
    def __init__(self, num_features):
        super(NeuralNet, self).__init__()
        self.n1 = nn.BatchNorm1d(num_features)
        self.fc1 = nn.Linear(num_features, 32)
        self.n2 = nn.BatchNorm1d(32)
        self.fc2 = nn.Linear(32, 32)
        self.n3 = nn.BatchNorm1d(32)
        self.fc3 = nn.Linear(32, 16)
        self.n4 = nn.BatchNorm1d(16)
        self.fc4 = nn.Linear(16, 1)
        
    def forward(self, x):
        out = F.relu(self.fc1(self.n1(x)))
        out = F.relu(self.fc2(self.n2(out)))
        out = F.relu(self.fc3(self.n3(out)))
        out = F.relu(self.fc4(self.n4(out)))
        return out 

# Our input size is 1, since we only have one feature: the timestamp.
net = NeuralNet(1)
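
To rule out the forward pass itself, here is a quick sanity check with a dummy batch (random values, purely illustrative):

dummy = torch.randn(10, 1)   # fake batch: 10 samples, 1 feature
print(net(dummy).shape)      # prints torch.Size([10, 1])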

Training:

table = pd.read_csv("bitcoin-historical-data/btceUSD_1-min_data_2012-01-01_to_2017-05-31.csv")
table = table.dropna()

table = table.reset_index(drop=True)
divider = np.random.rand(len(table)) < 0.8
train_table = table[divider]
test_table = table[~divider]

train_data = train_table.to_numpy().astype('float')
test_data = test_table.to_numpy().astype('float')

# Loss function:
criterion = nn.MSELoss()
# Optimizer: Adam, an adaptive variant of gradient descent
optimizer = torch.optim.Adam(net.parameters(), lr=0.007)

# epochs: how many full passes over the training data
num_epochs = 15
# batch size: number of examples per training update
batch_size = 900
# number of batches to split the training data into
num_batches = len(train_data) // batch_size
# split the data into batches
batches = np.array_split(train_data, num_batches)
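
# Illustrative check on the batching (exact numbers depend on the
# random 80/20 split above):
print(len(batches), batches[0].shape)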

# Train the model:
for epoch in range(num_epochs):
    for i, batch in enumerate(batches):
        # first column only (the timestamp), reshaped to (batch_size, 1)
        inputs = torch.from_numpy(np.expand_dims(batch[:, 0], axis=1)).float()
        # last column (the price) is the label
        labels = torch.from_numpy(batch[:, -1]).float()
        
        optimizer.zero_grad()              # clear gradients from the previous step
        outputs = net(inputs)              # forward pass through the network

        loss = criterion(outputs, labels)  # compute the loss
        loss.backward()                    # backpropagate to compute gradients
        optimizer.step()                   # update the weights
        
        if (i + 1) % 1000 == 0:  # print the loss every 1000 batches
            print('Epoch [%d/%d], Batch # [%d], Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1, loss.item()))
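
One diagnostic I can add after training (a sketch; fc4 is picked arbitrarily) is to look at a layer's gradient, since all-zero gradients would explain a flat loss:

print(net.fc4.weight.grad)  # all zeros would mean no learning signal reaches the weights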

What am I doing wrong? The loss is always the same.

EDIT: I’m using the data from Kaggle: https://www.kaggle.com/mczielinski/bitcoin-historical-data
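
For reference, the column layout (which the batch[:, 0] / batch[:, -1] indexing above assumes: timestamp first, weighted price last); the comment shows what I believe the list looks like for this dataset:

print(table.columns.tolist())
# ['Timestamp', 'Open', 'High', 'Low', 'Close',
#  'Volume_(BTC)', 'Volume_(Currency)', 'Weighted_Price']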

Thanks!