My training loss and training accuracy stay the same every epoch. How do I fix it?

Hello, I am training a neural network to detect cybersecurity threats, but my training loss and training accuracy stay the same every epoch, and this is bothering me. How do I fix it?

Here is my code.

import pandas as pd
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.nn.functional as functional
from torch.utils.data import DataLoader, TensorDataset
import torch.optim as optim
from torchmetrics import Accuracy

# load the pre-processed data
train_df = pd.read_csv('datasets/cybersecurity_threats/labelled_train.csv')
test_df = pd.read_csv('datasets/cybersecurity_threats/labelled_test.csv')
val_df = pd.read_csv('datasets/cybersecurity_threats/labelled_validation.csv')

# Show the first 10 rows of the validation dataset
print(val_df.head(10))

# Convert the training dataset into PyTorch tensors

selected_columns = ['processId', 'threadId', 'parentProcessId', 'userId', 'mountNamespace', 'argsNum', 'returnValue']

# Extract the features and labels and convert them to NumPy arrays
features = train_df[selected_columns].to_numpy()
labels = train_df['sus_label'].to_numpy()

val_features = val_df[selected_columns].to_numpy()
val_label = val_df['sus_label'].to_numpy()

test_features = test_df[selected_columns].to_numpy()
test_label = test_df['sus_label'].to_numpy()


# Create the features tensor
features_tensor = torch.tensor(features, dtype=torch.float32)

# Create the labels tensors
labels_tensor = torch.tensor(labels, dtype=torch.float32)
val_labels_tensor = torch.tensor(val_label, dtype=torch.float32)
test_labels_tensor = torch.tensor(test_label, dtype=torch.float32)
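# (BCELoss expects float targets, so the labels are created as float32)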

# Create the test features tensor
test_features = torch.tensor(test_features, dtype=torch.float32)


# Create the validation features tensor
val_features = torch.tensor(val_features, dtype=torch.float32)

# Combine the features and labels into a tensor dataset
train_dataset = TensorDataset(features_tensor, labels_tensor)
val_dataset = TensorDataset(val_features, val_labels_tensor)
test_dataset = TensorDataset(test_features, test_labels_tensor)

# Load the train, validation and test datasets into DataLoaders
batch_size = 5
train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size, shuffle=True)
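# (shuffle=True only matters for training; it does not change the evaluation metrics)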

# Create the model
class CyberSecurityModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()

        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.02)
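        # NB: this dropout layer is defined but never applied in forward() below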
        self.fc2 = nn.Linear(hidden_size, output_size)
        self.sigmoid = nn.Sigmoid()
    
    # Forward pass
    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)

        return self.sigmoid(x)
# Initialize the model, criterion and optimizer
input_size = len(selected_columns)
hidden_size = 512
output_size = 1

model = CyberSecurityModel(input_size, hidden_size, output_size)
device = torch.device("cpu")

criterion = nn.BCELoss()
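# (BCELoss expects probabilities in [0, 1], which the model's sigmoid output provides)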
learning_rate = 0.0001
# momentum = 0.7


optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# initialize the accuracy metrics for training, validation and testing
train_accuracy = Accuracy(task="binary").to(device)
test_accuracy = Accuracy(task="binary").to(device)
val_accuracy = Accuracy(task="binary").to(device)

# create the training loop
number_of_epochs = 50
model = model.to(device)

for epoch in range(number_of_epochs):
    model.train()
    running_loss = 0

    # Reset the training accuracy metric
    train_accuracy.reset()

    for inputs, labels in train_loader:

        inputs = inputs.to(device)
        labels = labels.to(device)

        # set the optimizer to zero gradient
        optimizer.zero_grad()

        # Forward pass
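        # model(inputs) has shape [batch_size, 1]; squeeze() drops the trailing dim to match the labels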
        outputs = model(inputs).squeeze()
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Update the training accuracy
        train_accuracy.update(outputs, labels.int())
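        # (torchmetrics applies a 0.5 threshold to the float outputs internally)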

        # Track the loss
        running_loss += loss.item()
    
    # Compute the average training accuracy for the epoch
    avg_train_accuracy = train_accuracy.compute()

    print(f'Epoch [{epoch+1}/{number_of_epochs}], '
          f'Training Loss: {running_loss / len(train_loader):.2f}, '
          f'Training Accuracy: {avg_train_accuracy * 100:.2f}%')