Is this setup correct, and how can I improve it?

I have a dataset of 1,400 rows. My model takes two inputs (10-dimensional numeric data and a 40-dimensional embedding of an image or sound) and predicts a binary label. This is my architecture:

import torch
from torch import nn
from torch.nn import functional as F
import pytorch_lightning as pl
from sklearn.metrics import accuracy_score
import torchmetrics
class HeartModel(pl.LightningModule):
  def __init__(self):
    super().__init__()

    self.accuracy = torchmetrics.Accuracy()  # defined but not used below

    # branch for the 10-dimensional numeric features
    self.model1 = nn.Sequential(
        nn.Linear(10, 8),
        nn.ReLU(),
        nn.Linear(8, 4),
    )
    # branch for the 40-dimensional embeddings
    self.model2 = nn.Sequential(
        nn.Linear(40, 64),
        nn.ReLU(),
        nn.Linear(64, 32),
        nn.ReLU(),
        nn.Linear(32, 4),
    )
    # head on the concatenated 4 + 4 features; outputs a single raw logit
    self.last = nn.Sequential(
        nn.Linear(8, 10),
        nn.ReLU(),
        nn.Linear(10, 1),
    )


  def forward(self, input1, input2):
    f1 = self.model1(input1)
    f2 = self.model2(input2)
    # concatenate the two 4-dimensional branch outputs into one 8-dimensional vector
    combined = torch.cat((f1.view(f1.size(0), -1),
                          f2.view(f2.size(0), -1)), dim=1)
    out = self.last(combined)  # raw logit; the sigmoid is applied inside the loss
    return out

  def test_step(self, batch, batch_idx):
    x1, x2, y = batch
    y_hat = self(x1, x2)  # reuse this instead of running the forward pass twice
    loss = F.binary_cross_entropy_with_logits(y_hat, y.view(y.size(0), -1).float())
    self.log('test_loss', loss)  # automatic aggregation in the background
    return loss
  

  def training_step(self, batch, batch_nb):
    x1, x2, y = batch
    preds = self(x1, x2)  # reuse this instead of running the forward pass twice
    loss = F.binary_cross_entropy_with_logits(preds, y.view(y.size(0), -1).float())
    self.log('train_loss', loss)
    return loss
  
  def predict_step(self, batch, batch_idx):
    x1, x2, y = batch
    logits = self(x1, x2)
    probs = torch.sigmoid(logits)            # the model outputs logits, so convert to probabilities first
    preds = (probs >= 0.5).long().view(-1)   # threshold at 0.5
    acc = accuracy_score(y.view(-1).cpu().numpy(), preds.cpu().numpy())
    return acc


  def configure_optimizers(self):
    return torch.optim.Adam(self.parameters(), lr=0.02)


m = HeartModel()
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.progress import TQDMProgressBar


trainer = Trainer(
    auto_lr_find=True,  # note: this only takes effect when trainer.tune() is called
    accelerator="auto",
    devices=1 if torch.cuda.is_available() else None,  # limit to one device for iPython runs
    max_epochs=100,
    callbacks=[TQDMProgressBar(refresh_rate=20)],
)
trainer.fit(m, train_dataloader)
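
For context, train_dataloader yields batches of (x1, x2, y). My real preprocessing is longer, but it is roughly equivalent to this sketch (the random tensors here are just placeholders with the shapes from my description above):

import torch
from torch.utils.data import TensorDataset, DataLoader

x1 = torch.randn(1400, 10)           # numeric features
x2 = torch.randn(1400, 40)           # image/sound embeddings
y = torch.randint(0, 2, (1400, 1))   # binary 0/1 labels (cast to float inside the loss)
train_dataloader = DataLoader(TensorDataset(x1, x2, y), batch_size=32, shuffle=True)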

The training loss is 0.68 and the test loss is 0.69.
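
For reference, I checked what chance-level BCE looks like: a model that always outputs logit 0 (i.e. probability 0.5) has a loss of ln 2 ≈ 0.693, so my losses are basically sitting at that baseline:

import math
import torch
from torch.nn import functional as F

logits = torch.zeros(4, 1)                       # a model that always outputs logit 0, i.e. p = 0.5
targets = torch.tensor([[0.], [1.], [0.], [1.]])
print(F.binary_cross_entropy_with_logits(logits, targets))  # tensor(0.6931)
print(math.log(2))                                          # 0.6931...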

On top of that, my accuracy is about 47%, which is no better than random guessing on a binary task. How can I improve this, and is the loss function correct?

Thanks