Low loss , but recall and precision also low

I try to make multilabel classification using LSTM:

class LSTMModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim, dropout_prob):
        super(LSTMModel, self).__init__()

        self.hidden_dim = hidden_dim
        self.layer_dim = layer_dim

        self.lstm = nn.LSTM(
            input_dim, hidden_dim, layer_dim, batch_first=True, dropout=dropout_prob
        )
        
        self.fc1 = nn.Linear(hidden_dim, output_dim)
  
        self.sigmoid=torch.nn.Sigmoid()

    def forward(self, x):
        h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
        c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
        out, (hn, cn) = self.lstm(x, (h0, c0))

        out = out[:, -1, :]
        
        out = self.fc1(out)
        out=self.sigmoid(out)

        return out

model=LSTMModel(input_dim=2,hidden_dim=10,layer_dim=4,output_dim=output_dim,dropout_prob=0.2)

# Train function
def train_model(model, loss, optimizer,scheduler, num_epochs):
    loss_vals=  []
    for epoch in range(num_epochs):
        print('Epoch {}/{}:'.format(epoch, num_epochs - 1), flush=True)

        epoch_loss = []
        epoch_f1_score = []
        epoch_recall = []
        epoch_precision = []
        
        for phase in ['train','val']:
            
            if phase == 'train':
                dataloader = train_dataloader
                model.train()
            else:
                dataloader = test_dataloader
                model.eval()

            running_loss = 0.
            running_acc = 0.

            for x, y in tqdm(dataloader):
                
                x = x.to(device)
                y = y.to(device)

                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):
                    
                    preds = model(x) 
                    
                    loss_value = loss(preds, y) 
            
                    epoch_loss.append(loss_value.item())
                          
                    if phase == 'train':   
                        loss_value.backward()
                        optimizer.step()
                        scheduler.step()
                    else:
                        pred = np.array(preds > 0.5, dtype=float)
                        
                        epoch_recall.append(precision_score(y_true=y, y_pred=pred,average='micro',zero_division=0))
                        epoch_precision.append(recall_score(y_true=y, y_pred=pred,average='micro',zero_division=0))
                        epoch_f1_score.append(f1_score(y_true=y, y_pred=pred,average='micro',zero_division=0))

        loss_vals.append(np.mean(epoch_loss))
        print('epoch_loss:{:.4f} epoch_recall:{:.4f} epoch_precision:{:.4f}'\
              .format(np.mean(epoch_loss),np.mean(epoch_recall),np.mean(epoch_precision)), flush=True)
    
    plt.plot(np.linspace(1, num_epochs, num_epochs).astype(int),loss_vals)

    return model

lr=0.1
epoch=200

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

loss = torch.nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

train_model(model,loss,optimizer,scheduler,epoch)

i got low loss and low recall and precision, and i can’t understand why