Custom loss class not working as expected

I have the following code:

import torch
import torch.nn as nn
import torch.optim as optim

_epsilon = torch.finfo(torch.float32).eps

def WeightedF1LossFunc(y_pred, y_true, positiveThresh, betaSquared):
    # Hard-threshold the probabilities into 0/1 predictions
    y_pred = torch.ceil(y_pred - positiveThresh)
    tp = (y_true * y_pred).sum()
    fp = ((1 - y_true) * y_pred).sum()
    fn = (y_true * (1 - y_pred)).sum()
    precision = tp / (tp + fp + _epsilon)
    recall = tp / (tp + fn + _epsilon)
    f1_score = (betaSquared + 1) * (precision * recall) / (betaSquared * precision + recall + _epsilon)
    return 1 - f1_score

class WeightedF1LossClass(nn.Module):
    def __init__(self, positiveThresh, betaSquared):
        super(WeightedF1LossClass, self).__init__()
        self._betaSquared = betaSquared
        self._positiveThresh = positiveThresh

    def forward(self, inputs, targets):
        return WeightedF1LossFunc(inputs, targets, self._positiveThresh, self._betaSquared)
        
xTrain = _runData['train']['x']
yTrain = _runData['train']['y']
epochs = 10
lossFN = WeightedF1LossClass(0.5, _betaSquared)
model = CreateModel(_runData['num_of_features'], _layerConfig).to(device)  # create a sequential model with _runData['num_of_features'] inputs
optimizer = optim.Adam(model.parameters(), lr=0.001, eps=1e-07)  # use the Adam optimizer
for epoch in range(epochs):
    model.train()
    predictedY = model(xTrain)
    loss = lossFN(predictedY, yTrain)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad() 
    
    print(f'loss={loss.item():.4f}')

However, the loss doesn’t seem to change across epochs… what’s wrong?
loss=0.7822
loss=0.7822
loss=0.7822
loss=0.7822
loss=0.7822
loss=0.7822
loss=0.7822
loss=0.7822
loss=0.7822
loss=0.7822

The torch.ceil operation is the problem: it is a step function, piecewise constant everywhere, so its derivative is zero wherever it is defined. Backpropagating through it therefore produces all-zero gradients, no gradient signal ever reaches the model parameters, and the weights never update:

import torch

y_pred = torch.randn(10, requires_grad=True)

# ceil is piecewise constant, so its gradient is zero almost everywhere
out = torch.ceil(y_pred)
out.mean().backward()
print(y_pred.grad)
# tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
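
The usual fix is to keep the loss differentiable by dropping the hard threshold during training and computing a "soft" F1 directly from the predicted probabilities. This isn't from the original post, just a minimal sketch assuming y_pred already holds probabilities in [0, 1] (e.g. the model ends in a sigmoid); the name SoftF1LossFunc is made up for illustration:

import torch

_epsilon = torch.finfo(torch.float32).eps

def SoftF1LossFunc(y_pred, y_true, betaSquared):
    # No ceil/threshold here: using the raw probabilities keeps the graph differentiable
    tp = (y_true * y_pred).sum()
    fp = ((1 - y_true) * y_pred).sum()
    fn = (y_true * (1 - y_pred)).sum()
    precision = tp / (tp + fp + _epsilon)
    recall = tp / (tp + fn + _epsilon)
    f1_score = (betaSquared + 1) * (precision * recall) / (betaSquared * precision + recall + _epsilon)
    return 1 - f1_score

# sanity check: gradients are now non-zero
y_pred = torch.rand(10, requires_grad=True)
y_true = (torch.rand(10) > 0.5).float()
SoftF1LossFunc(y_pred, y_true, betaSquared=1.0).backward()
print(y_pred.grad)  # non-zero entries

If you want to keep positiveThresh in the picture, a steep sigmoid such as torch.sigmoid((y_pred - positiveThresh) * 50) is a common soft replacement for the ceil step. The hard threshold can still be used at evaluation time to report the actual F1 metric; it just has to stay out of the graph that backward() runs through.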