Help! RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

I ran into a problem while fine-tuning a model, and I would appreciate any help.
Here is my code:

import torch
import torch.nn.functional as F
import torch.optim as optim
import tqdm

def calculate(decoded):
    # Fraction of positive entries in each column
    count_ones_per_column = torch.sum(decoded > 0, dim=0)  # comparison -> bool tensor, no grad_fn
    total_rows = decoded.size(0)
    probability_per_column = count_ones_per_column / total_rows
    return probability_per_column

def train_model(train_dataloader, model, optimizer, keys, num_epochs=5):
    criterion = lambda decoded, keys: F.l1_loss(decoded, keys)
    model.train()
    for epoch in range(num_epochs):
        running_loss = 0.0
        for imgs in tqdm.tqdm(train_dataloader):
            imgs = imgs.to(device)
            decoded = model(imgs)  # shape: 8 x 48

            prob_1 = calculate(decoded)
            optimizer.zero_grad()
            loss = criterion(prob_1, keys)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print(f"Epoch {epoch+1}, Loss: {running_loss/len(train_dataloader)}")

def main():
    train_loader = get_dataloader('data', transform, 4, num_workers=0, collate_fn=None)
    keys_05 = torch.tensor(0.5).to(device).repeat(48)  # 48 targets of 0.5
    pretrained_model = get_hidden_decoder(48, 1, 8, 64)
    ckpt = get_hidden_decoder_ckpt('other_dec_48b.pth')
    pretrained_model.load_state_dict(ckpt, strict=False)

    pretrained_model.to(device)
    for param in pretrained_model.parameters():
        param.requires_grad_(True)
    optimizer = optim.SGD(pretrained_model.parameters(), lr=0.001, momentum=0.9)
    train_model(train_loader, pretrained_model, optimizer, keys_05)
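
As far as I can tell, the comparison itself is what breaks the graph. Here is a minimal standalone repro of the error (not my actual model):

import torch

x = torch.randn(8, 48, requires_grad=True)
y = (x > 0).float()     # comparison returns a new tensor with no grad_fn
print(y.grad_fn)        # None
print(y.requires_grad)  # False
loss = y.mean()
loss.backward()         # RuntimeError: element 0 of tensors does not require grad ...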

I added loss.requires_grad_(True) before loss.backward(), which silences the error, but then the loss never changes during training, since the gradient no longer reaches the model weights.
It seems that decoded > 0 detaches the tensor from the autograd graph, so grad_fn is None from that point on. Is there any way around this? Thanks!
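
One idea I'm considering is to replace the hard threshold with a differentiable surrogate, e.g. a sigmoid with a small temperature (tau below is my own guess, not from the original code); I'm not sure it's the right approach:

import torch

def calculate_soft(decoded, tau=0.1):
    # sigmoid(decoded / tau) approximates (decoded > 0).float() as tau -> 0,
    # but stays differentiable, so loss.backward() can reach the weights.
    soft_ones = torch.sigmoid(decoded / tau)
    return soft_ones.mean(dim=0)  # per-column estimate of P(bit == 1)

I've also seen a straight-through estimator mentioned (hard threshold in the forward pass, soft gradient in the backward pass), but I haven't tried it. Would either of these be the recommended fix?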