Should the tensors that define the ground truth (such as the relevances tensor) have `requires_grad=True`?

I implemented a loss based on nDCG, as shown in the following code snippet:

import pickle
import torch

class NDCGLoss(torch.nn.Module):

    def __init__(self):
        super(NDCGLoss, self).__init__()

    def get_scores(self, text_rpr, label_rpr):
        m = torch.einsum('b i j, c k j -> b c i k', text_rpr, label_rpr)
        m = torch.max(m, -1).values.sum(dim=-1)
        return torch.nn.functional.normalize(m, p=2, dim=-1)

    def _get_dcg(self, scores, relevances):
        discount = 1.0 / (torch.log2(torch.arange(relevances.shape[-1], device=relevances.device) + 2.0))
        ranking = scores.argsort(descending=True)
        ranked = torch.gather(relevances, dim=-1, index=ranking)
        return torch.sum(discount * ranked, dim=-1)

    def forward(self, text_idx, text_rpr, label_idx, label_rpr, relevances):
        scores = self.get_scores(text_rpr, label_rpr)
        dcg = self._get_dcg(scores, relevances)
        idcg = self._get_dcg(relevances, relevances)
        idcg = torch.where(idcg==0, 1.0, idcg)
        return -torch.log(
            torch.mean(
                torch.div(dcg, idcg)
            ) + 1e-11
        )

where the similarity score is computed using the query_rpr against p_doc_rpr and n_doc_rpr embeddings. The query_idx, p_doc_idx, and n_doc_idx tensors are used to define the ground-truth relevance. However, I am receiving the following error.

RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn.

Test case:

batch_size = 3
sequence_length = 4
hidden_size = 5

query_idx = torch.tensor([1,2,3])
query_rpr = torch.rand((batch_size, sequence_length, hidden_size), requires_grad=True)

# The doc 4 is relevant (while doc 7 is not) concerning the query 1.
# The doc 5 is relevant (while doc 8 is not) concerning the query 2.
# The doc 6 is relevant (while doc 9 is not) concerning the query 3.
p_doc_idx = torch.tensor([4,5,6]) # positive samples
n_doc_idx = torch.tensor([7,8,9]) # negative samples
p_doc_rpr = torch.rand((batch_size, sequence_length, hidden_size), requires_grad=True)
n_doc_rpr = torch.rand((batch_size, sequence_length, hidden_size), requires_grad=True)

# Therefore, the ground truth relevances is:
relevances = torch.concatenate([torch.ones((batch_size, batch_size)), torch.zeros((batch_size, batch_size))], dim=-1)

loss = NDCGLoss()
output = loss(
    query_idx,
    query_rpr,
    torch.concatenate([p_doc_idx, n_doc_idx], dim=0),
    torch.concatenate([p_doc_rpr, n_doc_rpr], dim=0),
    relevances)
print(f"Loss: {output}")
output.backward()

# ---------------------------------------------------------------------------
# RuntimeError                              Traceback (most recent call last)
# <ipython-input-22-11e0c28a215c> in <cell line: 27>()
#      25     relevances)
#      26 print(f"Loss: {output}")
# ---> 27 output.backward()
#      28 

# 1 frames
# /usr/local/lib/python3.10/dist-packages/torch/_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
#     490                 inputs=inputs,
#     491             )
# --> 492         torch.autograd.backward(
#     493             self, gradient, retain_graph, create_graph, inputs=inputs
#     494         )

# /usr/local/lib/python3.10/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
#     249     # some Python versions print out the first line of a multi-line function
#     250     # calls in the traceback and some print out the last line
# --> 251     Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
#     252         tensors,
#     253         grad_tensors_,

# RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

Should the tensors that define the ground truth (such as the relevances tensor) have requires_grad=True?