RuntimeError: element 0 of variables does not require grad and does not have a grad_fn

Hi, I have this model:

import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from torch.optim import AdamW
from transformers import AutoModel, get_cosine_schedule_with_warmup


class FakeNews_Classifier(pl.LightningModule):

    def __init__(self, config: dict):
        super().__init__()
        self.config = config
        self.pretrained_model = AutoModel.from_pretrained(config['model_name'], return_dict=True)
        self.hidden = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.pretrained_model.config.hidden_size)
        self.classifier = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.config['n_labels'])
        torch.nn.init.xavier_uniform_(self.classifier.weight)
        self.loss_func = nn.BCEWithLogitsLoss(reduction='mean')
        self.dropout = nn.Dropout()

    def forward(self, input_ids, attention_mask, labels=None):
        # roberta layer
        output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = torch.mean(output.last_hidden_state, 1)
        # final logits
        pooled_output = self.dropout(pooled_output)
        pooled_output = self.hidden(pooled_output)
        pooled_output = F.relu(pooled_output)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # calculate loss
        loss = 0
        if labels is not None:
            loss = self.loss_func(logits.view(-1, self.config['n_labels']), labels.view(-1, self.config['n_labels']))
        return loss, logits

    def training_step(self, batch, batch_index):
        loss, outputs = self(**batch)
        self.log("train loss", loss, prog_bar=True, logger=True)
        return {"loss": loss, "predictions": outputs, "labels": batch["labels"]}

    def validation_step(self, batch, batch_index):
        loss, outputs = self(**batch)
        self.log("validation loss", loss, prog_bar=True, logger=True)
        return {"val_loss": loss, "predictions": outputs, "labels": batch["labels"]}

    def predict_step(self, batch, batch_index):
        loss, outputs = self(**batch)
        return outputs

    def configure_optimizers(self):
        optimizer = AdamW(self.parameters(), lr=self.config['lr'], weight_decay=self.config['weight_decay'])
        total_steps = self.config['train_size'] / self.config['batch_size']
        warmup_steps = math.floor(total_steps * self.config['warmup'])
        scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)
        return [optimizer], [scheduler]
and I got this error:

RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

Can you please tell me how to fix this error? I am new and I'm trying my best to understand this, please help me.
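For reference, here is a minimal diagnostic sketch (not from the original post; the config values and the dummy batch are assumptions for illustration) that checks whether gradients can flow through this model at all:

import torch

# Hedged sketch: build the model and verify that autograd is on and that the
# parameters are trainable; the config values and the dummy batch are made up.
model = FakeNews_Classifier({'model_name': 'roberta-base', 'n_labels': 2})

print(torch.is_grad_enabled())                            # should be True during training
print(any(p.requires_grad for p in model.parameters()))   # should be True

# One dummy forward/backward pass; if loss.grad_fn is None here, something
# upstream (torch.no_grad(), frozen parameters, a detach) is the cause.
input_ids = torch.randint(0, 100, (2, 16))
attention_mask = torch.ones(2, 16, dtype=torch.long)
labels = torch.zeros(2, 2)
loss, logits = model(input_ids, attention_mask, labels)
print(loss.requires_grad, loss.grad_fn)                   # should be True / not None
loss.backward()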

Thanks, solved the issue.

Hi

I read all the topics regarding this error:

  File "/opt/homebrew/Caskroom/miniforge/base/envs/vitmm310/lib/python3.10/site-packages/torch/autograd/__init__.py", line 251, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

and cannot match any answer to my case.

I have a Perceiver model from Hugging Face, defined as below:

config = PerceiverConfig(d_model=self._token_size, num_labels=self._num_labels)
decoder = PerceiverClassificationDecoder(
    config,
    num_channels=config.d_latents,
    trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
    use_query_residual=True,
)
return PerceiverModel(config, decoder=decoder)

token_size = 800
num_labels = 7

As input I pass a tensor of shape [batch_size, 32, 800], and labels as a tensor of shape [batch_size, 7].
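
As a sanity check, here is a minimal sketch (not from the original post; the batch size of 4 is an assumption) that builds the model exactly as above and runs one dummy forward/backward pass with tensors of those shapes:

import torch
from transformers import PerceiverConfig, PerceiverModel
from transformers.models.perceiver.modeling_perceiver import PerceiverClassificationDecoder

# Hedged sketch: same construction as above, with token_size = 800 and num_labels = 7.
config = PerceiverConfig(d_model=800, num_labels=7)
decoder = PerceiverClassificationDecoder(
    config,
    num_channels=config.d_latents,
    trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
    use_query_residual=True,
)
model = PerceiverModel(config, decoder=decoder)

inputs = torch.randn(4, 32, 800)   # assumed batch_size = 4
labels = torch.randn(4, 7)

outputs = model(inputs=inputs)
loss = torch.nn.MSELoss()(outputs.logits, labels)
print(loss.requires_grad, loss.grad_fn)   # expected: True and a grad_fn
loss.backward()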

I made a training loop as follows:

criterion = torch.nn.MSELoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=envi_builder.config.learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=envi_builder.config.step_size, gamma=0.5)

model.train()
for epoch in range(envi_builder.config.n_epochs):
    loop = tqdm(dataloader_train, leave=True)
    for (inputs, labels) in loop:
        optimizer.zero_grad()

        inputs = inputs.to(envi_builder.config.device)
        labels = labels.to(envi_builder.config.device)

        outputs = model(inputs=inputs)
        logits = outputs.logits

        loss = criterion(logits, labels)

        loss.backward()
        optimizer.step()

For the loss tensor's attributes I see:

grad_fn = None
requires_grad = False
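
A quick check that narrows this down (a hedged sketch reusing the model and logits from the loop above; not part of the original post):

import torch

# If loss has no grad_fn, either autograd is globally disabled, the loss inputs
# were detached somewhere, or no parameter of the model requires grad.
print(torch.is_grad_enabled())                             # False here would explain the error
print(any(p.requires_grad for p in model.parameters()))    # should be True
print(logits.requires_grad, logits.grad_fn)                # should be True / not None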

The Perceiver model was taken from Hugging Face:

from transformers import PerceiverConfig, PerceiverModel
from transformers.models.perceiver.modeling_perceiver import (
    PerceiverClassificationDecoder,
)

Any idea?

UPDATE:

I added this before the epoch loop:
torch.set_grad_enabled(True)

and the logits from the model now have a grad_fn.
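
For reference, a minimal sketch of where that call sits relative to the training loop (names such as n_epochs are assumptions; torch.enable_grad() is shown as the scoped alternative to the global switch):

import torch

# Hedged sketch: globally re-enable autograd before training, in case earlier
# code (e.g. an evaluation pass) left it disabled.
torch.set_grad_enabled(True)

model.train()
for epoch in range(n_epochs):                  # n_epochs is assumed
    for inputs, labels in dataloader_train:
        optimizer.zero_grad()
        # torch.enable_grad() is the scoped equivalent of the global switch
        with torch.enable_grad():
            outputs = model(inputs=inputs)
            loss = torch.nn.MSELoss()(outputs.logits, labels)
        loss.backward()
        optimizer.step()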

It seems that something wrong is going on inside the model's forward() in this part:

        sequence_output = encoder_outputs[0]

        logits = None
        if self.decoder:
            if subsampled_output_points is not None:
                output_modality_sizes = {
                    "audio": subsampled_output_points["audio"].shape[0],
                    "image": subsampled_output_points["image"].shape[0],
                    "label": 1,
                }
            else:
                output_modality_sizes = modality_sizes
            decoder_query = self.decoder.decoder_query(
                inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_output_points
            )
            decoder_outputs = self.decoder(
                decoder_query,
                z=sequence_output,
                query_mask=extended_attention_mask,
                output_attentions=output_attentions,
            )
            logits = decoder_outputs.logits

which is still something that I cannot understand.