Element 0 of tensors does not require grad and does not have a grad_fn

Following is the code for my model:

import torch
import numpy as np
import math


class DeCNN(torch.nn.Module):
    def __init__(self, gen_emb, domain_emb, num_classes=3, dropout=0.5):
        super(DeCNN, self).__init__()

        self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
        self.gen_embedding.weight = torch.nn.Parameter(torch.from_numpy(gen_emb), requires_grad=False)
        self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
        self.domain_embedding.weight = torch.nn.Parameter(torch.from_numpy(domain_emb), requires_grad=False)

        self.conv1 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 5, padding=2)
        self.conv2 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 3, padding=1)
        self.dropout = torch.nn.Dropout(dropout)

        self.conv3 = torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv4 = torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv5 = torch.nn.Conv1d(256, 256, 5, padding=2)
        self.linear_ae = torch.nn.Linear(256, num_classes)

    def forward(self, x, x_len):
        x_emb = torch.cat((self.gen_embedding(x), self.domain_embedding(x)), dim=2)
        x_emb = self.dropout(x_emb).transpose(1, 2)
        x_conv = torch.nn.functional.relu(torch.cat((self.conv1(x_emb), self.conv2(x_emb)), dim=1))
        x_conv = self.dropout(x_conv)
        x_conv = torch.nn.functional.relu(self.conv3(x_conv))
        x_conv = self.dropout(x_conv)
        x_conv = torch.nn.functional.relu(self.conv4(x_conv))
        x_conv = self.dropout(x_conv)
        x_conv = torch.nn.functional.relu(self.conv5(x_conv))
        x_conv = x_conv.transpose(1, 2)
        x_logit = self.linear_ae(x_conv)
        return x_logit
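
For context, here is a minimal usage sketch (the embedding matrices, vocabulary size, and shapes below are dummy values, purely illustrative):

import numpy as np
import torch

gen_emb = np.random.rand(100, 300).astype(np.float32)     # vocab_size x general-emb dim
domain_emb = np.random.rand(100, 100).astype(np.float32)  # vocab_size x domain-emb dim
model = DeCNN(gen_emb, domain_emb, num_classes=3)

x = torch.randint(0, 100, (4, 20))   # batch of 4 token-index sequences of length 20
x_len = torch.full((4,), 20)         # sequence lengths (not used inside forward)
print(model(x, x_len).shape)         # -> torch.Size([4, 20, 3])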

Following is the train function:

from tqdm import tqdm


def train_epoch(model, training_data, optimizer, criterion, device):
    model.train()
    epoch_loss = 0
    epoch_acc = 0
    epoch_precision = 0
    epoch_recall = 0
    epoch_f1 = 0
    for batch in tqdm(training_data, mininterval=2, desc='  - (Training)   ', leave=False):
        # print(batch)
        sequences, targets, sequence_lengths = batch
        sequences = sequences.to(device)
        sequence_lengths = sequence_lengths.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()
        pred = model(sequences, sequence_lengths)
        pred = torch.nn.functional.log_softmax(pred.data)
        pred = pred.permute(0, 2, 1)
        print(pred.size())
        loss = criterion(pred, targets)
        print(loss)
        precision, recall, f1 = get_performance(pred, targets, sequence_lengths)
        loss.backward()
        optimizer.step()

        epoch_loss += float(loss.item())
        epoch_precision += float(precision)
        epoch_recall += float(recall)
        epoch_f1 += float(f1)

    return epoch_loss / len(training_data), epoch_precision / len(
        training_data), epoch_recall / len(training_data), epoch_f1 / len(training_data)
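
If it helps, here is a small shape sketch (assuming the criterion is NLLLoss, which the NllLoss2DBackward grad_fn below suggests): after the permute the predictions are (batch, num_classes, seq_len) and the targets are (batch, seq_len):

import torch

batch, seq_len, num_classes = 4, 10, 3
pred = torch.randn(batch, seq_len, num_classes).permute(0, 2, 1)  # -> (4, 3, 10)
targets = torch.randint(0, num_classes, (batch, seq_len))         # -> (4, 10)
log_probs = torch.nn.functional.log_softmax(pred, dim=1)          # class dim is dim 1
print(torch.nn.NLLLoss()(log_probs, targets))                     # scalar loss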

I get the following error:

RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

The error does not occur if I delete the following line:

pred = torch.nn.functional.log_softmax(pred.data)

Another observation: with that line in place the loss comes out as a plain tensor (no grad_fn), but without it I get tensor(-0.0182, grad_fn=<NllLoss2DBackward>), which is what I would ideally want.
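
For what it is worth, here is a small standalone sketch (not from my training code) of why .data makes this difference: .data returns a tensor that is detached from the autograd graph, so anything computed from it has no grad_fn:

import torch

x = torch.randn(3, requires_grad=True)
print(torch.nn.functional.log_softmax(x, dim=0).grad_fn)       # <LogSoftmaxBackward ...>
print(torch.nn.functional.log_softmax(x.data, dim=0).grad_fn)  # None -> backward() would fail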

However, removing that line leads to another problem: the loss can be negative.
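
A toy example of that negative loss (again assuming the criterion is NLLLoss): NLLLoss expects log-probabilities, so feeding it raw logits simply negates the target's score and can go below zero:

import torch

criterion = torch.nn.NLLLoss()
logits = torch.tensor([[2.0, -1.0, 0.5]])  # raw scores, not log-probabilities
target = torch.tensor([0])
print(criterion(logits, target))           # tensor(-2.) - "loss" is negative

log_probs = torch.nn.functional.log_softmax(logits, dim=1)
print(criterion(log_probs, target))        # positive, properly normalized loss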
@ptrblck Could you help me figure out what I might be missing?