Element 0 of tensors does not require grad and does not have a grad_fn (RuntimeError)

I have not explicitly set requires_grad to False on any tensor, so I am not sure why the error says there is no grad_fn. Refer to the linked repository for all the code files: Code Link

main.py is as follows:

import torch
from DataLoader import *
from model import *
from torchvision import transforms
from torch.utils.data import DataLoader
import os

#HYPER-PARAMETERS
batchsize = 20
lr = 1e-4

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(device)

# Data transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transformer = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop((224), scale=(0.5, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize
])
val_transformer = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize
])

# Datasets and loaders
train_set = CovidCTDataset(root_dir=os.getcwd(), type='train', transform=train_transformer)
val_set = CovidCTDataset(root_dir=os.getcwd(), type='val', transform=val_transformer)
test_set = CovidCTDataset(root_dir=os.getcwd(), type='test', transform=val_transformer)

print("Size of Train Set {}".format(train_set.__len__()))
print("Size of Val Set {}".format(val_set.__len__()))
print("Size of Test Set {}".format(test_set.__len__()))

train_loader = DataLoader(train_set, batch_size=batchsize, drop_last=False, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batchsize, drop_last=False, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batchsize, drop_last=False, shuffle=False)

# Model, loss, and optimizer
model_basic_CNN = basic_CNN().cuda()
modelname = 'basic_CNN'
criterion = torch.nn.BCEWithLogitsLoss()
optim = torch.optim.Adam(model_basic_CNN.parameters(), lr=lr)

# Training loop
train_loss = 0
train_correct = 0
for batch_index, batch_samples in enumerate(train_loader):
    data, target = batch_samples['img'].to(device), batch_samples['label'].to(device)
    optim.zero_grad()
    output = model_basic_CNN(data)
    output = torch.argmax(output, dim=1)
    loss = criterion(output.float(), target.float())
    train_loss += loss
    loss.backward()
    optim.step()

    pred = output.argmax(dim=1, keepdim=True)
    train_correct += pred.eq(target.float().view_as(pred)).sum().item()

    # Display progress and write to tensorboard
    if batch_index % bs == 0:
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tTrain Loss: {:.6f}'.format(
            epoch, batch_index, len(train_loader),
            100.0 * batch_index / len(train_loader), loss.item() / bs))

print('\nTrain set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    train_loss / len(train_loader.dataset), train_correct, len(train_loader.dataset),
    100.0 * train_correct / len(train_loader.dataset)))

f = open('model_result/{}.txt'.format(modelname), 'a+')
f.write('\nTrain set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    train_loss / len(train_loader.dataset), train_correct, len(train_loader.dataset),
    100.0 * train_correct / len(train_loader.dataset)))
f.write('\n')
f.close()

The output of torch.argmax has no grad_fn (argmax is not differentiable), so you are breaking the computation graph in:

output = torch.argmax(output, dim=1)

Pass the raw model output directly to the criterion instead, and use argmax only to compute predictions for the accuracy metric.
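
For illustration, here is a minimal sketch of the corrected training step. It assumes the model produces a single output logit per sample, so the existing BCEWithLogitsLoss can consume the raw output directly; if basic_CNN actually emits one logit per class, torch.nn.CrossEntropyLoss with integer targets would be the usual choice instead. The names model_basic_CNN, criterion, optim, train_loader, train_loss, and train_correct are taken from the question.

for batch_index, batch_samples in enumerate(train_loader):
    data = batch_samples['img'].to(device)
    target = batch_samples['label'].to(device).float()

    optim.zero_grad()
    logits = model_basic_CNN(data).squeeze(1)  # raw output, still attached to the graph

    loss = criterion(logits, target)           # BCEWithLogitsLoss on the logits, not on argmax
    loss.backward()                            # works: logits has a grad_fn
    optim.step()

    train_loss += loss.item()                  # .item() avoids accumulating the autograd graph

    with torch.no_grad():                      # thresholding is used only for the accuracy metric
        pred = (torch.sigmoid(logits) > 0.5).long()
        train_correct += pred.eq(target.long()).sum().item()

Because the loss is now computed on a tensor that still requires grad, loss.backward() no longer raises the RuntimeError from the title.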

Thanks for the solution, the error is solved.