`RuntimeError: element 0 of variables does not require grad and does not have a grad_fn`

Here is my base class:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ImageClassificationBase(nn.Module):

    def training_step(self, batch):
        images, labels = batch
        out = self(images)              # Generate predictions
        loss = F.nll_loss(out, labels)  # Calculate loss
        return loss

    def validation_step(self, batch):
        images, labels = batch
        out = self(images)              # Generate predictions
        loss = F.nll_loss(out, labels)  # Calculate loss
        acc = accuracy(out, labels)     # Calculate accuracy
        return {'val_loss': loss.detach(), 'val_acc': acc}

    def validation_epoch_end(self, outputs):
        batch_losses = [x['val_loss'] for x in outputs]
        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
        batch_accs = [x['val_acc'] for x in outputs]
        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
```

Then the model:

```python
class Net(ImageClassificationBase):
    def __init__(self):
        super(Net, self).__init__()

        self.features = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=16,
                                                kernel_size=3, stride=1,
                                                padding=1),
                                      nn.ReLU(inplace=True),
                                      nn.MaxPool2d(2, 2),
                                      nn.Conv2d(in_channels=16, out_channels=24,
                                                kernel_size=3, stride=1,
                                                padding=1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv2d(in_channels=24, out_channels=48,
                                                kernel_size=3, stride=1,
                                                padding=1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv2d(in_channels=48, out_channels=96,
                                                kernel_size=3, stride=1,
                                                padding=1),
                                      nn.MaxPool2d(2, 2),
                                      nn.ReLU(inplace=True))

        self.avgpool = nn.AdaptiveAvgPool2d((56, 56))

        self.classifier = nn.Sequential(nn.Linear(56 * 56 * 96, 256),
                                        nn.ReLU(inplace=True),
                                        nn.Dropout(p=0.5),
                                        nn.Linear(256, 512),
                                        nn.ReLU(inplace=True),
                                        nn.Linear(512, 17),
                                        nn.LogSoftmax(dim=1))

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(-1, 56 * 56 * 96)
        x = self.classifier(x)
        return x
```

Right now every parameter has gradients enabled:

```python
for param in model.parameters():
    param.requires_grad = True
```
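
What I actually want is the opposite: freeze part of the network. My understanding is that at least one parameter feeding into the loss has to keep `requires_grad = True`, and that the optimizer should then only be given the trainable parameters. A minimal sketch of what I mean (assuming, purely as an example, that only the classifier head stays trainable):

```python
# Freeze the convolutional feature extractor, keep the classifier trainable.
for param in model.features.parameters():
    param.requires_grad = False

# Pass only the parameters that still require gradients to the optimizer,
# so it doesn't track the frozen ones.
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable_params, lr=0.01)
```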

```python
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))

@torch.no_grad()
def evaluate(model, val_loader):
    model.eval()
    outputs = [model.validation_step(batch) for batch in val_loader]
    return model.validation_epoch_end(outputs)

def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.Adam):
    history = []
    optimizer = opt_func(model.parameters(), lr)
    for epoch in range(epochs):

        model.train()
        train_losses = []
        for batch in train_loader:
            loss = model.training_step(batch)
            train_losses.append(loss.detach())  # detach so stored losses don't keep the autograd graph alive
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        result = evaluate(model, val_loader)
        result['train_loss'] = torch.stack(train_losses).mean().item()
        model.epoch_end(epoch, result)
        history.append(result)

    return history

# sets the number of epochs
num_epochs = 100
opt_func = torch.optim.Adam
lr = 0.01

# fitting the model on training data and recording the result after each epoch
history = fit(num_epochs, lr, model, train_load, valid_load, opt_func)
```

After a few epochs the validation loss keeps going up while the training loss decreases, and accuracy is 20-30% at best. I want to turn `requires_grad` off for some of the parameters, but whenever I do I keep running into the `RuntimeError: element 0 of variables does not require grad and does not have a grad_fn` from the title.
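
For what it's worth, I can reproduce the error with a minimal sketch like the one below (freezing every parameter, which I suspect is what effectively happens in my attempt; `train_load` is the training DataLoader from above):

```python
# If *every* parameter is frozen, the model output has no grad_fn,
# so there is nothing to backpropagate through and .backward() raises
# the RuntimeError from the title.
for param in model.parameters():
    param.requires_grad = False

images, labels = next(iter(train_load))
out = model(images)
loss = F.nll_loss(out, labels)
loss.backward()  # RuntimeError: element 0 of variables does not require grad ...
```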