I am using PyTorch Lightning to find the optimal learning rate, but it raises the following error:
# Active-learning style loop: each cycle resamples the unlabeled pool,
# retrains a fresh model, evaluates it, then runs the Lightning LR finder.
for cycle in range(CYCLES):
    # Draw a fresh random subset of the unlabeled pool for this cycle.
    random.shuffle(unlabeled_set)
    subset = unlabeled_set[:SUBSET]

    # Model - create a new instance every cycle so the weights reset.
    with torch.cuda.device(CUDA_VISIBLE_DEVICES):
        resnet18 = resnet.ResNet18(num_classes=NO_CLASSES).cuda()
    models = resnet18
    torch.backends.cudnn.benchmark = True
    models = torch.nn.DataParallel(models, device_ids=[0])

    # Loss and optimizer (re)initialization alongside the model.
    # reduction='none' keeps per-sample losses — presumably aggregated or
    # used for sample selection inside train(); confirm against its body.
    criterion = nn.CrossEntropyLoss(reduction='none')
    optimizers = optim.SGD(models.parameters(), lr=LR, weight_decay=WDECAY)

    train(models, criterion, optimizers, dataloaders, Epochs)
    acc = test(models, dataloaders, mode='test')

    # --- Learning-rate finder -------------------------------------------
    logger_directory = 'logs/without_auto_lr'
    # NOTE(review): TensorBoardLogger's `version` is documented as int or
    # str; a float like 1.1 may produce an odd directory name — verify.
    version_of_log = 1.1
    logger = TensorBoardLogger(save_dir=logger_directory, version=version_of_log)
    trainer = pl.Trainer(gpus=1, max_epochs=Epochs, logger=logger,
                         auto_lr_find=False, val_check_interval=0.5)
    # FIXME: this is the source of the reported error. Trainer.tuner.lr_find()
    # requires a pl.LightningModule, but `models` is a plain torch.nn.Module
    # wrapped in DataParallel — it has neither the Lightning hooks lr_find()
    # needs nor an `hparams` attribute, so both the lr_find() call and the
    # `models.hparams.lr` assignment below fail. Wrap the network in a
    # LightningModule exposing a `lr` (or `learning_rate`) hparam and pass
    # that module (plus its dataloaders) to the tuner instead.
    lr_finder = trainer.tuner.lr_find(models, dataloaders)
    models.hparams.lr = lr_finder.suggestion()
    print(f'Auto-find model LR: {models.hparams.lr}')
    fig = lr_finder.plot(suggest=True)