Hi, I'm getting the warning below during training and I'm not sure what I need to change. Can anyone help, please?
UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
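If I understand the warning right, it just wants optimizer.step() to run before scheduler.step() in each epoch, so the overall order would be something like this (simplified sketch, not my actual code):

for epoch in range(num_epochs):
    for inputs, labels in train_dl:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()   # parameter update first
    scheduler.step()       # LR scheduler step once per epoch, after optimizer.step()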
Here is my training code:

import os
import sys
import time

import torch
import torch.nn as nn
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
model = train_model(model, train_dl, valid_dl, criterion, optimizer, scheduler, num_epochs=100)
def train_model(model, train_dl, valid_dl, criterion, optimizer,
                scheduler, num_epochs=10):
    if not os.path.exists('models'):
        os.mkdir('models')
    since = time.time()
    best_model_wts = model.state_dict()
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        print('-' * 10)
        ## train and validate
        model = train_one_epoch(model, train_dl, criterion, optimizer, scheduler)
        val_acc = validate_model(model, valid_dl, criterion)
        # deep copy the model
        if val_acc > best_acc:
            best_acc = val_acc
            best_model_wts = model.state_dict().copy()
            torch.save(best_model_wts, "./models/epoch-{}-acc-{:.5f}.pth".format(epoch, best_acc))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def train_one_epoch(model, dataloader, criterion, optimizer, scheduler):
    if scheduler is not None:
        scheduler.step()
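        # (as far as I can tell, this is my only scheduler.step() call, and it runs before any optimizer.step() in the loop below)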
    model.train(True)
    steps = len(dataloader.dataset) // dataloader.batch_size
    running_loss = 0.0
    running_corrects = 0
    for i, (inputs, labels) in enumerate(dataloader):
        inputs, labels = to_var(inputs), to_var(labels)
        optimizer.zero_grad()
        # forward
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        loss = criterion(outputs, labels)
        # backward
        loss.backward()
        # update parameters
        optimizer.step()
        # statistics
        running_loss = (running_loss * i + loss.item()) / (i + 1)
        running_corrects += torch.sum(preds == labels).item()
        # report
        sys.stdout.flush()
        sys.stdout.write("\r Step %d/%d | Loss: %.5f" % (i, steps, loss.item()))
    epoch_loss = running_loss
    epoch_acc = running_corrects / len(dataloader.dataset)
    sys.stdout.flush()
    print('\r{} Loss: {:.5f} Acc: {:.5f}'.format(' train', epoch_loss, epoch_acc))
    return model
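My guess is that I should take the scheduler.step() out of the top of train_one_epoch and step the scheduler once per epoch in train_model instead, after the training pass, roughly like this. Is that the right fix, or does it change what StepLR(step_size=7) does?

    # in train_model, after removing scheduler.step() from train_one_epoch:
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        print('-' * 10)
        model = train_one_epoch(model, train_dl, criterion, optimizer, scheduler)
        if scheduler is not None:
            scheduler.step()  # now runs after all the optimizer.step() calls of this epoch
        val_acc = validate_model(model, valid_dl, criterion)
        # (rest of the epoch loop unchanged)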
Thank you