def fit(epochs, train_set, val_dl, model, lr):
    """Train *model* with Adam and a OneCycleLR schedule.

    Args:
        epochs: number of passes over ``train_set``.
        train_set: iterable of training batches (a DataLoader, presumably —
            ``len(train_set)`` is used as steps-per-epoch; confirm).
        val_dl: validation DataLoader passed to ``validation_combine_loss``.
        model: the torch module being trained (mutated in place).
        lr: peak learning rate for Adam / OneCycleLR.

    Returns:
        list of per-epoch result dicts (validation metrics plus the
        learning rates used and the mean training loss).
    """
    optimizer = torch.optim.Adam(model.parameters(), lr)  # defining the optimizer
    # One scheduler step per batch, so steps_per_epoch = number of batches.
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, lr, epochs=epochs, steps_per_epoch=len(train_set))

    def get_lr():
        # Current learning rate of the first parameter group.
        for param_group in optimizer.param_groups:
            return param_group["lr"]

    history = []
    for epoch in range(epochs):
        model.train()
        train_loss = []
        lrs = []
        for batch in train_set:
            loss = training_step(batch, model)
            # detach: storing the raw loss would keep every batch's autograd
            # graph alive for the whole epoch (memory leak).
            train_loss.append(loss.detach())
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 0.1)  # gradient clipping
            optimizer.step()
            optimizer.zero_grad()
            lrs.append(get_lr())
            scheduler.step()  # OneCycleLR advances once per batch
        # validation
        results = validation_combine_loss(val_dl, model)
        results["lrs"] = lrs
        results["train loss"] = torch.stack(train_loss).mean().item()
        epoch_end(results, epoch)
        history.append(results)
    return history
def epoch_end(result, epoch):
    """Print a one-line summary of an epoch's metrics.

    Args:
        result: dict with keys ``"lrs"`` (list of learning rates),
            ``"Loss"``, ``"Accuracy"``, and ``"train loss"``.
        epoch: zero-based epoch index.
    """
    # The stray "####" markers broke both the format string and the
    # argument expression; they are removed here.
    print("epoch: [{}], last_lr {:.5f}, Epoch_loss:{:.4f}, Epoch_accuracy {:.4f}, train_loss {:.4f}"
          .format(epoch, result["lrs"][-1], result["Loss"], result["Accuracy"], result["train loss"]))
# NOTE: The areas that needed help were denoted with "####"; they have been fixed above.