Hello,
Is it correct to save the logits and probabilities of the best model like the following?
# Main training loop: train one epoch, validate, track best prec@1, checkpoint.
#
# NOTE(review): `my_logits` / `own_proba` come from the *current* epoch's
# validation pass, so they describe the best model only on epochs where
# `is_best` is True — presumably save_checkpoint copies the checkpoint to a
# "best" file in that case (confirm). Also note the checkpoint file grows by
# the full validation-set logits/probabilities every epoch.
for epoch in range(args.start_epoch, args.epochs):
    if args.distributed:
        # Required so DistributedSampler reshuffles differently each epoch.
        train_sampler.set_epoch(epoch)
    adjust_learning_rate(optimizer, epoch)

    # Train for one epoch.
    prec_train, loss_train = train(train_loader, model, criterion, optimizer, epoch)

    # Evaluate on the validation set.
    prec1, loss_val, my_logits, own_proba = validate(val_loader, model, criterion)

    # Remember best prec@1 and save checkpoint.
    is_best = prec1 > best_prec1
    best_prec1 = max(prec1, best_prec1)
    save_checkpoint({
        'epoch': epoch + 1,
        'arch': args.arch,
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
        'optimizer': optimizer.state_dict(),
        'logits': my_logits,
        'proba': own_proba,
    }, is_best)
Thank you