Hi,
I’m trying to save and load optimizer params as we do for a model, but although i tried in many different ways, still i couldn’t work it. Here is the code:
best_model_wts = copy.deepcopy(model.state_dict())
best_optim_pars = copy.deepcopy(optimizer.state_dict())
for epoch in range(num_epochs):
for phase in ['train', 'val']:
if phase == 'train':
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
if epoch < 20:
error_sigma = 2.0
elif 19 < epoch < 40:
error_sigma = 1.5
if epoch == 20:
model.load_state_dict(best_model_wts)
optimizer.load_state_dict(best_optim_pars)
for inputs, labels, _ in data_loaders[phase]:
batch_size = len(labels)
converted_inputs = dictionary_to_tensor(inputs, batch_size)
converted_inputs = numpy2tensor(converted_inputs)
labels = labels.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outs = model(converted_inputs)
loss = criterion(outs, labels)
target_err = torch.sign(loss - error_sigma)
_, preds_all = torch.max(outs, 1)
if phase == 'train':
loss.backward()
optimizer.step(target_err)
running_loss += loss.item() * converted_inputs.size(1)
running_corrects += torch.sum(preds_all == labels.data)
if phase == 'train':
scheduler.step()
data_size = len(data_loaders[phase].dataset)
epoch_loss = running_loss / data_size
epoch_acc = running_corrects.double() / data_size
if phase == 'val':
val_acc_history.append(epoch_acc)
if epoch_acc > best_acc:
best_acc = epoch_acc
best_epoch = epoch
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
best_optim_pars = copy.deepcopy(optimizer.state_dict()) #this line gives error
Then, I get the below error:
raise RuntimeError("Only Tensors created explicitly by the user "
RuntimeError: Only Tensors created explicitly by the user (graph leaves) support the deepcopy protocol at the moment
Any idea about how to solve it? Thank you very much.