def U_closure(self):
    """Optimizer closure for U_model.

    Zeroes U_model's optimizer gradients, re-evaluates the shared cost
    function, backpropagates (retaining the graph so a second backward
    pass in the same epoch is possible), and returns the loss tensor.
    """
    optimizer = self.optimizer_U_model
    optimizer.zero_grad()
    current_loss = self.cost_function()
    current_loss.backward(retain_graph=True)
    return current_loss
def E_closure(self):
    """Optimizer closure for E_model.

    Zeroes E_model's optimizer gradients, re-evaluates the shared cost
    function, backpropagates (retaining the graph so a second backward
    pass in the same epoch is possible), and returns the loss tensor.
    """
    optimizer = self.optimizer_E_model
    optimizer.zero_grad()
    current_loss = self.cost_function()
    current_loss.backward(retain_graph=True)
    return current_loss
# Training function
def train(self, epochs, opt_func=torch.optim.Adam):
    """Train U_model and E_model jointly for *epochs* epochs.

    Builds one optimizer per model, then each epoch steps both
    optimizers with their respective closures (U_closure / E_closure),
    which re-evaluate the shared cost function and backpropagate.

    Args:
        epochs: number of training epochs to run.
        opt_func: optimizer constructor applied to each model's
            parameters (defaults to ``torch.optim.Adam``).
    """
    # NOTE(review): anomaly detection is a debugging aid and slows
    # training considerably; consider removing once the graph is stable.
    torch.autograd.set_detect_anomaly(True)
    self.optimizer_U_model = opt_func(self.U_model.parameters())
    self.optimizer_E_model = opt_func(self.E_model.parameters())
    for _ in range(epochs):
        # step(closure) invokes the closure, which zeroes grads,
        # recomputes the loss, and backpropagates before the update.
        self.optimizer_U_model.step(self.U_closure)
        # Fix: the original line was missing its closing parenthesis,
        # which made the whole function a syntax error.
        self.optimizer_E_model.step(self.E_closure)
There are two models, U_model and E_model, which both need to be trained in
each epoch. But when I pass a closure function to each optimizer, the loss is
computed twice per epoch, and both models share the same loss. Should I use
only one optimizer instead? And if I use a single optimizer, can it take the
parameters of both models?