def _backward_step_unrolled(self, input_train_source, class_label_source_train, domain_label_source_train, input_train_target, class_label_target_train, domain_label_target_train, input_valid_source, target_valid_source, input_valid_target, target_valid_target, eta, network_optimizer, feature_extractor_optimizer, head_g_optimizer, init_channels):
    """Second-order ("unrolled") architecture gradient step, DARTS-style.

    Builds one-step-unrolled copies of the supernet, feature extractor and
    classification head, evaluates the validation loss on them, and writes
    the resulting architecture gradient — corrected by a finite-difference
    Hessian-vector product — into the ``.grad`` fields of
    ``self.model.arch_parameters()``.

    Args:
        input_train_source / input_train_target: training mini-batches for
            the source and target domains.
        class_label_*_train / domain_label_*_train: class / domain labels
            for the training batches (forwarded to the unrolled builders).
        input_valid_* / target_valid_*: validation batches and labels used
            to compute the unrolled loss.
        eta: learning rate of the virtual (unrolled) SGD step; also scales
            the second-order correction term.
        network_optimizer / feature_extractor_optimizer / head_g_optimizer:
            optimizers whose state the unrolled step mimics.
        init_channels: channel count forwarded to the unrolled builder.

    Returns:
        None. Side effect only: populates ``.grad`` of the architecture
        parameters of ``self.model``.
    """
    # Virtual one-step SGD update of the weights: w' = w - eta * grad_w L_train.
    unrolled_model = self._compute_unrolled_model(
        input_train_source, input_train_target,
        domain_label_source_train, domain_label_target_train,
        eta, network_optimizer)
    unrolled_feature_extractor, unrolled_head_g = self._compute_unrolled_model1(
        input_train_source, input_train_target,
        class_label_source_train, class_label_target_train,
        unrolled_model, eta,
        feature_extractor_optimizer, head_g_optimizer, init_channels)

    # Validation loss evaluated at the unrolled weights w'.
    unrolled_loss = cal_loss(
        unrolled_model, unrolled_feature_extractor, unrolled_head_g,
        input_valid_source, input_valid_target,
        target_valid_source, target_valid_target)
    unrolled_loss.backward()

    # dL_val/dalpha (at w') and dL_val/dw' for the Hessian-vector correction.
    dalpha = [v.grad for v in unrolled_model.arch_parameters()]
    # FIX: stray markdown '**...**' markers surrounded this line in the
    # original source, making it a SyntaxError.
    vector = [v.grad.data for v in unrolled_model.parameters()]
    implicit_grads = self._hessian_vector_product(
        vector, input_train_source, input_train_target,
        domain_label_source_train, domain_label_target_train)

    # dalpha <- dalpha - eta * implicit_grads (second-order DARTS correction).
    for g, ig in zip(dalpha, implicit_grads):
        # FIX: Tensor.sub_(scalar, tensor) is the deprecated (now removed)
        # overload; the explicit `alpha=` keyword form is the supported API.
        g.data.sub_(ig.data, alpha=eta)

    # Copy the corrected gradient onto the real model's architecture params.
    for v, g in zip(self.model.arch_parameters(), dalpha):
        if v.grad is None:
            # Variable(...) is a legacy no-op wrapper in modern PyTorch;
            # kept as-is to preserve the file's existing behavior.
            v.grad = Variable(g.data)
        else:
            v.grad.data.copy_(g.data)