I need to optimize my own loss with a PyTorch optimizer, but I ran into the following error:
Traceback (most recent call last):
  File "train.py", line 138, in <module>
    fire.Fire()
  File "D:\Anaconda3\lib\site-packages\fire\core.py", line 141, in Fire
    component_trace = _Fire(component, args, parsed_flag_args, context, name)
  File "D:\Anaconda3\lib\site-packages\fire\core.py", line 466, in _Fire
    component, remaining_args = _CallAndUpdateTrace(
  File "D:\Anaconda3\lib\site-packages\fire\core.py", line 681, in _CallAndUpdateTrace
    component = fn(*varargs, **kwargs)
  File "train.py", line 73, in train
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
  File "D:\python\monoSAR_rcnn\trainer.py", line 63, in __init__
    self.optimizer = self.faster_rcnn.get_optimizer()
  File "D:\python\monoSAR_rcnn\model\faster_rcnn.py", line 307, in get_optimizer
    self.optimizer = t.optim.SGD([params, {'params': self.awl.parameters(), 'weight_decay': 0}], momentum=0.9)
  File "D:\Anaconda3\lib\site-packages\torch\optim\sgd.py", line 68, in __init__
    super(SGD, self).__init__(params, defaults)
  File "D:\Anaconda3\lib\site-packages\torch\optim\optimizer.py", line 52, in __init__
    self.add_param_group(param_group)
  File "D:\Anaconda3\lib\site-packages\torch\optim\optimizer.py", line 230, in add_param_group
    raise TypeError("optimizer can only optimize Tensors, "
TypeError: optimizer can only optimize Tensors, but one of the params is list
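As far as I understand, torch.optim.SGD accepts either a flat iterable of Tensors or an iterable of dicts, one dict per parameter group; if the first element is not a dict, the whole argument is treated as a single flat list of Tensors, so a list anywhere inside it triggers exactly this TypeError. A minimal sketch of the two valid forms (made-up tensors, just for illustration):

import torch

w = torch.randn(3, requires_grad=True)
b = torch.randn(3, requires_grad=True)

# form 1: a flat iterable of Tensors
opt1 = torch.optim.SGD([w, b], lr=0.01, momentum=0.9)

# form 2: an iterable of dicts, one per parameter group; a group
# falls back to the top-level defaults for anything it does not set
opt2 = torch.optim.SGD(
    [{'params': [w], 'lr': 0.02},
     {'params': [b], 'weight_decay': 0}],
    lr=0.01, momentum=0.9,
)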
This is my get_optimizer method:
def get_optimizer(self):
    """
    Return the optimizer. It can be overwritten if you want to
    specify a special optimizer.
    """
    lr = opt.lr
    params = []
    for key, value in dict(self.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value], 'lr': lr * 2, 'weight_decay': 0}]
            else:
                params += [{'params': [value], 'lr': lr, 'weight_decay': opt.weight_decay}]
    print(type(params))  # <class 'list'>
    if opt.use_adam:
        self.optimizer = t.optim.Adam([params, {'params': self.awl.parameters(), 'weight_decay': 0}])
    else:
        self.optimizer = t.optim.SGD([params, {'params': self.awl.parameters(), 'weight_decay': 0}], momentum=0.9)
    return self.optimizer
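Since params is already a list of parameter-group dicts, I suspect the problem is that [params, {...}] nests that list inside another list instead of concatenating the two. A sketch of what I think the call should look like (the awl group sets no lr of its own, so SGD would also need a base lr):

self.optimizer = t.optim.SGD(
    # suspected fix: concatenate, so every element of the outer list
    # is a dict describing one parameter group
    params + [{'params': self.awl.parameters(), 'weight_decay': 0}],
    lr=opt.lr,      # base lr for groups that do not set their own
    momentum=0.9,
)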
This is my loss function (self.awl = AutomaticWeightedLoss(2)):
import torch
from torch import nn

class AutomaticWeightedLoss(nn.Module):
    """Automatically weighted multi-task loss.

    Params:
        num: int, the number of losses
        x: the multi-task losses

    Examples:
        loss1 = 1
        loss2 = 2
        awl = AutomaticWeightedLoss(2)
        loss_sum = awl(loss1, loss2)
    """

    def __init__(self, num=2):
        super(AutomaticWeightedLoss, self).__init__()
        params = torch.ones(num, requires_grad=True)
        self.params = torch.nn.Parameter(params)

    def forward(self, *x):
        loss_sum = 0
        for i, loss in enumerate(x):
            loss_sum += 0.5 / (self.params[i] ** 2) * loss + torch.log(1 + self.params[i] ** 2)
        return loss_sum
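For what it's worth, awl.parameters() does yield the one learnable tensor, so the group dict by itself looks like a valid parameter group (a quick sanity check, output shown as a comment):

awl = AutomaticWeightedLoss(2)
print(list(awl.parameters()))
# [Parameter containing:
# tensor([1., 1.], requires_grad=True)]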
How can I solve this problem?