Cannot specify per-layer learning rates

I have a network that uses a custom class, my_conv. Inside that class there is a convolution, and I want to set its learning rate to 0.01 while the other layers use a learning rate of 0.0001. I tried the code below, but it raises this error:

param_group['params'] = list(params)
TypeError: 'NoneType' object is not iterable

How can I set the learning rate for a specific layer?

This is my code:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

class my_conv(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(my_conv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, groups=1, bias=False)

    def forward(self, x):
        x = self.conv(x)
        return x

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.customer_conv = my_conv(1,32)
        self.traditional_conv = nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1, bias=False)
    def forward(self, x):
        x = self.customer_conv(x)
        x = self.traditional_conv(x)
        return x

if __name__=="__main__":
    net = Net()
    optimizer = optim.Adam(
        [{'params': net.customer_conv.conv.weight, 'lr': 0.01},
         # note: conv was created with bias=False, so conv.bias is None, which is what raises the TypeError above
         {'params': net.customer_conv.conv.bias, 'lr': 0.01}],
        lr=0.001, weight_decay=0.0001)

    batch_size, channel, height, width = 1, 1, 4, 4
    x = torch.randint(1, 100, (batch_size,channel, height, width)).float()
    out = net(Variable(x))
    print (out.size())
    out.backward()
    optimizer.step()
    print("weight", net.customer_conv.conv.weight.data.numpy(), "grad", net.customer_conv.conv.weight.grad.data.numpy())
    print("bias", net.customer_conv.conv.bias.data.numpy(), "grad", net.customer_conv.conv.grad.data.numpy())

# SET CONDITIONAL LEARNING RATES IF NECESSARY
model_parameters = []
for n, p in model.named_parameters():
    if n.find('drn_model') != -1:
        # use a different value here for the layers that need their own learning rate
        model_parameters.append({'params': p, 'lr': LR})
    else:
        model_parameters.append({'params': p, 'lr': LR})

optimizer = torch.optim.SGD(model_parameters, lr=LR, weight_decay=WEIGHT_DECAY)
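
If it helps, you can sanity-check what ended up in each group by printing optimizer.param_groups after the optimizer is built. A minimal sketch, using the optimizer constructed above:

# each param group is a dict that carries its own 'lr'
for i, group in enumerate(optimizer.param_groups):
    n = sum(p.numel() for p in group['params'])
    print("group %d: lr=%s, %d parameters" % (i, group['lr'], n))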

Thanks. Could you apply it to my code?

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

class my_conv(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(my_conv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, groups=1, bias=False)

    def forward(self, x):
        x = self.conv(x)
        return x

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.customer_conv = my_conv(1,32)
        self.traditional_conv = nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1, bias=False)
    def forward(self, x):
        x = self.customer_conv(x)
        x = self.traditional_conv(x)
        return x

if __name__=="__main__":
    net = Net()
    model_parameters = []
    for n, p in net.named_parameters():
        if n.find('customer') != -1:
            model_parameters.append({'params': p, 'lr': 0.01})
        else:
            model_parameters.append({'params': p, 'lr': 0.0001})

    optimizer = torch.optim.Adam(model_parameters, lr=0.001, weight_decay=0.0001)


    batch_size, channel, height, width = 1, 1, 4, 4
    x = torch.randint(1, 100, (batch_size,channel, height, width)).float()
    out = net(Variable(x))
    print (out.size())
    out.backward()
    optimizer.step()
    print("weight", net.customer_conv.conv.weight.data.numpy(), "grad", net.customer_conv.conv.weight.grad.data.numpy())
    print("bias", net.customer_conv.conv.bias.data.numpy(), "grad", net.customer_conv.conv.grad.data.numpy())

It doesn't work. Please check it.

@JuanFMontesinos: I got this error:

  File "/usr/local/lib/python2.7/dist-packages/torch/optim/adam.py", line 29, in __init__
    super(Adam, self).__init__(params, defaults)
  File "/usr/local/lib/python2.7/dist-packages/torch/optim/optimizer.py", line 39, in __init__
    self.add_param_group(param_group)
  File "/usr/local/lib/python2.7/dist-packages/torch/optim/optimizer.py", line 169, in add_param_group
    raise ValueError("some parameters appear in more than one parameter group")
ValueError: some parameters appear in more than one parameter group

You have an if-else statement, so that error shouldn't be possible unless you are running the loop several times, or the loop is inconsistent and appends the same parameter to the list more than once.

By definition,

for n, p in model.named_parameters() iterates over the model's parameters only once. If a parameter is being appended more than once, the mistake is somewhere in there.
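
For reference, a minimal sketch (reusing the Net and the learning rates from the posts above) that collects the parameters into two plain lists and passes exactly two groups to Adam, so no parameter can appear in more than one group:

customer_params = []
other_params = []
for n, p in net.named_parameters():
    if 'customer' in n:
        customer_params.append(p)   # the custom conv trains with lr=0.01
    else:
        other_params.append(p)      # every other layer trains with lr=0.0001

optimizer = optim.Adam(
    [{'params': customer_params, 'lr': 0.01},
     {'params': other_params, 'lr': 0.0001}],
    lr=0.001, weight_decay=0.0001)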