How can I update weights of a certain variable?

Hi, I have a custom loss function and I wish to update the weights of the centers.
The other problem is that when I write loss, centers = center_loss() I get an error saying the loss function is not iterable.

See the following code:

import numpy as np
import torch

class center_loss(torch.nn.Module):
    def __init__(self,num_class=10,num_dim=2,num_batch=100):
        super(center_loss,self).__init__()
        self.num_class = num_class
        self.num_dim = num_dim
        self.num_batch = num_batch
        self.centers = torch.nn.Parameter(torch.randn(num_class,num_dim).cuda())
        
        
    def forward(self,features,labels):
        index = labels.type(torch.cuda.FloatTensor)  # cast the labels to float
        
        features = features.type(torch.cuda.FloatTensor)
        
        #calculating new centers
        
        var4 = self.centers.unsqueeze(1)
        var5 = var4.transpose(0,1)
        var6 = var5.repeat(10,1,1)
        
        
        
        labels = one_hot_embedding(index,10)
        embed2 = labels.unsqueeze(1).view(self.num_batch,10,1)
        embed3 = embed2.repeat(1,1,2)
        
        
        embed3 = embed3.type(torch.FloatTensor)
        var6 = var6.type(torch.FloatTensor)
        a = torch.mm(var6[:,:,0],embed3[:,:,0].t())
        b = torch.mm(var6[:,:,1],embed3[:,:,1].t())
        
        
        aa = a.t()
        bb = b.t()
        
        c = torch.zeros(self.num_batch,2)
        c[:,0] = aa[:,1]
        c[:,1] = bb[:,1]
        
        diff = c - features.type(torch.FloatTensor)
        imm = torch.zeros(self.num_class,2)
        imm.index_add_(0, index.long().cpu(), diff)  # accumulate per-class feature differences
        
        unique, counts = np.unique(index.cpu().numpy(), return_counts=True)
        
        s = torch.Tensor(counts).view(self.num_class,1)  # samples per class in this batch (assumes every class appears)
        
        centers_update = self.centers - (imm/s)*0.5
        
        
        # calculating loss 
        
        x_2 = torch.pow(features,2)
        c_2 = torch.pow(self.centers,2)
        x_2_s = torch.sum(x_2,1)
        x_2_s = x_2_s.view(self.num_batch,1)
        c_2_s = torch.sum(c_2,1)
        c_2_s = c_2_s.view(self.num_class,1)
        x_2_s_e = x_2_s.repeat(1,self.num_class)
        c_2_s_e = c_2_s.t().repeat(self.num_batch,1)
        xc = 2*torch.mm(features,self.centers.t())
        
        # we want only positive values, 
        dis = x_2_s_e + c_2_s_e - xc
        di = dis.type(torch.FloatTensor)
        di = torch.sqrt(torch.clamp(di, min=0))
        
        # since center loss focuses on intra distances, we are not concerened about the distance that we calculated from
        #other centers, we will use the other centers to increase inter loss.
        
        bl = labels.type(torch.ByteTensor)
        dii = torch.masked_select(di,bl)
        center_loss = torch.sum(dii)/self.num_batch
        return center_loss, self.centers

I want the self.centers weights to be updated with the centers_update value.

Hi,

To use the Module, you need to create one instance and then use it.
For the parameters in a Module to be learnt, you just need to pass them to your optimizer:

your_model = Model()
your_loss = center_loss()
your_optimizer = optim.SGD(list(your_model.parameters()) + list(your_loss.parameters()), ...)

# In your training:
features = your_model(inputs)
loss = your_loss(features, labels)
your_optimizer.zero_grad()
loss.backward()
your_optimizer.step()
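
For the error mentioned at the top of the thread: the unpacking fails when the class itself is called instead of an instance. As a minimal sketch of the intended usage (features and labels here are placeholder tensors coming from your model and data loader):

criterion_cent = center_loss(num_class=10, num_dim=2, num_batch=100)
loss, centers = criterion_cent(features, labels)  # forward() returns (loss, self.centers)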

This is how I am calculating the new value of the centers:
centers_update = self.centers - (imm/s)*0.5

I need to update self.centers with the values of centers_update.

How should your centers be updated? Is centers_update a surrogate for the gradient, or is it the new value they should take?

centers_update is the new value that I wish to assign to self.centers; no, it is not a gradient.

Then self.centers does not need to be a nn.Parameter() and can be defined as self.centers = torch.randn(num_class,num_dim).cuda().
You can then update it by doing one of the two options below:

# works only if self.centers.requires_grad == False
self.centers.copy_(centers_update.detach())

# Will work in all cases
with torch.no_grad():
    self.centers.copy_(centers_update)
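
Putting the pieces together, a minimal sketch of that pattern (the class name, the simplified loss, and the toy update rule below are illustrative placeholders, not the original math; everything is kept on CPU, add .cuda() as in the original):

import torch

class CenterBuffer(torch.nn.Module):
    def __init__(self, num_class=10, num_dim=2):
        super(CenterBuffer, self).__init__()
        # Plain tensor, not nn.Parameter: the optimizer never sees it,
        # it is overwritten manually inside forward().
        self.centers = torch.randn(num_class, num_dim)

    def forward(self, features, labels):
        # Loss: squared distance between each feature and its class center.
        diff = features - self.centers[labels]
        loss = diff.pow(2).sum(dim=1).mean()

        # Toy stand-in for the (imm / s) * 0.5 rule above: move each center
        # that appears in the batch halfway towards its batch feature mean.
        # The new value is computed and assigned outside of autograd.
        with torch.no_grad():
            centers_update = self.centers.clone()
            for c in labels.unique():
                centers_update[c] = 0.5 * (self.centers[c] + features[labels == c].mean(dim=0))
            self.centers.copy_(centers_update)
        return loss, self.centers

# Usage: features would normally come from your model.
crit = CenterBuffer(num_class=10, num_dim=2)
features = torch.randn(100, 2, requires_grad=True)
labels = torch.randint(0, 10, (100,))
loss, centers = crit(features, labels)
loss.backward()  # gradient w.r.t. features still flows through the loss term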

I tried it, but it throws an error:

# criterion_xent = nn.CrossEntropyLoss()
criterion_cent = center_loss()
# optimizer_model = torch.optim.Adam(cnn.parameters(), lr=0.001)
optimizer_centloss = torch.optim.Adam(criterion_cent, lr=0.01)

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
in <module>()
      2 criterion_cent = center_loss()
      3 # optimizer_model = torch.optim.Adam(cnn.parameters(), lr=0.001)
----> 4 optimizer_centloss = torch.optim.Adam(criterion_cent, lr=0.01)

~/anaconda3/envs/torch/lib/python3.6/site-packages/torch/optim/adam.py in __init__(self, params, lr, betas, eps, weight_decay, amsgrad)
     39         defaults = dict(lr=lr, betas=betas, eps=eps,
     40                         weight_decay=weight_decay, amsgrad=amsgrad)
---> 41         super(Adam, self).__init__(params, defaults)
     42
     43     def __setstate__(self, state):

~/anaconda3/envs/torch/lib/python3.6/site-packages/torch/optim/optimizer.py in __init__(self, params, defaults)
     34         self.param_groups = []
     35
---> 36         param_groups = list(params)
     37         if len(param_groups) == 0:
     38             raise ValueError("optimizer got an empty parameter list")

TypeError: 'center_loss' object is not iterable

You forgot the .parameters() :wink:
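
In other words, the fix is to pass the module's parameters to the optimizer, not the module itself:

optimizer_centloss = torch.optim.Adam(criterion_cent.parameters(), lr=0.01)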
