Why does my loss work even though I set all parameters to requires_grad=False?

I have the following network, and I set all of its parameters to be non-trainable (param.requires_grad = False).
My question is: why does my network still work, and why doesn't the loss throw an error?
My optim.SGD used to give me an error in other examples when I didn't have any trainable parameters.
Am I doing something wrong here?
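
For reference, this is the kind of error I mean; a minimal sketch with a stand-in one-layer model (older PyTorch releases performed this check when the optimizer was constructed, and the exact behavior depends on the version):

import torch.nn as nn
import torch.optim as optim

tiny = nn.Linear(2, 2)
for p in tiny.parameters():
    p.requires_grad = False  # freeze everything

# On older PyTorch versions this line raised:
#   ValueError: optimizing a parameter that doesn't require gradients
optimizer = optim.SGD(tiny.parameters(), lr=0.01)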


import torch
import torch.nn as nn
import torch.optim as optim


class Prediction(nn.Module):
    
    def __init__(self):
        super(Prediction, self).__init__()
        self.base = self.VGG16()
        self.prediction = self.Extention()
        
    def forward(self, x):
        # Run the input through the VGG16 base, layer by layer.
        for layer in self.base:
            x = layer(x)

        # Run the first 12 extension layers, saving the activation after layer 7.
        for i in range(12):
            x = self.prediction[i](x)
            if i == 7:
                X2 = x

        # Prediction head: maps the saved activation to 25 channels.
        S2 = self.prediction[12](X2)

        # Pool the 4x4 feature map down to 1x1 and flatten to (batch, 25).
        pool = nn.MaxPool2d(4, 4)
        S2 = pool(S2).view(-1, 25)

        return S2
    
    def VGG16(self):
        # Standard VGG16 feature configuration; 'M' marks a 2x2 max-pool.
        cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']

        layers = nn.ModuleList()
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
            else:
                layers.append(nn.Conv2d(in_channels, x, kernel_size=3, padding=1))
                layers.append(nn.ReLU(True))
                in_channels = x
        return layers
    
    def Extention(self):
        # Extension head: three (3x3 conv -> ReLU -> 1x1 conv -> ReLU) blocks,
        # followed by a 3x3 conv mapping to 25 output channels.
        prediction = nn.ModuleList()
        for _ in range(3):
            prediction.append(nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1))
            prediction.append(nn.ReLU(inplace=True))
            prediction.append(nn.Conv2d(512, 512, kernel_size=1, padding=0))
            prediction.append(nn.ReLU(inplace=True))
        prediction.append(nn.Conv2d(512, 25, kernel_size=3, padding=1))
        return prediction
        
    
if __name__ == '__main__':
    # The input itself requires gradients, so autograd has a leaf to
    # differentiate with respect to even after the model is frozen.
    A = torch.rand(1, 3, 128, 128)
    A.requires_grad_(True)

    Model = Prediction()

    # Freeze every parameter in the model.
    for name, param in Model.named_parameters():
        param.requires_grad = False
        print(name)

    P2 = Model(A)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(Model.parameters(), lr=0.001, momentum=0.9)

    # Dummy class label for the single sample.
    LL = torch.tensor([1], dtype=torch.long)

    loss = criterion(P2, LL)
    loss.backward()
    optimizer.step()

    print(loss)

loss.backward() won't throw an error, since your input requires gradients.
If you remove the A.requires_grad_(True), you'll get an error.
After calling backward, all model parameter gradients are still None.
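
Here is a minimal sketch of what is going on, using a hypothetical one-layer model instead of your network:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
for p in model.parameters():
    p.requires_grad = False  # freeze the model

x = torch.rand(1, 4, requires_grad=True)  # the input is the only leaf requiring grad
out = model(x)

# backward() succeeds because the graph still contains a differentiable leaf (x).
out.sum().backward()

print(x.grad)             # populated
print(model.weight.grad)  # None: frozen parameters never accumulate gradients

# Without requires_grad=True on x, out would not require grad at all,
# and out.sum().backward() would raise a RuntimeError.

Since the parameter gradients stay None, optimizer.step() is effectively a no-op here: SGD simply skips parameters whose .grad is None.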


Exactly.
That's why I had a follow-up here.

Could you let me know your opinion on my follow-up question, please?
