Here is my simple classification model:

```
def __init__(self):
    """Build a 5-hidden-layer fully-connected binary classifier.

    Each hidden layer is Linear -> BatchNorm1d -> ReLU -> Dropout(0.5);
    the head is Linear(512, 1) -> Sigmoid, i.e. the model outputs a
    probability in [0, 1] for a single class.

    NOTE(review): every BatchNorm1d below keeps running_mean/running_var
    buffers that are updated on each forward pass while the module is in
    train() mode — independently of the optimizer and the learning rate.
    So the model state (and its eval-mode predictions) can change between
    epochs even with lr = 0.
    """
    super().__init__()
    # Layer 1: input has 246 features (fixed by fc1's in_features).
    self.fc1 = nn.Linear(246, 512)
    self.bn1 = nn.BatchNorm1d(512)
    self.relu1 = nn.ReLU()
    self.dout = nn.Dropout(0.5)
    # Layers 2-5. Despite the "prelu*" names these are plain ReLUs, not
    # nn.PReLU; the names are kept as-is so existing state_dicts and
    # forward() keep working.
    self.fc2 = nn.Linear(512, 512)
    self.bn2 = nn.BatchNorm1d(512)
    self.prelu1 = nn.ReLU()
    self.dout1 = nn.Dropout(0.5)
    self.fc3 = nn.Linear(512, 512)
    self.bn3 = nn.BatchNorm1d(512)
    self.prelu2 = nn.ReLU()
    self.dout2 = nn.Dropout(0.5)
    self.fc4 = nn.Linear(512, 512)
    self.bn4 = nn.BatchNorm1d(512)
    self.prelu3 = nn.ReLU()
    self.dout3 = nn.Dropout(0.5)
    self.fc5 = nn.Linear(512, 512)
    self.bn5 = nn.BatchNorm1d(512)
    self.prelu4 = nn.ReLU()
    # Output head: no dropout after the last hidden activation.
    self.out = nn.Linear(512, 1)
    self.out_act = nn.Sigmoid()
def forward(self, input_):
    """Map a (batch, 246) input to a (batch, 1) sigmoid probability.

    Applies the five Linear -> BatchNorm1d -> ReLU blocks built in
    __init__ (dropout after the first four), then the sigmoid head.
    """
    x = self.dout(self.relu1(self.bn1(self.fc1(input_))))
    x = self.dout1(self.prelu1(self.bn2(self.fc2(x))))
    x = self.dout2(self.prelu2(self.bn3(self.fc3(x))))
    x = self.dout3(self.prelu3(self.bn4(self.fc4(x))))
    x = self.prelu4(self.bn5(self.fc5(x)))
    return self.out_act(self.out(x))
```

During training, even when I fix the learning rate to 0, the parameters still somehow get updated, and I get different test results in different epochs. How is that possible? My understanding was that if lr = 0, then the parameter updates should be zero.

Any help is really appreciated.