I’ve been reading a piece of code I found on GitHub. I specifically don’t understand why we keep checking whether the model’s layers have a weight and a bias (is not None) when we’ve just created them ourselves. And why would we call nn.init.uniform_(module.weight) if the weights are already there? What’s the logic here? Thanks
import torch.nn as nn

def get_head(nf: int, n_classes: int):
    # Flatten is assumed to be a custom layer from the repo (e.g. fastai-style Flatten)
    model = nn.Sequential(
        nn.ReLU(),
        nn.AdaptiveAvgPool2d(1),
        Flatten(),
        nn.BatchNorm1d(nf),
        nn.Dropout(p=0.25),
        nn.Linear(nf, n_classes)
    )
    for i, module in enumerate(model):
        if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
            if module.weight is not None:
                nn.init.uniform_(module.weight)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        if isinstance(module, nn.Linear):
            # weight_v / weight_g only exist if weight normalization was applied to the layer
            if getattr(module, "weight_v", None) is not None:
                print("Initing linear with weight normalization")
                assert model[i].weight_g is not None
            else:
                nn.init.kaiming_normal_(module.weight)
                print("Initing linear")
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
    return model
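To make my confusion concrete, here is the kind of check I’d write myself (plain PyTorch, nothing from the original repo) to see when weight or bias can even be None; I assume the affine/bias flags are the relevant cases, but I’m not sure that’s what the author had in mind:

import torch.nn as nn

# Minimal sketch: the same layer types can be built without learnable
# weight/bias, in which case those attributes are None (standard PyTorch).
bn_affine  = nn.BatchNorm1d(16)                 # weight and bias are Parameters
bn_plain   = nn.BatchNorm1d(16, affine=False)   # weight and bias are None
lin_bias   = nn.Linear(16, 10)                  # bias is a Parameter
lin_nobias = nn.Linear(16, 10, bias=False)      # bias is None

print(bn_affine.weight is None, bn_plain.weight is None)   # False True
print(lin_bias.bias is None, lin_nobias.bias is None)      # False True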