I have a network with many layers, but instead of re-writing the same lines I defined a simple block that I call many times, shaped like this:
class Generator(nn.Module):
    """Generator built from small reusable convolutional sub-blocks.

    Only part of the constructor is shown here; the modules used later in
    forward() (Cross1, TI_bloc1, bloc3, bloc4, Cross2) are presumably
    defined in the omitted portion.
    """

    def __init__(self):
        super().__init__()
        # Entry stage for the RGB input: plain conv + ELU (no batch norm).
        self.RGB_bloc1 = nn.Sequential(
            nn.Conv2d(3, 64, 3, stride=1, padding=1),
            nn.ELU(),
        )
        # NOTE(review): this is ONE registered module. Calling it several
        # times in forward() reuses the same weights, which is why its
        # parameters appear only once in named_parameters().
        self.bloc2 = nn.Sequential(
            conv_bn_elu(64, 64, 3, 1),
            conv_bn_elu(64, 64, 3, 1),
        )
So in the forward pass I use it this way:
def forward(self, RGB, TI):
    """Fuse an RGB stream and a TI stream through shared conv blocks.

    NOTE(review): ``self.bloc2`` and ``self.Cross1`` are each a single
    registered module invoked several times below, so all of those calls
    share one set of weights — that is why each name shows up only once
    when iterating ``named_parameters()``.
    """
    # --- RGB branch ---
    rgb_feat = self.RGB_bloc1(RGB)
    rgb_deep = self.bloc2(rgb_feat)
    fused = self.Cross1(torch.cat((rgb_feat, rgb_deep), 1))
    deeper = self.bloc2(fused)
    rgb_out = self.Cross1(torch.cat((rgb_deep, deeper), 1))

    # --- TI branch ---
    ti_feat = self.TI_bloc1(TI)
    ti_deep = self.bloc2(ti_feat)

    # --- merge the two branches, then refine twice more ---
    merged = self.Cross1(torch.cat((ti_deep, rgb_out), 1))
    step = self.bloc2(merged)
    merged = self.Cross1(torch.cat((merged, step), 1))
    step = self.bloc2(merged)
    merged = self.Cross1(torch.cat((merged, step), 1))

    # --- output head ---
    head = self.bloc3(merged)
    head = self.bloc4(head)
    return self.Cross2(head)
I printed all the layer names and this is the result:
# List every registered parameter; the tensor itself is unused, so bind
# it to `_` rather than a named variable.
for name, _ in G.named_parameters():
    print(name)
RGB_bloc1.0.weight
RGB_bloc1.0.bias
bloc2.0.0.weight
bloc2.0.0.bias
bloc2.1.0.weight
bloc2.1.0.bias
Cross1.0.weight
Cross1.0.bias
TI_bloc1.0.weight
TI_bloc1.0.bias
bloc3.0.0.weight
bloc3.0.0.bias
bloc3.1.weight
bloc3.1.bias
bloc3.3.0.weight
bloc3.3.0.bias
bloc4.0.0.weight
bloc4.0.0.bias
bloc4.1.0.weight
bloc4.1.0.bias
Cross2.0.weight
Cross2.0.bias
As you can see, some layers from my structure definition do not appear in the printed layer names.
Could you tell me why I can't see their names, and whether my network fully includes all my layer connections, or whether they are skipped because of the duplicated names?