Backpropagation skips layers

I am trying to use a temporal conv net with different kernel sizes simultaneously for regression. Net.modules() does not include the 1d convolutions, only the fc layers at the end, so backpropagation seems to skip the conv layers entirely.

import torch
import torch.nn as nn

# Mish, TemporalBlock and Linear_out are custom modules defined elsewhere
class TemporalConvNet(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_sizes=[2,3], dilation_size=1):
        super(TemporalConvNet, self).__init__()

        self.group_norm1 = nn.GroupNorm(in_channels, in_channels)
        self.g_norms_out = []
        for i in range(len(kernel_sizes)):
            self.g_norms_out.append(nn.GroupNorm(out_channels, out_channels).double().cuda())
        self.mish = Mish()
        
        self.temporal_blocks = []
        for kernel_size in kernel_sizes:
            self.temporal_blocks.append(TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
                                    padding=kernel_size-1))
        
        self.linear_part = Linear_out(len(kernel_sizes)*out_channels*100, 1)

    def forward(self, x):
        x = self.group_norm1(x)
        
        conv_outputs = []
        for ind in range(len(self.temporal_blocks)):
            temp_block = self.temporal_blocks[ind]
            group_norm = self.g_norms_out[ind]
            temp_block.cuda().double()
            conv_out = temp_block(x)
            conv_out = group_norm(conv_out)
            conv_out = self.mish(conv_out)
            conv_outputs.append(conv_out)
            print(conv_out[0][0])
        conv_outputs = torch.cat(conv_outputs, dim=1)
        x = torch.flatten(conv_outputs, start_dim=1)
        print("After flatten: ", x[:2])
        x = self.linear_part(x)
        return x.view(-1)

Printing the model only shows these registered submodules:

TemporalConvNet(
  (group_norm1): GroupNorm(259, 259, eps=1e-05, affine=True)
  (mish): Mish()
  (linear_part): Linear_out(
    (dense_1): Sequential(
      (0): Linear(in_features=8000, out_features=64, bias=True)
      (1): Mish()
      (2): Dropout(p=0.2, inplace=False)
    )
    (dense_2): Sequential(
      (0): Linear(in_features=64, out_features=10, bias=True)
      (1): Mish()
      (2): Dropout(p=0.2, inplace=False)
    )
    (dense_3): Linear(in_features=10, out_features=1, bias=True)
  )
)

Plain Python lists won’t register the nn.Module instances they hold with the parent module, so those layers never show up in .modules() or .parameters() and the optimizer never updates their weights. Use nn.ModuleList for self.temporal_blocks and self.g_norms_out instead.
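A minimal, self-contained sketch of the difference (the Conv1d shapes here are just placeholders, not your actual layers):

import torch.nn as nn

class WithPlainList(nn.Module):
    def __init__(self):
        super().__init__()
        # stored in a plain list: the convs are invisible to the parent module
        self.convs = [nn.Conv1d(4, 8, k) for k in (2, 3)]

class WithModuleList(nn.Module):
    def __init__(self):
        super().__init__()
        # stored in an nn.ModuleList: the convs are registered as submodules
        self.convs = nn.ModuleList([nn.Conv1d(4, 8, k) for k in (2, 3)])

print(len(list(WithPlainList().parameters())))   # 0 -> the optimizer sees nothing
print(len(list(WithModuleList().parameters())))  # 4 -> weight + bias of both convs

So in your __init__ you would wrap both lists, e.g. self.temporal_blocks = nn.ModuleList([...]) and self.g_norms_out = nn.ModuleList([...]). Once they are registered, model.double().cuda() (or model.to(device)) moves them along with the rest of the model, so the per-layer .double().cuda() calls in __init__ and forward are no longer needed.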
