Can I define the complete model in forward function?

class UNet(nn.Module):
    def __init__(self, depth=5, root_filter=64):
        super(UNet, self).__init__()
        self.depth = depth
        self.root_filter = root_filter
        print("depth: {}, root filter: {}".format(self.depth, self.root_filter))
    
    def forward(self, x):
        long_connection_store = {}
        input_channels = 1
        # Down sampling
        print("DOWN SAMPLING")
        for i in range(self.depth):
            out_channel = 2**i * self.root_filter
            print(out_channel)
            x = F.relu(nn.Conv2d(input_channels, out_channel, 3, padding=1)(x))
            x = F.relu(nn.Conv2d(out_channel, out_channel, 3, padding=1)(x))

            if i<self.depth-1:
                long_connection_store[str(i)] = x
                x = F.max_pool2d(x, kernel_size=2)
            input_channels = out_channel

        print("x.shape: {}".format(x.shape))
        print("UP SAMPLING")
        
        for i in range(self.depth - 2, -1, -1):
            out_channel = 2**(i) * self.root_filter
            print("input_channels: {}".format(input_channels))
            print("out_channel: {}".format(out_channel))
            
            # long connection from down sampling path.
            long_connection = long_connection_store[str(i)]
            print("long_connection shape: {}".format(long_connection.shape))
            
            up1 = nn.Upsample(scale_factor=2)(x)
            print("up1 shape: {}".format(up1.shape))
            
            up_conv1 = F.relu(nn.Conv2d(input_channels, out_channel, 3, padding=1)(up1))
            print("upsamplingConv : {}".format(up_conv1.shape))
            
            up_conc = torch.cat((up_conv1, long_connection), dim=1)
            print("up_conc : {}\n".format(up_conc.shape))
            
            #  Convolutions
            up_conv1 = F.relu(nn.Conv2d(input_channels, out_channel, 3, padding=1)(up_conc))
            up_conv2 = F.relu(nn.Conv2d(out_channel, out_channel, 3, padding=1)(up_conv1))

            x = up_conv2
            input_channels = out_channel
        print("FINAL shape: {}".format(x.shape))
        x = F.sigmoid(nn.Conv2d(input_channels, 1, 1, padding=0)(x))
        print("FINAL shape: {}".format(x.shape))
        return x
batch_size = 5
channels = 1
height = 256
width = 256
image = torch.randn(batch_size, channels, height, width)
unet = UNet(5, 64)
output = unet(image)

If I run the above code, it works but when I do unet.parameters() I get empty list.
Is it always necessary to define the layers with learnable parameters in init function? That looks like lot of work if the model is very deep like in case of UNet. Do I have to define all Conv layers of downsampling and upsampling path in init function. If I do that then I can’t initialize the model dynamically with depth or filter size parameters which I can do very easily in Keras.
Is there a way to define the model without initiating each and every Conv layer in init function.

Not sure but when you use x = F.relu(nn.Conv2d(input_channels, out_channel, 3, padding=1)(x)) you are instantiating Conv2d in each forward pass, so maybe you should use the functional conv2d instead

Ok. Make sense. Thank you for the response. I think that’s why we initiate Conv layers in init function.