How to change forward class? (Skip Net)

This is Skip Net nn.Sequential function

from numpy.testing.private.utils import assert

import torch

import torch.nn as nn

from .common import *

def Skip(

    num_input_channels=1, num_output_channels=3,

    num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4], 

    filter_size_down=3, filter_size_up=3, filter_skip_size=1,

    need_sigmoid=True, need_bias=True, 

    pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU', 

    need1x1_up=True):

"""Assembles encoder-decoder with skip connections.

Arguments:

    act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)

    pad (string): zero|reflection (default: 'zero')

    upsample_mode (string): 'nearest|bilinear' (default: 'nearest')

    downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')

"""

assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)

n_scales = len(num_channels_down) 

if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)) :

    upsample_mode   = [upsample_mode]*n_scales

if not (isinstance(downsample_mode, list)or isinstance(downsample_mode, tuple)):

    downsample_mode   = [downsample_mode]*n_scales



if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)) :

    filter_size_down   = [filter_size_down]*n_scales

if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :

    filter_size_up   = [filter_size_up]*n_scales

last_scale = n_scales - 1 

cur_depth = None

model = nn.Sequential()

model_tmp = model

input_depth = num_input_channels

for i in range(len(num_channels_down)):

    deeper = nn.Sequential()

    skip = nn.Sequential()

    if num_channels_skip[i] != 0:

        model_tmp.add(Concat(1, skip, deeper))

    else:

        model_tmp.add(deeper)

    

    model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))

 

    if num_channels_skip[i] != 0:

        skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))

        skip.add(bn(num_channels_skip[i]))

        skip.add(act(act_fun))

        

    # skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))

    deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad, downsample_mode=downsample_mode[i]))

    deeper.add(bn(num_channels_down[i]))

    deeper.add(act(act_fun))

    deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))

    deeper.add(bn(num_channels_down[i]))

    deeper.add(act(act_fun))

    deeper_main = nn.Sequential()

    if i == len(num_channels_down) - 1:

        # The deepest

        k = num_channels_down[i]

    else:

        deeper.add(deeper_main)

        k = num_channels_up[i + 1]

    deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))

    model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))

    model_tmp.add(bn(num_channels_up[i]))

    model_tmp.add(act(act_fun))

    if need1x1_up:

        model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))

        model_tmp.add(bn(num_channels_up[i]))

        model_tmp.add(act(act_fun))

    input_depth = num_channels_down[i]

    model_tmp = deeper_main

model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))

if need_sigmoid:

    model.add(nn.Sigmoid())

return model

I want change forward class How to convert forward class? Or How to Debugging nn.Sequential function

์ž˜ ์ดํ•ด๋ฅผ ๋ชปํ–ˆ๋Š”๋ฐ ๋‹ค์‹œ ํ•œ๋ฒˆ ์ ์–ด์ฃผ์‹ค ์ˆ˜ ์žˆ๋‚˜์š”?
So, from what I understand, you want to change some modules within the nn.sequential module?
if so, I think you can change module with following
model._modules[wanted_change_idx] = <ANY MODULE YOU WANT (but it needs to be in correct shape>

Dear ooodragon

I want change Forward class

Example:

def forward(self, t):
    t = F.relu(self.conv1(t))
    t = F.max_pool2d(t, kernel_size=2, stride=2)
    t = F.relu(self.conv2(t))
    t = F.max_pool2d(t, kernel_size=2, stride=2)
    t = t.flatten(start_dim=1)
    t = F.relu(self.fc1(t))
    t = F.relu(self.fc2(t))
    t = self.out(t)
    return t

I want make a Forward class of Skip Net Because Skip Net canโ€™t debug input tensor

Still not quite sure what you meanโ€ฆ
So you want to โ€œdebugโ€ tensors?
you can add hook functions within your modules and ask them to print it

or if you need forward function, Iโ€™m not aware if you can override one from nn.Sequential, you might need to do some research

Yes. Exactly

I want just debug tensors so iโ€™m add some hook

Example:

from numpy.testing._private.utils import assert_

 import torch

 import torch.nn as nn

 from .common import *

def Skip(

    num_input_channels=1, num_output_channels=3,

    num_channels_down=[16, 32, 64, 128, 128], num_channels_up=

[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4],

    filter_size_down=3, filter_size_up=3, filter_skip_size=1,

    need_sigmoid=True, need_bias=True, 

    pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU', 

    need1x1_up=True):

"""Assembles encoder-decoder with skip connections.

Arguments:

    act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)

    pad (string): zero|reflection (default: 'zero')

    upsample_mode (string): 'nearest|bilinear' (default: 'nearest')

    downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')

"""

assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)

n_scales = len(num_channels_down) 

if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)) :

    upsample_mode   = [upsample_mode]*n_scales

if not (isinstance(downsample_mode, list)or isinstance(downsample_mode, tuple)):

    downsample_mode   = [downsample_mode]*n_scales



if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)) :

    filter_size_down   = [filter_size_down]*n_scales

if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :

    filter_size_up   = [filter_size_up]*n_scales

last_scale = n_scales - 1 

cur_depth = None

model = nn.Sequential()

model_tmp = model



def print_sizes(model_tmp, input_tensor):

    output = input_tensor

    for m in model_tmp.children():

        output = m(output)

        print(m, output.shape)

    return output

input_tensor = torch.rand(1,num_input_channels,512,512)



input_depth = num_input_channels

for i in range(len(num_channels_down)):

    deeper = nn.Sequential()

    skip = nn.Sequential()

    if num_channels_skip[i] != 0:

        model_tmp.add(Concat(1, skip, deeper))

    else:

        model_tmp.add(deeper)

    print_sizes(model_tmp, input_tensor)

    model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))

    if num_channels_skip[i] != 0:

        skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))

        skip.add(bn(num_channels_skip[i]))

        skip.add(act(act_fun))

        

    # skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))

    deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad, downsample_mode=downsample_mode[i]))

    deeper.add(bn(num_channels_down[i]))

    deeper.add(act(act_fun))

    deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))

    deeper.add(bn(num_channels_down[i]))

    deeper.add(act(act_fun))

    deeper_main = nn.Sequential()

    if i == len(num_channels_down) - 1:

        # The deepest

        k = num_channels_down[i]

    else:

        deeper.add(deeper_main)

        k = num_channels_up[i + 1]

    deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))

    model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))

    model_tmp.add(bn(num_channels_up[i]))

    model_tmp.add(act(act_fun))

    print_sizes(model_tmp, input_tensor)

    if need1x1_up:

        model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))

        model_tmp.add(bn(num_channels_up[i]))

        model_tmp.add(act(act_fun))

    input_depth = num_channels_down[i]

    model_tmp = deeper_main

model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))

if need_sigmoid:

    model.add(nn.Sigmoid())

return model

iโ€™m add printsizes but i have some errorโ€ฆ

plz show me your โ€œerrorโ€
Iโ€™m sorry I canโ€™t help you with code one by one :disappointed_relieved:

However, I donโ€™t see you applied any hook functions, plz try adding them

Sorry. This is My error How can i debug this codeโ€ฆ

๊ทธ๋ƒฅ ํŽธํ•˜๊ฒŒ ํ•œ๊ตญ๋ง๋กœ ํ• ๊ฒŒ์š” ^^โ€ฆ ๊ตณ์ด ์˜์–ด๋กœ ํ•˜๋ผ๋Š” ๋ฒ•์€ ์—†์–ด์„œโ€ฆ
๋ณด๋‹ˆ๊นŒ model.children ์‚ฌ์šฉํ•˜์…จ๋Š”๋ฐ, ์ด๋ ‡๊ฒŒ ํ•˜์‹œ๋ฉด sequential ํ•œ ๋ชจ๋ธ, (vgg๊ฐ™์€ residual connection์ด๋‚˜ ๋‹ค์–‘ํ•œ operation์ด ์—†๋Š” ๋ชจ๋ธ) ์—์„œ๋งŒ ๊ทธ๋ ‡๊ฒŒ ๊ฐ€๋Šฅํ•˜๊ณ  ๋‚˜๋จธ์ง€์—์„œ๋Š” ๋ถˆ๊ฐ€๋Šฅ ํ•ด์š”

๊ทธ๋ž˜์„œ forward hook์„ ์‚ฌ์šฉํ•˜์…”์•ผํ•ด์š”

์—ฌ๊ธฐ์— ์นœ์ ˆํ•˜๊ฒŒ hook function ์‚ฌ์šฉํ•˜๋Š” ๋ฐฉ๋ฒ•, ์–ด๋–ป๊ฒŒ ๋™์ž‘ํ•˜๋Š”์ง€๊นŒ์ง€ ์•Œ๋ ค์ฃผ๋‹ˆ๊นŒ ์ด ์ด์ƒ์˜ ์„ค๋ช…์€ ์ƒ๋žตํ• ๊ฒŒ์š”

์ œ๊ฐ€ ์ฝ”๋”ฉ๊นŒ์ง€ ํ•ด๋“œ๋ฆด์ •๋„๋กœ ์‹œ๊ฐ„์ด ๋งŽ์ง€๋Š” ์•Š์•„์„œ ^^;;
์—๋Ÿฌ๋ฅผ ๋ณด๋‹ˆ ์ œ๊ฐ€ ๋ง์”€๋“œ๋ฆฐ ๋ถ€๋ถ„์ด ๋งž์„ ๊ฒƒ ๊ฐ™์•„์š”. for children ์‚ฌ์šฉํ•˜์ง€ ๋งˆ์‹œ๊ตฌ์š” hook function ์‚ฌ์šฉํ•ด๋ณด์„ธ์š”!

์ •๋ง ๋‹ค์–‘ํ•œ ์ฝ”๋”ฉ์ด ๊ฐ€๋Šฅํ•ด์ง€๊ณ , ๋” ๊ณ ๊ธ‰ ํ”„๋กœ๊ทธ๋ž˜๋จธ๊ฐ€ ๋˜์‹ค๊บผ์˜ˆ์š”!

1 Like

์ข‹์€์ •๋ณด ๊ฐ์‚ฌํ•ฉ๋‹ˆ๋‹ค!

ํ›„ํฌ ํŽ‘์…˜์„ ์ด์šฉํ•˜์—ฌ ๋””๋ฒ„๊น…์„ ์‹œ๋„ํ•ด๋ณด๊ฒŸ์Šต๋‹ˆ๋‹ค.

์ •๋ง ๋งŽ์€๋„์›€์ด ๋˜์—ˆ์Šต๋‹ˆ๋‹ค.

1 Like