RuntimeError: The size of tensor a (262) must match the size of tensor b (264) at non-singleton dimension 3

Hi
I want to changed resnet50 to resnet18 but I have this error
RuntimeError: The size of tensor a (262) must match the size of tensor b (264) at non-singleton dimension 3
Please help me

    module_list = [
        ("/", nn.Conv2d(input_ch, 64, 7, stride=1, padding=0, bias=False)),
        ("bn", nn.BatchNorm2d(64, eps=1e-5)),
        ("relu", nn.ReLU(inplace=True)),
    ]
    self.conv0 = nn.Sequential(OrderedDict(module_list))
    self.d0 = ResidualBlock(64, [3,3], [64,64], 3, stride=1)
    self.d1 = ResidualBlock(64, [3,3], [128, 128], 4, stride=2)
    self.d2 = ResidualBlock(128, [3,3], [256, 256], 6, stride=2)
    self.d3 = ResidualBlock(256, [3,3], [512, 512], 3, stride=2)
    self.conv_bot = nn.Conv2d(512, 256, 1, stride=1, padding=0, bias=False)

class ResidualBlock(Net):

"""Residual block as defined in:

He, Kaiming, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. "Deep residual learning 

for image recognition." In Proceedings of the IEEE conference on computer vision 

and pattern recognition, pp. 770-778. 2016.

"""

def __init__(self, in_ch, unit_ksize, unit_ch, unit_count, stride=1):

    super(ResidualBlock, self).__init__()

    assert len(unit_ksize) == len(unit_ch), "Unbalance Unit Info"

    self.nr_unit = unit_count

    self.in_ch = in_ch

    self.unit_ch = unit_ch

    

    # ! For inference only so init values for batchnorm may not match tensorflow

    unit_in_ch = in_ch

    self.units = nn.ModuleList()

    for idx in range(unit_count):

        unit_layer = [

            ("preact/bn", nn.BatchNorm2d(unit_in_ch, eps=1e-5)),

            ("preact/relu", nn.ReLU(inplace=True)),

            (

                "conv1",

                nn.Conv2d(

                    unit_in_ch,

                    unit_ch[0],

                    unit_ksize[0],

                    stride=1,

                    padding=0,

                    bias=False,

                ),

            ),

            ("conv1/bn", nn.BatchNorm2d(unit_ch[0], eps=1e-5)),

            ("conv1/relu", nn.ReLU(inplace=True)),

            (

                "conv2/pad",

                TFSamepaddingLayer(

                    ksize=unit_ksize[1], stride=stride if idx == 0 else 1

                ),

            ),

            (

                "conv2",

                nn.Conv2d(

                    unit_ch[0],

                    unit_ch[1],

                    unit_ksize[1],

                    stride=stride if idx == 0 else 1,

                    padding=0,

                    bias=False,

                   ),

            ("conv2/bn", nn.BatchNorm2d(unit_ch[1], eps=1e-5)),

            ("conv2/relu", nn.ReLU(inplace=True)),]

        # * has bna to conclude each previous block so

        # * must not put preact for the first unit of this block

        unit_layer = unit_layer if idx != 0 else unit_layer[1:]

        self.units.append(nn.Sequential(OrderedDict(unit_layer)))

        unit_in_ch = unit_ch[-1]

    if in_ch = unit_ch[-1] or stride != 1:

        self.shortcut = nn.Conv2d(in_ch, unit_ch[-1], 1, stride=stride, bias=False)

    else:

        self.shortcut = None

    self.blk_bna = nn.Sequential(

        OrderedDict(

            [

                ("bn", nn.BatchNorm2d(unit_in_ch, eps=1e-5)),

                ("relu", nn.ReLU(inplace=True)),

            ]

        )

    )

    # print(self.units[0])

    # print(self.units[1])

    # exit()

def out_ch(self):

    return self.unit_ch[-1]

def forward(self, prev_feat, freeze=False):

    print(unit_ch)

    if self.shortcut is None:

        shortcut = prev_feat

    else:

        shortcut = self.shortcut(prev_feat)

        print(shortcut.shape)

    for idx in range(0, len(self.units)):

        print(self.units)

        new_feat = prev_feat

        if self.training:

            with torch.set_grad_enabled(not freeze):

                new_feat = self.units[idx](new_feat)

        else:

            new_feat = self.units[idx](new_feat)

        prev_feat = new_feat + shortcut

        shortcut = prev_feat

    feat = self.blk_bna(prev_feat)

    return feat

Without seeing the stacktrace I would guess the shape mismatch is raised by the shortcut addition.
Based on the error message I would suggest to check the setup of the shortcut layer and make sure the spatial size of its output activation is equal to the activation coming from the main branch of the model.