Is this basic Convolutional Network code correct?

Hello community.

I am working on a convolutional network built from “blocks” of convolutional and max-pooling layers.
This is the main class of the network:

class SiameseBackbonev2(nn.Module):
    def __init__(self):
        super().__init__()

        self.block1_0 = nn.Sequential(
            Conv2dSamePadding(in_channels=3, out_channels=12, kernel_size=(3, 3), stride=(2, 2)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )

        self.block2_0 = nn.Sequential(
            Conv2dSamePadding(in_channels=3, out_channels=12, kernel_size=(3, 3), stride=(2, 2)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )

        self.block1_1 = basicBlock()
        self.block1_2 = basicBlock()
        self.block1_3 = basicBlock()
        self.block1_4 = basicBlock()
        self.avgpool1 = nn.AvgPool2d(kernel_size=2, stride=1)

        self.block2_1 = basicBlock()
        self.block2_2 = basicBlock()
        self.block2_3 = basicBlock()
        self.block2_4 = basicBlock()
        self.avgpool2 = nn.AvgPool2d(kernel_size=2, stride=1)

        self.conv = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), stride=1, padding='same')

IMPORTANT: as you can see, the network is built almost entirely from “basicBlock”:

class basicBlock(nn.Module):
    def __init__(self):
        super().__init__()

        self.conv1 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv3 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv5 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')

        self.conv6 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv7 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv8 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv9 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv10 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')

        self.maxpool = nn.MaxPool2d(2, 1)
        self.conv11 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')

So:
1. class basicBlock has self.conv1, self.conv2 and so on…
2. class SiameseBackbonev2 has self.block1_1 = basicBlock(), self.block1_2 = basicBlock() and so on…

Theoretically, would the network be able to learn during training?
Is this coding style correct?
Am I overwriting the convs each time I build a basicBlock?
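
For the last question, here is a quick check one can run (a minimal sketch, using the basicBlock class above):

import torch

b1 = basicBlock()
b2 = basicBlock()

# Each basicBlock() call constructs fresh nn.Conv2d modules, so the two
# instances hold distinct weight tensors; nothing is overwritten between them.
print(b1.conv1.weight is b2.conv1.weight)             # False: different tensors
print(torch.equal(b1.conv1.weight, b2.conv1.weight))  # almost surely False: independent random init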

I will paste the complete code here to give more context:

import torch.nn as nn
import torch.nn.functional as F
from functools import reduce
from operator import __add__


class Conv2dSamePadding(nn.Conv2d):
    """Conv2d with TensorFlow-style 'same' padding: the output spatial size
    is ceil(input_size / stride), for any stride."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pad each spatial dim by a total of (kernel_size - 1), split as evenly
        # as possible; for even kernels the extra pixel goes on the right/bottom.
        self.zero_pad_2d = nn.ZeroPad2d(reduce(__add__,
            [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]]))

    def forward(self, input):
        return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias)
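
# Quick shape check for Conv2dSamePadding (a sketch; assumes `import torch` and
# a hypothetical 224x224 RGB input):
#   conv = Conv2dSamePadding(in_channels=3, out_channels=12, kernel_size=(3, 3), stride=(2, 2))
#   conv(torch.randn(1, 3, 224, 224)).shape  # torch.Size([1, 12, 112, 112]) == ceil(224 / 2)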



class basicBlock(nn.Module):
    def __init__(self):
        super().__init__()

        # Tower 1: five 3x3 convs, combined in forward() with summed skips.
        self.conv1 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv3 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv5 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')

        # Tower 2: a parallel stack of five 3x3 convs over the same input.
        self.conv6 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv7 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv8 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv9 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')
        self.conv10 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')

        # Fusion: kernel-2 / stride-1 pooling shrinks H and W by 1 pixel.
        self.maxpool = nn.MaxPool2d(2, 1)
        self.conv11 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same')

    def forward(self, x):
        # Tower 1: each ct1_i sums all tower activations produced so far
        # (dense additive skip connections).
        tower1_1 = F.relu(self.conv1(x))
        tower1_2 = F.relu(self.conv2(tower1_1))
        ct1_1 = tower1_1 + tower1_2
        tower1_3 = F.relu(self.conv3(ct1_1))
        ct1_2 = tower1_3 + tower1_2 + tower1_1
        tower1_4 = F.relu(self.conv4(ct1_2))
        ct1_3 = tower1_4 + tower1_3 + tower1_2 + tower1_1
        tower1_5 = F.relu(self.conv5(ct1_3))
        ct1_4 = tower1_5 + tower1_4 + tower1_3 + tower1_2 + tower1_1

        # Tower 2: the same dense pattern, applied again to the input x.
        tower2_1 = F.relu(self.conv6(x))
        tower2_2 = F.relu(self.conv7(tower2_1))
        ct2_1 = tower2_1 + tower2_2
        tower2_3 = F.relu(self.conv8(ct2_1))
        ct2_2 = tower2_3 + tower2_2 + tower2_1
        tower2_4 = F.relu(self.conv9(ct2_2))
        ct2_3 = tower2_4 + tower2_3 + tower2_2 + tower2_1
        tower2_5 = F.relu(self.conv10(ct2_3))
        ct2_4 = tower2_5 + tower2_4 + tower2_3 + tower2_2 + tower2_1

        # Merge the two towers, pool, and mix with a final conv.
        c1 = ct1_4 + ct2_4
        c1 = self.maxpool(c1)
        c1 = F.relu(self.conv11(c1))

        return c1

class SiameseBackbonev2(nn.Module):
    def __init__(self):
        super().__init__()

        # Stem for the first branch: 3 -> 12 channels plus heavy downsampling.
        self.block1_0 = nn.Sequential(
            Conv2dSamePadding(in_channels=3, out_channels=12, kernel_size=(3, 3), stride=(2, 2)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )

        # Stem for the second branch (separate weights, not shared with branch 1).
        self.block2_0 = nn.Sequential(
            Conv2dSamePadding(in_channels=3, out_channels=12, kernel_size=(3, 3), stride=(2, 2)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )

        self.block1_1 = basicBlock()
        self.block1_2 = basicBlock()
        self.block1_3 = basicBlock()
        self.block1_4 = basicBlock()
        self.avgpool1 = nn.AvgPool2d(kernel_size=2, stride=1)

        self.block2_1 = basicBlock()
        self.block2_2 = basicBlock()
        self.block2_3 = basicBlock()
        self.block2_4 = basicBlock()
        self.avgpool2 = nn.AvgPool2d(kernel_size=2, stride=1)

        self.conv = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), stride=1, padding='same')

    def forward(self, input1, input2):
        # First branch
        n4 = self.block1_0(input1)
        c1 = self.block1_1(n4)
        c2 = self.block1_2(c1)
        c3 = self.block1_3(c2)
        c4 = self.block1_4(c3)
        c4 = self.avgpool1(c4)

        # Second branch
        o_n4 = self.block2_0(input2)
        o_c1 = self.block2_1(o_n4)
        o_c2 = self.block2_2(o_c1)
        o_c3 = self.block2_3(o_c2)
        o_c4 = self.block2_4(o_c3)
        o_c4 = self.avgpool2(o_c4)

        # Fuse the two branches with an element-wise sum.
        c = c4 + o_c4
        c = F.relu(self.conv(c))

        return c
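
For more context, this is how the whole model can be exercised end to end (a minimal sketch: the 224x224 input size, the batch size, and the mean() loss are placeholders, only there to confirm that the forward pass runs and that gradients flow):

import torch

model = SiameseBackbonev2()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Two random "image" batches, one per branch of the siamese backbone.
x1 = torch.randn(4, 3, 224, 224)
x2 = torch.randn(4, 3, 224, 224)

out = model(x1, x2)
print(out.shape)  # torch.Size([4, 12, 22, 22]) for 224x224 inputs

# One dummy optimization step to confirm the parameters receive gradients.
loss = out.mean()
loss.backward()
optimizer.step()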