Modifying an nn.Sequential model into a custom model

Hi all, I’m pretty new to PyTorch, so please bear with me.

I was trying to modify the following nn.Sequential model:

class CNN(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = torch.nn.Sequential(
            
            #In=3x32x32, out=32x32x32
            torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            #In=32x32x32, out=32x16x16
            torch.nn.MaxPool2d(kernel_size=2),

            #In=32x16x16, out=64x16x16
            torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            #In=64x16x16, out=64x8x8
            torch.nn.MaxPool2d(kernel_size=2),

            #In=64x8x8, out=64x8x8
            torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            #In=64x8x8, out=64x4x4
            torch.nn.MaxPool2d(kernel_size=2),

            torch.nn.Flatten(),
            torch.nn.Linear(64*4*4, 512),
            torch.nn.ReLU(),
            torch.nn.Linear(512, 10)
        )

This works fine for my task, but when I modify it into:

class CNN(torch.nn.Module):
    def __init__(self):
        super().__init__()           

        #In=3x32x32, out=32x32x32
        self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
        self.bn1 = torch.nn.BatchNorm2d(32),
        self.relu1 = torch.nn.ReLU(),
        #In=32x32x32, out=32x16x16
        self.maxpool1 = torch.nn.MaxPool2d(kernel_size=2),

        #In=32x16x16, out=64x16x16
        self.conv2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
        self.bn2 = torch.nn.BatchNorm2d(64),
        self.relu2 = torch.nn.ReLU(),
        #In=64x16x16, out=64x8x8
        self.maxpool2 = torch.nn.MaxPool2d(kernel_size=2),

        #In=64x8x8, out=64x8x8
        self.conv3 = torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
        self.bn3 = torch.nn.BatchNorm2d(64),
        self.relu3 = torch.nn.ReLU(),
        #In=64x8x8, out=64x4x4
        self.maxpool3 = torch.nn.MaxPool2d(kernel_size=2),

        self.flatten = torch.nn.Flatten(),
        self.linear1 = torch.nn.Linear(64*4*4, 512),
        self.relu4 = torch.nn.ReLU(),
        self.linear2 = torch.nn.Linear(512, 10)

From the little experience that I have, I assume I also have to modify the forward function from:

    def forward(self, x):
        return self.model(x)

to:

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.maxpool1(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu2(out)
        out = self.maxpool2(out)

        out = self.conv3(out)
        out = self.bn3(out)
        out = self.relu3(out)
        out = self.maxpool3(out)

        out = self.flatten(out)
        out = self.linear1(out)
        out = self.relu4(out)
        out = self.linear2(out)

        return out

But unfortunately, I get the following error:

  File "/home/usr/Documents/mycnn/train.py", line 41, in <module>
    outputs = model(images)
  File "/home/usr/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/usr/Documents/mycnn/CNN.py", line 37, in forward
    x = self.conv1(x)
TypeError: 'tuple' object is not callable

What am I doing wrong here?
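
If it helps narrow things down, this doesn’t seem to be specific to the architecture: a stripped-down, single-layer sketch that follows the same pattern as my __init__ above (not my actual code) hits the same error:

import torch

class TinyCNN(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Same assignment pattern as conv1 in the full model above
        self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),

    def forward(self, x):
        return self.conv1(x)

model = TinyCNN()
out = model(torch.randn(1, 3, 32, 32))  # raises TypeError: 'tuple' object is not callable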

The reason I don’t want to use nn.Sequential is that I want to experiment with some things that aren’t possible with nn.Sequential.

Thanks.

Can you show how you input “images” over here?

outputs = model(images)

Here is the part where the data is downloaded, data loaders are generated, and the training process is executed:

# Download dataset if not downloaded
train_dataset, test_dataset = myutils.download_dataset(root="./CIFAR10")

# Generate data loaders
train_loader, test_loader = myutils.generate_dataset_loaders(train_dataset=train_dataset, test_dataset=test_dataset)

# Begin training
train_loss_list = []
for epoch in range(num_epochs):
    print(f"Epoch {epoch+1}/{num_epochs}", end=" ")
    train_loss = 0

    # Iterating over the training dataset in batches
    model.train()
    for i, (images, labels) in enumerate(train_loader):

        # Extracting images and target labels for the batch being iterated
        images = images.to(device)
        labels = labels.to(device)

        # Calculating the model output and the cross entropy loss
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Updating weights according to the calculated loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

    # Printing loss for each epoch
    train_loss_list.append(train_loss/len(train_loader))
    print(f"Training loss = {train_loss_list[-1]}")