While training my CNN, I am getting ModuleAttributeError: 'MaxPool2d' object has no attribute 'view'

  # no. of conv: 6
  # no. of fc: 3
  # Kernel size (conv): 3x3
  # Stride (conv): 1x1
  # Stride (maxPool): 2x2
  # Dilation: 1x1 (default)
  # Padding: 1
  # Dropout in FC: 10%

import numpy as np
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd

from torch.autograd import Variable

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

class CNN2(nn.Module):
  def __init__(self):
    super(CNN2, self).__init__()
    # TODO: define your CNN
    # Convolution Layers
    self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
    self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
    self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
    self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
    self.conv5 = nn.Conv2d(128, 256, 3, padding=1)
    self.conv6 = nn.Conv2d(256, 256, 3, padding=1)

    # FC Layers
    self.fc1 = nn.Linear(4096, 1024)
    self.fc2 = nn.Linear(1024, 512)
    self.fc3 = nn.Linear(512, 10)

    # Sub Sampling (Max pooling)
    #self.maxpool2d1 = nn.MaxPool2d(kernel_size=2, stride=2)
    #self.maxpool2d2 = nn.MaxPool2d(kernel_size=2, stride=2)
    #self.maxpool2d3 = nn.MaxPool2d(kernel_size=2, stride=2)
    
    # Dropout: 10% 
    self.dropout1 = nn.Dropout2d(p=0.1)
    self.dropout2 = nn.Dropout(p=0.1)
    self.dropout3 = nn.Dropout(p=0.1)

    # Activation Function 
    self.relu = nn.ReLU()
    
  def forward(self, y):
    # TODO: define your forward function

    # 1st conv
    y = self.conv1
    # Batch Normalization over 4D input
    y = nn.BatchNorm2d(32)
    y = self.relu

    # 2nd conv
    y = self.conv2
    y = self.relu
    # Max pooling over a (2, 2) window with stride = 2 on 2nd conv layer
    y = nn.MaxPool2d(kernel_size=2, stride=2)

    # 3rd conv
    y = self.conv3
    y = nn.BatchNorm2d(128)
    y = self.relu

    # 4th conv
    y = self.conv4
    y = self.relu
    y = nn.MaxPool2d(kernel_size=2, stride=2)
    y = self.dropout1
    
    # 5th conv
    y = self.conv5
    y = nn.BatchNorm2d(256)
    y = self.relu

    # 6th conv
    y = self.conv6
    y = self.relu
    y = nn.MaxPool2d(kernel_size=2, stride=2)

    # flatten
    y = y.view(-1, self.num_flat_features(y))
    #y = y.view(y.size(0), -1)
    #y = torch.flatten(y, start_dim = 1)

    # fc layers
    y = self.dropout2
    y = self.fc1
    y = self.relu

    y = self.fc2
    y = self.relu
    
    y = self.dropout3
    y = self.fc3
    return y

  def num_flat_features(self, y):
      size = y.size()[1:]  # all dimensions except the batch dimension
      num_features = 1
      for s in size:
          num_features *= s
      return num_features


cnn2 = CNN2()
print(cnn2)

params2 = list(cnn2.parameters())
print(len(params2))
for p in params2:  # 18 tensors: 6 conv + 3 fc layers, each with a weight and a bias
  print(p.size())

cnn2 = CNN2().to(device)  # operate on GPU

print(cnn2)

## Define the Loss Function and Optimizer

import torch.optim as optim

# TODO: you can change loss function and optimizer

criterion2 = nn.CrossEntropyLoss()

optimizer2 = optim.SGD(cnn2.parameters(), lr=0.001, momentum=0.9)

## Train the Network

n_epoch2 = 5

for epoch2 in range(n_epoch2):  # loop over the dataset multiple times

  running_loss2 = 0.0

  for i, data in enumerate(cifar_trainloader, 0):
    # TODO: write training code

    # get the inputs
    inputs, labels = data
    inputs = inputs.to(device)
    labels = labels.to(device)

    # wrap them in Variable
    inputs, labels = Variable(inputs), Variable(labels)

    # zero the parameter gradients
    optimizer2.zero_grad()

    # forward + backward + optimize
    output2 = cnn2(inputs)
    loss2 = criterion2(output2, labels)
    loss2.backward()
    optimizer2.step()

    # print statistics
    running_loss2 += loss2.item()
    if i % 2000 == 1999:    # print every 2000 mini-batches
        print('[%d, %5d] loss2: %.3f' % (epoch2 + 1, i + 1, running_loss2 / 2000))
        running_loss2 = 0.0

print('Finished Training the CNN2 Model')


---------------------------------------------------------------------------
ModuleAttributeError                      Traceback (most recent call last)
<ipython-input-108-58a5de353b76> in <module>()
     20 
     21     # forward + backward + optimize
---> 22     output2 = cnn2(inputs)
     23     loss2 = criterion2(output2, labels)
     24     loss2.backward()

2 frames
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __getattr__(self, name)
    770                 return modules[name]
    771         raise ModuleAttributeError("'{}' object has no attribute '{}'".format(
--> 772             type(self).__name__, name))
    773 
    774     def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None:

ModuleAttributeError: 'MaxPool2d' object has no attribute 'view'

Sharing any thoughts on this would be greatly appreciated.

Hi,

In your forward method, you are not calling any of the objects you instantiated in the __init__ method.
In Python, you first instantiate a class to get an object, and then you call that object:

self.conv1 = nn.Conv2d(...)  # just initialized; it still needs to be called

# in forward
y = self.conv1(some_input)

None of the statements in your forward actually pass an input: a line like y = self.conv2 just rebinds y to the module object itself. That is exactly what the traceback shows: after y = nn.MaxPool2d(kernel_size=2, stride=2), y is the pooling module rather than a tensor, so the later y.view(...) fails because MaxPool2d has no view attribute.
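You can reproduce the error in isolation. This is a minimal sketch of the same mistake, not taken from your notebook:

import torch.nn as nn

pool = nn.MaxPool2d(kernel_size=2, stride=2)
y = pool     # rebinds y to the module itself instead of calling it
y.view(-1)   # ModuleAttributeError: 'MaxPool2d' object has no attribute 'view'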

Look at the way you initialized criterion2 and then used it by passing inputs to it. You need to do the same thing for every layer in the forward method.
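As a rough sketch of what that could look like (not a drop-in fix): nn.BatchNorm2d and nn.MaxPool2d are modules too, so constructing them inside forward creates fresh, untrained layers on every call. Here I assume the batch norm and pooling modules are moved into __init__ (bn1/bn3/bn5/pool are placeholder names I made up), and that the inputs are 32x32 CIFAR images, so the flattened size is 256 * 4 * 4 = 4096, matching your fc1:

# added in __init__, next to your existing layers (names are placeholders)
self.bn1 = nn.BatchNorm2d(32)
self.bn3 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

def forward(self, y):
    y = self.relu(self.bn1(self.conv1(y)))     # 1st conv + batch norm
    y = self.pool(self.relu(self.conv2(y)))    # 2nd conv + 2x2 max pool
    y = self.relu(self.bn3(self.conv3(y)))     # 3rd conv + batch norm
    y = self.dropout1(self.pool(self.relu(self.conv4(y))))  # 4th conv + pool + dropout
    y = self.relu(self.bn5(self.conv5(y)))     # 5th conv + batch norm
    y = self.pool(self.relu(self.conv6(y)))    # 6th conv + pool

    y = y.view(y.size(0), -1)                  # flatten: 256 * 4 * 4 = 4096 features
    y = self.relu(self.fc1(self.dropout2(y)))
    y = self.relu(self.fc2(y))
    y = self.fc3(self.dropout3(y))
    return y

Every step now takes a tensor and returns a tensor, so y is still a tensor when it reaches the view call.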

Before going further, I would also recommend working through the basic PyTorch tutorials; they will help you get started.

Best

Thanks for the response. It helps. Sorry for the late reply.