NotImplementedError while implementing GoogLeNet (v2)

I can't find any problem with my implementation of GoogLeNet (v2), yet I still get a NotImplementedError.

The code is the same code I used for CIFAR10, except for the net architecture. It seems like there's some problem when forward runs (the trace is below). Any idea why I get this error?

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

batch_size = 64

train_dataset = datasets.CIFAR10(root='./data/',
                                 train=True,
                                 transform=transforms.ToTensor(),
                                 download=True)

test_dataset = datasets.CIFAR10(root='./data/',
                                train=False,
                                transform=transforms.ToTensor())

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

print(len(train_dataset))

print('==> Preparing data...')
transform_train = transforms.Compose([
    #transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

train_dataset = torchvision.datasets.CIFAR10(root='./data',
                                             train=True, download=True,
                                             transform=transform_train)

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=2)

test_dataset = torchvision.datasets.CIFAR10(root='./data',
                                            train=False,
                                            download=True,
                                            transform=transform_test)

test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=2)

class InceptionModule(nn.Module):
    def __init__(self, in_channels, n_ker_11, n_ker_13, n_ker_33, n_ker_15, n_ker_55, n_pool_11):
        super(InceptionModule, self).__init__()
        self.b11 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=n_ker_11, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n_ker_11),
            nn.ReLU()
        )
        self.b13 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=n_ker_13, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n_ker_13),
            nn.ReLU(),
            nn.Conv2d(in_channels=n_ker_13, out_channels=n_ker_33, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(n_ker_33),
            nn.ReLU()
        )
        self.b15 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=n_ker_15, kernel_size=1, stride=1, padding=0),
            nn.ReLU(),
            nn.BatchNorm2d(n_ker_15),
            nn.Conv2d(in_channels=n_ker_15, out_channels=n_ker_55, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(n_ker_55),
            nn.ReLU(),
            nn.Conv2d(in_channels=n_ker_55, out_channels=n_ker_55, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(n_ker_55),
            nn.ReLU()
        )
        self.pool_11 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_channels=in_channels, out_channels=n_pool_11, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n_pool_11),
            nn.ReLU()
        )
        def forward(self, x):
            out_b11 = self.b11(x)
            out_b13 = self.b13(x)
            out_b15 = self.b15(x)
            out_pool = self.pool_11(x)
            return torch.cat([out_b11, out_b13, out_b15, out_pool], 1)

class GoogleNet(nn.Module):
    def __init__(self):
        super(GoogleNet, self).__init__()
        self.pre_inception = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        )
        self.inception = nn.Sequential(
            InceptionModule(in_channels=192, n_ker_11=64, n_ker_13=96, n_ker_33=128, n_ker_15=16, n_ker_55=32, n_pool_11=32),
            InceptionModule(in_channels=256, n_ker_11=128, n_ker_13=128, n_ker_33=192, n_ker_15=32, n_ker_55=96, n_pool_11=64),
            nn.MaxPool2d(3, stride=2, padding=1),
            InceptionModule(in_channels=480, n_ker_11=192, n_ker_13=96, n_ker_33=208, n_ker_15=16, n_ker_55=48, n_pool_11=64),
            InceptionModule(in_channels=512, n_ker_11=160, n_ker_13=112, n_ker_33=224, n_ker_15=24, n_ker_55=64, n_pool_11=64),
            InceptionModule(in_channels=512, n_ker_11=128, n_ker_13=128, n_ker_33=256, n_ker_15=24, n_ker_55=64, n_pool_11=64),
            InceptionModule(in_channels=512, n_ker_11=256, n_ker_13=144, n_ker_33=288, n_ker_15=32, n_ker_55=64, n_pool_11=64),
            InceptionModule(in_channels=528, n_ker_11=384, n_ker_13=160, n_ker_33=320, n_ker_15=32, n_ker_55=128, n_pool_11=128),
            nn.MaxPool2d(3, stride=1, padding=1),
            InceptionModule(in_channels=832, n_ker_11=256, n_ker_13=160, n_ker_33=320, n_ker_15=32, n_ker_55=128, n_pool_11=128),
            InceptionModule(in_channels=832, n_ker_11=384, n_ker_13=192, n_ker_33=384, n_ker_15=48, n_ker_55=128, n_pool_11=128),
            nn.AvgPool2d(3, stride=1, padding=1),
            nn.Dropout(0.4)
        )

def forward(self, x):
    x = self.pre_inception(x)
    x = self.inception(x)
    x = x.view(x.size(0), -1)
    x = nn.Linear(1024, 10)
    return x

model = GoogleNet()
if torch.cuda.is_available():
    model.cuda()  

print(model)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
running_loss_list = []

def train(epoch):
    model.train()
    running_loss = 0.0

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data).cuda()
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        running_loss_list.append(running_loss)
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.3f}%)]\tLoss: {:.3f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        data, target = data.cuda(), target.cuda()
        output = model(data).cuda()
        test_loss += criterion(output, target).item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.3f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

for epoch in range(1, 5):
    train(epoch)
    test()

The Trace:

Traceback (most recent call last):

File "", line 1, in
runfile('/home/nimrod/Desktop/NN/Kaggle/CIFAR10/CIFAR10_try_v2.py', wdir='/home/nimrod/Desktop/NN/Kaggle/CIFAR10')

File "/home/nimrod/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 678, in runfile
execfile(filename, namespace)

File "/home/nimrod/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 106, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)

File "/home/nimrod/Desktop/NN/Kaggle/CIFAR10/CIFAR10_try_v2.py", line 243, in
train(epoch)

File "/home/nimrod/Desktop/NN/Kaggle/CIFAR10/CIFAR10_try_v2.py", line 202, in train
output = model(data).cuda()

File "/home/nimrod/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
result = self.forward(*input, **kwargs)

File "/home/nimrod/Desktop/NN/Kaggle/CIFAR10/CIFAR10_try_v2.py", line 176, in forward
x = self.inception(x)

File "/home/nimrod/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
result = self.forward(*input, **kwargs)

File "/home/nimrod/anaconda3/lib/python3.6/site-packages/torch/nn/modules/container.py", line 91, in forward
input = module(input)

File "/home/nimrod/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 491, in __call__
result = self.forward(*input, **kwargs)

File "/home/nimrod/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 83, in forward
raise NotImplementedError

NotImplementedError

Make sure the forward definition is in your model class:

class MyModel(nn.Module):
    def __init__(self):
        ....

    def forward(self, x):
        ....

Sometimes there is a typo or the indentation is wrong.
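
For instance, this toy module (the name BadModel is just for illustration) reproduces the error: forward is indented one level too deep, so it becomes a local function inside __init__ and the class never overrides nn.Module.forward.

import torch
import torch.nn as nn

class BadModel(nn.Module):
    def __init__(self):
        super(BadModel, self).__init__()
        self.fc = nn.Linear(10, 2)

        # Wrong: nested inside __init__, this is only a local function.
        # The class still inherits nn.Module.forward, which just raises.
        def forward(self, x):
            return self.fc(x)

model = BadModel()
model(torch.randn(1, 10))  # raises NotImplementedError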

I have that in my code, so that's not the problem. The indentation is OK too. (Sorry for my messy code above; it's my first time here.)

It might be an issue with how your code is formatted on the board, but currently both forward methods have the wrong indentation.
The error message points to exactly this issue.
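
Dedenting both methods to class level should be enough; roughly like this sketch (your layer definitions stay exactly as they are, the comments mark what I left out):

class InceptionModule(nn.Module):
    def __init__(self, in_channels, n_ker_11, n_ker_13, n_ker_33, n_ker_15, n_ker_55, n_pool_11):
        super(InceptionModule, self).__init__()
        # ... self.b11, self.b13, self.b15, self.pool_11 as in your code ...

    def forward(self, x):  # same level as __init__, not inside it
        out_b11 = self.b11(x)
        out_b13 = self.b13(x)
        out_b15 = self.b15(x)
        out_pool = self.pool_11(x)
        return torch.cat([out_b11, out_b13, out_b15, out_pool], 1)

class GoogleNet(nn.Module):
    def __init__(self):
        super(GoogleNet, self).__init__()
        # ... self.pre_inception and self.inception as in your code ...
        # Side note: create the classifier once here; nn.Linear(1024, 10)
        # inside forward would build a fresh, untrained layer on every call.
        self.fc = nn.Linear(1024, 10)

    def forward(self, x):  # inside the class, not at module level
        x = self.pre_inception(x)
        x = self.inception(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)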


Oh, you're right!

I have an indentation problem both here and in my code. What a stupid mistake, lol.
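
In case it helps anyone who lands here later: after dedenting, pushing a dummy tensor through a single block is a quick way to check that forward is actually found. With the numbers from my first inception block, the four branches concatenate to 64 + 128 + 32 + 32 = 256 channels:

block = InceptionModule(in_channels=192, n_ker_11=64, n_ker_13=96,
                        n_ker_33=128, n_ker_15=16, n_ker_55=32, n_pool_11=32)
out = block(torch.randn(1, 192, 8, 8))  # fake feature map
print(out.shape)  # torch.Size([1, 256, 8, 8]) instead of NotImplementedError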