NotImplementedError

import torch
import torch.nn as nn

class CNNModel(nn.Module):
    def __init__(self):
        super(CNNModel, self).__init__()
        # convolution 1
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2)
        # convolution 2
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)
        # convolution 3
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
        self.relu3 = nn.ReLU()
        self.maxpool3 = nn.MaxPool2d(kernel_size=2)

        # fully connected
        # self.fc1 =
        def forward(self, x):  # over-indented: defined inside __init__
            x = self.conv1(x)
            x = self.relu1(x)
            x = self.maxpool1(x)
            x = self.conv2(x)
            x = self.relu2(x)
            x = self.maxpool2(x)
            x = self.conv3(x)
            x = self.relu3(x)
            x = self.maxpool3(x)
            # x = self.conv4(x)
            # x = self.relu4(x)
            # x = self.maxpool4(x)
            return x

model = CNNModel()

y = torch.rand(1, 3, 224, 224)

out = model(y)

It gives the following error:


---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
<ipython-input> in <module>()
----> 1 out = model(y)

/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    489                 result = self._slow_forward(*input, **kwargs)
    490             else:
--> 491                 result = self.forward(*input, **kwargs)
    492             for hook in self._forward_hooks.values():
    493                 hook_result = hook(self, input, result)

/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in forward(self, *input)
     81             registered hooks while the latter silently ignores them.
     82         """
---> 83         raise NotImplementedError
     84
     85     def register_buffer(self, name, tensor):

NotImplementedError:

Hi,

You can use triple backticks ``` to format your code sample properly.
The error you see happens when you call an nn.Module that does not implement the forward method.
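
For illustration, a minimal sketch (with a hypothetical module name) that reproduces it:

import torch
import torch.nn as nn

class NoForward(nn.Module):  # hypothetical example
    def __init__(self):
        super(NoForward, self).__init__()
        self.fc = nn.Linear(4, 2)
    # no forward() defined, so nn.Module's placeholder is used

m = NoForward()
out = m(torch.rand(1, 4))  # raises NotImplementedError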


Probably your code is indented too much for your forward method, which results in your forward method being defined inside your __init__.

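For reference, a sketch of the model above with forward moved back to class level (the unfinished fully connected part is left out, as in the original, and the stateless ReLU/pooling modules are shared):

import torch
import torch.nn as nn

class CNNModel(nn.Module):
    def __init__(self):
        super(CNNModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=2)

    def forward(self, x):  # defined at class level, not inside __init__
        x = self.maxpool(self.relu(self.conv1(x)))
        x = self.maxpool(self.relu(self.conv2(x)))
        x = self.maxpool(self.relu(self.conv3(x)))
        return x

model = CNNModel()
out = model(torch.rand(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 64, 26, 26])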

Hi All,

I'm also getting the same error.

My code is here:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Fashion_MNIST_NeuralNet(nn.Module):

    def __init__(self):
        # calling the Module class for the reason mentioned above...
        super(Fashion_MNIST_NeuralNet, self).__init__()
        self.fMnistConv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.fMnistConv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)

        # adding fully connected layers
        self.fMnist_fc1 = nn.Linear(12*4*4, 120)
        self.fMnist_fc2 = nn.Linear(120, 84)
        self.fMnist_fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # applying max_pool2d to reduce the dimensionality of the maps
        # obtained from the convolutions; max pooling over a (2, 2) window
        print('Forward pass')
        x = F.max_pool2d(F.relu(self.fMnistConv1(x)), (2, 2))
        # if the size is a square you can specify only a single number
        x = F.max_pool2d(F.relu(self.fMnistConv2(x)), 2)
        print("===x===1", x.shape)
        x = x.view(-1, self.get_num_features(x))
        print("==View==", x.size())
        x = F.relu(self.fMnist_fc1(x))
        x = F.relu(self.fMnist_fc2(x))
        x = self.fMnist_fc3(x)
        return x

    def get_num_features(self, x):
        print('Number of Features found=====', x.shape)
        size = x.size()[1:]  # all dimensions except the batch dimension
        print("Size", size)
        num_features = 1
        for s in size:
            num_features *= s
        print("==Mult==", num_features)
        return num_features

fn = Fashion_MNIST_NeuralNet()
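
As a side note, a common idiom that avoids the get_num_features helper entirely is to keep the batch dimension explicit when flattening, a sketch:

x = x.view(x.size(0), -1)  # flatten everything except the batch dimension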





import torch
import torch.nn as nn

def fit_model(model, train_loader):
    # train_loader comes from the DataLoader created earlier...
    optimizer = torch.optim.Adam(model.parameters())
    '''
    Parameters :-
    conv1.weight  torch.Size([6, 1, 5, 5])
    conv1.bias    torch.Size([6])
    conv2.weight  torch.Size([12, 6, 5, 5])
    conv2.bias    torch.Size([12])
    fc1.weight    torch.Size([120, 192])
    fc1.bias      torch.Size([120])
    fc2.weight    torch.Size([84, 120])
    fc2.bias      torch.Size([84])
    fc3.weight    torch.Size([10, 84])
    fc3.bias      torch.Size([10])
    '''
    error = nn.CrossEntropyLoss()
    EPOCHS = 5
    model.train()

    for epoch in range(EPOCHS):

        print(epoch)
        correct = 0  # running count of correct predictions for this epoch

        for idx, (image, label) in enumerate(train_loader):

            # note: this feeds random noise of the right shape
            # instead of the actual images
            var_X_batch = torch.randn(image.shape)
            print(var_X_batch.shape)
            var_y_batch = label

            optimizer.zero_grad()
            # passing the batch of images to the network
            print('Debug 1')
            output = model(var_X_batch)
            print('Debug 2')
            loss = error(output, var_y_batch)
            print('Debug 3')
            # backpropagation and gradient step
            loss.backward()
            optimizer.step()

            predicted = torch.max(output.data, 1)[1]
            correct += (predicted == var_y_batch).sum()
            #print(correct)
            if idx % 50 == 0:
                print('Epoch : {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy:{:.3f}%'.format(
                    epoch, idx * len(var_X_batch), len(train_loader.dataset),
                    100. * idx / len(train_loader), loss.item(),
                    float(correct * 100) / float(len(var_X_batch) * (idx + 1))))

Please let me know what I am missing.

Thanks in advance!!

Your code seems to run fine on my machine using random inputs of 1x28x28.
Could you post the error message and the line of code where it is thrown?

PS: you can add code snippets using three backticks ``` :wink:
I've formatted your code so that I could easily copy it.
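
For reference, such a smoke test could look like this (a sketch, assuming the Fashion_MNIST_NeuralNet definition from the previous post):

import torch

fn = Fashion_MNIST_NeuralNet()
x = torch.randn(1, 1, 28, 28)  # Fashion-MNIST shape: 1 channel, 28x28
out = fn(x)
print(out.shape)  # torch.Size([1, 10])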

Mine was due to indentation; I mistakenly defined the forward function inside the __init__!

Actually, I ran into this in a similar manner; I found out the forward function is not there :sweat_smile:

class Model(nn.Module):
    def __init__(self, m, n=6, pre=True):
        super().__init__()

        self.enc = nn.Sequential(*list(m.children())[:-2])
        #nc = list(m.children())[-1].in_features
        self.head = nn.Sequential(AdaptiveConcatPool2d(), Flatten(),
                                  nn.Linear(2*1536, 512), Mish(),
                                  nn.BatchNorm1d(512), nn.Dropout(0.5),
                                  nn.Linear(512, n))

    def forward(self, x):
        x = self.enc(x)
        # x: bs*N x C x 4 x 4

        # concatenate the output for tiles into a single map
        x = self.head(x)
        # x: bs x n
        return x

@ptrblck
Could you please help me understand this?

md_ef = EfficientNet.from_pretrained('efficientnet-b3', num_classes=6)

I pass md_ef to the Model. Why do I get a NotImplementedError at self.enc?

Could you post the complete error message, please?

Hi @ptrblck, I guess @Jaideep_Valani is currently involved in the PANDA Kaggle competition. Just like him, I tried to implement a transfer learning algorithm for EfficientNetB0 (or B3 in his case).

My code can be found below; it is almost the same as the aforementioned but has some additional lines for completeness:

class Model(nn.Module):
    def __init__(self, n=6, pre=True):
        super().__init__()

        # Load model backbone
        model = EfficientNet.from_pretrained('efficientnet-b0')

        # runs through the pretrained efficientnet, remove BN and FC layer
        self.enc = nn.Sequential(*list(model.children())[:-2])
        print(self.enc)

        # Replace the network transfer learning part with my own:
        nc = list(model.children())[-1].in_features
        self.head = nn.Sequential(AdaptiveConcatPool2d(),
                                  Flatten(),
                                  nn.Linear(2*nc, 512),
                                  Mish(),
                                  nn.BatchNorm1d(512),
                                  nn.Dropout(0.5),
                                  nn.Linear(512, n))

    def forward(self, *x):
        # ...
        x = self.enc(x)
        # ...
        x = self.head(x)
        return x

Strangely, I get a NotImplementedError at x = self.enc(x).
You can find the error message here:

---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
    100                 xb, yb = cb_handler.on_batch_begin(xb, yb)
--> 101                 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
    102                 if cb_handler.on_batch_end(loss): break

/opt/conda/lib/python3.6/site-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
     25     if not is_listy(yb): yb = [yb]
---> 26     out = model(*xb)
     27     out = cb_handler.on_loss_begin(out)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():

<ipython-input-14-a64347871882> in forward(self, *x)
     29         
---> 30         x = self.enc(x)
     31 

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     99         for module in self:
--> 100             input = module(input)
    101         return input

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    531         else:
--> 532             result = self.forward(*input, **kwargs)
    533         for hook in self._forward_hooks.values():

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in forward(self, *input)
     95         """
---> 96         raise NotImplementedError
     97 

NotImplementedError: 

I guess it’s failing since the modules might not be easily wrapped inside an nn.Sequential container.
Running your code yields an error while creating the head module:

>  ModuleAttributeError: 'MemoryEfficientSwish' object has no attribute 'in_features'

which seems to indicate that the model.children() order might not reflect the actual execution order.
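
For instance, a quick sketch to inspect that order (assuming the efficientnet_pytorch package):

from efficientnet_pytorch import EfficientNet

model = EfficientNet.from_pretrained('efficientnet-b0')
print([type(m).__name__ for m in model.children()])
# e.g. ['Conv2dStaticSamePadding', 'BatchNorm2d', 'ModuleList', ...,
#       'Linear', 'MemoryEfficientSwish']
# The last child is the Swish activation, not the Linear layer, which
# explains the in_features error above. The ModuleList of blocks also
# has no forward() of its own, so calling it inside an nn.Sequential
# raises NotImplementedError.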

After removing the head module, I had to call the submodules of self.enc via:

    def forward(self, x):
        x = self.enc.extract_features(x)
        x = self.enc._avg_pooling(x)
        # x = self.head(x)      
        return x

Dear @ptrblck and @Jaideep_Valani,

Exactly, I was just about to comment that myself!
I guess the mistake lies in the forward function, as you mentioned.

The key here is to call self.enc.extract_features(x) in the forward function.
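
Putting it together, a minimal sketch of the working pattern (assuming the efficientnet_pytorch package; the head here is a plain Linear layer for brevity, not the custom head above):

import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet

class Model(nn.Module):
    def __init__(self, n=6):
        super().__init__()
        # keep the full pretrained model instead of wrapping
        # its children in an nn.Sequential
        self.enc = EfficientNet.from_pretrained('efficientnet-b0')
        self.head = nn.Linear(self.enc._fc.in_features, n)

    def forward(self, x):
        x = self.enc.extract_features(x)  # conv feature maps
        x = x.mean(dim=(2, 3))            # global average pooling
        return self.head(x)

out = Model()(torch.randn(2, 3, 224, 224))
print(out.shape)  # torch.Size([2, 6])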

Thank you a lot for your response!

Hi there, I had the same error, and the reason was also the forward pass. But in my case it was because I was writing def foward instead of def forward, and it took me hours to realize. Hopefully not many of you make the same mistake, but if you hit this error, check for it.