Forward not implemented error

Hi experts,
Please help me with the error below.

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py in forward(self, input)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py in forward(self, input)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)

Model details: the model is a sequence of two modules.

  1. EfficientNet
    Sequential(
    (0): Sequential(
    (0): Conv2dStaticSamePadding(
    3, 48, kernel_size=(3, 3), stride=(2, 2), bias=False
    (static_padding): ZeroPad2d(padding=(0, 1, 0, 1), value=0.0)
    )
    (1): BatchNorm2d(48, eps=0.001, momentum=0.010000000000000009, affine=True, track_running_stats=True)
    (2): ModuleList(
    (0): MBConvBlock(
    (_depthwise_conv): Conv2dStaticSamePadding(
    48, 48, kernel_size=(3, 3), stride=[1, 1], groups=48, bias=False
    (static_padding): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
    )
    (_bn1): BatchNorm2d(48, eps=0.001, momentum=0.010000000000000009, affine=True, track_running_stats=True)
    (_se_reduce): Conv2dStaticSamePadding(
    48, 12, kernel_size=(1, 1), stride=(1, 1)
    (static_padding): Identity()
    )
    (_se_expand): Conv2dStaticSamePadding(
    12, 48, kernel_size=(1, 1), stride=(1, 1)
    (static_padding): Identity()
    )
    (_project_conv): Conv2dStaticSamePadding(
    48, 24, kernel_size=(1, 1), stride=(1, 1), bias=False
    (static_padding): Identity()
    )
    (_bn2): BatchNorm2d(24, eps=0.001, momentum=0.010000000000000009, affine=True, track_running_stats=True)
    )
    (1): MBConvBlock(
    (_depthwise_conv): Conv2dStaticSamePadding(
    24, 24, kernel_size=(3, 3), stride=(1, 1), groups=24, bias=False
    (static_padding): ZeroPad2d(padding=(1, 1, 1, 1), value=0.0)
    )
    (_bn1): BatchNorm2d(24, eps=0.001, momentum=0.010000000000000009, affine=True, track_running_stats=True)
    (_se_reduce): Conv2dStaticSamePadding(
    24, 6, kernel_size=(1, 1), stride=(1, 1)
    (static_padding): Identity()
    )
    (_se_expand): Conv2dStaticSamePadding(
    6, 24, kernel_size=(1, 1), stride=(1, 1)
    (static_padding): Identity()
    )
    (_project_conv): Conv2dStaticSamePadding(
    24, 24, kernel_size=(1, 1), stride=(1, 1), bias=False
    (static_padding): Identity()
    ) … and so on…

  2. Customheadeff(
    (head): Sequential(
    (0): AdaptiveConcatPool2d(
    (ap): AdaptiveAvgPool2d(output_size=1)
    (mp): AdaptiveMaxPool2d(output_size=1)
    )
    (1): Flatten()
    (2): BatchNorm1d(4096, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (3): Dropout(p=0.25)
    (4): Linear(in_features=4096, out_features=2048, bias=True)
    (5): LeakyReLU(negative_slope=0.01, inplace)
    (6): BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (7): Dropout(p=0.5)
    (8): Linear(in_features=2048, out_features=1024, bias=True)
    )
    (arc_margin): ArcMarginProduct()
    )

How did you define the current model?
It seems as if you just stacked some modules in an nn.Sequential block.
Note that this might work for very simple models, but can fail for others.
E.g. if the original forward method does more than just call all submodules sequentially (activations, skip connections, etc.), it won't work out of the box. A minimal example of this failure mode is sketched below.
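For instance, here is a minimal sketch (not your exact model, just the failure mode): an nn.ModuleList is only a container and does not define forward, so calling it inside an nn.Sequential raises exactly this NotImplementedError.

import torch
import torch.nn as nn

# nn.ModuleList registers submodules but has no forward() of its own
blocks = nn.ModuleList([nn.Conv2d(3, 3, 3, padding=1) for _ in range(2)])
seq = nn.Sequential(nn.Conv2d(3, 3, 3, padding=1), blocks)

x = torch.randn(1, 3, 224, 224)
out = seq(x)  # raises NotImplementedError when Sequential calls the ModuleList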

OK, this is how the models are stacked up:
Sequential(
(0): Sequential(
(0): EfficientNet(
(_conv_stem): Conv2dStaticSamePadding(
3, 48, kernel_size=(3, 3), stride=(2, 2), bias=False
(static_padding): ZeroPad2d(padding=(0, 1, 0, 1), value=0.0)
)
(_bn0): BatchNorm2d(48, eps=0.001, momentum=0.010000000000000009, affine=True, track_running_stats=True)
(_blocks): ModuleList(
… and so on
(1): Customheadeff(
(head): Sequential(
(0): AdaptiveConcatPool2d(
(ap): AdaptiveAvgPool2d(output_size=1)
(mp): AdaptiveMaxPool2d(output_size=1)
)
(1): Flatten()
(2): BatchNorm1d(4096, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): Dropout(p=0.25)
(4): Linear(in_features=4096, out_features=2048, bias=True)
(5): ReLU(inplace)
(6): BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(7): Dropout(p=0.5)
(8): Linear(in_features=2048, out_features=1024, bias=True)
)
(arc_margin): ArcMarginProduct()
)
)

This should provide you with sufficient info to help debug this issue. Input size: (1, 3, 224, 224).

Module class:
!pip install efficientnet_pytorch
from efficientnet_pytorch import EfficientNet
model = EfficientNet.from_pretrained('efficientnet-b5', num_classes=5)

import torch
import torch.nn as nn

class efficient(nn.Module):

    def __init__(self, m):
        super().__init__()

        # backbone: stem, blocks and head conv taken from the pretrained model
        l = nn.Sequential()
        l.add_module('conv_stem', m._conv_stem)
        l.add_module('_bn0', m._bn0)
        l.add_module('_blocks', m._blocks)       # m._blocks is an nn.ModuleList
        l.add_module('_conv_head', m._conv_head)
        self.add_module('feature', l)

        # classifier part
        fc = nn.Sequential()
        fc.add_module('_bn1', m._bn1)
        fc.add_module('_fc', m._fc)
        self.add_module('fc', fc)

    def forward(self, x):
        x1 = self.feature(x)
        x1 = self.fc(x1)
        return x1

x = torch.randn(1, 3, 224, 224)   # input size mentioned above
m = efficient(model)
m(x)

This gives the NotImplementedError:
result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in forward(self, *input)
86 registered hooks while the latter silently ignores them.
87 """
---> 88 raise NotImplementedError
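The last frame shows where the error comes from: feature._blocks is an nn.ModuleList, which is only a container and has no forward, so the inner nn.Sequential fails as soon as it reaches it. The original EfficientNet.forward also applies swish activations and drop-connect between the blocks, which a plain Sequential would skip even if the call succeeded. One possible fix, sketched below under the assumption that the efficientnet_pytorch model exposes extract_features (it does in recent versions of the library), is to keep the pretrained model intact, let it run its own feature extraction, and attach only the custom head on top. The wrapper name EfficientWithHead is made up for illustration:

import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet

class EfficientWithHead(nn.Module):
    # Sketch: reuse the pretrained model's own feature extraction instead of
    # re-assembling its submodules inside an nn.Sequential.
    def __init__(self, backbone, head):
        super().__init__()
        self.backbone = backbone   # pretrained EfficientNet
        self.head = head           # e.g. your Customheadeff module

    def forward(self, x):
        feats = self.backbone.extract_features(x)  # stem -> blocks -> conv head
        return self.head(feats)

model = EfficientNet.from_pretrained('efficientnet-b5', num_classes=5)
custom_head = ...                       # your Customheadeff instance
net = EfficientWithHead(model, custom_head)
out = net(torch.randn(1, 3, 224, 224))

For efficientnet-b5, extract_features should return a tensor of shape (N, 2048, H, W), which matches the 4096 input features your head expects after AdaptiveConcatPool2d and Flatten.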