PyTorch NotImplementedError in forward

I am receiving a NotImplementedError raised from forward_unimplemented when running the code below:

class Start_RN50(torch.nn.Module):
    def __init__(self, rn50):
    super(Start_RN50, self).__init__()
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool)

    def forward(self, x):
        x = self.start(x)
        return x

class Middle_RN50(torch.nn.Module):
    def __init__(self, model):
    super(Middle_RN50, self).__init__()
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool,rn50.layer1, rn50.layer2,rn50.layer3, rn50.layer4)

    def forward(self, x):
        x = self.start(x)
        return x

class End_RN50(torch.nn.Module):
    def __init__(self, rn50):
    super(End_RN50, self).__init__()
        self.avgpool = rn50.avgpool
        self.fc = rn50.fc

      
    def forward(self, x):
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

This error is usually raised if the forward method isn't defined, which can be the case if you have a typo or wrong indentation in the code.
Your code is unfortunately not formatted properly (you can add code snippets by wrapping them into three backticks ```), so I don't know what exactly is failing.
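For reference, calling any nn.Module whose forward was never defined raises exactly this error; here is a tiny, self-contained sketch (the NoForward name is just made up for the example):

import torch

class NoForward(torch.nn.Module):
    def __init__(self):
        super(NoForward, self).__init__()
        self.fc = torch.nn.Linear(4, 4)
    # forward is intentionally not defined

m = NoForward()
m(torch.randn(1, 4))  # raises NotImplementedError from forward_unimplemented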

Thank you! I have properly formatted the code snippet.

Are your super().__init__() lines indented properly? Perhaps you’re not initializing the parent class and hence forward will be undefined!

I can execute Start_RN50 without issue. It is only the Middle_RN50 forward call that raises the NotImplementedError.

Yes, there's no problem with creating the class, but if your super() isn't indented it will be a plain object and not an nn.Module object. Is your class like this,

class Start_RN50(torch.nn.Module):
    def __init__(self, rn50):
    super(Start_RN50, self).__init__()
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool)

    def forward(self, x):
        x = self.start(x)
        return x

OR

class Start_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(Start_RN50, self).__init__() #<<<<<< this line 
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool)

    def forward(self, x):
        x = self.start(x)
        return x

(notice the indent on the super)

Yes, my apologies, the super(Start_RN50, self).__init__() is indented inside the constructor; the problem still persists.

def calibrate_end(model, cal_loader):
    rn50_middle = Middle_RN50(model)
    rn50_middle.eval()

    rn50_end = End_RN50(model)
    rn50_end.eval()

    for i, (images, target) in enumerate(cal_loader):
        temp = rn50_middle(images)
        output = rn50_end(temp)

The NotImplementedError stems from this forward pass.

This module works properly:

import torch
from torchvision import models

class Middle_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(Middle_RN50, self).__init__()
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool,rn50.layer1, rn50.layer2,rn50.layer3, rn50.layer4)

    def forward(self, x):
        x = self.start(x)
        return x
    
model = Middle_RN50(models.resnet50())
x = torch.randn(1, 3, 224, 224)
out = model(x)
print(out.shape)
# torch.Size([1, 2048, 7, 7])

I had to replace the unused model input argument with rn50, as this parameter was missing.
Given that, I guess you are posting modified code here (executing it as posted would yield an error complaining about an undefined variable), which might not represent your actual error.
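As a small sketch of what I mean (with the indentation fixed as you confirmed, but keeping the unused model argument from your post, and assuming no global rn50 happens to exist):

import torch
from torchvision import models

class Middle_RN50(torch.nn.Module):
    def __init__(self, model):  # parameter is named model ...
        super(Middle_RN50, self).__init__()
        # ... but rn50 is used here and is undefined, so this line raises
        # NameError: name 'rn50' is not defined
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool)

    def forward(self, x):
        return self.start(x)

Middle_RN50(models.resnet50())  # NameError, not NotImplementedError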

For some reason, I am still receiving the NotImplementedError, so I have posted the complete source. Thanks so much in advance.

import torch
from torchvision import datasets, models, transforms

class Middle_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(Middle_RN50, self).__init__()
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool,rn50.layer1, rn50.layer2,rn50.layer3, rn50.layer4)


    def forward(self, x):
        x = self.start(x)
        return x

class End_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(End_RN50, self).__init__()
        self.avgpool = rn50.avgpool
        self.fc = rn50.fc

    def forward(self, x):
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x


def calibrate_end(model, cal_loader):
    rn50_middle = Middle_RN50(model)
    rn50_middle.eval()

    rn50_end = End_RN50(model)
    rn50_end.eval()

    for i, (images, target) in enumerate(cal_loader):
        temp = rn50_middle(images)
        output = rn50_end(temp)

model = models.__dict__['resnet50'](pretrained=False)

state_dict = torch.load('resnet50-19c8e357.pth', map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict)
model.eval()

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

cal_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder("calibration_dataset/", transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=1, shuffle=False,
    num_workers=4, pin_memory=True)



calibrate_end(model, cal_loader)

I have shared the complete source code. Thanks in advance.

Your code still works after removing the undefined parts:

import torch
from torchvision import models

class Middle_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(Middle_RN50, self).__init__()
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool,rn50.layer1, rn50.layer2,rn50.layer3, rn50.layer4)

    def forward(self, x):
        x = self.start(x)
        return x

class End_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(End_RN50, self).__init__()
        self.avgpool = rn50.avgpool
        self.fc = rn50.fc

    def forward(self, x):
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

def calibrate_end(model):
    rn50_middle = Middle_RN50(model)
    rn50_middle.eval()
    rn50_end = End_RN50(model)
    rn50_end.eval()

    images = torch.randn(2, 3, 224, 224)
    temp = rn50_middle(images)
    output = rn50_end(temp)
    return output

model = models.__dict__['resnet50'](pretrained=False)
model.eval()
output = calibrate_end(model)
print(output.shape)
# torch.Size([2, 1000])

Generally, a minimal and executable code snippet can simply be copy-pasted into another environment, executed, and should reproduce the issue.
Your current code snippets still use undefined checkpoints, datasets, etc., and after removing them the code works fine.

Thank you so much! I realized that trying to fuse the layers is what was causing the NotImplementedError:

import torch
import torch.fx.experimental.optimization as optimization
from torchvision import models
class Middle_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(Middle_RN50, self).__init__()
        with torch.no_grad():
            rn50 = optimization.fuse(rn50, inplace=False) # ADDED
        self.start = torch.nn.Sequential(rn50.conv1, rn50.relu, rn50.maxpool,rn50.layer1, rn50.layer2,rn50.layer3, rn50.layer4)

    def forward(self, x):
        x = self.start(x)
        return x

class End_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(End_RN50, self).__init__()
        self.avgpool = rn50.avgpool
        self.fc = rn50.fc

    def forward(self, x):
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

def calibrate_end(model):
    rn50_middle = Middle_RN50(model)
    rn50_middle.eval()
    rn50_end = End_RN50(model)
    rn50_end.eval()

    images = torch.randn(2, 3, 224, 224)
    temp = rn50_middle(images)
    output = rn50_end(temp)
    return output

model = models.__dict__['resnet50'](pretrained=False)
model.eval()
output = calibrate_end(model)
print(output.shape)
# torch.Size([2, 1000])

Thanks for the update.
I would guess your use case fails since you are first fusing the entire model and are later trying to rip out smaller modules to create self.start.
Creating the nn.Sequential container with raw and fused modules should work:

        self.start = torch.nn.Sequential(
            rn50.conv1,
            rn50.relu,
            rn50.maxpool,
            optimization.fuse(rn50.layer1),
            optimization.fuse(rn50.layer2),
            optimization.fuse(rn50.layer3),
            optimization.fuse(rn50.layer4)
        )
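
Putting it together, a minimal sketch of the middle module built this way (assuming optimization.fuse can trace each residual stage on its own, and keeping the stem modules raw as in your snippets):

import torch
import torch.fx.experimental.optimization as optimization
from torchvision import models

class Middle_RN50(torch.nn.Module):
    def __init__(self, rn50):
        super(Middle_RN50, self).__init__()
        # keep the stem raw and fuse only the residual stages
        self.start = torch.nn.Sequential(
            rn50.conv1,
            rn50.relu,
            rn50.maxpool,
            optimization.fuse(rn50.layer1),
            optimization.fuse(rn50.layer2),
            optimization.fuse(rn50.layer3),
            optimization.fuse(rn50.layer4)
        )

    def forward(self, x):
        return self.start(x)

rn50 = models.resnet50(pretrained=False)
rn50.eval()  # fuse conv+bn using the running stats in eval mode
middle = Middle_RN50(rn50)
middle.eval()
out = middle(torch.randn(1, 3, 224, 224))
print(out.shape)
# torch.Size([1, 2048, 7, 7])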