NotImplementedError

I am getting a NotImplementedError, but I don't see any problem with my forward method. Please help me figure this out.

import torch
import torch.nn as nn
import torch.nn.functional as F


class Discriminator(nn.Module):

    def __init__(self, num_labels=1):
        super().__init__()

        # one conv block per resolution stage; the last block takes 513 channels
        # because the minibatch std-dev map is concatenated in forward()
        self.progression = nn.ModuleList([
            conv_block(128, 256, 3, 1),
            conv_block(256, 512, 3, 1),
            conv_block(512, 512, 3, 1),
            conv_block(512, 512, 3, 1),
            conv_block(512, 512, 3, 1),
            conv_block(513, 512, 3, 1, 4, 0)
        ])

        # 1x1 convolutions that project the RGB input into each stage
        self.from_RGB = nn.ModuleList([
            nn.Conv2d(3, 128, 1),
            nn.Conv2d(3, 256, 1),
            nn.Conv2d(3, 512, 1),
            nn.Conv2d(3, 512, 1),
            nn.Conv2d(3, 512, 1),
            nn.Conv2d(3, 512, 1)
        ])

        self.linear = nn.Linear(512, 1 + num_labels)

        self.num_layers = len(self.progression)
    def forward(self, input, step=0, alpha=-1):

        # walk the stages from the current resolution (step) down to 4x4
        for i in range(step, -1, -1):

            index = self.num_layers - i - 1

            if i == step:
                out = self.from_RGB[index](input)

            if i == 0:
                # minibatch standard-deviation feature for the final 4x4 block
                mean_standard_deviation = input.std(0).mean()
                print("Look at this ", mean_standard_deviation.size())

                mean_standard_deviation = mean_standard_deviation.expand(16, 1, 4, 4)

                out = torch.cat([out, mean_standard_deviation], 1)

            out = self.progression[index](out)  # Convolution jo lagai ha (apply the conv block for this stage)

            if i > 0:
                out = F.avg_pool2d(out, 2)

                if i == step and 0 <= alpha < 1:
                    # fade-in: blend the new stage with the down-sampled RGB skip path
                    down_sampled_image = F.avg_pool2d(input, 2)
                    skip_rgb = self.from_RGB[index + 1](down_sampled_image)
                    out = (1 - alpha) * skip_rgb + alpha * out

        out = out.squeeze(2).squeeze(2)
        out = self.linear(out)

        return out[:, 0], out[:, 1]

Here is the training part:

from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, utils

loss = nn.CrossEntropyLoss()

# Optimizers
g_optim = optim.Adam(Generator.parameters(), lr=0.001, betas=(0.0, 0.99))
d_optim = optim.Adam(Discriminator.parameters(), lr=0.001, betas=(0.0, 0.99))

def requires_gradient(model, value = True):
    for p in model.parameters():
        p.requires_grad = value



def loader(transform):
    data = datasets.CIFAR10(
        "/home/abdullah/Documents/New Folder/GANs/PGGANs/", transform=transform,
        target_transform=lambda x: 0,download = True)
    data_loader = DataLoader(data, shuffle=False, batch_size=16,
                                 num_workers=4)

    return data_loader



def sample_data(dataloader, image_size=4):
    transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    loader = dataloader(transform)

    # cycle over the dataset indefinitely so next() never raises StopIteration
    while True:
        for img, label in loader:
            yield img, label


def train():
    step = 0
    alpha = 0
    dataset = sample_data(loader, 4*2**step)
    
    requires_gradient(Generator, False)
    requires_gradient(Discriminator, True)
    
    disc_loss = 0
    grad_loss = 0
    one = torch.FloatTensor([1]).cuda()
    mone = one*-1
    
    for iteration in range(60000):
        
        Discriminator.zero_grad()
        
        alpha = min(1, 0.002*iteration)
        
        if (iteration +1 )% 10000 == 0:
            step +=1 
            alpha = 0
            dataset = sample_data(loader, 4 * 2 ** step)
            if step > 5:
                alpha = 1
                step = 5
        
        real_image, label = next(dataset)
        print("Size of real images is ",real_image.size())
        # Train Discriminator
        
        batch_Size = real_image.size(0)
        real_image = Variable(real_image.cuda())
        label = Variable(label.cuda())
        print("size of real image is ",real_image.size())
        real_predict, real_label = Discriminator(real_image, step, alpha)
        
        real_predict = real_predict.mean() \
        - 0.001 * (real_predict ** 2).mean()
        real_predict.backward(mone)
        
        fake_images = Generator(Variable(torch.randn(batch_Size, noise_dim).cuda()), real_label, step, alpha)
        
        fake_decision, fake_label = Discriminator(fake_images,step,alpha)
        
        fake_decision = fake_decision.mean()
        
        fake_decision.backward(one)
        
        d_optim.step()
        
        if (iteration+1) % n_critic == 1:
            Generator.zero_grad()
            
            requires_gradient(Generator, True)
            requires_gradient(Discriminator, False)
            
            input_class = Variable(
                torch.multinomial(
                    torch.ones(num_label), batch_Size, replacement=True)).cuda()
        
        
            fake_image  = Generator(Variable(torch.randn(batch_Size, noise_dim).cuda()), input_class, step, alpha)
            
            disc_pred, class_pre = Discriminator(fake_image, step, alpha)
            
            loss = -disc_pred.mean()
            
            loss.backward(mone)
            
            g_optim.step()
            
            requires_gradient(Generator, False)
            requires_gradient(Discriminator, True)
        
        print("Generator loss is {} and discriminator loss is {} ".format(loss.data[0],fake_decision.data[0]))
        if (iteration + 1) % 100 == 0:
            images = []

            input_class = Variable(torch.multinomial(
                torch.ones(num_label), batch_Size, replacement=True)).cuda()
            fake_image = Generator(Variable(torch.randn(batch_Size, noise_dim).cuda()), input_class, step, alpha)
            images.append(fake_image.data.cpu())
            utils.save_image(torch.cat(images, 0), f'sample/{str(iteration + 1).zfill(6)}.png',
                             nrow=num_label * 10, normalize=True, range=(-1, 1))
        
        if (iteration + 1) % 10000 == 0:
            torch.save(Generator, f'checkpoint/{str(iteration + 1).zfill(6)}.model')
        
        
train()

And here is the error:

NotImplementedError                       Traceback (most recent call last)
<ipython-input-18-05b375cbbab0> in <module>()
    125 
    126 
--> 127 train()

<ipython-input-18-05b375cbbab0> in train()
     73         label = Variable(label.cuda())
     74         print("size of real image is ",real_image.size())
---> 75         real_predict, real_label = Discriminator(real_image, step, alpha)
     76 
     77         real_predict = real_predict.mean()         - 0.001 * (real_predict ** 2).mean()

~/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    489             result = self._slow_forward(*input, **kwargs)
    490         else:
--> 491             result = self.forward(*input, **kwargs)
    492         for hook in self._forward_hooks.values():
    493             hook_result = hook(self, input, result)

<ipython-input-15-96b17c672419> in forward(self, input, step, alpha)
     47                 out = torch.cat([out,mean_standard_deviation],1)
     48 
---> 49             out = self.progression[index](out) # Convolution jo lagai ha
     50 
     51             if i > 0:

~/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    489             result = self._slow_forward(*input, **kwargs)
    490         else:
--> 491             result = self.forward(*input, **kwargs)
    492         for hook in self._forward_hooks.values():
    493             hook_result = hook(self, input, result)

~/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in forward(self, *input)
     81             registered hooks while the latter silently ignores them.
     82         """
---> 83         raise NotImplementedError
     84 
     85     def register_buffer(self, name, tensor):

NotImplementedError: 

Could you post the source code of conv_block?
You might have a typo there, e.g. forwrad instead of forward.
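
For anyone else who lands here: nn.Module.__call__ dispatches to self.forward, so if the subclass never actually defines a method named forward (for example because of a typo), the call falls through to the base-class stub, which raises NotImplementedError. A minimal sketch of that failure mode (Broken is a hypothetical example class, not from the code above):

import torch
import torch.nn as nn

class Broken(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 2)

    def forwrad(self, x):  # typo: nn.Module.forward is never overridden
        return self.linear(x)

Broken()(torch.randn(1, 4))  # raises NotImplementedError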


Ohh thanks, man, forward was indented within the __init__ block.
Thanks!
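
For reference, this is what that indentation bug looks like: when forward is indented one level too deep it becomes a local function inside __init__ instead of a method on the class, so calling the module still hits nn.Module's stub. A minimal sketch, using hypothetical Wrong/Right classes rather than the actual Discriminator:

import torch
import torch.nn as nn

class Wrong(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 2)

        def forward(self, x):  # nested inside __init__, never registered as a method
            return self.linear(x)

class Right(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 2)

    def forward(self, x):  # defined at class level, overrides nn.Module.forward
        return self.linear(x)

Wrong()(torch.randn(1, 4)) raises the same NotImplementedError, while Right()(torch.randn(1, 4)) works as expected.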

In case this helps anyone: this error is also raised if your forward function takes more than one input.