Error in forward() takes 2 positional arguments but 3 were given

Hi,
I am trying to train an object detection network which I took from Detectron2 and I loaded it in PyTorch. It has to detect apples. The problem is that when it gets to the forward() function, the following error appears:

train_one_epoch(py_model, optimizer, data_loader, device, epoch, args.print_freq )

File “/home/felipevw/MyAppleDetector/utility/engine.py”, line 30, in train_one_epoch
loss_dict = model(images, targets)
File “/home/felipevw/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py”, line 532, in call
result = self.forward(*input, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given

If someone could help me, I would be deeply grateful. My code is:


import datetime
import os
import time
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor


from data.apple_dataset import AppleDataset
from utility.engine import train_one_epoch, evaluate
import utility.utils as utils
import utility.transforms as T


from detectron2.utils.logger import setup_logger
setup_logger()



from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.modeling import build_model

import matplotlib.pyplot as plt


def get_transform(train):
    """Build the image transform pipeline.

    Every split is converted to a tensor; the training split additionally
    gets a random horizontal flip with probability 0.5 for augmentation.

    Args:
        train (bool): True for the training split, False for evaluation.

    Returns:
        A ``T.Compose`` wrapping the selected transforms.
    """
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)



def get_R50_model(num_classes, num_workers=None):
    """Build a Detectron2 Mask R-CNN R50-FPN model from the model zoo.

    Bug fixes versus the original:
    - ``num_classes`` was ignored and ``ROI_HEADS.NUM_CLASSES`` was
      hard-coded to 1, even though the caller passes 2; the parameter is
      now honored.
    - The config object was misleadingly named ``model``; renamed ``cfg``.

    Args:
        num_classes (int): number of ROI-head classes for the dataset.
        num_workers (int, optional): dataloader worker count. When None,
            falls back to the module-level ``args.workers`` to preserve the
            original (global-reading) behavior.

    Returns:
        The model produced by ``detectron2.modeling.build_model``.
    """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    if num_workers is None:
        # Preserves the original behavior of reading the global `args`.
        num_workers = args.workers
    cfg.DATALOADER.NUM_WORKERS = num_workers
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes

    return build_model(cfg)
def get_R101_model(num_classes):
    """Placeholder for an R101-FPN model builder (not yet implemented).

    The original stub silently returned the integer ``1``, which the caller
    would then try to use as a model (e.g. ``.to(device)``) and crash with a
    confusing error far from the cause. Fail loudly and immediately instead.

    Args:
        num_classes (int): number of ROI-head classes (unused until implemented).

    Raises:
        NotImplementedError: always, until this builder is written.
    """
    raise NotImplementedError(
        "R101-FPN model builder is not implemented yet; use 'R50_fpn'.")



def main(args):
    """Train an apple-detection model and evaluate it after every epoch.

    Expects ``args`` to provide: device, data_path, batch_size, workers,
    model, lr, momentum, weight_decay, lr_steps, lr_gamma, resume, epochs,
    output_dir, print_freq.

    Bug fixes versus the original:
    - The non-R50 branch assigned the model to ``model`` while every later
      line used ``py_model``, causing a NameError; one name is used now.
    - Checkpoint resume and per-epoch saving referenced the undefined
      ``model`` name; both now use ``py_model``.
    """
    print(args)
    device = args.device

    # Data loading: train/test splits live in subdirectories of data_path.
    print("Loading data")
    num_classes = 2
    dataset = AppleDataset(os.path.join(args.data_path, 'train'),
                           get_transform(train=True))
    dataset_test = AppleDataset(os.path.join(args.data_path, 'test'),
                                get_transform(train=False))

    print("Creating data loaders")
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False,
        num_workers=args.workers, collate_fn=utils.collate_fn)

    print("Creating model")
    # BUG FIX: the else-branch previously bound the model to `model`,
    # leaving `py_model` undefined for non-R50 runs.
    if args.model == 'R50_fpn':
        py_model = get_R50_model(num_classes)
    else:
        py_model = get_R101_model(num_classes)

    # Move model to the right device.
    py_model.to(device)
    print(py_model)

    # Optimize only parameters that require gradients.
    params = [p for p in py_model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        # BUG FIX: was `model.load_state_dict(...)` — undefined name.
        py_model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])

    print("Start training")
    start_time = time.time()
    for epoch in range(args.epochs):
        # NOTE(review): a model built by detectron2's build_model() expects a
        # single batched-inputs argument (a list of dicts with "image" and
        # "instances" keys), not the torchvision-style model(images, targets)
        # call that utility.engine.train_one_epoch makes — this looks like
        # the source of the "forward() takes 2 positional arguments but 3
        # were given" error; confirm against utility/engine.py.
        train_one_epoch(py_model, optimizer, data_loader, device, epoch,
                        args.print_freq)
        lr_scheduler.step()

        if args.output_dir:
            # BUG FIX: was saving `model.state_dict()` — undefined name.
            torch.save(py_model.state_dict(),
                       os.path.join(args.output_dir,
                                    'model_{}.pth'.format(epoch)))

        # Evaluate after every epoch.
        evaluate(py_model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
>

Hi,

The problem is that you give too many arguments when you call your model with loss_dict = model(images, targets). I guess it does not expect the targets, and the call should be loss_dict = model(images).

If I leave loss_dict = model(images) it gets an error too. I did not change anything in the function of train_one_epoch.
I am stuck on fixing this part, but have no clue on what to check.

I guess then that the original Model was expecting the images and targets and was computing the full loss. So you should make sure your model does the same?

1 Like