Cannot get consistent results with PointNet++

Hi guys,

I am trying to use PointNet++ for a regression task (the network predicts a single continuous value).
The problem is that I cannot get the same result when I compute the test loss: a for loop that computes the test loss
(MSE) 5 times gives [0.35994256, 0.33167917, 0.35866088, 0.36784896, 0.29914567].
The random seed is fixed, yet when I plot the predictions they come out different on every run!
Below is the code for computing the test loss. It is adapted from the GitHub repo yanx27/Pointnet_Pointnet2_pytorch (PointNet and PointNet++ implemented in PyTorch, with experiments on ModelNet, ShapeNet and S3DIS).
I would appreciate any comments; I have been stuck on this issue for nearly a month.
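
For context, the seed is fixed roughly like this (a minimal sketch of the usual calls; my actual script may differ slightly, and SEED is just a placeholder):

import random
import numpy as np
import torch

SEED = 0  # placeholder value
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
# cuDNN flags usually recommended for determinism:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False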

# Imports used by this script; skMSE is my alias for sklearn's mean_squared_error.
import importlib
import logging
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.metrics import mean_squared_error as skMSE
from sklearn.metrics import r2_score
from tqdm import tqdm

# parse_args and LASDataLoader come from the rest of my project and are omitted here.


def test(model, loader):
    # Force every BatchNorm1d layer to drop its running statistics so it
    # normalizes with the statistics of the current batch instead.
    for m in model.modules():
        for child in m.children():
            if type(child) == torch.nn.BatchNorm1d:
                child.track_running_stats = False
                child.running_mean = None
                child.running_var = None
    classifier = model.eval()
    # classifier = model.train()
    criterion = torch.nn.MSELoss()
    n = 0
    torch_loss = 0
    res = []
    pred_all = []
    target_all = []
    with torch.no_grad():
        for j, (points, target) in tqdm(enumerate(loader), total=len(loader)):
            if not args.use_cpu:
                points, target = points.cuda(), target.cuda()
            points = points.transpose(2, 1).to(torch.float)
            pred, _ = classifier(points)

            # Running sum of squared errors, weighted by batch size; n counts samples.
            MSEloss = criterion(pred, target)
            torch_loss = torch_loss + MSEloss.to('cpu').numpy() * len(target)
            n += len(target)

            # Collect per-sample predictions, targets and residuals for the metrics and the plot.
            pred2 = pred.to('cpu').numpy().flatten()
            target2 = target.to('cpu').numpy()
            for i in range(len(pred2)):
                pred_all.append(pred2[i])
                target_all.append(target2[i])
                res.append(pred2[i] - target2[i])
        res = np.array(res).flatten()
        loss = skMSE(target_all, pred_all)
        plt.scatter(target_all, pred_all)
        plt.savefig('scatter_train.png')
        R2 = r2_score(target_all, pred_all)
        return loss, R2


def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    experiment_dir = 'log/classification/' + args.log_dir

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    '''DATA LOADING'''
    log_string('Load dataset ...')
    data_path = 'remember to change this'

    test_dataset = LASDataLoader(root=data_path, split='test', preprocessed=True)
    testDataLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10)

    '''MODEL LOADING'''
    num_class = 1
    # The model module name is recovered from the file saved in the training log directory.
    model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0]
    model = importlib.import_module(model_name)
    classifier = model.get_model(num_class, normal_channel=False)
    pytorch_total_params = sum(p.numel() for p in classifier.parameters())
    # print('total:', pytorch_total_params)
    pytorch_total_params = sum(p.numel() for p in classifier.parameters() if p.requires_grad)
    print('total trainable:', pytorch_total_params)
    # quit()
    
    if not args.use_cpu:
        classifier = classifier.cuda()
    checkpoint = torch.load(str(experiment_dir) + '/checkpoints/CV/best_model-fold-1.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])

    with torch.no_grad():
        test_loss,test_R2 = test(classifier, testDataLoader)
        log_string('Test mean squared error: %f' % (test_loss))
    return test_loss, test_R2

if __name__ == '__main__':
    args = parse_args()
    loss_list = []
    R2_list = []
    # Evaluate the same checkpoint on the same test set 5 times.
    for i in range(5):
        loss, R2 = main(args)
        loss_list.append(loss)
        R2_list.append(R2)
    print(loss_list)
    print(R2_list)
    print('avg loss:', np.mean(loss_list))
    print('avg_R**2:', np.mean(R2_list))
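
In case it matters, my understanding of the BatchNorm1d tweak at the top of test() is that, once track_running_stats is False and the running statistics are set to None, the layer normalizes with per-batch statistics even in eval mode. A small standalone check (not part of my script) that illustrates this:

import torch

torch.manual_seed(0)
bn = torch.nn.BatchNorm1d(4, track_running_stats=False)  # running_mean/var are None
bn.eval()  # batch statistics are still used because no running statistics exist

x = torch.randn(8, 4)
out_full = bn(x)       # rows normalized with the mean/var of all 8 samples
out_half = bn(x[:4])   # the same rows normalized with the mean/var of only 4 samples
print(torch.allclose(out_full[:4], out_half))  # False: the output depends on the batch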

I am facing the same problem with the same source code.
Did you solve it?