CPUAllocator.cpp:76] data. DefaultCPUAllocator: not enough memory

I am using d2go.
When I convert a model trained with mask_rcnn_fbnetv3g_fpn, I get the following error:
CPUAllocator.cpp:76] data. DefaultCPUAllocator: not enough memory: you tried to allocate xxxx

My conversion code is:

# Imports as used in the d2go export example; exact module paths may differ between d2go versions.
import contextlib
import copy
import os

import torch

from d2go.export.api import convert_and_export_predictor
from d2go.export.d2_meta_arch import patch_d2_meta_arch
from d2go.model_zoo import model_zoo
from d2go.runner import GeneralizedRCNNRunner, create_runner
# make_temp_directory, LocalImageGenerator and _register_toy_dataset are assumed to come from
# the d2go / mobile_cv testing utilities (d2go.utils.testing.data_loader_helper and
# mobile_cv.common.misc.file_utils); adjust to your installation.

patch_d2_meta_arch()

@contextlib.contextmanager
def create_fake_detection_data_loader(height, width, is_train):
    with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:  # create a temporary directory: dataset_dir = detectron2go_tmp_dataset
        runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
        cfg = runner.get_default_cfg()
        cfg.DATASETS.TRAIN = ["default_dataset_train"]
        cfg.DATASETS.TEST = ["default_dataset_test"]

        with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:  # create the same directory again?
            image_dir = os.path.join(dataset_dir, "images")
            os.makedirs(image_dir)
            image_generator = LocalImageGenerator(image_dir, width=width, height=height)

            if is_train:
                with _register_toy_dataset(
                    "default_dataset_train", image_generator, num_images=3
                ):
                    train_loader = runner.build_detection_train_loader(cfg)
                    yield train_loader
            else:
                with _register_toy_dataset(
                    "default_dataset_test", image_generator, num_images=3
                ):
                    test_loader = runner.build_detection_test_loader(
                        cfg, dataset_name="default_dataset_test"
                    )
                    yield test_loader

def test_export_torchvision_format():
    runner = GeneralizedRCNNRunner()
    cfg = runner.get_default_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("mask_rcnn_fbnetv3a_dsmask_C4.yaml"))
    cfg.MODEL.WEIGHTS = os.path.join("./output_mask_rcnn_fbnetv3a_dsmask_C4_20211225", "model_0009999.pth")
    cfg.MODEL_EMA.ENABLED = False
    cfg.MODEL.DEVICE = "cpu"
    cfg.DATASETS.TRAIN = ("infusion_train",)
    cfg.DATASETS.TEST = ("infusion_val",)
    cfg.DATALOADER.NUM_WORKERS = 1

    #cfg.INPUT.MAX_SIZE_TEST = 1920
    #cfg.INPUT.MAX_SIZE_TRAIN = 1920
    #cfg.INPUT.MIN_SIZE_TEST = 1920
    #cfg.INPUT.MIN_SIZE_TRAIN = (1920,)
    cfg.SOLVER.IMS_PER_BATCH = 1
    cfg.SOLVER.STEPS = []  # do not decay learning rate
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 1
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 25

    pytorch_model = runner.build_model(cfg, eval_only=True)
    pytorch_model.cpu()
    #pytorch_model.eval()

    from typing import List, Dict

    class Wrapper(torch.nn.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model
            coco_idx_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]

            self.coco_idx = torch.tensor(coco_idx_list)

        def forward(self, inputs: List[torch.Tensor]):
            x = inputs[0].unsqueeze(0) * 255
            scale = 320.0 / min(x.shape[-2], x.shape[-1])
            x = torch.nn.functional.interpolate(x, scale_factor=scale, mode="bilinear", align_corners=True, recompute_scale_factor=True)
            out = self.model(x[0])
            res: Dict[str, torch.Tensor] = {}
            res["boxes"] = out[0] / scale
            res["labels"] = torch.index_select(self.coco_idx, 0, out[1])
            res["scores"] = out[2]
            #print("return", inputs, [res])
            return inputs, [res]

    size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
    h, w = size_divisibility, size_divisibility * 2
    with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
        predictor_path = convert_and_export_predictor(
            copy.deepcopy(cfg),
            copy.deepcopy(pytorch_model),
            "torchscript_int8@tracing",
            './',
            data_loader,
        )

        orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
        wrapped_model = Wrapper(orig_model)
        # optionally do a forward
        wrapped_model([torch.rand(3, 1920, 1080)])
        scripted_model = torch.jit.script(wrapped_model)
        scripted_model.save("d2go.pt")

if __name__ == '__main__':
    test_export_torchvision_format()

The error message indicates that you are running out of RAM. How large is the reported allocation? Does the value fit your expectations? If so, you would need to reduce the memory requirements, e.g. by lowering the batch size if possible.
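
As a rough sanity check (a minimal sketch, not part of your script): compare the number of bytes from the error message against what one input tensor should need, and shrink the memory-related settings if the gap is large. cfg below refers to the config built in test_export_torchvision_format, and all sizes are illustrative assumptions only:

# One float32 image tensor of shape (1, 3, H, W) needs N * C * H * W * 4 bytes;
# compare this with the "you tried to allocate xxxx" value from the error.
H, W = 1920, 1920  # illustrative input size (assumption)
expected_bytes = 1 * 3 * H * W * 4
print(f"one float32 {H}x{W} image needs ~{expected_bytes / 1024 ** 2:.0f} MiB")

# If the reported allocation is much larger than expected, reduce the inputs the
# exporter sees, e.g. (illustrative values, adjust to your model):
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.INPUT.MIN_SIZE_TEST = 320
cfg.INPUT.MAX_SIZE_TEST = 640
cfg.DATALOADER.NUM_WORKERS = 0  # each extra worker process adds its own memory overhead

If the allocation roughly matches a full-resolution batch, the model genuinely needs that much memory, and the remaining options are smaller inputs, a smaller batch, or more RAM.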