Is this model convertible to mobile?

Getting an error saying that metal_prepack::conv2d_prepack is not supported. The op seems to exist in the PyTorch source, so is the problem maybe how it is used in the model below? I added the following blocklist to skip optimizations I thought might be unsupported:

from torch.utils.mobile_optimizer import MobileOptimizerType

optimization_blocklist = {
    MobileOptimizerType.CONV_BN_FUSION,
    MobileOptimizerType.INSERT_FOLD_PREPACK_OPS,
    MobileOptimizerType.REMOVE_DROPOUT,
    MobileOptimizerType.FUSE_ADD_RELU,
    MobileOptimizerType.HOIST_CONV_PACKED_PARAMS,
}
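
For context, this is the conversion code I am running (the same lines that appear in the traceback below); generator, image_tensor, and kp_driving are defined earlier in my notebook:

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

# trace the generator with example inputs from my setup
traced_detector = torch.jit.trace(generator, (image_tensor, kp_driving, kp_driving), strict=False)
# run the Metal mobile optimization pass with the blocklist above
optimized_traced_detector = optimize_for_mobile(traced_detector, optimization_blocklist, backend='metal')
optimized_traced_detector._save_for_lite_interpreter("/content/generator_metal.pt")

The generator model being traced is: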

OcclusionAwareGenerator(
  (dense_motion_network): DenseMotionNetwork(
    (hourglass): Hourglass(
      (encoder): Encoder(
        (down_blocks): ModuleList(
          (0): DownBlock2d(
            (conv): Conv2d(44, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
          )
          (1): DownBlock2d(
            (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
          )
          (2): DownBlock2d(
            (conv): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
          )
          (3): DownBlock2d(
            (conv): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
          )
          (4): DownBlock2d(
            (conv): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
            (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
          )
        )
      )
      (decoder): Decoder(
        (up_blocks): ModuleList(
          (0): UpBlock2d(
            (conv): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (1): UpBlock2d(
            (conv): Conv2d(2048, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (2): UpBlock2d(
            (conv): Conv2d(1024, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (3): UpBlock2d(
            (conv): Conv2d(512, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
          (4): UpBlock2d(
            (conv): Conv2d(256, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm): SynchronizedBatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
        )
      )
    )
    (mask): Conv2d(108, 11, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
    (occlusion): Conv2d(108, 1, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
    (down): AntiAliasInterpolation2d()
  )
  (first): SameBlock2d(
    (conv): Conv2d(3, 64, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
    (norm): SynchronizedBatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  )
  (down_blocks): ModuleList(
    (0): DownBlock2d(
      (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm): SynchronizedBatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
    )
    (1): DownBlock2d(
      (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (pool): AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
    )
  )
  (up_blocks): ModuleList(
    (0): UpBlock2d(
      (conv): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm): SynchronizedBatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (1): UpBlock2d(
      (conv): Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm): SynchronizedBatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (bottleneck): Sequential(
    (r0): ResBlock2d(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm1): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (norm2): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (r1): ResBlock2d(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm1): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (norm2): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (r2): ResBlock2d(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm1): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (norm2): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (r3): ResBlock2d(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm1): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (norm2): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (r4): ResBlock2d(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm1): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (norm2): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (r5): ResBlock2d(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (norm1): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (norm2): SynchronizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (final): Conv2d(64, 3, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
)

Converting the model with optimize_for_mobile produces this error:
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>()
     17 traced_detector = torch.jit.trace(generator, (image_tensor, kp_driving, kp_driving), strict=False)
     18 #optimized_traced_detector = optimize_for_mobile(traced_detector, backend='metal')
---> 19 optimized_traced_detector = optimize_for_mobile(traced_detector, optimization_blocklist, backend='metal')
     20 #print(torch.jit.export_opnames(optimized_traced_detector))
     21 optimized_traced_detector._save_for_lite_interpreter("/content/generator_metal.pt")

/usr/local/lib/python3.7/dist-packages/torch/utils/mobile_optimizer.py in optimize_for_mobile(script_module, optimization_blocklist, preserved_methods, backend)
     67         optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str)
     68     elif backend == 'metal':
---> 69         optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str)
     70     else:
     71         raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")

RuntimeError: 0 INTERNAL ASSERT FAILED at "/pytorch/torch/csrc/jit/ir/alias_analysis.cpp":532, please report a bug to PyTorch. We don't have an op for metal_prepack::conv2d_prepack but it isn't a special case. Argument types: Tensor, Tensor, int[], int[], int[], int, None, None,

The CPU model works perfectly, and the model performs great on GPU. I'm just not able to get it onto a phone quite yet.
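
For what it's worth, I can also dump the ops in the traced module before the Metal pass (the commented-out export_opnames line above does the same thing for the optimized module), to see which ops the model actually uses:

# list the ops present in the traced (not yet optimized) module
ops = torch.jit.export_opnames(traced_detector)
print(sorted(ops))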

It seems you are hitting this issue. Could you comment on this bug with your use case (and if possible a code snippet to reproduce it)?
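
Something along the lines of this minimal sketch (a single traced Conv2d pushed through the Metal pass) is what the issue template asks for; note this is just an illustrative repro, and it assumes a PyTorch build with Metal support:

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

class TinyConv(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)

traced = torch.jit.trace(TinyConv().eval(), torch.rand(1, 3, 64, 64))
# does this hit the same metal_prepack::conv2d_prepack assert on your setup?
optimized = optimize_for_mobile(traced, backend='metal')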

@xta0 for Metal-related stuff