I have run into a problem with TensorRT when using torch.split() in one of my layers.
The class:

import torch
import torch.nn as nn

class Unpack(nn.Module):
    def __init__(self, split_size=3):
        super(Unpack, self).__init__()
        self.split_size = split_size

    def forward(self, input):
        # Split the input into two chunks of size split_size along dim 0
        input_r, input_i = torch.split(input, split_size_or_sections=self.split_size, dim=0)
        return input_r, input_i
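For context, the layer behaves as expected in eager mode; here is a minimal standalone check (the (6, 4) shape is just a placeholder, not my real data):

import torch

# Eager-mode sanity check of the Unpack layer defined above.
# A (6, 4) input with split_size=3 along dim 0 yields exactly two (3, 4) chunks.
layer = Unpack(split_size=3)
x = torch.randn(6, 4)
r, i = layer(x)
print(r.shape, i.shape)  # torch.Size([3, 4]) torch.Size([3, 4])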
The code:

trt_model_fp16 = torch_tensorrt.compile(model,
    inputs=[torch_tensorrt.Input((1,) + X_cat.shape, dtype=data_type_16)],
    enabled_precisions={data_type_16},  # Run with FP16
    workspace_size=1 << 22)
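For reference, the names used in that call are set up roughly like this earlier in my script (the values below are placeholders, only the names match my code; model stands in for my real network, which contains the Unpack layer above):

import torch

# Placeholder setup matching the names used in the compile call above.
data_type_16 = torch.half                   # FP16 precision
X_cat = torch.randn(6, 128)                 # placeholder sample input, real shape differs
model = Unpack(split_size=3).eval().cuda()  # stand-in for my real network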
The error (I think it comes from the TorchScript conversion step):
    119     if module_type == _ModuleType.nn:
    120         logging.log(
    121             logging.Level.Info,
    122             "Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript",
    123         )
--> 124         ts_mod = torch.jit.script(module)
    125     return torch_tensorrt.ts.compile(
    126         ts_mod, inputs=inputs, enabled_precisions=enabled_precisions, **kwargs
    127     )
    128 elif target_ir == _IRType.fx:
...
#print(input.shape)
input_r,input_i =torch.split(input,split_size_or_sections=self.split_size,dim=1)
~~~~~~~~~~~ <--- HERE
return input_r,input_i
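To check whether the failure comes from the scripting step alone (as the log message suggests), I would expect scripting just the Unpack layer on its own to hit the same error, without TensorRT involved. A minimal sketch, assuming the Unpack class defined above:

import torch

# Try to script only the Unpack layer, outside of torch_tensorrt.
# If TorchScript cannot handle the tuple-unpacking of torch.split here,
# the same error should show up in this call.
try:
    scripted = torch.jit.script(Unpack(split_size=3))
    print("scripting succeeded")
except Exception as e:
    print("scripting failed:", e)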