How can I get a TorchScript version of torchvision.models.detection.maskrcnn_resnet50_fpn? Neither torch.jit.script nor torch.jit.trace is working with this model.
With torch.jit.script
modelname = "maskrcnn"
model = torch.load(modelname + "-best.pth")
model = model.cuda()
model.eval()
print(img)
with torch.no_grad():
    print(model(img))

traced_cell = torch.jit.script(model, (img))
torch.jit.save(traced_cell, modelname + "-torchscript.pth")
loaded_trace = torch.jit.load(modelname + "-torchscript.pth")
loaded_trace.eval()
with torch.no_grad():
    print(loaded_trace(img))

TensorMask(torch.argmax(loaded_trace(img), 1)).show()
Output:
TensorImage([[[[0.8961, 0.9132, 0.8789, ..., 0.2453, 0.1939, 0.2282],
[0.8276, 0.9132, 0.8618, ..., 0.2282, 0.1939, 0.2282],
[0.8961, 0.9132, 0.8789, ..., 0.2282, 0.2282, 0.2453],
...,
[0.8961, 0.8618, 0.9132, ..., 0.4508, 0.4166, 0.3994],
[0.9303, 0.9132, 0.9474, ..., 0.4166, 0.4166, 0.4508],
[0.9646, 0.8789, 0.9303, ..., 0.3994, 0.3994, 0.3994]],
[[1.0455, 1.0630, 1.0280, ..., 0.3803, 0.3277, 0.3627],
[0.9755, 1.0630, 1.0105, ..., 0.3627, 0.3277, 0.3627],
[1.0455, 1.0630, 1.0280, ..., 0.3627, 0.3627, 0.3803],
...,
[1.0455, 1.0105, 1.0630, ..., 0.5903, 0.5553, 0.5378],
[1.0805, 1.0630, 1.0980, ..., 0.5553, 0.5553, 0.5903],
[1.1155, 1.0280, 1.0805, ..., 0.5378, 0.5378, 0.5378]],
[[1.2631, 1.2805, 1.2457, ..., 0.6008, 0.5485, 0.5834],
[1.1934, 1.2805, 1.2282, ..., 0.5834, 0.5485, 0.5834],
[1.2631, 1.2805, 1.2457, ..., 0.5834, 0.5834, 0.6008],
...,
[1.2631, 1.2282, 1.2805, ..., 0.8099, 0.7751, 0.7576],
[1.2980, 1.2805, 1.3154, ..., 0.7751, 0.7751, 0.8099],
[1.3328, 1.2457, 1.2980, ..., 0.7576, 0.7576, 0.7576]]]],
device='cuda:0')
[{'boxes': tensor([[412.5222, 492.3208, 619.7662, 620.9233]], device='cuda:0'), 'labels': tensor([1], device='cuda:0'), 'scores': tensor([0.1527], device='cuda:0'), 'masks': tensor([[[[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]]], device='cuda:0')}]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-23-7216a0dac5a0> in <module>
12 loaded_trace.eval()
13 with torch.no_grad():
---> 14 print(loaded_trace(img))
15
16 TensorMask(torch.argmax(loaded_trace(img),1)).show()
~/anaconda3/envs/pro1/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
556 result = self._slow_forward(*input, **kwargs)
557 else:
--> 558 result = self.forward(*input, **kwargs)
559 for hook in self._forward_hooks.values():
560 hook_result = hook(self, input, result)
RuntimeError: forward() Expected a value of type 'List[Tensor]' for argument 'images' but instead found type 'TensorImage'.
Position: 1
Value: TensorImage([[[[0.8961, 0.9132, 0.8789, ..., 0.2453, 0.1939, 0.2282],
[0.8276, 0.9132, 0.8618, ..., 0.2282, 0.1939, 0.2282],
[0.8961, 0.9132, 0.8789, ..., 0.2282, 0.2282, 0.2453],
...,
[0.8961, 0.8618, 0.9132, ..., 0.4508, 0.4166, 0.3994],
[0.9303, 0.9132, 0.9474, ..., 0.4166, 0.4166, 0.4508],
[0.9646, 0.8789, 0.9303, ..., 0.3994, 0.3994, 0.3994]],
[[1.0455, 1.0630, 1.0280, ..., 0.3803, 0.3277, 0.3627],
[0.9755, 1.0630, 1.0105, ..., 0.3627, 0.3277, 0.3627],
[1.0455, 1.0630, 1.0280, ..., 0.3627, 0.3627, 0.3803],
...,
[1.0455, 1.0105, 1.0630, ..., 0.5903, 0.5553, 0.5378],
[1.0805, 1.0630, 1.0980, ..., 0.5553, 0.5553, 0.5903],
[1.1155, 1.0280, 1.0805, ..., 0.5378, 0.5378, 0.5378]],
[[1.2631, 1.2805, 1.2457, ..., 0.6008, 0.5485, 0.5834],
[1.1934, 1.2805, 1.2282, ..., 0.5834, 0.5485, 0.5834],
[1.2631, 1.2805, 1.2457, ..., 0.5834, 0.5834, 0.6008],
...,
[1.2631, 1.2282, 1.2805, ..., 0.8099, 0.7751, 0.7576],
[1.2980, 1.2805, 1.3154, ..., 0.7751, 0.7751, 0.8099],
[1.3328, 1.2457, 1.2980, ..., 0.7576, 0.7576, 0.7576]]]],
device='cuda:0')
Declaration: forward(__torch__.torchvision.models.detection.mask_rcnn.___torch_mangle_1723.MaskRCNN self, Tensor[] images, Dict(str, Tensor)[]? targets=None) -> ((Dict(str, Tensor), Dict(str, Tensor)[]))
Cast error details: Unable to cast Python instance to C++ type (compile in debug mode for details)
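Looking at the Declaration line above, the scripted forward seems to want images as a List[Tensor] (one CHW tensor per image) and to always return a (losses, detections) tuple, so my guess is that the loaded module has to be called with a plain list of tensors rather than with the fastai TensorImage batch. Something like this untested sketch (img is my [1, 3, H, W] TensorImage on cuda:0)? Is that the intended calling convention?

# Guess based on the Declaration: strip the fastai TensorImage subclass, unpack the
# 4D batch into a List[Tensor] of CHW images, and unpack the returned tuple.
image_list = [img[i].as_subclass(torch.Tensor) for i in range(img.shape[0])]
with torch.no_grad():
    losses, detections = loaded_trace(image_list)
print(detections)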
With torch.jit.trace
modelname = "maskrcnn"
model = torch.load(modelname + "-best.pth")
model = model.cuda()
model.eval()
print(img)
with torch.no_grad():
    print(model(img))
traced_cell = torch.jit.trace(model, (img))
torch.jit.save(traced_cell, modelname + "-torchscript.pth")

loaded_trace = torch.jit.load(modelname + "-torchscript.pth")
loaded_trace.eval()
with torch.no_grad():
    print(loaded_trace(img))

TensorMask(torch.argmax(loaded_trace(img), 1)).show()
Output:
TensorImage([[[[0.8961, 0.9132, 0.8789, ..., 0.2453, 0.1939, 0.2282],
[0.8276, 0.9132, 0.8618, ..., 0.2282, 0.1939, 0.2282],
[0.8961, 0.9132, 0.8789, ..., 0.2282, 0.2282, 0.2453],
...,
[0.8961, 0.8618, 0.9132, ..., 0.4508, 0.4166, 0.3994],
[0.9303, 0.9132, 0.9474, ..., 0.4166, 0.4166, 0.4508],
[0.9646, 0.8789, 0.9303, ..., 0.3994, 0.3994, 0.3994]],
[[1.0455, 1.0630, 1.0280, ..., 0.3803, 0.3277, 0.3627],
[0.9755, 1.0630, 1.0105, ..., 0.3627, 0.3277, 0.3627],
[1.0455, 1.0630, 1.0280, ..., 0.3627, 0.3627, 0.3803],
...,
[1.0455, 1.0105, 1.0630, ..., 0.5903, 0.5553, 0.5378],
[1.0805, 1.0630, 1.0980, ..., 0.5553, 0.5553, 0.5903],
[1.1155, 1.0280, 1.0805, ..., 0.5378, 0.5378, 0.5378]],
[[1.2631, 1.2805, 1.2457, ..., 0.6008, 0.5485, 0.5834],
[1.1934, 1.2805, 1.2282, ..., 0.5834, 0.5485, 0.5834],
[1.2631, 1.2805, 1.2457, ..., 0.5834, 0.5834, 0.6008],
...,
[1.2631, 1.2282, 1.2805, ..., 0.8099, 0.7751, 0.7576],
[1.2980, 1.2805, 1.3154, ..., 0.7751, 0.7751, 0.8099],
[1.3328, 1.2457, 1.2980, ..., 0.7576, 0.7576, 0.7576]]]],
device='cuda:0')
[{'boxes': tensor([[412.5222, 492.3208, 619.7662, 620.9233]], device='cuda:0'), 'labels': tensor([1], device='cuda:0'), 'scores': tensor([0.1527], device='cuda:0'), 'masks': tensor([[[[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]]], device='cuda:0')}]
/opt/conda/conda-bld/pytorch_1587452831668/work/torch/csrc/utils/python_arg_parser.cpp:760: UserWarning: This overload of nonzero is deprecated:
nonzero(Tensor input, *, Tensor out)
Consider using one of the following signatures instead:
nonzero(Tensor input, *, bool as_tuple)
/home/david/anaconda3/envs/proy/lib/python3.7/site-packages/torch/tensor.py:467: RuntimeWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).
'incorrect results).', category=RuntimeWarning)
/home/david/anaconda3/envs/proy/lib/python3.7/site-packages/fastai2/torch_core.py:272: TracerWarning: Converting a tensor to a Python index might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
res = getattr(super(TensorBase, self), fn)(*args, **kwargs)
/opt/conda/conda-bld/pytorch_1587452831668/work/aten/src/ATen/native/BinaryOps.cpp:81: UserWarning: Integer division of tensors using div or / is deprecated, and in a future release div will perform true division as in Python 3. Use true_divide or floor_divide (// in Python) instead.
/home/david/anaconda3/envs/proy/lib/python3.7/site-packages/torchvision/models/detection/rpn.py:164: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
torch.tensor(image_size[1] / g[1], dtype=torch.int64, device=device)] for g in grid_sizes]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-15-44b7a9360e87> in <module>
6 with torch.no_grad():
7 print(model(img))
----> 8 traced_cell = torch.jit.trace(model, (img))
9 torch.jit.save(traced_cell, modelname+"-torchscript.pth")
10
~/anaconda3/envs/proy/lib/python3.7/site-packages/torch/jit/__init__.py in trace(func, example_inputs, optimize, check_trace, check_inputs, check_tolerance, strict, _force_outplace, _module_class, _compilation_unit)
881 return trace_module(func, {'forward': example_inputs}, None,
882 check_trace, wrap_check_inputs(check_inputs),
--> 883 check_tolerance, strict, _force_outplace, _module_class)
884
885 if (hasattr(func, '__self__') and isinstance(func.__self__, torch.nn.Module) and
~/anaconda3/envs/proy/lib/python3.7/site-packages/torch/jit/__init__.py in trace_module(mod, inputs, optimize, check_trace, check_inputs, check_tolerance, strict, _force_outplace, _module_class, _compilation_unit)
1035 func = mod if method_name == "forward" else getattr(mod, method_name)
1036 example_inputs = make_tuple(example_inputs)
-> 1037 module._c._create_method_from_trace(method_name, func, example_inputs, var_lookup_fn, strict, _force_outplace)
1038 check_trace_method = module._c._get_method(method_name)
1039
~/anaconda3/envs/proy/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
554 input = result
555 if torch._C._get_tracing_state():
--> 556 result = self._slow_forward(*input, **kwargs)
557 else:
558 result = self.forward(*input, **kwargs)
~/anaconda3/envs/proy/lib/python3.7/site-packages/torch/nn/modules/module.py in _slow_forward(self, *input, **kwargs)
540 recording_scopes = False
541 try:
--> 542 result = self.forward(*input, **kwargs)
543 finally:
544 if recording_scopes:
~/anaconda3/envs/proy/lib/python3.7/site-packages/torchvision/models/detection/generalized_rcnn.py in forward(self, images, targets)
68 if isinstance(features, torch.Tensor):
69 features = OrderedDict([('0', features)])
---> 70 proposals, proposal_losses = self.rpn(images, features, targets)
71 detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
72 detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
~/anaconda3/envs/proy/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
554 input = result
555 if torch._C._get_tracing_state():
--> 556 result = self._slow_forward(*input, **kwargs)
557 else:
558 result = self.forward(*input, **kwargs)
~/anaconda3/envs/proy/lib/python3.7/site-packages/torch/nn/modules/module.py in _slow_forward(self, *input, **kwargs)
540 recording_scopes = False
541 try:
--> 542 result = self.forward(*input, **kwargs)
543 finally:
544 if recording_scopes:
~/anaconda3/envs/proy/lib/python3.7/site-packages/torchvision/models/detection/rpn.py in forward(self, images, features, targets)
486 proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
487 proposals = proposals.view(num_images, -1, 4)
--> 488 boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)
489
490 losses = {}
~/anaconda3/envs/proy/lib/python3.7/site-packages/torchvision/models/detection/rpn.py in filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_level)
392
393 # select top_n boxes independently per level before applying nms
--> 394 top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)
395
396 image_range = torch.arange(num_images, device=device)
~/anaconda3/envs/proy/lib/python3.7/site-packages/torchvision/models/detection/rpn.py in _get_top_n_idx(self, objectness, num_anchors_per_level)
372 pre_nms_top_n = min(self.pre_nms_top_n(), num_anchors)
373 _, top_n_idx = ob.topk(pre_nms_top_n, dim=1)
--> 374 r.append(top_n_idx + offset)
375 offset += num_anchors
376 return torch.cat(r, dim=1)
RuntimeError: expected device cuda:0 but got device cpu
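If tracing just isn't supported for the detection models, my fallback plan is to script the stock torchvision model directly and feed it a list of CHW tensors, roughly like the untested sketch below (the file name and input size are placeholders). Is this the recommended way to get a TorchScript Mask R-CNN, and can my fine-tuned weights be loaded into it afterwards?

import torch
import torchvision

# Fallback sketch: script the stock torchvision Mask R-CNN and call it with a
# List[Tensor] of CHW images, matching the scripted forward's declaration.
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model.eval()

scripted = torch.jit.script(model)
torch.jit.save(scripted, "maskrcnn-torchscript.pth")

loaded = torch.jit.load("maskrcnn-torchscript.pth")
loaded.eval()

dummy = torch.rand(3, 640, 640)               # one CHW image (placeholder size)
with torch.no_grad():
    losses, detections = loaded([dummy])      # scripted model returns (losses, detections)
print(detections[0]["boxes"], detections[0]["scores"])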