self.rnn.flatten_parameters() not supported in TorchScript?

I got the following warning when loading a TorchScript model in LibTorch:

Warning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters(). (_cudnn_impl at /pytorch/aten/src/ATen/native/cudnn/RNN.cpp:1269)

But when I added flatten_parameters() to my forward method like this:

import torch.nn as nn

class BidirectionalLSTM(nn.Module):
    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        self.rnn.flatten_parameters()  # this call is what torch.jit.script fails to compile
        recurrent, _ = self.rnn(input)
        T, b, h = recurrent.size()
        t_rec = recurrent.view(T * b, h)
        output = self.embedding(t_rec)  # [T * b, nOut]
        output = output.view(T, b, -1)
        return output

I got this error:

Traceback (most recent call last):
  File "to_torchscript.py", line 252, in <module>
    tt = torch.jit.script(teacher_model)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/__init__.py", line 1203, in script
    return torch.jit.torch.jit._recursive.recursive_script(obj)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 173, in recursive_script
    return copy_to_script_module(mod, overload_stubs + stubs)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 95, in copy_to_script_module
    torch.jit._create_methods_from_stubs(script_module, stubs)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/__init__.py", line 1423, in _create_methods_from_stubs
    self._c._create_methods(self, defs, rcbs, defaults)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 195, in make_strong_submodule
    new_strong_submodule = recursive_script(module)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 116, in recursive_script
    return create_constant_iterable_module(mod)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 233, in create_constant_iterable_module
    modules[key] = recursive_script(submodule)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 173, in recursive_script
    return copy_to_script_module(mod, overload_stubs + stubs)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 95, in copy_to_script_module
    torch.jit._create_methods_from_stubs(script_module, stubs)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/__init__.py", line 1423, in _create_methods_from_stubs
    self._c._create_methods(self, defs, rcbs, defaults)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/_recursive.py", line 181, in create_method_from_fn
    stub = torch.jit.script_method(fn, _jit_internal.createResolutionCallbackFromClosure(fn))
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/__init__.py", line 1280, in script_method
    ast = get_jit_def(fn, self_name="ScriptModule")
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 169, in get_jit_def
    return build_def(ctx, py_ast.body[0], type_line, self_name)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 209, in build_def
    build_stmts(ctx, body))
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 127, in build_stmts
    stmts = [build_stmt(ctx, s) for s in stmts]
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 127, in <listcomp>
    stmts = [build_stmt(ctx, s) for s in stmts]
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 185, in __call__
    return method(ctx, node)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 283, in build_Assign
    rhs = build_expr(ctx, stmt.value)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 185, in __call__
    return method(ctx, node)
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 442, in build_Call
    args = [build_expr(ctx, py_arg) for py_arg in expr.args]
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 442, in <listcomp>
    args = [build_expr(ctx, py_arg) for py_arg in expr.args]
  File "/home/dai/py36env/lib/python3.6/site-packages/torch/jit/frontend.py", line 184, in __call__
    raise UnsupportedNodeError(ctx, node)
torch.jit.frontend.UnsupportedNodeError: GeneratorExp aren't supported:
at /home/dai/py36env/lib/python3.6/site-packages/torch/nn/modules/rnn.py:105:31
        any_param = next(self.parameters()).data
        if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param):
            return

        # If any parameters alias, we fall back to the slower, copying code path. This is
        # a sufficient check, because overlapping parameter buffers that don't completely
        # alias would break the assumptions of the uniqueness check in
        # Module.named_parameters().
        all_weights = self._flat_weights
        unique_data_ptrs = set(p.data_ptr() for p in all_weights)
                               ~ <--- HERE
        if len(unique_data_ptrs) != len(all_weights):
            return

        with torch.cuda.device_of(any_param):
            import torch.backends.cudnn.rnn as rnn

            # NB: This is a temporary hack while we still don't have Tensor
            # bindings for ATen functions
            with torch.no_grad():
'__torch__.BidirectionalLSTM.forward' is being compiled since it was called from '__torch__.teacher.forward'
at to_torchscript.py:221:8
    def forward(self, input):
        # conv features
        conv = self.cnn(input)
        b, c, h, w = conv.size()

        assert h == 1, "the height of conv must be 1"
        conv = conv.squeeze(2)
        conv = conv.permute(2, 0, 1)  # [w, b, c]
        # rnn features
        output = self.rnn(conv)
        ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
        T, b, h = output.size()
        output = output.view(T, b, -1)

How can I call flatten_parameters() in TorchScript? And are there other ways to get rid of the warning?

Looking forward to your reply.


Same situation here.

My current workaround is just to suppress the warning message:

import warnings
warnings.filterwarnings('ignore')
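
Note that this suppresses every warning. A more targeted variant (a sketch, matching on the start of the message text quoted above) silences only this one:

import warnings

# Only suppress the cuDNN RNN weight-compaction warning.
warnings.filterwarnings(
    'ignore',
    message='RNN module weights are not part of single contiguous chunk of memory')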

It seems a bit late, but I found a solution.
Since flatten_parameters() only has an effect on the GPU (the cuDNN path, as the rnn.py source in the traceback shows), you can call it once in __init__ after moving the LSTM to CUDA, instead of calling it in forward():

class BidirectionalLSTM(nn.Module):
    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        # Move the LSTM to the GPU first, then compact its weights once.
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True).cuda()
        self.embedding = nn.Linear(nHidden * 2, nOut)
        self.rnn.flatten_parameters()

    def forward(self, input):
        # No flatten_parameters() here, so torch.jit.script can compile forward.
        recurrent, _ = self.rnn(input)
        T, b, h = recurrent.size()
        t_rec = recurrent.view(T * b, h)
        output = self.embedding(t_rec)  # [T * b, nOut]
        output = output.view(T, b, -1)
        return output
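
With flatten_parameters() moved out of forward(), torch.jit.script no longer tries to compile it. A minimal usage sketch (the layer sizes and shapes here are made up for illustration):

import torch

model = BidirectionalLSTM(512, 256, 37).cuda()  # .cuda() so the Linear layer is on the GPU too
scripted = torch.jit.script(model)              # no more GeneratorExp error
x = torch.randn(26, 4, 512, device='cuda')      # [T, b, nIn]
out = scripted(x)                               # [T, b, nOut]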
