How to convert the encoder to a TorchScript module

import numpy as np
import torch
import torch.nn as nn
from torchvision import models

# resnet_multiimage_input is defined alongside this class in monodepth2's networks/resnet_encoder.py


class ResnetEncoder(nn.Module):
    """Pytorch module for a resnet encoder
    """
    def __init__(self, num_layers, pretrained, num_input_images=1):
        super(ResnetEncoder, self).__init__()

        self.num_ch_enc = np.array([64, 64, 128, 256, 512])

        resnets = {18: models.resnet18,
                   34: models.resnet34,
                   50: models.resnet50,
                   101: models.resnet101,
                   152: models.resnet152}

        if num_layers not in resnets:
            raise ValueError("{} is not a valid number of resnet layers".format(num_layers))

        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images)
        else:
            self.encoder = resnets[num_layers](pretrained)

        if num_layers > 34:
            self.num_ch_enc[1:] *= 4

    def forward(self, input_image):
        self.features = []
        x = (input_image - 0.45) / 0.225
        x = self.encoder.conv1(x)
        x = self.encoder.bn1(x)
        self.features.append(self.encoder.relu(x))
        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
        self.features.append(self.encoder.layer2(self.features[-1]))
        self.features.append(self.encoder.layer3(self.features[-1]))
        self.features.append(self.encoder.layer4(self.features[-1]))

        return self.features

encoder = ResnetEncoder(18, True )
example = torch.rand(1, 3, 640, 192)
traced_script_module_encoder = torch.jit.trace(encoder.__getattr__('encoder'), example )
traced_script_module.save('encoder_new.pt')
torch.jit.load('encoder_new.pt')

I have tried converting the model via trace and loading it back, but it returns features with different shapes. As also suggested by the community, trace will not work here ("Tracing doesn't understand dynamic control flow, so sometimes it will 'constant-ify' shapes in your model. Try turning your model into a ScriptModule and using TorchScript.").
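A tiny illustration of what that advice means (a sketch with made-up names, not part of the model above): tracing bakes in the branch taken for the example input, while scripting keeps the control flow.

import torch
import torch.nn as nn
import torch.nn.functional as F

class PadIfNarrow(nn.Module):
    def forward(self, x):
        # data-dependent control flow: pad only when the last dimension is short
        if x.shape[-1] < 8:
            return F.pad(x, [0, 8 - x.shape[-1]])
        return x

m = PadIfNarrow()
traced = torch.jit.trace(m, torch.rand(1, 3, 4))   # only the "pad by 4" path is recorded
scripted = torch.jit.script(m)                     # the if/else survives

x = torch.rand(1, 3, 16)
print(m(x).shape)         # torch.Size([1, 3, 16])
print(scripted(x).shape)  # torch.Size([1, 3, 16])
print(traced(x).shape)    # torch.Size([1, 3, 20]) -- the trace-time branch was baked in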

But when I try to convert via torch.jit.script, I get the following error:
TypeError: module, class, method, function, traceback, frame, or code object was expected, got ResnetEncoder

while using the example below:

encoder = ResnetEncoder(18, True )
traced_script_module_encoder = torch.jit.script(encoder)
traced_script_module_encoder.save('new-encoder.pt')

You may be on an old version of torchvision or torch; can you make sure you're on the latest of both (or use the nightly if there are still errors)?
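For context, older releases (PyTorch 1.0/1.1) only accepted functions in torch.jit.script; scripting a whole nn.Module with torch.jit.script(module) arrived around 1.2, so that TypeError usually means an old install (you can check with print(torch.__version__, torchvision.__version__)). On those older releases the workaround was to subclass torch.jit.ScriptModule, roughly like this sketch (not needed on current versions):

import torch

class Doubler(torch.jit.ScriptModule):
    @torch.jit.script_method
    def forward(self, x):
        return x * 2

m = Doubler()          # compiled at construction time
m.save('doubler.pt')   # hypothetical file name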

Your code works fine for me after applying the following diff:

diff --git a/test.py b/test.py
index e305823abd..a96eb93829 100644
--- a/test.py
+++ b/test.py
@@ -208,21 +208,20 @@ class ResnetEncoder(nn.Module):
             self.num_ch_enc[1:] *= 4
 
     def forward(self, input_image):
-        self.features = []
+        features = []
         x = (input_image - 0.45) / 0.225
         x = self.encoder.conv1(x)
         x = self.encoder.bn1(x)
-        self.features.append(self.encoder.relu(x))
-        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
-        self.features.append(self.encoder.layer2(self.features[-1]))
-        self.features.append(self.encoder.layer3(self.features[-1]))
-        self.features.append(self.encoder.layer4(self.features[-1]))
+        features.append(self.encoder.relu(x))
+        features.append(self.encoder.layer1(self.encoder.maxpool(features[-1])))
+        features.append(self.encoder.layer2(features[-1]))
+        features.append(self.encoder.layer3(features[-1]))
+        features.append(self.encoder.layer4(features[-1]))
 
-        return self.features
+        return features
 
 encoder = ResnetEncoder(18, True )
-example = torch.rand(1, 3, 640, 192)
-traced_script_module_encoder = torch.jit.trace(encoder.__getattr__('encoder'), example )
+traced_script_module = torch.jit.script(encoder)
 traced_script_module.save('encoder_new.pt')
 torch.jit.load('encoder_new.pt')

This is necessary since in TorchScript you can't add new attributes to self outside of __init__ (you can only mutate existing attributes), but here it looks like features wasn't being used outside of forward anyway.
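A minimal illustration of the rule (made-up module names, just a sketch):

import torch
import torch.nn as nn

class BadCache(nn.Module):
    def forward(self, x):
        self.feats = [x * 2]   # creates a new attribute outside __init__: scripting rejects this
        return self.feats

class GoodCache(nn.Module):
    def forward(self, x):
        feats = [x * 2]        # plain local variable: fine
        return feats

torch.jit.script(GoodCache())    # works
# torch.jit.script(BadCache())   # fails: TorchScript refuses the assignment to self.feats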

Thank you @driazati for the quick reply. Actually, I am still facing the same issue after applying the change you suggested (in TorchScript you can't add new attributes to self outside of __init__; you can only mutate existing attributes). I am using PyTorch 1.0.1 and torchvision 0.2.2. (Maybe I should upgrade to PyTorch 1.2.)

Also, which versions of PyTorch and torchvision are you using? And do you get the same feature output before saving and after loading the saved encoder model back again?
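By "same feature output" I mean a check roughly along these lines (a sketch, using the patched ResnetEncoder from above):

import torch

encoder = ResnetEncoder(18, True).eval()
example = torch.rand(1, 3, 640, 192)

with torch.no_grad():
    before = encoder(example)
    scripted = torch.jit.script(encoder)
    scripted.save('encoder_new.pt')
    after = torch.jit.load('encoder_new.pt')(example)

for a, b in zip(before, after):
    print(a.shape, b.shape, torch.allclose(a, b))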

Now I have updated PyTorch to 1.3.1 (the stable version) and torchvision to 0.4.
While running the same code, the error has changed to:

File "C:\Users\anaconda3\envs\pytorch_converter\lib\site-packages\torch\jit\__init__.py", line 1423, in _create_methods_from_stubs
self._c._create_methods(self, defs, rcbs, defaults)

RuntimeError:
module has no attribute 'downsample':

People have suggested using the nightly version: https://github.com/pytorch/pytorch/issues/28351

I will try it and update here if I fix the issue.

Thank you so much @driazati, it worked with the torch nightly and the conversion completed.

Now I move on to the next step, and there I have a similar error :)

Now I am trying to convert the decoder using torch.jit.script, but I am facing an error, shown below.

My module

import numpy as np
import torch
import torch.nn as nn

from collections import OrderedDict
from layers import *

class DepthDecoder(nn.Module):
    def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True):
        super(DepthDecoder, self).__init__()

        self.num_output_channels = num_output_channels
        self.use_skips = use_skips
        self.upsample_mode = 'nearest'
        self.scales = scales

        self.num_ch_enc = num_ch_enc
        self.num_ch_dec = np.array([16, 32, 64, 128, 256])

        # decoder
        self.convs = OrderedDict()
        for i in range(4, -1, -1):
            # upconv_0
            num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
            num_ch_out = self.num_ch_dec[i]
            self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)

            # upconv_1
            num_ch_in = self.num_ch_dec[i]
            if self.use_skips and i > 0:
                num_ch_in += self.num_ch_enc[i - 1]
            num_ch_out = self.num_ch_dec[i]
            self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)

        for s in self.scales:
            self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)

        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_features):
        outputs = {}

        # decoder
        x = input_features[-1]
        for i in range(4, -1, -1):
            x = self.convs[("upconv", i, 0)](x)
            x = [upsample(x)]
            if self.use_skips and i > 0:
                x += [input_features[i - 1]]
            x = torch.cat(x, 1)
            x = self.convs[("upconv", i, 1)](x)
            if i in self.scales:
                outputs[("disp", i)] = self.sigmoid(self.convs[("dispconv", i)](x))

        return outputs


num_enc_channels = np.array([64, 64, 128, 256, 512])
depth_decoder = DepthDecoder(num_ch_enc=num_enc_channels, scales=range(4))
traced_script_module_decoder = torch.jit.script(depth_decoder)
traced_script_module_decoder.save('new-decoder.pt')

Error:

File "C:\Users\lib\site-packages\torch\jit\_recursive.py", line 259, in create_methods_from_stubs
concrete_type._create_methods(defs, rcbs, defaults)
RuntimeError:
Module 'DepthDecoder' has no attribute 'convs' (This attribute exists on the Python module, but we failed to convert Python type: 'OrderedDict' to a TorchScript type.):
File "C:\Users\networks\depth_decoder.py", line 55
        x = input_features[-1]
        for i in range(4, -1, -1):
            x = self.convs[("upconv", i, 0)](x)
                ~~~~~~~~~~ <--- HERE
            x = [upsample(x)]
            if self.use_skips and i > 0:

I feel the error is the same as the one explained in this issue: https://github.com/pytorch/pytorch/issues/23905

failed to convert Python type: 'dict' to a TorchScript type.

Is there any fix?
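For reference, one common direction (an assumption on my part, not a confirmed fix for this exact model) is to register the conv blocks as real submodules instead of keeping them in a plain Python OrderedDict, e.g. via nn.ModuleDict, whose keys must be strings rather than tuples. A minimal sketch with made-up names; it is not a drop-in patch for DepthDecoder, whose forward would also need its tuple-keyed lookups rewritten:

import torch
import torch.nn as nn

class TinyDecoder(nn.Module):
    def __init__(self):
        super(TinyDecoder, self).__init__()
        # nn.ModuleDict registers the blocks as submodules that TorchScript can see;
        # a plain dict/OrderedDict of modules is invisible to the compiler.
        self.convs = nn.ModuleDict({
            "upconv_1_0": nn.Conv2d(8, 8, 3, padding=1),
            "upconv_0_0": nn.Conv2d(8, 4, 3, padding=1),
        })

    def forward(self, x):
        # constant string keys are resolved at compile time on recent PyTorch versions
        x = self.convs["upconv_1_0"](x)
        x = self.convs["upconv_0_0"](x)
        return x

scripted = torch.jit.script(TinyDecoder())
print(scripted(torch.rand(1, 8, 16, 16)).shape)   # torch.Size([1, 4, 16, 16])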