- Traced module
torch.manual_seed(25)
module = torch.jit.trace(net, torch.randn(1,3,5,5))
print(module.code)
module(torch.randn(1,3,5,5))[0, 0, 0]
And the output
# module.code
def forward(self,
input: Tensor) -> Tensor:
_0 = getattr(self.convs, "1")
_1 = (getattr(self.convs, "0")).forward(input, )
_2 = getattr(self.residuals, "0")
_3 = (getattr(self.convs, "2")).forward((_0).forward(_1, ), )
_4 = getattr(self.residuals, "2")
_5 = (getattr(self.residuals, "1")).forward((_2).forward(_3, ), )
_6 = getattr(self.upsample_convs, "1")
_7 = (getattr(self.upsample_convs, "0")).forward((_4).forward(_5, ), )
_8 = (self.final_conv).forward((_6).forward(_7, ), )
return torch.sigmoid(_8)
# Output
tensor([0.3275, 0.4366, 0.2979, 0.2537, 0.4489, 0.4663, 0.4455, 0.4363],
grad_fn=<SelectBackward>)
- Scripted module: the only change I made was replacing
torch.jit.trace()
with torch.jit.script()
# module.code
def forward(self,
x: Tensor) -> Tensor:
_0 = self.convs
_1 = getattr(_0, "0")
_2 = getattr(_0, "1")
_3 = getattr(_0, "2")
x0 = (_1).forward(x, )
x1 = (_2).forward(x0, )
x2 = (_3).forward(x1, )
_4 = self.residuals
_5 = getattr(_4, "0")
_6 = getattr(_4, "1")
_7 = getattr(_4, "2")
x3 = (_5).forward(x2, )
x4 = (_6).forward(x3, )
x5 = (_7).forward(x4, )
_8 = self.upsample_convs
_9 = getattr(_8, "0")
_10 = getattr(_8, "1")
x6 = (_9).forward(x5, )
x7 = (_10).forward(x6, )
x8 = (self.final_conv).forward(x7, )
return torch.sigmoid(x8)
# Output
tensor([0.3275, 0.4366, 0.2979, 0.2537, 0.4489, 0.4663, 0.4455, 0.4363],
grad_fn=<SelectBackward>)
You can see the resulting module.code
is totally different — since my forward()
contains loops, tracing unrolls them — yet the module’s output is exactly the same.
Why is this the case? Thanks in advance!