`data_paralleled_module.module.variable` works for me. Could you double-check? Here is a minimal session demonstrating it:
>>> class Model(nn.Module):
...     def __init__(self, variable):
...         super(Model, self).__init__()
...         self.variable = variable
...
>>> m = Model(Variable(torch.randn(3,4)))
>>> dpm = nn.DataParallel(m)
>>> dpm.module
Model (
)
>>> dpm.module.variable
Variable containing:
0.8198 1.3578 -1.9757 -0.4722
0.0324 0.5272 -0.3619 -2.0327
-0.2448 -0.3940 -1.0660 -0.4111
[torch.FloatTensor of size 3x4]
>>> dir(dpm.module)
['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattr__', '__getattribute__', '__gt__', '__hash__', '__init
__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__setstate__', '__sizeof__', '__s
tr__', '__subclasshook__', '__weakref__', '_all_buffers', '_apply', '_backend', '_backward_hooks', '_buffers', '_forward_hooks', '_forward_pre_hooks', '_modules', '_paramete
rs', 'add_module', 'apply', 'children', 'cpu', 'cuda', 'double', 'dump_patches', 'eval', 'float', 'forward', 'half', 'load_state_dict', 'modules', 'named_children', 'named_m
odules', 'named_parameters', 'parameters', 'register_backward_hook', 'register_buffer', 'register_forward_hook', 'register_forward_pre_hook', 'register_parameter', 'share_me
mory', 'state_dict', 'train', 'training', 'type', 'variable', 'zero_grad']
>>>