Got an empty parameter list

I get an empty parameter list when I define a model as follows:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence


class TextNet(nn.Module):
    def __init__(self, input_dim=100, hidden_dim=100):
        super(TextNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        # LSTM layer produces the encoding representation of a sentence
        self.encoder = nn.LSTM(input_size=100, hidden_size=100, batch_first=True)

        self.fc1 = nn.Sequential(nn.Linear(200, 100), nn.Tanh())
        self.fc2 = nn.Sequential(nn.Linear(100, 50), nn.Tanh())
        self.fc3 = nn.Sequential(nn.Linear(50, 25), nn.Tanh())
        self.fc4 = nn.Sequential(nn.Linear(25, 1), nn.Sigmoid())

    def forward(self, input_p, input_h, lens_p, lens_h):
        self.encoder.flatten_parameters()
        # encode sentence p
        _, idx_sort_p = torch.sort(lens_p, dim=0, descending=True)
        _, idx_unsort_p = torch.sort(idx_sort_p, dim=0)
        lens_p = list(lens_p[idx_sort_p])
        enc_pack_input_p = input_p.index_select(0, idx_sort_p)
        total_length_p = enc_pack_input_p.size(1)
        enc_pack_input_p = pack_padded_sequence(enc_pack_input_p, lens_p, batch_first=True)
        enc_pack_output_p, (_, _) = self.encoder(enc_pack_input_p)
        output_p, _ = pad_packed_sequence(enc_pack_output_p, batch_first=True,
                                          total_length=total_length_p)
        enc_p = output_p[0].index_select(0, idx_unsort_p)

        # encode sentence h
        _, idx_sort_h = torch.sort(lens_h, dim=0, descending=True)
        _, idx_unsort_h = torch.sort(idx_sort_h, dim=0)
        lens_h = list(lens_h[idx_sort_h])
        enc_pack_input_h = input_h.index_select(0, idx_sort_h)
        total_length_h = enc_pack_input_h.size(1)
        enc_pack_input_h = pack_padded_sequence(enc_pack_input_h, lens_h, batch_first=True)
        enc_pack_output_h, (_, _) = self.encoder(enc_pack_input_h)
        output_h, _ = pad_packed_sequence(enc_pack_output_h, batch_first=True,
                                          total_length=total_length_h)
        enc_h = output_h[0].index_select(0, idx_unsort_h)

        x = torch.cat((enc_p, enc_h), dim=-1)  # concatenate the two sentence encodings
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        x = self.fc4(x)
        return x

After that, I run some print operations:

model = TextNet()
print(model)
params = list(model.parameters())
print(params)
print(len(params))

I get results like this:
TextNet()
[]
0
which means the parameter list of this model is empty! I really don't know what the problem is. Can anyone help me? Thanks a lot!
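For reference, my understanding is that any submodule assigned as an attribute after calling super().__init__() should be registered and show up in model.parameters(). A minimal sketch with a hypothetical Tiny module (not my real code):

import torch.nn as nn

class Tiny(nn.Module):
    def __init__(self):
        super(Tiny, self).__init__()
        self.fc = nn.Linear(10, 1)  # should be registered as a submodule

tiny = Tiny()
print(len(list(tiny.parameters())))  # expect 2: the weight and the bias

so I would expect TextNet to behave the same way.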


Using your code I get a list of the parameters:

print(params)
> [Parameter containing:
tensor([[ 0.0308,  0.0509,  0.0137,  ...,  0.0551,  0.0822,  0.0668],
        [-0.0764, -0.0238, -0.0644,  ...,  0.0089, -0.0293,  0.0269],
        [ 0.0975, -0.0644, -0.0705,  ..., -0.0368, -0.0390,  0.0447],
        ...,
        [ 0.0853, -0.0484,  0.0760,  ..., -0.0174,  0.0020,  0.0201],
        [-0.0089, -0.0343, -0.0659,  ...,  0.0491,  0.0273,  0.0921],
        [-0.0995,  0.0122, -0.0222,  ..., -0.0214, -0.0614,  0.0399]],
       requires_grad=True), Parameter containing:
tensor([[-0.0036, -0.0802, -0.0809,  ..., -0.0705,  0.0067,  0.0278],
        [-0.0539,  0.0010,  0.0222,  ...,
...
print(len(params))
> 12
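
For reference, the 12 parameters come from the LSTM (weight_ih_l0, weight_hh_l0, bias_ih_l0, bias_hh_l0) plus a weight and a bias for each of the four Linear layers. You could verify this on your side with something like:

for name, p in model.named_parameters():
    print(name, tuple(p.shape))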

Oh, how could that happen? I just get nothing.

Are you sure you are using this model definition?
If you are running the code in a Jupyter notebook, try to restart it and run the cells again.
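One quick way to check which definition is actually being used (assuming the class is defined in a .py file you can inspect from the same session):

import inspect
print(inspect.getsource(type(model)))  # the class body Python actually loaded
print(type(model).__module__)          # the module that class came from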

I am sure. In the same code file, I define another model like this:

import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution
        # kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        # all dimensions except the batch dimension
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

I can successfully print this model and get the following results:

Net(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (fc1): Linear(in_features=400, out_features=120, bias=True)
  (fc2): Linear(in_features=120, out_features=84, bias=True)
  (fc3): Linear(in_features=84, out_features=10, bias=True)
)

These two models were defined in the same file.

This code also works for me.
Which PyTorch version are you using?
Maybe it’s some weird bug and I would like to debug it.
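You can print it with:

import torch
print(torch.__version__)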