AttributeError: 'builtin_function_or_method' object has no attribute 'dim'

The following error occurred when I used an LSTM network to train a binary sentiment classifier on the IMDB dataset:

AttributeError Traceback (most recent call last)
in
4 for epoch in range(N_EPOCHS):
5 start_time = time.time()
----> 6 train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
7 valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
8

in train(model, iterator, optimizer, criterion)
17 # batch.text is the `text` argument of the forward function above
18 # squeeze the extra dimension, otherwise it won't match batch.label
---> 19 predictions = model(batch.text).squeeze(1)
20
21 loss = criterion(predictions, batch.label)

/usr/local/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)

in forward(self, text)
38
39
---> 40 return self.fc(hidden.squeeze)

/usr/local/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)

/usr/local/lib/python3.6/site-packages/torch/nn/modules/linear.py in forward(self, input)
65 @weak_script_method
66 def forward(self, input):
---> 67 return F.linear(input, self.weight, self.bias)
68
69 def extra_repr(self):

/usr/local/lib/python3.6/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1348 - Output: :math:`(N, *, out\_features)`
1349 """
-> 1350 if input.dim() == 2 and bias is not None:
1351 # fused op is marginally faster
1352 ret = torch.addmm(torch.jit._unwrap_optional(bias), input, weight.t())

AttributeError: 'builtin_function_or_method' object has no attribute 'dim'

This is the associated source code:

import torch
import torch.nn as nn
import torch.nn.functional as F

class RNN(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
                 n_layers, bidirectional, dropout, pad_idx):

        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)

        # embedding_dim: dimension of each word vector
        # hidden_dim: dimension of the hidden state
        # num_layers: depth of the network (number of stacked LSTM layers)
        # bidirectional: whether the RNN is bidirectional
        # dropout temporarily drops units from the network with a given probability during training.
        # Cross-validation suggests a dropout rate of 0.5 works best, because it generates
        # the largest number of distinct thinned network structures.
        self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,
                           bidirectional=bidirectional, dropout=dropout)

        self.fc = nn.Linear(hidden_dim * 2, output_dim)  # *2 because of the BiLSTM
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        embedded = self.dropout(self.embedding(text))  # [sent len, batch size, emb dim]

        # output = [sent len, batch size, hid dim * num directions]
        # hidden = [num layers * num directions, batch size, hid dim]
        # cell = [num layers * num directions, batch size, hid dim]
        output, (hidden, cell) = self.rnn(embedded)

        # concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden states
        # and apply dropout
        # result: [batch size, hid dim * num directions], concatenated along the feature dimension;
        # the last two slices are the final states the BiLSTM keeps
        hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))

        return self.fc(hidden.squeeze)
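
The objects used below and in the training loop (model, optimizer, criterion, train_iterator, valid_iterator) are not shown in the post. A minimal setup sketch, assuming torchtext-style TEXT/LABEL fields and the usual IMDB split (the field names and hyperparameters are assumptions, not taken from the original code):

import torch.optim as optim

INPUT_DIM = len(TEXT.vocab)                # assumed torchtext vocabulary
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]  # index of the <pad> token

model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM,
            N_LAYERS, BIDIRECTIONAL, DROPOUT, PAD_IDX)
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()         # binary sentiment classification with a single logit output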

def train(model, iterator, optimizer, criterion):

    epoch_loss = 0
    epoch_acc = 0
    total_len = 0

    # model.train() switches to training mode:
    # it enables BatchNormalization and Dropout
    # (model.eval() disables them)
    model.train()

    # iterator is train_iterator here
    for batch in iterator:
        # zero the gradients so they don't accumulate across batches
        optimizer.zero_grad()

        # batch.text is the `text` argument of the forward function above
        # squeeze the extra dimension, otherwise it won't match batch.label
        predictions = model(batch.text).squeeze(1)

        loss = criterion(predictions, batch.label)
        acc = binary_accuracy(predictions, batch.label)

        loss.backward()   # backpropagation
        optimizer.step()  # gradient descent step

        # loss.item() is already divided by len(batch.label),
        # so multiply it back to get the loss of the whole batch,
        # then accumulate to get the loss over all samples
        epoch_loss += loss.item() * len(batch.label)

        # acc.item() is the accuracy of one batch;
        # multiplied by the batch size it gives the number of correct predictions,
        # accumulated over all batches of train_iterator
        epoch_acc += acc.item() * len(batch.label)

        # total number of samples in train_iterator, should be 17500
        total_len += len(batch.label)

    # epoch_loss / total_len: loss over all batches of train_iterator
    # epoch_acc / total_len: accuracy over all batches of train_iterator
    return epoch_loss / total_len, epoch_acc / total_len
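
binary_accuracy is called in train but not shown above; a minimal sketch of what it presumably does, assuming the model outputs a single raw logit per example (as nn.BCEWithLogitsLoss expects):

def binary_accuracy(preds, y):
    # Apply sigmoid, round to 0/1, and compare with the labels.
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()
    return correct.sum() / len(correct)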

import time

N_EPOCHS = 10
best_valid_loss = float('inf')

for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)

    end_time = time.time()

    epoch_mins, epoch_secs = epoch_time(start_time, end_time)

    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'lstm-model.pt')

    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} |  Val. Acc: {valid_acc*100:.2f}%')
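
evaluate and epoch_time are also called here but not shown in the post; sketches of what they presumably look like, inferred from how they are used above (evaluate mirrors train without gradient updates):

def evaluate(model, iterator, criterion):
    epoch_loss = 0
    epoch_acc = 0
    total_len = 0

    model.eval()              # disable Dropout
    with torch.no_grad():     # no gradients needed for validation
        for batch in iterator:
            predictions = model(batch.text).squeeze(1)
            loss = criterion(predictions, batch.label)
            acc = binary_accuracy(predictions, batch.label)

            epoch_loss += loss.item() * len(batch.label)
            epoch_acc += acc.item() * len(batch.label)
            total_len += len(batch.label)

    return epoch_loss / total_len, epoch_acc / total_len

def epoch_time(start_time, end_time):
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - elapsed_mins * 60)
    return elapsed_mins, elapsed_secs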

How can I solve this problem?

It seems you are accidentally passing a method to the forward function in this line of code:

return self.fc(hidden.squeeze)

Make sure you are calling the squeeze operation and passing its output to self.fc via:

return self.fc(hidden.squeeze())
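
Without the parentheses, hidden.squeeze is the bound method object itself, so F.linear ends up calling .dim() on that object instead of on a tensor, which produces exactly this AttributeError. A quick standalone illustration (not from the original code):

import torch

x = torch.randn(1, 2, 512)
print(type(x.squeeze))     # <class 'builtin_function_or_method'>: the method itself has no .dim()
print(x.squeeze().dim())   # 2: calling it returns a tensor, which nn.Linear can consume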

Thank you for your reply. You are right!