LSTM TypeError: 'int' object is not callable


As far as I know, 'int' object is not callable is the error you get when a name that should refer to a function has been reassigned to an integer, and you then try to call it.
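
For example, this minimal snippet (not from my code, just an illustration) reproduces the message by shadowing a built-in with an integer:

    len = 5           # 'len' no longer refers to the built-in function
    len([1, 2, 3])    # TypeError: 'int' object is not callable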

However, I cannot find any such aliasing in my code.

What could possibly have gone wrong?

Are you passing a tensor to the model or some other object?
Could you post a small, executable code snippet so that we can have a look?

import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torch.utils.data import Dataset, DataLoader
import time
import math

USE_GPU = False
if USE_GPU and torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

vocab = open('vocab.txt').read().splitlines()
n_vocab = len(vocab)
torch.manual_seed(1)

def text2int(csv_file, dname, vocab):
    ret = []
    data = csv_file[dname].values
    for datum in data:
        for char in str(datum):
            idx = vocab.index(char)
            ret.append(idx)
    ret = np.array(ret)
    return ret

class NewsDataset(Dataset):
    def __init__(self, csv_file, vocab):
        self.csv_file = pd.read_csv(csv_file, sep='|')
        self.vocab = vocab
        self.len = len(self.csv_file)
        self.x_data = torch.tensor(text2int(self.csv_file, 'x_data', self.vocab))
        self.y_data = torch.tensor(text2int(self.csv_file, 'y_data', self.vocab))

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        return self.x_data[idx], self.y_data[idx]

dataset = NewsDataset(csv_file='data.csv', vocab=vocab)
train_loader = DataLoader(dataset=dataset,
                          batch_size=64,
                          shuffle=False,
                          num_workers=1)

from torch.autograd import Variable
import numpy as np

class selfModule(nn.Module):
    def __init__(self, inputdim, hiddendim, batchsize, outputdim, numlayers):
        super(selfModule, self).__init__()
        self.inputdim = inputdim
        self.hiddendim = hiddendim
        self.numlayers = numlayers
        self.outputdim = outputdim
        self.batchsize = batchsize
        self.lstm = nn.LSTM(self.inputdim, self.hiddendim, self.numlayers, bias=True, batch_first=False, bidirectional=False)
        self.fc = nn.Linear(self.hiddendim, self.outputdim)

    def forward(self, input):
        onehot_input = np.zeros((len(input), 86))  # 86 = dictionary size
        onehot_input[np.arange(len(input)), input] = 1
        lstm_out, self.hidden = self.lstm(onehot_input)
        prediction = self.fc(lstm_out[-1].view(self.batchsize, -1))
        return prediction.view(-1)

    def init_hidden_cell(self):
        return (torch.zeros(self.numlayers, self.batchsize, self.hiddendim),
                torch.zeros(self.numlayers, self.batchsize, self.hiddendim))

def timeSince(since):
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def train(dataset, model, optimizer, n_iters):
    model.to(device=device)
    model.train()
    start = time.time()
    print_every = 50
    for e in range(n_iters):
        model.hidden = model.init_hidden_cell()
        for i, (x, y) in enumerate(dataset):
            x = x.to(device=device)
            y = y.to(device=device)
            model.zero_grad()
            output = model(x)  #####
            loss = loss_fcn(output, y)
            loss.backward()
            optimizer.step()
        if e % print_every == 0:
            print('%s (%d %d%%) %.4f' % (timeSince(start), e, e / n_iters * 100, loss))

def test(start_letter):
    max_length = 1000
    with torch.no_grad():
        idx = vocab.index(start_letter)
        input_nparray = [idx]
        input_nparray = np.reshape(input_nparray, (1, len(input_nparray)))
        inputs = torch.tensor(input_nparray, device=device, dtype=torch.long)
        output_sen = start_letter
        for i in range(max_length):
            output = model(inputs)
            topv, topi = output.topk(1)
            topi = topi[-1]
            letter = vocab[topi]
            output_sen += letter
            idx = vocab.index(letter)
            input_nparray = np.append(input_nparray, [idx])
            inputs = torch.tensor(input_nparray, device=device, dtype=torch.long)
        return output_sen

print('using device:', device)
inputdim = 86
hiddendim = 100
batchsize = 64
outputdim = 86
numlayers = 128
loss_fcn = nn.NLLLoss()
model = selfModule(inputdim, hiddendim, batchsize, outputdim, numlayers)

do_restore = False

if do_restore:
    model.load_state_dict(torch.load('fng_pt.pt'))
    model.eval()
    model.to(device=device)
else:
    n_iters = 500
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=2e-16, weight_decay=0)
    train(train_loader, model, optimizer, n_iters)
    torch.save(model.state_dict(), 'fng_pt.pt')

print(test('W'))

Here is all my code!
I think I'm passing a tensor to the model, but I'm not sure.

I am having the exact same error. I am passing a numpy.ndarray to the model after successfully training it. Can anybody help diagnose this, please?


Pass a PyTorch tensor to the model: .size is an int attribute in numpy, while it's a function in PyTorch, and nn.LSTM calls input.size() internally, which is where the error is raised.
You can convert a numpy array to a tensor via tensor = torch.from_numpy(array).
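
For example (a minimal sketch, assuming a float array shaped like the one-hot batches above):

    import numpy as np
    import torch

    arr = np.zeros((5, 1, 86), dtype=np.float32)
    print(arr.size)       # numpy: .size is an int (total element count), here 430
    # arr.size(0)         # would raise TypeError: 'int' object is not callable

    tensor = torch.from_numpy(arr)  # shares memory with the numpy array
    print(tensor.size(0))           # PyTorch: .size() is a method, returns 5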
