# LSTM by hand, beginner's question

Hello, I’m a real beginner in PyTorch, especially with LSTM models, so thank you for your indulgence.
I’m trying to understand how it works based on the handmade model. It actually involves predicting the share price of two companies A and B whose past prices are as follows.
`inputs = torch.tensor([[0.0, 0.25, 0.50, 1.0], [1.0, 0.25, 0.50, 1.0]])`

Here is code for LSTM model.

``````std = torch.tensor(1.0)

predic_a = []
predic_b = []```

```class lstmbyhand():
def __init__(self):
#super().__init__()

# Creation de deux tenseurs initialisé à 0 et à 1
mean = torch.tensor(0.0)
std = torch.tensor(1.0)

# Init of weights and biases

# Define components of lstm_unit block
def lstm_unit(self, short_term_memory, long_term_memory, input):
layer_1_output = torch.sigmoid((short_term_memory * self.layer_1_w1) +
(input * self.layer_1_w2) +
self.layer_1_b1)

layer_2_output = torch.sigmoid((short_term_memory * self.layer_2_w1) +
(input * self.layer_2_w2) +
self.layer_2_b1)

layer_3_output = torch.tanh((short_term_memory * self.layer_3_w1) +
(input * self.layer_3_w2) +
self.layer_3_b1)

layer_4_output = torch.sigmoid((short_term_memory * self.layer_4_w1) +
(input * self.layer_4_w2) +
self.layer_4_b1)

# Calculation of long term memory and short term memory
percent_LT_toremember = long_term_memory * layer_1_output
updated_LT = layer_2_output * layer_3_output + percent_LT_toremember

potential_ST_toremember = layer_4_output

return [updated_LT, updated_ST]

def forward_pass(self, input):
long_term_memory = 0
short_term_memory = 0
day1 = input[0]
day2 = input[1]
day3 = input[2]
day4 = input[3]

updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day1)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day2)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day3)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day4)

return updated_ST

def optimizer(self):

def loss_fn(self):

for i, (data, label) in enumerate(dataloader):
output = self.forward_pass(data)
print('output: {}'.format(output))
loss = loss_fn (output, label)
print('loss: {}'.format(loss))

if label == 0:
predic_a.append(output)

else:
predict_b.append(output)

return loss, predic_a, predic_b
``````

Then create an instance.
`model = lstmbyhand()`
And train the model.

``````labels = torch.tensor([0.0, 1.0])
dataset = TensorDataset(inputs, labels)

n_epochs = 200
for epoch in range(n_epochs):
model(data)
``````

Error message.

``````Cell In[220], line 8
6 n_epochs = 200
7 for epoch in range(n_epochs):
----> 8     model(data)

TypeError: 'lstmbyhand' object is not callable```

I would like to understand what I'm missing here. Thank you for your insights!``````

You can either rename this method to `def forward(self, input):` or explicitly call

``````for epoch in range(n_epochs):
model.forward_pass(data)
``````

Thank you Chris for your time on this; nevertheless, I'm still having problems.

``````       long_term_memory = 0
short_term_memory = 0
day1 = input[0]
day2 = input[1]
day3 = input[2]
day4 = input[3]
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day1)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day2)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day3)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day4)
return updated_ST```

```def optimizer(self):

```def loss_fn(self):

for i, (data, label) in enumerate(dataloader):
output = self.forward(data)
print('output: {}'.format(output))
loss = loss_fn (output, label)
print('loss: {}'.format(loss))
if label == 0:
predic_a.append(output)
else:
predict_b.append(output)
return loss, predic_a, predic_b```

```model = lstmbyhand()```

```inputs = torch.tensor([[0.0, 0.25, 0.50, 1.0], [1.0, 0.25, 0.50, 1.0]])
labels = torch.tensor([0.0, 1.0])
dataset = TensorDataset(inputs, labels)

```n_epochs = 200```
```for epoch in range(n_epochs):
model(data)```

Error message:
```TypeError                                 Traceback (most recent call last)
Cell In[419], line 8
6 n_epochs = 200
7 for epoch in range(n_epochs):
----> 8     model(data)

TypeError: 'lstmbyhand' object is not callable`````````

Try this (note: the underscores in `__init__` were stripped by the forum's markdown rendering):

```
class lstmbyhand(nn.Module):
    def __init__(self):
        super(lstmbyhand, self).__init__()
```

Thank you Rinaldi for your time on this — I think it solved that issue.
There is still an error message, this time an indexing one, but I will manage to solve it.
Thank you again,

1 Like

I’m posting the code below, which might help other beginners like me.

``````
mean = torch.tensor(0.0)
std = torch.tensor(1.0)

predict_a = []
predict_b = []```

Then create the LSTM by hand model
```class lstmbyhand():
def __init__(self):
#super(lstmbyhand, self).__init__()

# Creation de deux tenseurs initialisé à 0 et à 1
mean = torch.tensor(0.0)
std = torch.tensor(1.0)
print('mean: {}'.format(mean))

# Init of weights and biases

print('self.layer_4_b1: {}'.format(self.layer_4_b1))
print('input: {}'.format(input))

# Params + optimizer
self.parameters = [self.layer_1_w1,self.layer_1_w2, self.layer_1_b1,
self.layer_2_w1,self.layer_2_w2, self.layer_2_b1,
self.layer_3_w1,self.layer_3_w2, self.layer_3_b1,
self.layer_4_w1,self.layer_4_w2, self.layer_4_b1]

# Loss
self.criterion = nn.MSELoss()

# Define components of lstm_unit block
def lstm_unit(self, short_term_memory, long_term_memory, input):

print('input: {}'.format(input))
print('short_term_memory: {}'.format(short_term_memory))

layer_1_output = torch.sigmoid((short_term_memory * self.layer_1_w1) +
(input * self.layer_1_w2) +
self.layer_1_b1)

layer_2_output = torch.sigmoid((short_term_memory * self.layer_2_w1) +
(input * self.layer_2_w2) +
self.layer_2_b1)

layer_3_output = torch.tanh((short_term_memory * self.layer_3_w1) +
(input * self.layer_3_w2) +
self.layer_3_b1)

layer_4_output = torch.sigmoid((short_term_memory * self.layer_4_w1) +
(input * self.layer_4_w2) +
self.layer_4_b1)

# Calculation of long term memory and short term memory
percent_LT_toremember = long_term_memory * layer_1_output
updated_LT = layer_2_output * layer_3_output + percent_LT_toremember

potential_ST_toremember = layer_4_output

print('updated_ST: {}'.format(updated_ST))

return [updated_LT, updated_ST]

def forward(self, input):
long_term_memory = 0
short_term_memory = 0
print('input:{}'.format(input))
day1 = input[0][0]
print('day1 {}'.format(day1))
day2 = input[0][1]
day3 = input[0][2]
day4 = input[0][3]

updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day1)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day2)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day3)
updated_LT, updated_ST = self.lstm_unit(long_term_memory, short_term_memory, day4)

return updated_ST

for i, (input, label) in enumerate(dataloader):

print('i: {}, input: {}, label: {}'.format(i, input, label))
output = self.forward(input)
print('output: {}'.format(output))

self.optimizer = Adam(params = self.parameters, lr=0.001)

loss = self.criterion(output, label)
print('loss: {}'.format(loss))
loss.backward()

#self.loss_fn (output, label).backward()
self.optimizer.step()

if label == 0:
predict_a.append(output)

else:
predict_b.append(output)

return loss, predict_a, predic_b```

Then train the model:
```model = lstmbyhand()
inputs = torch.tensor([[0.0, 0.25, 0.50, 1.0], [1.0, 0.25, 0.50, 1.0]])
labels = torch.tensor([0.0, 1.0])
dataset = TensorDataset(inputs, labels)

n_epochs = 200
for epoch in range(n_epochs):

It gives an idea of how the whole LSTM architecture works, and I hope it can help.``````

This seems already solved for your purpose, however, I will still mention this for future reference. The most commonly seen `model(input)` format for calling a PyTorch model is achieved by inheriting from the class `torch.nn.Module` and implementing the method `forward()`. Here’s a basic example:

``````import torch
std = torch.tensor(1.0)

predic_a = []
predic_b = []```

class lstmbyhand(torch.nn.Module):
def __init__(self):
super().__init__()
``````model = lstmbyhand()