import torch
import torch.nn as nn
from torch.autograd import Variable
class RNN(nn.Module):
    """Single-step vanilla RNN cell (as in the PyTorch char-RNN tutorial).

    Both the next hidden state and the output are computed from the
    concatenation of the current input and the previous hidden state,
    so each linear layer takes ``input_size + hidden_size`` features.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Both layers read the concatenated [input, hidden] vector.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        # Log-probabilities over the output classes (pairs with nn.NLLLoss).
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """Run one time step.

        Args:
            input: tensor of shape (batch, input_size).
            hidden: previous hidden state, shape (batch, hidden_size).

        Returns:
            (output, hidden): log-probabilities of shape
            (batch, output_size) and the new hidden state of shape
            (batch, hidden_size).
        """
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.softmax(self.i2o(combined))
        return output, hidden

    def initHidden(self):
        """Return an all-zero initial hidden state of shape (1, hidden_size)."""
        # Deprecated Variable wrapper removed: since PyTorch 0.4,
        # tensors carry autograd state themselves and Variable(t)
        # simply returns a Tensor, so this is behavior-identical.
        return torch.zeros(1, self.hidden_size)
I found this code in a book (by Vishnu).
Is there an error on line 13? That is, should 'combined' be replaced by 'hidden' there?
Correspondingly, should line 8 be changed to `self.h2o = nn.Linear(hidden_size, output_size)`?
Another of my reference books gives similar code, so I was very confused.