Dear Friends
I’m following the tutorial
In the Exercise section, you will see:
-> Add more linear layers
Does "adding more linear layers" mean creating hidden-to-hidden layers?
For instance, below is the tutorial code:
class RNN(Module):
def __init__(self, input_size, hidden_size,
output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
size_sum = input_size + hidden_size
self.i2h = Linear(size_sum, hidden_size)
self.h2o = Linear(size_sum, output_size)
self.softmax = LogSoftmax(dim=1)
def forward(self, input_, hidden_):
combined = cat(tensors=(input_, hidden_), dim=1)
hidden_ = self.i2h(input=combined)
output = self.h2o(input=combined)
output = self.softmax(input=output)
return output, hidden_
def init_hidden(self):
return zeros(1, self.hidden_size)
If I add a hidden-to-hidden layer, as below, does that count as "adding more linear layers", or am I mistaken?
class RNN(Module):
    """Character-level RNN cell with an extra hidden-to-hidden layer.

    Fix versus the pasted version: the original applied the *same*
    ``self.h2h`` layer five times in a row with no nonlinearity in
    between.  A composition of linear maps is itself a single linear
    map, so those repeated applications added no representational
    power — and reusing one weight matrix five times per step makes
    the model harder to optimize (a likely cause of the reported
    accuracy drop).  Here each Linear is followed by ``tanh`` so the
    extra depth is actually nonlinear.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # i2h reads the concatenated [input, hidden] vector.
        size_sum = input_size + hidden_size
        self.i2h = Linear(size_sum, hidden_size)
        self.h2h = Linear(hidden_size, hidden_size)
        self.h2o = Linear(hidden_size, output_size)
        self.softmax = LogSoftmax(dim=1)

    def forward(self, input_, hidden_):
        """Run one time step; return (log-probs, next hidden state)."""
        combined = cat(tensors=(input_, hidden_), dim=1)
        # .tanh() between layers: without a nonlinearity, stacked
        # Linear layers collapse into one linear transformation.
        hidden_ = self.i2h(input=combined).tanh()
        hidden_ = self.h2h(input=hidden_).tanh()
        output = self.h2o(input=hidden_)
        output = self.softmax(input=output)
        return output, hidden_

    def init_hidden(self):
        """Fresh all-zero hidden state for the start of a sequence."""
        return zeros(1, self.hidden_size)
The reason I’m asking is that when I run the second RNN module, my accuracy is 8% lower than with the previous module. I may be interpreting the "add more linear layers" instruction incorrectly.
Thanks