Hidden size GRU

My model:

import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class Model(nn.Module):

    def __init__(self):
        super(Model, self).__init__()
        # input_size=3, hidden_size=256, num_layers=3
        self.gru = nn.GRU(3, 256, 3)
        self.fc3 = nn.Linear(256, 1)
        nn.init.xavier_uniform_(self.fc3.weight)

    def forward(self, x):
        batch = x.size(0)
        # (batch, seq, features) -> (seq, batch, features), since batch_first=False
        out = torch.transpose(x, 0, 1)
        hidden = self._init_hidden(batch)
        out, hidden = self.gru(out, hidden)
        # hidden has shape [num_layers, batch, hidden_size], e.g. torch.Size([3, 128, 256])
        out = self.fc3(hidden)
        return out

    def _init_hidden(self, batch):
        return torch.zeros(3, batch, 256).to(device)
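
To make the problem concrete, a quick shape check (batch size 128 and sequence length 10 are arbitrary here, just for illustration):

model = Model().to(device)
x = torch.randn(128, 10, 3).to(device)  # (batch, seq_len, input_size)
out = model(x)
print(out.shape)  # torch.Size([3, 128, 1]) -- fc3 is applied to all 3 layers' hidden states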

I need the fc3 layer to take not all 3 layers' hidden states, but only the last one. How do I write this?

In your example, hidden[-1] is the hidden state at the final time step, for the last layer.
It is shaped [batch_size, hidden_size], so

self.fc3(hidden[-1])

will do fine.
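
A minimal sketch of the corrected forward with that change (same hypothetical shapes as in the question):

def forward(self, x):
    batch = x.size(0)
    out = torch.transpose(x, 0, 1)
    hidden = self._init_hidden(batch)
    out, hidden = self.gru(out, hidden)
    # hidden[-1] is the last layer's final hidden state, shape [batch, 256]
    return self.fc3(hidden[-1])

With this change, model(x) for x of shape (128, 10, 3) returns torch.Size([128, 1]).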
