How do we use the GRU encoder's final state as input to the decoder?

The following code is taken from 9.7. Sequence to Sequence Learning — Dive into Deep Learning 0.17.2 documentation
The state shape from encoder(X)[1] is torch.Size([2, 4, 16]), where 2 is the number of layers, 4 is the batch size, and 16 is the hidden size (num_hiddens — not the embedding size).

    """The RNN decoder for sequence to sequence learning."""
    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        """Build the seq2seq GRU decoder layers.

        Args:
            vocab_size: size of the target vocabulary (output dimension).
            embed_size: dimensionality of the token embeddings.
            num_hiddens: GRU hidden-state size.
            num_layers: number of stacked GRU layers.
            dropout: dropout probability between GRU layers (default 0).
        """
        super(Seq2SeqDecoder, self).__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Input size is embed_size + num_hiddens because the encoder's final
        # top-layer hidden state (the "context") is concatenated with the
        # embedding of every decoder input token in `forward`.
        # NOTE(review): the original paste was truncated here — the closing
        # `dropout=dropout)` was missing, which is a syntax error.
        self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers,
                          dropout=dropout)
        # Project GRU outputs to per-token vocabulary logits.
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, *args):
        """Use the encoder's final hidden state as the decoder's initial state.

        `enc_outputs` is the (outputs, hidden_state) pair returned by the
        encoder; only the hidden state (index 1) is needed. Any extra
        positional arguments are accepted for interface compatibility and
        ignored.
        """
        hidden_state = enc_outputs[1]
        return hidden_state

    def forward(self, X, state):
        # The output `X` shape: (`num_steps`, `batch_size`, `embed_size`)
        X = self.embedding(X).permute(1, 0, 2)
        # Broadcast `context` so it has the same `num_steps` as `X`
        context = state[-1].repeat(X.shape[0], 1, 1)
        X_and_context =, context), 2)
        output, state = self.rnn(X_and_context, state)
        output = self.dense(output).permute(1, 0, 2)
        # `output` shape: (`batch_size`, `num_steps`, `vocab_size`)
        # `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)
        return output, state

# Sanity-check the decoder's output shapes.
# NOTE(review): `encoder` and `X` are not defined in this snippet — in the
# tutorial they come from the preceding encoder cell (a Seq2SeqEncoder built
# with the same sizes, and X = torch.zeros((4, 7), dtype=torch.long)).
# The original paste truncated this constructor call (missing num_layers).
decoder = Seq2SeqDecoder(vocab_size=10, embed_size=8, num_hiddens=16,
                         num_layers=2)
state = decoder.init_state(encoder(X))
output, state = decoder(X, state)
# Expected: (torch.Size([4, 7, 10]), torch.Size([2, 4, 16]))
output.shape, state.shape

My doubt is about the line `context = state[-1].repeat(X.shape[0], 1, 1)`.
Why is `state[-1]` taken? Do we take the last layer's final hidden state from the GRU and concatenate it with every input embedding of X?