How to use an LSTM when the input is not needed (e.g. in a decoder)

I want to implement a decoder, but I don’t need its input. Can I just pass a torch.zeros tensor as its input?

e.g.

def main_test_zero_input_to_lstm():
    """Demonstrate driving an LSTM decoder step with an all-zeros input.

    Builds a 1-layer, unidirectional ``nn.LSTM``, initializes a random
    (h_n, c_n) state, and feeds a single zero time-step through it.

    Returns:
        tuple: ``(out, hidden)`` where ``out`` has shape
        ``(seq_len, batch_size, hidden_size)`` and ``hidden`` is the
        updated ``(h_n, c_n)`` pair, so callers can inspect the result.
    """
    ## model params
    n_layers, nb_directions = 1, 1
    hidden_size = 64
    input_size = 3
    # Resolve the device locally so the demo is self-contained
    # (the original relied on a module-level `device` global).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    decoder = nn.LSTM(input_size=input_size, hidden_size=hidden_size).to(device)
    # BUG FIX: the original printed `lstm.hidden_size`, but no `lstm`
    # name exists in this scope — the model is bound to `decoder`.
    print(decoder.hidden_size)
    ## initialize the hidden state(s) on the SAME device as the model,
    ## otherwise this crashes when `device` is CUDA
    batch_size = 1
    h_n = torch.randn(n_layers*nb_directions, batch_size, hidden_size, device=device)
    c_n = torch.randn(n_layers*nb_directions, batch_size, hidden_size, device=device)
    hidden = (h_n, c_n)
    ## pass through fake data; nn.LSTM expects (seq_len, batch, input_size)
    seq_len, embedding_dim = 1, input_size
    # NOTE: a 2-D tensor of shape (seq_len, embedding_dim) raises here —
    # the batch dimension is required.
    zeros_fake_data = torch.zeros(seq_len, batch_size, embedding_dim, device=device)
    ## do a decoder step
    out, hidden = decoder(zeros_fake_data, hidden)
    ##
    print(f'out size equals hidden state size: {out.size() == hidden[0].size()}')
    print(f'out.size() = {out.size()}')
    print(out)
    return out, hidden

# Script entry point: run the zero-input LSTM demo between start/end markers
# ('\a' is the terminal bell, sounded when the run finishes).
if __name__ == '__main__':
    print('start')
    main_test_zero_input_to_lstm()
    print('DONE \a')

Or is this bad practice? In a typical decoder, should I instead feed each step the output (or embedding of the token) produced at the previous step, rather than zeros?