Getting "Trying to backward through the graph a second time" error while implementing a stateful LSTM

I am implementing a stateful LSTM:

import torch
import torch.nn as nn

# device is defined elsewhere in my code; shown here for completeness
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class StatefulFraudLSTM(nn.Module):
    def __init__(self, num_features, hidden_size=100, hidden_size_lstm=100,
                 num_layers_lstm=3, dropout_lstm=0, batch_size=128):
        super(StatefulFraudLSTM, self).__init__()
        # Parameters
        self.num_features = num_features
        self.hidden_size = hidden_size
        self.hidden_size_lstm = hidden_size_lstm
        self.num_layers_lstm = num_layers_lstm
        self.batch_size = batch_size

        # Representation learning part
        self.lstm = nn.LSTM(num_features, hidden_size_lstm, num_layers_lstm,
                            batch_first=True, dropout=dropout_lstm)

        # Representation to hidden
        self.fc1 = nn.Linear(hidden_size_lstm, hidden_size)
        self.relu = nn.ReLU()

        # Hidden to output
        self.fc2 = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

        # Initialize hidden and cell states
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Initialize hidden and cell states with zeros
        h0 = torch.zeros(self.num_layers_lstm, self.batch_size, self.hidden_size_lstm).to(device)
        c0 = torch.zeros(self.num_layers_lstm, self.batch_size, self.hidden_size_lstm).to(device)
        return (h0, c0)

    def forward(self, x):
        # Input arrives as (batch, features, seq_len), so transpose to
        # (batch, seq_len, features) for batch_first=True. The returned
        # states are stored so they carry over to the next batch.
        representation, self.hidden = self.lstm(x.transpose(1, 2), self.hidden)

        # Classify from the last layer's final hidden state
        hidden_state = self.hidden[0][-1]
        out = self.fc1(hidden_state)
        out = self.relu(out)

        out = self.fc2(out)
        out = self.sigmoid(out)

        return out
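I train the model one batch at a time, roughly like the sketch below (simplified: train_loader, the BCELoss criterion, the Adam optimizer, and num_features=30 are placeholders, not my exact code):

# Simplified training loop; loader, loss, optimizer and sizes are placeholders
model = StatefulFraudLSTM(num_features=30).to(device)
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters())

for x_batch, y_batch in train_loader:
    optimizer.zero_grad()
    out = model(x_batch.to(device))
    loss = criterion(out.squeeze(1), y_batch.to(device).float())
    loss.backward()  # fails on the second batch: self.hidden still
                     # references the first batch's autograd graph
    optimizer.step()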

I get the following error:

RuntimeError: Trying to backward through the graph a second time, but the buffers have already been freed. Specify retain_graph=True when calling backward the first time.

It seems I might need to .detach() the hidden states before passing them back into the model.

I am still learning PyTorch and I am not sure how to do that correctly. Could you please help me?
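My current understanding is that the fix would look something like the method below added to the class (detach_hidden is my own name, not from any tutorial), which keeps the state values but cuts them out of the autograd graph:

    def detach_hidden(self):
        # Keep the current hidden/cell state values but detach them from
        # the autograd graph, so backward() on the next batch does not
        # try to traverse the previous batch's (already freed) graph
        h, c = self.hidden
        self.hidden = (h.detach(), c.detach())

Would calling model.detach_hidden() at the start of each batch in the training loop (or at the top of forward) be the right way to do it?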