How do I convert the output (time_step * batch_size * 1) of an LSTM to 1 or 0?

My code is as below:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class Mymodel(nn.Module):

    def __init__(self, input_size, hidden_size, output_size, num_layers, batch_size):
        super(Mymodel, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.lstm = nn.LSTM(input_size, hidden_size)
        self.proj = nn.Linear(hidden_size, output_size)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        return (Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_size)),
                Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_size)))

    def forward(self, x):
        lstm_out, self.hidden = self.lstm(x, self.hidden)
        output = self.proj(lstm_out)
        result = F.sigmoid(output)
        return result

I want to use an LSTM to classify a sentence as good (1) or bad (0). With this code, the result I get has shape time_step * batch_size * 1 rather than a 0 or 1. How should I edit the code to get a classification result?

Your current forward pass applies the linear layer to the LSTM output at every time step, which is why you get a time_step * batch_size * 1 tensor. For sentence classification you only need one prediction per sentence: take the hidden state of the last time step (a single summary vector per sentence), pass it through a linear layer (an MLP) and a sigmoid, and then threshold the resulting probability at 0.5 to obtain 0 or 1.
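A minimal sketch of that idea, assuming the same (time_step, batch_size, input_size) input layout as your code; the class and variable names (SentenceClassifier, prob, label) are just placeholders, not anything from your model:

import torch
import torch.nn as nn


class SentenceClassifier(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers=1):
        super(SentenceClassifier, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        self.proj = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # x: (time_step, batch_size, input_size)
        lstm_out, (h_n, c_n) = self.lstm(x)
        # h_n[-1] is the last layer's hidden state at the final time step,
        # shape (batch_size, hidden_size): one summary vector per sentence
        logits = self.proj(h_n[-1])          # (batch_size, 1)
        prob = torch.sigmoid(logits)         # probability of class "good"
        return prob


model = SentenceClassifier(input_size=50, hidden_size=64)
x = torch.randn(7, 3, 50)                    # 7 time steps, batch of 3 sentences
prob = model(x)                              # (3, 1) probabilities
label = (prob > 0.5).long()                  # 0 or 1 per sentence

For training you would keep the probability (or the raw logits) and use nn.BCELoss (or nn.BCEWithLogitsLoss); the hard 0/1 threshold is only applied at prediction time.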