Test Loss looks weird when plotted

I am using an LSTM to perform binary classification. When I plot the test loss, it is not decreasing over time; instead it fluctuates a lot and looks extremely odd. The training loss, on the other hand, looks normal and decreases over time. Here is a picture of it:

This is my code for the model definition and configuration:

        # Create LSTM Model
class LSTMModel(nn.Module):
  """LSTM binary classifier: last hidden state -> linear -> sigmoid probability."""

  def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
    super(LSTMModel, self).__init__()
    # Number of hidden dimensions
    self.hidden_dim = hidden_dim

    # Number of stacked LSTM layers
    self.layer_dim = layer_dim

    # batch_first=True -> input shape is (batch, seq_len, input_dim).
    # NOTE: nn.LSTM's dropout only applies *between* layers, so it is a
    # no-op when layer_dim == 1.
    self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim,
                        batch_first=True, dropout=0.2)

    # Readout layer
    self.f1 = nn.Linear(hidden_dim, output_dim)
    # This is a Sigmoid, not a softmax; the attribute name is kept only
    # for backward compatibility with existing callers.
    self.softmax = nn.Sigmoid()

  def forward(self, x):
    # Fix: create the initial hidden/cell states on the same device and
    # dtype as the input instead of hard-coding .cuda(), which crashed on
    # CPU-only machines. The deprecated torch.autograd.Variable wrapper is
    # removed as well (tensors subsume Variables since PyTorch 0.4).
    h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim,
                     device=x.device, dtype=x.dtype)
    c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim,
                     device=x.device, dtype=x.dtype)

    out, (hn, cn) = self.lstm(x, (h0, c0))
    # Summarise the sequence with the last layer's final hidden state.
    out = self.f1(hn[-1])
    out = self.softmax(out)  # probability in (0, 1), suitable for BCELoss
    return out

#LSTM Configuration
batch_size = 10000  # NOTE(review): very large; smaller batches (e.g. 64-512) usually train better
num_epochs = 200
learning_rate = 0.001

# Create LSTM
input_dim = 1    # features per time step
hidden_dim = 50  # hidden layer dimension
layer_dim = 2    # number of stacked LSTM layers
output_dim = 1   # single sigmoid output for binary classification

model = LSTMModel(input_dim, hidden_dim, layer_dim, output_dim)

# Fix: move the model's parameters onto the GPU when one is available.
# In the original code the parameters stayed on the CPU while forward()
# created CUDA hidden states, which raises a device-mismatch error.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# BCELoss expects probabilities, which the model's final Sigmoid produces.
error = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

This is my code for training and testing:

from tensorboardcolab import TensorBoardColab

tb = TensorBoardColab()
globaliter = 0   # x-axis step for the train-loss curve
globaliter2 = 0  # Fix: was never initialised -> NameError on the first test batch

for epoch in tqdm(range(num_epochs)):
    # ---- Train ----
    model.train()  # enable dropout during training
    for inputs, targets in train_loader:
        inputs = inputs.type(torch.FloatTensor).cuda()
        targets = targets.type(torch.FloatTensor).cuda()

        # Fix: the original loop never called zero_grad/backward/step,
        # so no parameter update ever happened.
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = error(outputs, targets)
        loss.backward()
        optimizer.step()

        tb.save_value('Train Loss', 'train_loss', globaliter, loss.item())
        globaliter += 1

    # ---- Test ----
    # Fix: the test block was mis-indented (6-space dedent matching no
    # enclosing block -> IndentationError); it is now properly nested in
    # the epoch loop.
    model.eval()  # disable dropout for evaluation
    with torch.no_grad():  # no gradients needed; saves memory and time
        for inputs, targets in test_loader:
            inputs = inputs.type(torch.FloatTensor).cuda()
            targets = targets.type(torch.FloatTensor).cuda()
            outputs = model(inputs)
            loss_test = error(outputs, targets)
            # NOTE(review): logging every batch makes the curve noisy;
            # averaging the loss over the whole test set once per epoch
            # gives a much smoother plot.
            tb.save_value('Test Loss', 'test_loss', globaliter2, loss_test.item())
            globaliter2 += 1

I’d really be grateful if someone helped me figure this out, or offered suggestions or advice.