Whenever I run any RNN model, I get a `CUDNN_STATUS_BAD_PARAM` error.

class BiRNN(nn.Module):
    """Bidirectional LSTM sequence classifier.

    Runs a stacked bidirectional LSTM over a batch-first sequence and
    classifies each sequence from the hidden output at the last time step.

    Args:
        input_size: number of input features per time step.
        hidden_size: hidden units per LSTM direction.
        num_layers: number of stacked LSTM layers.
        num_classes: output dimension of the final linear layer.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BiRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, bidirectional=True)
        # Forward and backward hidden states are concatenated -> 2x width.
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        # x: (batch, seq_len, input_size) because batch_first=True.
        #
        # Build the initial states on the SAME device and with the SAME
        # dtype as the input.  The original code created float32 CPU
        # tensors and hard-coded .cuda(); a dtype/device mismatch between
        # the input and (h0, c0) is a classic cause of
        # CUDNN_STATUS_BAD_PARAM.  (Variable is deprecated since PyTorch
        # 0.4 — plain tensors are used directly.)
        #
        # NOTE(review): cuDNN also requires the input itself to be float32
        # and contiguous — confirm the caller passes x.float(), not a
        # float64 tensor converted from numpy.
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size,
                         dtype=x.dtype, device=x.device)  # 2 for bidirection
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size,
                         dtype=x.dtype, device=x.device)

        # out: (batch, seq_len, hidden_size * 2)
        out, _ = self.lstm(x, (h0, c0))

        # Decode the hidden state of the last time step.
        return self.fc(out[:, -1, :])

File "/home/asif/PycharmProjects/RAVDESS/capsule_main.py", line 143, in <module>
engine.train(processor, utils.get_iterator(True), maxepoch=config.NUM_EPOCHS, optimizer=optimizer)
File "/home/asif/asifml_tf/lib/python3.6/site-packages/torchnet/engine/engine.py", line 63, in train
state['optimizer'].step(closure)
File "/home/asif/asifml_tf/lib/python3.6/site-packages/torch/optim/adam.py", line 58, in step
loss = closure()
File "/home/asif/asifml_tf/lib/python3.6/site-packages/torchnet/engine/engine.py", line 52, in closure
loss, output = state['network'](state['sample'])
File "/home/asif/PycharmProjects/RAVDESS/capsule_main.py", line 42, in processor
classes = model(data)
File "/home/asif/asifml_tf/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/home/asif/PycharmProjects/RAVDESS/LSTMSenti.py", line 23, in forward
out, _ = self.lstm(x, (h0, c0))
File "/home/asif/asifml_tf/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/home/asif/asifml_tf/lib/python3.6/site-packages/torch/nn/modules/rnn.py", line 179, in forward
self.dropout, self.training, self.bidirectional, self.batch_first)
RuntimeError: cuDNN error: CUDNN_STATUS_BAD_PARAM

nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2017 NVIDIA Corporation
Built on Fri_Sep__1_21:08:03_CDT_2017
Cuda compilation tools, release 9.0, V9.0.176

cudnn version 7.1

All the paths are correctly put in LD_LIBRARY_PATH. Could not find a solution please help.