RuntimeError: NVML_SUCCESS == DriverAPI::get()->nvmlInit_v2_()

I am trying to fine-tune a Hugging Face Transformer model, but I am getting the following error:

Traceback (most recent call last):
  File "/home/ubuntu/NLP_Project/genere_classifier/trainer.py", line 402, in <module>
    train_data_loader, val_data_loader = get_data(tokenizer_name=tokenizer_name, batch_size=batch_size)
                                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/NLP_Project/genere_classifier/src/data.py", line 114, in get_data
    train_data = train_data.map(tokenize_and_numericalize_example, fn_kwargs={"tokenizer": tokenizer})
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 593, in wrapper
    out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
                                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 558, in wrapper
    out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
                                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 3105, in map
    for rank, done, content in Dataset._map_single(**dataset_kwargs):
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 3458, in _map_single
    example = apply_function_on_filtered_inputs(example, i, offset=offset)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/datasets/arrow_dataset.py", line 3361, in apply_function_on_filtered_inputs
    processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/NLP_Project/genere_classifier/src/data.py", line 12, in tokenize_and_numericalize_example
    tokens = tokenizer(example["Lyric"], truncation=True, padding=True)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/tokenization_utils_base.py", line 2872, in __call__
    encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs)
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/tokenization_utils_base.py", line 2978, in _call_one
    return self.encode_plus(
           ^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/tokenization_utils_base.py", line 3051, in encode_plus
    return self._encode_plus(
           ^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/tokenization_utils_fast.py", line 576, in _encode_plus
    batched_output = self._batch_encode_plus(
                     ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/tokenization_utils_fast.py", line 504, in _batch_encode_plus
    encodings = self._tokenizer.encode_batch(
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
KeyboardInterrupt
(stray terminal escape sequence `^[[A` — arrow-key artifact from the captured session, not part of the error output)
(nlp_env) (base) ubuntu@ip-172-31-12-57:~/NLP_Project/genere_classifier$ python3 trainer.py --transformer_name distilbert-base-uncased --tokenizer_name distilbert-base-uncased --freeze_transformer false --batch_size 128 --learning_rate 3e-4 --epochs 20
Map: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8000/8000 [00:08<00:00, 993.88 examples/s]
Map: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2000/2000 [00:01<00:00, 1010.68 examples/s]
  0%|                                                                                                                                                                 | 0/63 [00:01<?, ?it/s]
  0%|                                                                                                                                                                 | 0/20 [00:01<?, ?it/s]
Traceback (most recent call last):
  File "/home/ubuntu/NLP_Project/genere_classifier/trainer.py", line 425, in <module>
    trainer.training_loop(model, data_loader_train=train_data_loader, data_loader_test=val_data_loader, epochs=epochs, 
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/NLP_Project/genere_classifier/trainer.py", line 123, in training_loop
    epoch_loss, epoch_accuracy = self._one_epoch_train(model, data_loader_train, optim_alog)
                                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/NLP_Project/genere_classifier/trainer.py", line 74, in _one_epoch_train
    y_pred_prob = model(inputs, attention_mask)
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/NLP_Project/genere_classifier/src/model.py", line 57, in forward
    output = self.transformer(ids, attention_mask = attention_mask, output_attentions=True)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 822, in forward
    return self.transformer(
           ^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 587, in forward
    layer_outputs = layer_module(
                    ^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 513, in forward
    sa_output = self.attention(
                ^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/ubuntu/nlp_env/lib/python3.12/site-packages/transformers/models/distilbert/modeling_distilbert.py", line 243, in forward
    scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RuntimeError: NVML_SUCCESS == DriverAPI::get()->nvmlInit_v2_() INTERNAL ASSERT FAILED at "/opt/conda/conda-bld/pytorch_1711403380481/work/c10/cuda/CUDACachingAllocator.cpp":799, please report a bug to PyTorch.