DQN Agent is not learning anything

My DQN agent in the gym FrozenLake environment is not learning. I have tried everything I can think of, but I cannot get it to work. I would appreciate any help, thank you.

import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import collections
import random
from torch import optim
from tqdm import tqdm


class DQN(nn.Module):
    def __init__(self, n_actions, n_inputs, lr=0.01):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(n_inputs, 64)
        self.fc2 = nn.Linear(64, 128)
        self.fc3 = nn.Linear(128, n_actions)

        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.device = 'cuda:0'
        self.loss = nn.MSELoss()
        self.to(self.device)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        actions = self.fc3(x)
        return actions


class Agent:
    def __init__(self, gamma=0.9, epsilon=0.9):
        self.env = gym.make('FrozenLake-v0')
        self.n_actions = self.env.action_space.n
        self.gamma = gamma
        self.epsilon = epsilon
        self.policy_net = DQN(self.n_actions, 1)
        self.target_net = DQN(self.n_actions, 1)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.replay_memory = collections.deque(maxlen=10000)
        self.min_replay_memory_size = 100
        self.batch_size = 64
        self.target_update_counter = 0
        self.reward_list = []

    def update_replay_memory(self, obs):
        self.replay_memory.append(obs)

    def get_q(self, state):
        return self.policy_net.forward(torch.tensor([state], dtype=torch.float32).cuda())

    def choose_action(self, q):
        if np.random.random() > self.epsilon:
            action = torch.argmax(q).item()
        else:
            action = self.env.action_space.sample()
        return action

    def train(self):
        if len(self.replay_memory) < self.min_replay_memory_size:
            return
        batch = random.sample(self.replay_memory, self.batch_size)
        current_states = torch.tensor([transition[0] for transition in batch], dtype=torch.float32).cuda()
        current_states = current_states.unsqueeze(1)
        self.policy_net.eval()
        self.policy_net.zero_grad()
        current_qs_list = self.policy_net.forward(
            current_states).detach().cpu().numpy()
        new_current_states = torch.tensor([transition[3] for transition in batch], dtype=torch.float32).cuda()
        new_current_states = new_current_states.unsqueeze(1)
        self.target_net.eval()
        self.target_net.zero_grad()
        future_qs_list = self.target_net.forward(
            new_current_states).detach().cpu().numpy()
        X = []
        y = []
        for index, (current_state, action, reward, new_current_state, done) in enumerate(batch):
            done = False
            if not done:
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + self.gamma * max_future_q
            else:
                new_q = reward
            current_qs = current_qs_list[index]
            current_qs[action] = new_q
            X.append(current_state)
            y.append(current_qs)
        self.policy_net.train()
        self.policy_net.zero_grad()
        preds = self.policy_net.forward(torch.tensor(X, dtype=torch.float32).unsqueeze(1).cuda())
        loss = self.policy_net.loss(preds, torch.tensor(y).cuda())
        loss.backward()
        self.policy_net.optimizer.step()
        if self.target_update_counter > 5:
            self.target_net.load_state_dict(self.policy_net.state_dict())
            self.target_update_counter = 0
            self.epsilon -= 0.02

    def step(self):
        done = False
        state = self.env.reset()
        reward_all = 0
        while not done:
            q = self.get_q(state)
            action = self.choose_action(q)
            next_state, reward, done, _ = self.env.step(action)
            reward_all += reward
            self.update_replay_memory([state, action, reward, next_state, done])
            self.train()
            if done:
                agent.target_update_counter += 1
            state = next_state
        self.reward_list.append(reward_all)


agent = Agent()
episodes = 1000
for episode in tqdm(range(episodes)):
    agent.step()

print(sum(agent.reward_list) / episodes)

agent.env.close()


""" average reward is 0.0043"""

Some evident errors; there might be more:

  1. What's this? You overwrite done with False for every transition, so the terminal branch (new_q = reward) is never taken and episode-ending transitions still bootstrap from the next state (see the corrected sketch after this list):

      for index, (current_state, action, reward, new_current_state, done) in enumerate(batch):
          done = False
          if not done:
              max_future_q = np.max(future_qs_list[index])
              new_q = reward + self.gamma * max_future_q
          else:
              new_q = reward
    
  2. Don't call forward() directly; call the module itself so that any registered hooks and the rest of the nn.Module machinery run:

    preds = self.policy_net.forward(...)
    

    You should do:

    preds = self.policy_net(...)
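
For reference, here is a sketch of how the target computation in train() could look with both fixes applied. It reuses the tensors (current_states, new_current_states) and variable names from the code above and assumes the rest of the class stays the same:

    # Q-values for the batch; no gradients are needed while building targets.
    with torch.no_grad():
        current_qs_list = self.policy_net(current_states).cpu().numpy()
        future_qs_list = self.target_net(new_current_states).cpu().numpy()

    X, y = [], []
    for index, (current_state, action, reward, new_current_state, done) in enumerate(batch):
        # Keep the real done flag: terminal transitions must not bootstrap.
        if not done:
            new_q = reward + self.gamma * np.max(future_qs_list[index])
        else:
            new_q = reward
        current_qs = current_qs_list[index]
        current_qs[action] = new_q
        X.append(current_state)
        y.append(current_qs)

    self.policy_net.train()
    self.policy_net.optimizer.zero_grad()
    # Call the module, not .forward(), so any registered hooks run.
    preds = self.policy_net(torch.tensor(X, dtype=torch.float32).unsqueeze(1).cuda())
    loss = self.policy_net.loss(preds, torch.tensor(np.array(y)).cuda())
    loss.backward()
    self.policy_net.optimizer.step()

The torch.no_grad() block simply replaces the eval()/detach() calls from the original; it does not change the computed values.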
    

I tried removing .forward(), but it did not change anything; the results are still very bad.