DQN runs but the agent is not improving

Hi, I’m new to machine learning and programming in general. I’m trying to get a DQN to beat the OpenAI Gym MountainCar-v0 environment. The code runs without any errors, but the agent does not seem to improve at the game at all: I ran 50,000 episodes and the average score over the past 100 episodes stayed at -200 (MountainCar-v0 caps episodes at 200 steps with a reward of -1 per step, so -200 means the car never reaches the flag). The code is below. If anyone is willing to go through it and let me know what I’ve done wrong, I would greatly appreciate it.
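For reference, a purely random agent also scores -200 here, so my agent seems to be doing no better than random. A quick baseline check I used to confirm that (a sketch, using the same old-style gym step API as my code below):

import gym

env = gym.make('MountainCar-v0')
env.reset()
score, done = 0, False
while not done:
    # random actions: the episode almost always hits the 200-step cap
    _, reward, done, _ = env.step(env.action_space.sample())
    score += reward
print(score)  # -200.0 unless the car luckily reaches the flag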

import numpy as np
import random
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import gym

sample_size = 25  # minibatch size sampled from replay memory per learning step

# Creating the architecture of the Neural Network

class Network(nn.Module):
    
    def __init__(self, input_size, nb_action):
        super(Network, self).__init__()
        self.input_size = input_size
        self.nb_action = nb_action
        self.fc1 = nn.Linear(input_size, 30)
        self.fc2 = nn.Linear(30, nb_action)
    
    def forward(self, state):
        x = F.relu(self.fc1(state))
        q_values = self.fc2(x)
        return q_values
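# Note: the network maps MountainCar's 2-dimensional observation
# (position, velocity) to one Q-value per discrete action.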

# Implementing Experience Replay

class ReplayMemory(object):
    
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
    
    def push(self, event):
        self.memory.append(event)
        if len(self.memory) > self.capacity:
            del self.memory[0]  # drop the oldest transition once full
    
    def sample(self, batch_size):
        # zip(*...) transposes the sampled (state, next_state, action, reward)
        # tuples into per-field tuples; torch.cat then stacks each field
        # into a single batch tensor
        samples = zip(*random.sample(self.memory, batch_size))
        return [torch.cat(x, 0) for x in samples]

# Implementing Deep Q Learning

class Dqn():
    
    def __init__(self, input_size, nb_action, gamma):
        self.gamma = gamma

        self.model = Network(input_size, nb_action)
        self.memory = ReplayMemory(100000)
        self.optimizer = optim.Adam(self.model.parameters(), lr = 0.001)
        # placeholder state of shape (1, input_size); overwritten on the
        # first call to select_action
        self.last_state = torch.Tensor(input_size).unsqueeze(0)
        self.last_action = 0
        self.last_reward = 0
        self.reward_window = []  # rolling window of recent rewards for score()
    
    def select_action(self, state):
        state_transformed = torch.Tensor(state).float().unsqueeze(0)
        # softmax over Q-values with temperature T = 100: multiplying the
        # Q-values by 100 sharpens the distribution toward the greedy action
        with torch.no_grad():
            probs = F.softmax(self.model(state_transformed) * 100, dim=1)
        action = probs.multinomial(1)
        self.last_action = action.item()
        self.last_state = state_transformed
        return self.last_action
    
    def learn(self, batch_state, batch_next_state, batch_reward, batch_action):
        # Q(s, a) for the actions actually taken
        outputs = self.model(batch_state).gather(1, batch_action.unsqueeze(1)).squeeze(1)
        # one-step TD target: r + gamma * max_a' Q(s', a')
        next_outputs = self.model(batch_next_state).detach().max(1)[0]
        target = self.gamma*next_outputs + batch_reward
        td_loss = F.smooth_l1_loss(outputs, target)
        self.optimizer.zero_grad()
        td_loss.backward()
        self.optimizer.step()
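    # One thing I was unsure about: standard DQN zeroes the bootstrap term
    # on terminal transitions, but my replay memory never stores `done`.
    # A sketch of what that masking would look like inside learn() (the
    # batch_done tensor is hypothetical, nothing in my code records it):
    #
    #   next_q = self.model(batch_next_state).detach().max(1)[0]
    #   target = batch_reward + self.gamma * next_q * (1 - batch_done)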
    
    def update(self, reward, new_signal):
        new_state = torch.Tensor(new_signal).float().unsqueeze(0)
        # store the transition (s, s', a, r) in replay memory
        self.memory.push((self.last_state, new_state, torch.LongTensor([int(self.last_action)]), torch.Tensor([reward])))
        if len(self.memory.memory) > sample_size:
            batch_state, batch_next_state, batch_action, batch_reward = self.memory.sample(sample_size)
            self.learn(batch_state, batch_next_state, batch_reward, batch_action)
        self.reward_window.append(reward)
        if len(self.reward_window) > 1000:
            del self.reward_window[0]
    
    def score(self):
        # +1. avoids division by zero before any rewards have been recorded
        return sum(self.reward_window)/(len(self.reward_window)+1.)
    
    def save(self):
        torch.save({'state_dict': self.model.state_dict(),
                    'optimizer' : self.optimizer.state_dict(),
                   }, 'Brain_save_1.pth')
    
    def load(self):
        '''
        Load a saved checkpoint if one exists.
        '''
        if os.path.isfile('Brain_save_1.pth'):
            print("=> loading checkpoint... ")
            checkpoint = torch.load('Brain_save_1.pth')
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            print("done !")
        else:
            print("no checkpoint found...")


if __name__ == '__main__':
    env = gym.make('MountainCar-v0')
    EPISODES = 25000
    show_every = 500
    save_check = 24998
    scores = []
    env.reset()


    brain = Dqn(2, env.action_space.n, 0.95)  # MountainCar observations are 2-dimensional
    brain.load()

    for episode in range(EPISODES):
        score = 0
        done = False
        obs = env.reset()
        reward = 0

        if episode == save_check:
            brain.save()
            print('File saved')

        while not done:
            action = brain.select_action(obs)
            obs_, reward, done, info = env.step(action)
            brain.update(reward, obs_)
            obs = obs_
            score += reward
        scores.append(score)

        if episode % show_every == 0:
            avg_score = np.mean(scores[-100:])
            print('episode %d score %.1f avg score %.1f' % (episode, score, avg_score))
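
One more thing I wondered about: would epsilon-greedy exploration instead of the temperature-100 softmax make a difference? Something like this sketch is what I had in mind (the method name and epsilon value are made up; it is not part of my code above):

    def select_action_eps(self, state, eps=0.1):
        # epsilon-greedy sketch: random action with probability eps,
        # otherwise the greedy action under the current Q-network
        if random.random() < eps:
            return random.randrange(self.model.nb_action)
        with torch.no_grad():
            q_values = self.model(torch.Tensor(state).float().unsqueeze(0))
        return q_values.argmax(1).item()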