Newcomer to PyTorch in need of help

Hello all. New to the forum and new to ML/PyTorch in general.

My end goal right now is to use PyTorch, and RL specifically, to calculate the movement of a robotic arm to a given target location. To take some small steps towards that goal, I’m starting off with a single-link arm in a 2D environment that tries to point towards a goal position.
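
Concretely, by “point towards a goal” I mean driving the angle between the arm vector and the vector from the arm tip to the goal down to zero (the same quantity calc_angle_difference computes in the env code below). A tiny numpy illustration with made-up numbers:

import numpy as np

theta, length = 0.3, 1.0                                  # arm angle (rad) and link length
goal = np.array([2.0, 2.0])                               # example goal position
tip = length * np.array([np.sin(theta), np.cos(theta)])   # theta = 0 points straight up (+y)
to_goal = goal - tip                                      # vector from the arm tip to the goal
angle_difference = np.arccos(tip @ to_goal / (np.linalg.norm(tip) * np.linalg.norm(to_goal)))
print(angle_difference)                                   # the reward should push this towards 0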

To reiterate, I am brand new to machine learning but do have over a decade of programming experience.

I have followed the basic tutorial here to get the classic CartPole model from OpenAI running. After getting that working on my computer, I began modifying the CartPole env source code from here, along with the PyTorch example, to get to my “2D single-link arm” environment.

I have gotten this modified environment to run, as you can see below.

I’m not sure that I am using the env state and reward calculations correctly, as the model doesn’t seem to converge on a good reward during training. I’m also not sure whether I’ve set up the training loop correctly, although the only thing I’ve changed so far in the PyTorch tutorial is the get_screen function. (There is a small random-action sanity-check sketch right after the env code below.)

I would love some input on the code I’ve written as I’m not sure where to go from here.

I am attaching the code for the modified PyTorch example and the env below.

armenv.py:

import gym
from gym import spaces
from gym.utils import seeding
import numpy as np

MAX_STEPS=200
STEPS_ON_GOAL_TO_FINISH=10

class ArmEnv(gym.Env):
  """Arm Environment that follows gym interface"""
  metadata = {
      'render.modes': ['human', 'rgb_array'],
      'video.frames_per_second': 50
  }

  def __init__(self):
    self.length = 1.0  # length of arm
    self.goal = [2.,2.]
    self.tau = 0.02  # seconds between state updates
    self.theta_adj = 2.0  # angular speed (rad/s) applied by actions 1 and 2

    self.angle_difference_threshold = 0.5  # within this angle (rad) of the goal counts as "on goal"

    # Observation is [theta, angle_difference, goal_x, goal_y];
    # these are the max and min values each element can take
    high = np.array([np.pi/2, np.pi, 1.8, 2.1], dtype=np.float32)
    low = np.array([-np.pi/2, 0, -1.8, 1.1], dtype=np.float32)

    self.action_space = spaces.Discrete(3)
    self.observation_space = spaces.Box(low, high, dtype=np.float32)

    self.on_goal = 0
    self.current_step = 0

    self.seed()
    self.viewer = None
    self.state = None

  def seed(self, seed=None):
    self.np_random, seed = seeding.np_random(seed)
    return [seed]

  def calc_angle_difference(self, theta):
    # Get state of arm
    arm_mag = self.length
    arm_vector = np.array([arm_mag * np.sin(theta), arm_mag * np.cos(theta)])

    # Get distance vector between the goal and end of arm
    distance_vector = np.array([self.goal[0] - arm_vector[0], self.goal[1] - arm_vector[1]])
    distance_mag = np.sqrt(distance_vector[0]**2 + distance_vector[1]**2)

    # Get the angle between this distance vector and the arm vector
    return np.arccos(np.dot(arm_vector, distance_vector)/(arm_mag*distance_mag), dtype=np.float32)

  def step(self, action):
    err_msg = "%r (%s) invalid" % (action, type(action))
    assert self.action_space.contains(action), err_msg

    self.current_step += 1
    theta, _, goalx, goaly = self.state

    # Adjust theta based on the chosen action
    # if action == 1:
    #   theta_adj = self.theta_adj
    # else:
    #   theta_adj = -self.theta_adj
    if action == 1:
      theta_adj = self.theta_adj
    elif action == 2:
      theta_adj = -self.theta_adj
    else:
      theta_adj = 0

    theta += theta_adj * self.tau
    theta = max(min(theta, np.pi/2), -np.pi/2)
  
    # Get the angle between this distance vector and the arm vector
    angle_difference = self.calc_angle_difference(theta)

    self.state = (theta, angle_difference, goalx, goaly)

    # delay_modifier = float(self.current_step / MAX_STEPS)
    # r = float(1 - angle_difference*2)
    # r = np.exp(-angle_difference, dtype=np.float32)
    r = np.exp(-angle_difference*3, dtype=np.float32)
    # r = np.exp(-angle_difference, dtype=np.float32) * delay_modifier
    # r = np.exp(-angle_difference, dtype=np.float32) * (1.0 - delay_modifier)

    if theta >= np.pi/2 or theta <= -np.pi/2:
      r = 0.0 # Baaaad boi

    if angle_difference <= self.angle_difference_threshold and \
      angle_difference >= -self.angle_difference_threshold:
      self.on_goal += 1
      r = 1.0 # Goooood boi
    else:
      self.on_goal = self.on_goal - 2 if self.on_goal > 0 else 0

    done = bool(self.on_goal >= STEPS_ON_GOAL_TO_FINISH or
      self.current_step >= MAX_STEPS or
      theta >= np.pi/2 or
      theta <= -np.pi/2
    )

    print(self.current_step, np.array(self.state), r, self.on_goal, action)
    return np.array(self.state), float(r), done, {}

  def reset(self):
    self.goal = np.array([self.np_random.rand() * 3.6 - 1.8, self.np_random.rand() + 1.1])
    self.on_goal = 0
    self.current_step = 0

    new_theta = self.np_random.rand()*np.pi - np.pi/2
    new_angle_difference = self.calc_angle_difference(new_theta)

    self.state = np.array([new_theta, new_angle_difference, *self.goal], dtype=np.float32)
    return np.array(self.state)

  def render(self, mode='human'):
      screen_width = 600
      screen_height = 400

      world_width = self.length * 4
      scale = screen_width/world_width
      polewidth = 10.0
      polelen = scale * (self.length)
      goalwidth = 15.0
      goalheight = 15.0

      if self.viewer is None:
          from gym.envs.classic_control import rendering
          self.viewer = rendering.Viewer(screen_width, screen_height)
          l, r, t, b = -goalwidth / 2, goalwidth / 2, goalheight / 2, -goalheight / 2
          goal = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
          self.goaltrans = rendering.Transform(translation=(self.goal[0] * scale + screen_width / 2.0, self.goal[1] * scale))
          goal.add_attr(self.goaltrans)
          self.viewer.add_geom(goal)
          l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
          pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
          pole.set_color(.8, .6, .4)
          self.poletrans = rendering.Transform(translation=(screen_width / 2.0, 0))
          pole.add_attr(self.poletrans)
          self.viewer.add_geom(pole)
          self._pole_geom = pole

      if self.state is None:
          return None

      # Edit the pole polygon vertex
      pole = self._pole_geom
      l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
      pole.v = [(l, b), (l, t), (r, t), (r, b)]

      x = self.state
      self.goaltrans.set_translation(self.goal[0] * scale + screen_width / 2.0, self.goal[1] * scale)
      self.poletrans.set_rotation(-x[0])

      return self.viewer.render(return_rgb_array=mode == 'rgb_array')

  def close(self):
      if self.viewer:
          self.viewer.close()
          self.viewer = None
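
As a quick sanity check on the reward shaping, here is a throwaway sketch (separate from the training code) that just rolls the env forward with random actions and prints the return:

from armenv import ArmEnv

env = ArmEnv()
obs = env.reset()
done = False
episode_return = 0.0
while not done:
    action = env.action_space.sample()        # pick one of the 3 discrete actions at random
    obs, reward, done, _ = env.step(action)   # step() also prints its own debug line
    episode_return += reward
print("return from a random-action episode:", episode_return)
env.close()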

main.py:


from armenv import ArmEnv
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T

env = ArmEnv()

# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display

plt.ion()

# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))


class ReplayMemory(object):
  def __init__(self, capacity):
      self.capacity = capacity
      self.memory = []
      self.position = 0

  def push(self, *args):
      """Saves a transition."""
      if len(self.memory) < self.capacity:
          self.memory.append(None)
      self.memory[self.position] = Transition(*args)
      self.position = (self.position + 1) % self.capacity

  def sample(self, batch_size):
      return random.sample(self.memory, batch_size)

  def __len__(self):
      return len(self.memory)

class DQN(nn.Module):

  def __init__(self, h, w, outputs):
    super(DQN, self).__init__()
    self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
    self.bn1 = nn.BatchNorm2d(16)
    self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
    self.bn2 = nn.BatchNorm2d(32)
    self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
    self.bn3 = nn.BatchNorm2d(32)

    # Number of Linear input connections depends on output of conv2d layers
    # and therefore the input image size, so compute it.
    def conv2d_size_out(size, kernel_size = 5, stride = 2):
        return (size - (kernel_size - 1) - 1) // stride  + 1
    convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
    convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
    linear_input_size = convw * convh * 32
    self.head = nn.Linear(linear_input_size, outputs)

  # Called with either one element to determine the next action, or a batch
  # during optimization. Returns a [batch, n_actions] tensor of Q-values.
  def forward(self, x):
    x = F.relu(self.bn1(self.conv1(x)))
    x = F.relu(self.bn2(self.conv2(x)))
    x = F.relu(self.bn3(self.conv3(x)))
    return self.head(x.view(x.size(0), -1))

resize = T.Compose([T.ToPILImage(),
  T.Resize(40, interpolation=Image.CUBIC),
  T.ToTensor()])

def get_screen():
  # Returned screen requested by gym is 400x600x3, but is sometimes larger
  # such as 800x1200x3. Transpose it into torch order (CHW).
  screen = env.render(mode='rgb_array').transpose((2, 0, 1))
  screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
  screen = torch.from_numpy(screen)
  # Resize, and add a batch dimension (BCHW)
  return resize(screen).unsqueeze(0).to(device)

env.reset()
plt.figure()
plt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),
           interpolation='none')
plt.title('Example extracted screen')
plt.show()

BATCH_SIZE = 128
GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
TARGET_UPDATE = 10

# Get screen size so that we can initialize layers correctly based on shape
# returned from AI gym. Typical dimensions at this point are close to 3x40x60,
# the result of the down-scaled 400x600 render buffer in get_screen()
init_screen = get_screen()
_, _, screen_height, screen_width = init_screen.shape

# Get number of actions from gym action space
n_actions = env.action_space.n

policy_net = DQN(screen_height, screen_width, n_actions).to(device)
target_net = DQN(screen_height, screen_width, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()

optimizer = optim.RMSprop(policy_net.parameters())
memory = ReplayMemory(10000)

steps_done = 0

def select_action(state):
  global steps_done
  sample = random.random()
  eps_threshold = EPS_END + (EPS_START - EPS_END) * \
    math.exp(-1. * steps_done / EPS_DECAY)
  steps_done += 1
  if sample > eps_threshold:
    with torch.no_grad():
      # t.max(1) will return largest column value of each row.
      # second column on max result is index of where max element was
      # found, so we pick action with the larger expected reward.
      return policy_net(state).max(1)[1].view(1, 1)
  else:
    return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)

episode_rewards = []

def plot_rewards():
  plt.figure(2)
  plt.clf()
  rewards_t = torch.tensor(episode_rewards, dtype=torch.float)
  plt.title('Training...')
  plt.xlabel('Episode')
  plt.ylabel('Reward')
  plt.plot(rewards_t.numpy())
  # Take 10-episode averages and plot them too
  if len(rewards_t) >= 10:
    means = rewards_t.unfold(0, 10, 1).mean(1).view(-1)
    means = torch.cat((torch.zeros(9), means))
    plt.plot(means.numpy())

  plt.pause(0.001)  # pause a bit so that plots are updated
  if is_ipython:
    display.clear_output(wait=True)
    display.display(plt.gcf())

def optimize_model():
  if len(memory) < BATCH_SIZE:
    return
  transitions = memory.sample(BATCH_SIZE)
  # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
  # detailed explanation). This converts batch-array of Transitions
  # to Transition of batch-arrays.
  batch = Transition(*zip(*transitions))

  # Compute a mask of non-final states and concatenate the batch elements
  # (a final state would've been the one after which simulation ended)
  non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                          batch.next_state)), device=device, dtype=torch.bool)
  non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
  state_batch = torch.cat(batch.state)
  action_batch = torch.cat(batch.action)
  reward_batch = torch.cat(batch.reward)

  # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
  # columns of actions taken. These are the actions which would've been taken
  # for each batch state according to policy_net
  state_action_values = policy_net(state_batch).gather(1, action_batch)

  # Compute V(s_{t+1}) for all next states.
  # Expected values of actions for non_final_next_states are computed based
  # on the "older" target_net; selecting their best reward with max(1)[0].
  # This is merged based on the mask, such that we'll have either the expected
  # state value or 0 in case the state was final.
  next_state_values = torch.zeros(BATCH_SIZE, device=device)
  next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
  # Compute the expected Q values
  expected_state_action_values = (next_state_values * GAMMA) + reward_batch

  # Compute Huber loss
  loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))

  # Optimize the model
  optimizer.zero_grad()
  loss.backward()
  for param in policy_net.parameters():
    param.grad.data.clamp_(-1, 1)
  optimizer.step()

num_episodes = 400
for i_episode in range(num_episodes):
  # Initialize the environment and state
  env.reset()
  last_screen = get_screen()
  current_screen = get_screen()
  state = current_screen - last_screen
  for t in count():
    # Select and perform an action
    action = select_action(state)
    _, reward, done, _ = env.step(action.item())
    reward = torch.tensor([reward], device=device)

    # Observe new state
    last_screen = current_screen
    current_screen = get_screen()
    if not done:
      next_state = current_screen - last_screen
    else:
      next_state = None

    # Store the transition in memory
    memory.push(state, action, next_state, reward)

    # Move to the next state
    state = next_state

    # Perform one step of the optimization (on the target network)
    optimize_model()
    if done:
      episode_rewards.append(reward)  # note: this records only the final step's reward, not the episode return
      plot_rewards()
      break
  # Update the target network, copying all weights and biases in DQN
  if i_episode % TARGET_UPDATE == 0:
    target_net.load_state_dict(policy_net.state_dict())

print('Complete')
env.render()
env.close()
plt.ioff()
plt.show()
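
One change I’m considering, in case learning from screen differences is part of the convergence problem, is to skip get_screen entirely and feed the 4-element observation (theta, angle_difference, goalx, goaly) straight into a small fully connected network. This is only a rough, untested sketch of what I mean (the MLPDQN name and layer sizes are placeholders I made up), and the rest of the training loop would need matching changes to store env state tensors instead of screen tensors:

import torch
import torch.nn as nn

class MLPDQN(nn.Module):
  """Q-network over the raw 4-number observation instead of pixels."""
  def __init__(self, n_obs, n_actions):
    super().__init__()
    self.net = nn.Sequential(
      nn.Linear(n_obs, 64), nn.ReLU(),
      nn.Linear(64, 64), nn.ReLU(),
      nn.Linear(64, n_actions),
    )

  def forward(self, x):
    # x: (batch, n_obs) float tensor -> (batch, n_actions) Q-values
    return self.net(x)

# e.g. state = torch.tensor(env.reset(), dtype=torch.float32, device=device).unsqueeze(0)
#      q_values = MLPDQN(4, n_actions).to(device)(state)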