I need your help resolving this error:

next_state, reward, done, info = env.step(action)
TypeError: cannot unpack non-iterable int object

class QNetwork(nn.Module):
    def __init__(self, state_size, action_size, seed):
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, 32)
        self.fc2 = nn.Linear(32, 64)
        self.fc3 = nn.Linear(64, action_size)

    def forward(self, x):
        """Forward pass"""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

class ReplayBuffer:
    def __init__(self, buffer_size, batch_size, seed):
        self.batch_size = batch_size
        self.seed = random.seed(seed)
        self.memory = deque(maxlen=buffer_size)
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done):
        """Add experience"""
        experience = self.experience(state, action, reward, next_state, done)
        self.memory.append(experience)

    def sample(self):
        experiences = random.sample(self.memory, k=self.batch_size)

        # Convert to torch tensors
        states = torch.from_numpy(
            np.vstack([experience.state for experience in experiences if experience is not None])).float().to(device)
        actions = torch.from_numpy(
            np.vstack([experience.action for experience in experiences if experience is not None])).long().to(device)
        rewards = torch.from_numpy(
            np.vstack([experience.reward for experience in experiences if experience is not None])).float().to(device)
        next_states = torch.from_numpy(
            np.vstack([experience.next_state for experience in experiences if experience is not None])).float().to(
            device)
        # Convert done from boolean to int
        dones = torch.from_numpy(
            np.vstack([experience.done for experience in experiences if experience is not None]).astype(
                np.uint8)).float().to(device)

        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        return len(self.memory)

BUFFER_SIZE = int(1e5) # Replay memory size
BATCH_SIZE = 64 # Number of experiences to sample from memory
GAMMA = 0.99 # Discount factor
TAU = 1e-3 # Soft update parameter for updating fixed q network
LR = 1e-4 # Q Network learning rate
UPDATE_EVERY = 4 # How often to update Q network

class DQNAgent:
    def __init__(self, state_size, action_size, seed, learningRate, decayRate, numOfEpisodes, stepsPerEpisode,
                 epsilon, annealingConstant, annealAfter, checkpoint=''):
        self.state_size = state_size
        self.action_size = action_size
        random.seed(seed)
        # Initialize Q and Fixed Q networks
        self.q_network = QNetwork(state_size, action_size, seed).to(device)
        self.fixed_network = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.q_network.parameters())
        # Initialise memory
        self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, seed)
        self.timestep = 0
        self.voltageRanges_2 = ['<0.85', '0.85-0.874', '0.875-0.899', '0.9-0.924', '0.925-0.949', '0.95-0.974',
                                '0.975-0.999', '1-1.024', '1.025-1.049', '1.05-1.074', '1.075-1.1', '>=1.1']
        self.loadingPercentRange = ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80-89',
                                    '90-99', '100-109', '110-119', '120-129', '130-139', '140-149', '150 and above']
        self.statesLev1 = ['v_' + x + 'l' + y for x in self.voltageRanges_2 for y in self.loadingPercentRange]
        self.states = ['s1:' + x + ';s2:' + y + ';' for x in self.statesLev1 for y in self.statesLev1]

        # initialise environment
        self.env = case9()

        # Possible actions to take, combinations of v_ref into combinations of lp_ref
        self.actions = ['v_ref:' + str(x) + ';lp_ref:' + str(y) for x in self.env.actionSpace['v_ref_pu'] for y in
                        self.env.actionSpace['lp_ref']]

        # Check if pickle file with current hyperparams exist
        self.checkPointName = 'pickles_qlearning\pickled_q_table_lr' + str(learningRate) + 'dr' + str(
            decayRate) + 'noe' + str(numOfEpisodes) + 'spe' + str(stepsPerEpisode) + 'e' + str(epsilon) + 'ac' + str(
            annealingConstant) + 'aa' + str(annealAfter) + '.pkl'
        if os.path.isfile(self.checkPointName) or checkpoint != '':
            print('loading data from checkpoint')
            # Load Qtable from pickle file
            with open(self.checkPointName if os.path.isfile(self.checkPointName) else checkpoint, 'rb') as pickle_file:
                data = pickle.load(pickle_file)
                self.epsilon = data['e'] if os.path.isfile(self.checkPointName) else epsilon
                self.q_table = data['q_table']
                self.allRewards = data['allRewards'] if os.path.isfile(self.checkPointName) else []
        else:  # create new Q_table
            self.q_table = pd.DataFrame(0, index=np.arange(len(self.actions)), columns=self.states)
            self.epsilon = epsilon
            self.allRewards = []
        self.numOfEpisodes = numOfEpisodes
        self.annealingRate = annealingConstant
        self.numOfSteps = stepsPerEpisode
        self.learningRate = learningRate
        self.decayRate = decayRate
        self.annealAfter = annealAfter

    def step(self, state, action, reward, next_state, done):
        self.memory.add(state, action, reward, next_state, done)
        self.timestep += 1
        if self.timestep % UPDATE_EVERY == 0:
            if len(self.memory) > BATCH_SIZE:
                sampled_experiences = self.memory.sample()
                self.learn(sampled_experiences)

    def learn(self, experiences):
        states, actions, rewards, next_states, dones = experiences
        action_values = self.fixed_network(next_states).detach()
        max_action_values = action_values.max(1)[0].unsqueeze(1)

        # If done just use reward, else update Q_target with discounted action values
        Q_target = rewards + (GAMMA * max_action_values * (1 - dones))
        Q_expected = self.q_network(states).gather(1, actions)

        # Calculate loss
        loss = F.mse_loss(Q_expected, Q_target)
        self.optimizer.zero_grad()
        # backward pass
        loss.backward()
        # update weights
        self.optimizer.step()

        # Update fixed weights
        self.update_fixed_network(self.q_network, self.fixed_network)

    def update_fixed_network(self, q_network, fixed_network):
        for source_parameters, target_parameters in zip(q_network.parameters(), fixed_network.parameters()):
            target_parameters.data.copy_(TAU * source_parameters.data + (1.0 - TAU) * target_parameters.data)

    def act(self, state, eps=0.0):
        rnd = random.random()
        if rnd < eps:
            return np.random.randint(self.action_size)
        else:
            state = torch.from_numpy(state).float().unsqueeze(0).to(device)
            # set the network into evaluation mode
            self.q_network.eval()
            with torch.no_grad():
                action_values = self.q_network(state)
            # Back to training mode
            self.q_network.train()
            action = np.argmax(action_values.cpu().data.numpy())
            return action

    def checkpoint(self, filename):
        torch.save(self.q_network.state_dict(), filename)

MAX_EPISODES = 2000
MAX_STEPS = 1000
ENV_SOLVED = 200
PRINT_EVERY = 100

# Epsilon schedule

EPS_START = 1.0
EPS_DECAY = 0.999
EPS_MIN = 0.01

EPS_DECAY_RATES = [0.9, 0.99, 0.999, 0.9999]
plt.figure(figsize=(10, 6))

for decay_rate in EPS_DECAY_RATES:
    test_eps = EPS_START
    eps_list = []
    for _ in range(MAX_EPISODES):
        test_eps = max(test_eps * decay_rate, EPS_MIN)
        eps_list.append(test_eps)

plt.plot(eps_list, label='decay rate: {}'.format(decay_rate))

plt.title('Effect of various decay rates')
plt.legend(loc='best')
plt.xlabel('Number of episodes')
plt.ylabel('epsilon')
plt.show()

# Get state and action sizes

state_size = env.observation_space
action_size = env.action_space
print('State size: {}, action size: {}'.format(state_size, action_size))

# Output: State size: 8, action size: 4

dqn_agent = DQNAgent(8, 4, seed=0, numOfEpisodes=50000, stepsPerEpisode=12, learningRate=0.1,
                     decayRate=0.1, epsilon=1, annealingConstant=0.98, annealAfter=400, checkpoint='')

start = time()
scores = []

# Maintain a list of last 100 scores

scores_window = deque(maxlen=100)
eps = EPS_START
for episode in range(1, MAX_EPISODES + 1):
    state = env.reset()
    score = 0
    for t in range(MAX_STEPS):
        action = dqn_agent.act(state, eps)
        next_state, reward, done, info = env.step(action)
        dqn_agent.step(state, action, reward, next_state, done)
        state = next_state
        score += reward
        if done:
            break

    eps = max(eps * EPS_DECAY, EPS_MIN)
    if episode % PRINT_EVERY == 0:
        mean_score = np.mean(scores_window)
        print('\r Progress {}/{}, average score:{:.2f}'.format(episode, MAX_EPISODES, mean_score), end="")
    if score >= ENV_SOLVED:
        mean_score = np.mean(scores_window)
        print('\rEnvironment solved in {} episodes, average score: {:.2f}'.format(episode, mean_score), end="")
        sys.stdout.flush()
        dqn_agent.checkpoint('solved_200.pth')
        break

    scores_window.append(score)
    scores.append(score)

end = time()
print('Took {} seconds'.format(end - start))

# Output: Progress 2000/2000, average score: 205.24
dqn_agent.checkpoint('solved_200.pth')

plt.figure(figsize=(10, 6))
plt.plot(scores)

It is a bit hard to see the above plot, so let's smooth it (red):

plt.plot(pd.Series(scores).rolling(100).mean())
plt.title('DQN Training')
plt.xlabel('# of episodes')
plt.ylabel('score')
plt.show()

Based on the error message, you are trying to unpack an int, which doesn't work:

def fun():
    return 1

a, b = fun()
> TypeError: cannot unpack non-iterable int object

However, your post doesn't show the step() implementation of the case9 environment whose return value you are unpacking, so I'm unsure whether you've already changed that code. In any case, a step() with no return statement at all would implicitly return None, and the error would then read:

TypeError: cannot unpack non-iterable NoneType object
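
For completeness, here is a minimal, self-contained sketch of both failure modes and the fix. The ToyEnv class below is purely illustrative (it is not your case9 environment, and every name inside it is an assumption); the point is only what step() must return so that the four-way unpacking works:

# Toy environment for illustration only -- NOT the real case9 class.
class ToyEnv:
    def __init__(self):
        self.t = 0

    def step_no_return(self, action):
        # No return statement -> implicitly returns None
        self.t += 1

    def step_returns_int(self, action):
        # Returns a single int (e.g. just the reward)
        self.t += 1
        return 1

    def step_fixed(self, action):
        # Returns the full 4-tuple the training loop expects
        self.t += 1
        next_state = [0.0] * 8      # placeholder observation
        reward = 1.0                # placeholder reward
        done = self.t >= 10         # placeholder termination flag
        info = {}                   # optional diagnostics
        return next_state, reward, done, info

env = ToyEnv()
# next_state, reward, done, info = env.step_no_return(0)
#   -> TypeError: cannot unpack non-iterable NoneType object
# next_state, reward, done, info = env.step_returns_int(0)
#   -> TypeError: cannot unpack non-iterable int object (your error)
next_state, reward, done, info = env.step_fixed(0)   # unpacks cleanly

Since you are getting the int version of the error, the step() of case9 almost certainly ends in something like return reward (a single integer) rather than return next_state, reward, done, info, so that is the place I would change.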

Thank you a lot for your help. I will give you feedback.