Use OffPAC to Play Acrobot-v1

TensorFlow version
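
OffPAC (off-policy actor-critic) learns a target policy $\pi(a|s;\theta)$ from actions chosen by a separate behavior policy $b(a|s)$; in this notebook $b$ is uniform over the three actions. As a rough summary of the updates the agent below implements: each transition $(S_t, A_t, R_{t+1}, S_{t+1}, A_{t+1})$ is weighted by the importance-sampling ratio $\rho_t = \pi(A_t|S_t;\theta) / b(A_t|S_t)$; the actor minimizes the surrogate loss $-\gamma^t \frac{q(S_t,A_t;\mathbf{w})}{b(A_t|S_t)} \pi(A_t|S_t;\theta)$, whose gradient equals $-\gamma^t \rho_t\, q(S_t,A_t;\mathbf{w})\, \nabla_\theta \ln \pi(A_t|S_t;\theta)$; and the critic minimizes the weighted squared TD error $\rho_t \big[U_t - q(S_t,A_t;\mathbf{w})\big]^2$ with target $U_t = R_{t+1} + \gamma (1 - D_{t+1})\, q(S_{t+1},A_{t+1};\mathbf{w})$.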

In [1]:
%matplotlib inline

import sys
import logging
import itertools

import numpy as np
np.random.seed(0)
import pandas as pd
import gym
import matplotlib.pyplot as plt
import tensorflow.compat.v2 as tf
tf.random.set_seed(0)
from tensorflow import keras
from tensorflow import nn
from tensorflow import optimizers
from tensorflow.keras import layers
from tensorflow.keras import losses

logging.basicConfig(level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        stream=sys.stdout, datefmt='%H:%M:%S')
In [2]:
env = gym.make('Acrobot-v1')
for key in vars(env):
    logging.info('%s: %s', key, vars(env)[key])
for key in vars(env.spec):
    logging.info('%s: %s', key, vars(env.spec)[key])
00:00:00 [INFO] env: <AcrobotEnv<Acrobot-v1>>
00:00:00 [INFO] action_space: Discrete(3)
00:00:00 [INFO] observation_space: Box(-28.274333953857422, 28.274333953857422, (6,), float32)
00:00:00 [INFO] reward_range: (-inf, inf)
00:00:00 [INFO] metadata: {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 15}
00:00:00 [INFO] _max_episode_steps: 500
00:00:00 [INFO] _elapsed_steps: None
00:00:00 [INFO] id: Acrobot-v1
00:00:00 [INFO] entry_point: gym.envs.classic_control:AcrobotEnv
00:00:00 [INFO] reward_threshold: -100.0
00:00:00 [INFO] nondeterministic: False
00:00:00 [INFO] max_episode_steps: 500
00:00:00 [INFO] _kwargs: {}
00:00:00 [INFO] _env_name: Acrobot
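In Acrobot-v1 every step yields a reward of -1 until the free end of the two-link chain swings above the target height (the terminating step yields 0), and episodes are truncated at 500 steps, so returns range from -500 (never solved within the step limit) up toward 0. The training loop below stops once the recent evaluation average exceeds -140.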
In [3]:
class OffPACAgent:
    def __init__(self, env):
        self.action_n = env.action_space.n
        self.gamma = 0.99

        self.actor_net = self.build_net(hidden_sizes=[100,],
                output_size=self.action_n,
                output_activation=nn.softmax, learning_rate=0.0001)
        self.critic_net = self.build_net(hidden_sizes=[100,],
                output_size=self.action_n,
                learning_rate=0.0002)

    def build_net(self, hidden_sizes, output_size,
                activation=nn.relu, output_activation=None,
                loss=losses.mse, learning_rate=0.001):
        model = keras.Sequential()
        for hidden_size in hidden_sizes:
            model.add(layers.Dense(units=hidden_size,
                    activation=activation))
        model.add(layers.Dense(units=output_size,
                activation=output_activation))
        optimizer = optimizers.SGD(learning_rate)
        model.compile(optimizer=optimizer, loss=loss)
        return model

    def reset(self, mode=None):
        self.mode = mode
        if self.mode == 'train':
            self.trajectory = []
            self.discount = 1.

    def step(self, observation, reward, terminated):
        if self.mode == 'train':
            # behavior policy: uniform random over all actions
            action = np.random.choice(self.action_n)
            self.trajectory += [observation, reward, terminated, action]
            if len(self.trajectory) >= 8:  # at least two complete steps recorded
                self.learn()
            self.discount *= self.gamma
        else:
            probs = self.actor_net.predict(observation[np.newaxis], verbose=0)[0]
            action = np.random.choice(self.action_n, p=probs)
        return action

    def close(self):
        pass

    def learn(self):
        # the latest two steps form a SARSA-style transition (S, A, R, S', A')
        state, _, _, action, next_state, reward, terminated, next_action = \
                self.trajectory[-8:]
        behavior_prob = 1. / self.action_n  # uniform behavior policy
        pi = self.actor_net.predict(state[np.newaxis], verbose=0)[0, action]
        ratio = pi / behavior_prob # importance sampling ratio

        # update actor: minimize -gamma^t * q * pi(a|s) / b(a|s), whose
        # gradient equals -gamma^t * rho * q * grad log pi(a|s)
        q = self.critic_net.predict(state[np.newaxis], verbose=0)[0, action]
        state_tensor = tf.convert_to_tensor(state[np.newaxis], dtype=tf.float32)
        with tf.GradientTape() as tape:
            pi_tensor = self.actor_net(state_tensor)[0, action]
            actor_loss_tensor = -self.discount * q / behavior_prob * pi_tensor
        grad_tensors = tape.gradient(actor_loss_tensor, self.actor_net.variables)
        self.actor_net.optimizer.apply_gradients(zip(grad_tensors,
                self.actor_net.variables))

        # update critic: importance-weighted squared TD error with the
        # sampled next action (SARSA-style target)
        next_q = self.critic_net.predict(next_state[np.newaxis], verbose=0)[0,
                next_action]
        target = reward + (1. - terminated) * self.gamma * next_q
        target_tensor = tf.convert_to_tensor(target, dtype=tf.float32)
        with tf.GradientTape() as tape:
            q_tensor = self.critic_net(state_tensor)[:, action]
            mse_tensor = losses.MSE(target_tensor, q_tensor)
            critic_loss_tensor = ratio * mse_tensor
        grad_tensors = tape.gradient(critic_loss_tensor, self.critic_net.variables)
        self.critic_net.optimizer.apply_gradients(zip(grad_tensors,
                self.critic_net.variables))


agent = OffPACAgent(env)
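
As a quick, optional sanity check (not part of the original notebook; the variable names below are illustrative only), the freshly built networks can be probed with a single observation to confirm that the actor outputs a valid distribution over the three actions, and to compute one importance-sampling ratio against the uniform behavior policy:

observation, _ = env.reset(seed=0)  # sample one observation
probs = agent.actor_net.predict(observation[np.newaxis], verbose=0)[0]
assert probs.shape == (agent.action_n,) and np.isclose(probs.sum(), 1.)
behavior_prob = 1. / agent.action_n  # uniform behavior policy
ratio = probs[0] / behavior_prob  # importance sampling ratio for action 0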
In [4]:
def play_episode(env, agent, seed=None, mode=None, render=False):
    observation, _ = env.reset(seed=seed)
    reward, terminated, truncated = 0., False, False
    agent.reset(mode=mode)
    episode_reward, elapsed_steps = 0., 0
    while True:
        # the agent sees the latest observation together with the reward and
        # termination flag of the previous transition
        action = agent.step(observation, reward, terminated)
        if render:
            env.render()
        if terminated or truncated:
            break
        observation, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        elapsed_steps += 1
    agent.close()
    return episode_reward, elapsed_steps


logging.info('==== train ====')
episode_rewards = []
for episode in itertools.count():
    play_episode(env, agent, seed=episode,
            mode='train')  # collect data with the behavior policy and learn
    episode_reward, elapsed_steps = play_episode(env, agent)  # evaluate the actor
    episode_rewards.append(episode_reward)
    logging.info('train episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
    if np.mean(episode_rewards[-10:]) > -140:
        break
plt.plot(episode_rewards)


logging.info('==== test ====')
episode_rewards = []
for episode in range(100):
    episode_reward, elapsed_steps = play_episode(env, agent)
    episode_rewards.append(episode_reward)
    logging.info('test episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
logging.info('average episode reward = %.2f ± %.2f',
        np.mean(episode_rewards), np.std(episode_rewards))
00:00:02 [INFO] ==== train ====
00:03:32 [INFO] train episode 0: reward = -500.00, steps = 500
00:06:54 [INFO] train episode 1: reward = -477.00, steps = 478
00:10:16 [INFO] train episode 2: reward = -500.00, steps = 500
00:13:26 [INFO] train episode 3: reward = -385.00, steps = 386
00:15:29 [INFO] train episode 4: reward = -500.00, steps = 500
00:18:32 [INFO] train episode 5: reward = -372.00, steps = 373
00:21:01 [INFO] train episode 6: reward = -500.00, steps = 500
00:25:18 [INFO] train episode 7: reward = -500.00, steps = 500
00:29:34 [INFO] train episode 8: reward = -451.00, steps = 452
00:32:04 [INFO] train episode 9: reward = -500.00, steps = 500
00:35:42 [INFO] train episode 10: reward = -500.00, steps = 500
00:39:15 [INFO] train episode 11: reward = -500.00, steps = 500
00:42:51 [INFO] train episode 12: reward = -500.00, steps = 500
00:46:28 [INFO] train episode 13: reward = -500.00, steps = 500
00:49:57 [INFO] train episode 14: reward = -500.00, steps = 500
00:53:27 [INFO] train episode 15: reward = -500.00, steps = 500
00:56:37 [INFO] train episode 16: reward = -320.00, steps = 321
01:00:03 [INFO] train episode 17: reward = -500.00, steps = 500
01:03:36 [INFO] train episode 18: reward = -500.00, steps = 500
01:07:02 [INFO] train episode 19: reward = -500.00, steps = 500
01:10:11 [INFO] train episode 20: reward = -306.00, steps = 307
01:13:38 [INFO] train episode 21: reward = -500.00, steps = 500
01:16:50 [INFO] train episode 22: reward = -345.00, steps = 346
01:19:59 [INFO] train episode 23: reward = -310.00, steps = 311
01:23:11 [INFO] train episode 24: reward = -348.00, steps = 349
01:56:15 [INFO] train episode 25: reward = -500.00, steps = 500
01:59:19 [INFO] train episode 26: reward = -270.00, steps = 271
02:02:31 [INFO] train episode 27: reward = -341.00, steps = 342
02:04:32 [INFO] train episode 28: reward = -208.00, steps = 209
03:01:29 [INFO] train episode 29: reward = -158.00, steps = 159
03:04:31 [INFO] train episode 30: reward = -177.00, steps = 178
03:07:28 [INFO] train episode 31: reward = -183.00, steps = 184
03:10:32 [INFO] train episode 32: reward = -193.00, steps = 194
03:13:36 [INFO] train episode 33: reward = -219.00, steps = 220
03:16:32 [INFO] train episode 34: reward = -133.00, steps = 134
03:19:38 [INFO] train episode 35: reward = -192.00, steps = 193
03:22:32 [INFO] train episode 36: reward = -138.00, steps = 139
03:25:22 [INFO] train episode 37: reward = -130.00, steps = 131
03:28:16 [INFO] train episode 38: reward = -147.00, steps = 148
03:31:14 [INFO] train episode 39: reward = -182.00, steps = 183
03:34:11 [INFO] train episode 40: reward = -166.00, steps = 167
03:37:40 [INFO] train episode 41: reward = -500.00, steps = 500
03:40:41 [INFO] train episode 42: reward = -202.00, steps = 203
03:43:44 [INFO] train episode 43: reward = -223.00, steps = 224
03:46:39 [INFO] train episode 44: reward = -160.00, steps = 161
03:49:36 [INFO] train episode 45: reward = -168.00, steps = 169
03:52:33 [INFO] train episode 46: reward = -153.00, steps = 154
03:55:33 [INFO] train episode 47: reward = -188.00, steps = 189
03:58:33 [INFO] train episode 48: reward = -164.00, steps = 165
04:01:37 [INFO] train episode 49: reward = -183.00, steps = 184
04:04:37 [INFO] train episode 50: reward = -162.00, steps = 163
04:07:46 [INFO] train episode 51: reward = -251.00, steps = 252
04:10:47 [INFO] train episode 52: reward = -162.00, steps = 163
04:13:44 [INFO] train episode 53: reward = -130.00, steps = 131
04:16:44 [INFO] train episode 54: reward = -153.00, steps = 154
04:19:44 [INFO] train episode 55: reward = -175.00, steps = 176
04:22:48 [INFO] train episode 56: reward = -176.00, steps = 177
04:25:50 [INFO] train episode 57: reward = -148.00, steps = 149
04:28:54 [INFO] train episode 58: reward = -151.00, steps = 152
04:31:55 [INFO] train episode 59: reward = -154.00, steps = 155
04:34:55 [INFO] train episode 60: reward = -158.00, steps = 159
04:37:56 [INFO] train episode 61: reward = -157.00, steps = 158
04:41:00 [INFO] train episode 62: reward = -162.00, steps = 163
04:44:01 [INFO] train episode 63: reward = -178.00, steps = 179
04:47:03 [INFO] train episode 64: reward = -144.00, steps = 145
04:50:14 [INFO] train episode 65: reward = -232.00, steps = 233
04:53:12 [INFO] train episode 66: reward = -142.00, steps = 143
04:56:17 [INFO] train episode 67: reward = -157.00, steps = 158
04:59:19 [INFO] train episode 68: reward = -133.00, steps = 134
05:02:20 [INFO] train episode 69: reward = -200.00, steps = 201
05:05:13 [INFO] train episode 70: reward = -137.00, steps = 138
05:08:05 [INFO] train episode 71: reward = -137.00, steps = 138
05:10:57 [INFO] train episode 72: reward = -126.00, steps = 127
05:13:50 [INFO] train episode 73: reward = -131.00, steps = 132
05:16:43 [INFO] train episode 74: reward = -136.00, steps = 137
05:19:33 [INFO] train episode 75: reward = -122.00, steps = 123
05:22:26 [INFO] train episode 76: reward = -143.00, steps = 144
05:25:20 [INFO] train episode 77: reward = -159.00, steps = 160
05:28:15 [INFO] train episode 78: reward = -152.00, steps = 153
05:31:09 [INFO] train episode 79: reward = -154.00, steps = 155
05:31:09 [INFO] ==== test ====
05:31:25 [INFO] test episode 0: reward = -160.00, steps = 161
05:31:44 [INFO] test episode 1: reward = -186.00, steps = 187
05:32:02 [INFO] test episode 2: reward = -175.00, steps = 176
05:32:17 [INFO] test episode 3: reward = -146.00, steps = 147
05:32:32 [INFO] test episode 4: reward = -147.00, steps = 148
05:32:48 [INFO] test episode 5: reward = -152.00, steps = 153
05:33:05 [INFO] test episode 6: reward = -170.00, steps = 171
05:33:19 [INFO] test episode 7: reward = -129.00, steps = 130
05:33:35 [INFO] test episode 8: reward = -155.00, steps = 156
05:33:51 [INFO] test episode 9: reward = -156.00, steps = 157
05:34:04 [INFO] test episode 10: reward = -124.00, steps = 125
05:34:16 [INFO] test episode 11: reward = -119.00, steps = 120
05:34:33 [INFO] test episode 12: reward = -170.00, steps = 171
05:34:49 [INFO] test episode 13: reward = -151.00, steps = 152
05:35:02 [INFO] test episode 14: reward = -129.00, steps = 130
05:35:18 [INFO] test episode 15: reward = -156.00, steps = 157
05:35:32 [INFO] test episode 16: reward = -133.00, steps = 134
05:35:50 [INFO] test episode 17: reward = -171.00, steps = 172
05:36:06 [INFO] test episode 18: reward = -161.00, steps = 162
05:36:21 [INFO] test episode 19: reward = -138.00, steps = 139
05:36:35 [INFO] test episode 20: reward = -139.00, steps = 140
05:36:50 [INFO] test episode 21: reward = -149.00, steps = 150
05:37:11 [INFO] test episode 22: reward = -206.00, steps = 207
05:37:29 [INFO] test episode 23: reward = -173.00, steps = 174
05:37:44 [INFO] test episode 24: reward = -144.00, steps = 145
05:38:01 [INFO] test episode 25: reward = -158.00, steps = 159
05:38:15 [INFO] test episode 26: reward = -140.00, steps = 141
05:38:31 [INFO] test episode 27: reward = -149.00, steps = 150
05:38:43 [INFO] test episode 28: reward = -119.00, steps = 120
05:38:56 [INFO] test episode 29: reward = -124.00, steps = 125
05:39:11 [INFO] test episode 30: reward = -143.00, steps = 144
05:39:28 [INFO] test episode 31: reward = -168.00, steps = 169
05:39:51 [INFO] test episode 32: reward = -214.00, steps = 215
05:40:06 [INFO] test episode 33: reward = -150.00, steps = 151
05:40:23 [INFO] test episode 34: reward = -167.00, steps = 168
05:40:38 [INFO] test episode 35: reward = -142.00, steps = 143
05:40:51 [INFO] test episode 36: reward = -129.00, steps = 130
05:41:05 [INFO] test episode 37: reward = -139.00, steps = 140
05:41:18 [INFO] test episode 38: reward = -118.00, steps = 119
05:41:33 [INFO] test episode 39: reward = -151.00, steps = 152
05:41:51 [INFO] test episode 40: reward = -173.00, steps = 174
05:42:08 [INFO] test episode 41: reward = -171.00, steps = 172
05:42:22 [INFO] test episode 42: reward = -129.00, steps = 130
05:42:34 [INFO] test episode 43: reward = -123.00, steps = 124
05:42:50 [INFO] test episode 44: reward = -151.00, steps = 152
05:43:09 [INFO] test episode 45: reward = -182.00, steps = 183
05:43:24 [INFO] test episode 46: reward = -142.00, steps = 143
05:43:39 [INFO] test episode 47: reward = -153.00, steps = 154
05:43:55 [INFO] test episode 48: reward = -149.00, steps = 150
05:44:28 [INFO] test episode 49: reward = -325.00, steps = 326
05:44:44 [INFO] test episode 50: reward = -151.00, steps = 152
05:45:05 [INFO] test episode 51: reward = -205.00, steps = 206
05:45:22 [INFO] test episode 52: reward = -163.00, steps = 164
05:45:38 [INFO] test episode 53: reward = -155.00, steps = 156
05:45:55 [INFO] test episode 54: reward = -167.00, steps = 168
05:46:10 [INFO] test episode 55: reward = -147.00, steps = 148
05:46:26 [INFO] test episode 56: reward = -158.00, steps = 159
05:46:42 [INFO] test episode 57: reward = -147.00, steps = 148
05:47:02 [INFO] test episode 58: reward = -200.00, steps = 201
05:47:18 [INFO] test episode 59: reward = -159.00, steps = 160
05:47:38 [INFO] test episode 60: reward = -195.00, steps = 196
05:47:59 [INFO] test episode 61: reward = -203.00, steps = 204
05:48:14 [INFO] test episode 62: reward = -147.00, steps = 148
05:48:30 [INFO] test episode 63: reward = -156.00, steps = 157
05:48:52 [INFO] test episode 64: reward = -207.00, steps = 208
05:49:08 [INFO] test episode 65: reward = -161.00, steps = 162
05:49:26 [INFO] test episode 66: reward = -175.00, steps = 176
05:49:44 [INFO] test episode 67: reward = -172.00, steps = 173
05:50:04 [INFO] test episode 68: reward = -197.00, steps = 198
05:50:20 [INFO] test episode 69: reward = -152.00, steps = 153
05:50:34 [INFO] test episode 70: reward = -148.00, steps = 149
05:50:49 [INFO] test episode 71: reward = -142.00, steps = 143
05:51:06 [INFO] test episode 72: reward = -159.00, steps = 160
05:51:22 [INFO] test episode 73: reward = -157.00, steps = 158
05:51:41 [INFO] test episode 74: reward = -192.00, steps = 193
05:51:59 [INFO] test episode 75: reward = -167.00, steps = 168
05:52:15 [INFO] test episode 76: reward = -164.00, steps = 165
05:52:32 [INFO] test episode 77: reward = -162.00, steps = 163
05:52:51 [INFO] test episode 78: reward = -187.00, steps = 188
05:53:06 [INFO] test episode 79: reward = -142.00, steps = 143
05:53:25 [INFO] test episode 80: reward = -189.00, steps = 190
05:53:38 [INFO] test episode 81: reward = -130.00, steps = 131
05:53:53 [INFO] test episode 82: reward = -148.00, steps = 149
05:54:19 [INFO] test episode 83: reward = -244.00, steps = 245
05:54:35 [INFO] test episode 84: reward = -158.00, steps = 159
05:54:50 [INFO] test episode 85: reward = -154.00, steps = 155
05:55:06 [INFO] test episode 86: reward = -152.00, steps = 153
05:55:19 [INFO] test episode 87: reward = -128.00, steps = 129
05:55:50 [INFO] test episode 88: reward = -300.00, steps = 301
05:56:11 [INFO] test episode 89: reward = -213.00, steps = 214
05:56:27 [INFO] test episode 90: reward = -146.00, steps = 147
05:56:43 [INFO] test episode 91: reward = -154.00, steps = 155
05:57:04 [INFO] test episode 92: reward = -207.00, steps = 208
05:57:23 [INFO] test episode 93: reward = -188.00, steps = 189
05:57:39 [INFO] test episode 94: reward = -155.00, steps = 156
05:58:03 [INFO] test episode 95: reward = -234.00, steps = 235
05:58:16 [INFO] test episode 96: reward = -132.00, steps = 133
05:58:33 [INFO] test episode 97: reward = -160.00, steps = 161
05:58:50 [INFO] test episode 98: reward = -168.00, steps = 169
05:59:04 [INFO] test episode 99: reward = -140.00, steps = 141
05:59:04 [INFO] average episode reward = -162.85 ± 32.87
In [5]:
env.close()