Use Eligibility Trace Actor-Critic to Play Acrobot-v1

TensorFlow version
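
For reference, here is a sketch of the per-step updates that the `learn()` method below performs; they follow the standard one-step actor-critic with eligibility traces, where $\gamma^t$ is the accumulated discount kept in `self.discount` and $\lambda$ is the trace-decay rate:

$$\delta_t = R_{t+1} + \gamma\,(1 - d_{t+1})\,v(S_{t+1}; \mathbf{w}) - v(S_t; \mathbf{w})$$

$$\mathbf{z}^{\mathbf{w}} \leftarrow \gamma \lambda^{\mathbf{w}} \mathbf{z}^{\mathbf{w}} + \nabla_{\mathbf{w}} v(S_t; \mathbf{w}), \qquad \mathbf{z}^{\boldsymbol{\theta}} \leftarrow \gamma \lambda^{\boldsymbol{\theta}} \mathbf{z}^{\boldsymbol{\theta}} + \gamma^t \nabla_{\boldsymbol{\theta}} \ln \pi(A_t \mid S_t; \boldsymbol{\theta})$$

$$\mathbf{w} \leftarrow \mathbf{w} + \alpha^{\mathbf{w}} \delta_t \mathbf{z}^{\mathbf{w}}, \qquad \boldsymbol{\theta} \leftarrow \boldsymbol{\theta} + \alpha^{\boldsymbol{\theta}} \delta_t \mathbf{z}^{\boldsymbol{\theta}}$$

where $d_{t+1}$ indicates termination. In the code, $\delta_t \mathbf{z}$ is passed (negated) to each network's Adam optimizer rather than applied as a plain gradient-ascent step, so the learning rates play the role of $\alpha$.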

In [1]:
%matplotlib inline

import sys
import logging
import itertools

import numpy as np
np.random.seed(0)
import pandas as pd
import gym
import matplotlib.pyplot as plt
import tensorflow.compat.v2 as tf
tf.random.set_seed(0)
from tensorflow import keras
from tensorflow import nn
from tensorflow import optimizers
from tensorflow import losses
from tensorflow.keras import layers
from tensorflow.keras import models

logging.basicConfig(level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        stream=sys.stdout, datefmt='%H:%M:%S')
In [2]:
env = gym.make('Acrobot-v1')
for key in vars(env):
    logging.info('%s: %s', key, vars(env)[key])
for key in vars(env.spec):
    logging.info('%s: %s', key, vars(env.spec)[key])
14:32:46 [INFO] env: <AcrobotEnv<Acrobot-v1>>
14:32:46 [INFO] action_space: Discrete(3)
14:32:46 [INFO] observation_space: Box(-28.274333953857422, 28.274333953857422, (6,), float32)
14:32:46 [INFO] reward_range: (-inf, inf)
14:32:46 [INFO] metadata: {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 15}
14:32:46 [INFO] _max_episode_steps: 500
14:32:46 [INFO] _elapsed_steps: None
14:32:46 [INFO] id: Acrobot-v1
14:32:46 [INFO] entry_point: gym.envs.classic_control:AcrobotEnv
14:32:46 [INFO] reward_threshold: -100.0
14:32:46 [INFO] nondeterministic: False
14:32:46 [INFO] max_episode_steps: 500
14:32:46 [INFO] _kwargs: {}
14:32:46 [INFO] _env_name: Acrobot
In [3]:
class EligibilityTraceActorCriticAgent:
    def __init__(self, env):
        self.action_n = env.action_space.n
        self.gamma = 0.99
        self.actor_lambda = 0.9
        self.critic_lambda = 0.9

        self.actor_net = self.build_net(
                input_size=env.observation_space.shape[0],
                hidden_sizes=[100,],
                output_size=self.action_n, output_activation=nn.softmax,
                loss=losses.categorical_crossentropy, learning_rate=0.0001)
        self.critic_net = self.build_net(
                input_size=env.observation_space.shape[0],
                hidden_sizes=[100,],
                learning_rate=0.0002)

    def build_net(self, input_size, hidden_sizes, output_size=1,
                activation=nn.relu, output_activation=None,
                loss=losses.mse, learning_rate=0.001):
        model = keras.Sequential()
        for layer, hidden_size in enumerate(hidden_sizes):
            kwargs = {'input_shape': (input_size,)} if layer == 0 else {}
            model.add(layers.Dense(units=hidden_size, activation=activation,
                    **kwargs))
        model.add(layers.Dense(units=output_size, activation=output_activation))
        optimizer = optimizers.Adam(learning_rate)
        model.compile(optimizer=optimizer, loss=loss)
        return model

    def reset(self, mode=None):
        self.mode = mode
        if self.mode == 'train':
            self.trajectory = []
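            # accumulated discount gamma^t, used to scale the actor's trace increment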
            self.discount = 1.
            self.actor_trace_tensors = [0. * weight for weight in
                    self.actor_net.get_weights()]
            self.critic_trace_tensors = [0. * weight for weight in
                    self.critic_net.get_weights()]

    def step(self, observation, reward, terminated):
        probs = self.actor_net.predict(observation[np.newaxis], verbose=0)[0]
        action = np.random.choice(self.action_n, p=probs)
        if self.mode == 'train':
            self.trajectory += [observation, reward, terminated, action]
            if len(self.trajectory) >= 8:
                self.learn()
            self.discount *= self.gamma
        return action

    def close(self):
        pass

    def learn(self):
        state, _, _, action, next_state, reward, terminated, _ = \
                self.trajectory[-8:]
        states = state[np.newaxis]
        q = self.critic_net.predict(states, verbose=0)[0, 0]
        next_v = self.critic_net.predict(next_state[np.newaxis], verbose=0)[0, 0]
        target = reward + (1. - terminated) * self.gamma * next_v
        td_error = target - q

        # update actor
        state_tensor = tf.convert_to_tensor(states, dtype=tf.float32)
        with tf.GradientTape() as tape:
            pi_tensor = self.actor_net(state_tensor)[0, action]
            logpi_tensor = tf.math.log(tf.clip_by_value(pi_tensor, 1e-6, 1.))
        grad_tensors = tape.gradient(logpi_tensor, self.actor_net.variables)
        self.actor_trace_tensors = [self.gamma * self.actor_lambda * trace +
                self.discount * grad for trace, grad in
                zip(self.actor_trace_tensors, grad_tensors)]
        actor_grads = [-td_error * trace for trace in self.actor_trace_tensors]
        actor_grads_and_vars = tuple(zip(actor_grads, self.actor_net.variables))
        self.actor_net.optimizer.apply_gradients(actor_grads_and_vars)

        # update critic
        with tf.GradientTape() as tape:
            v_tensor = self.critic_net(state_tensor)[0, 0]
        grad_tensors = tape.gradient(v_tensor, self.critic_net.variables)
        self.critic_trace_tensors = [self.gamma * self.critic_lambda * trace +
                grad for trace, grad in
                zip(self.critic_trace_tensors, grad_tensors)]
        critic_grads = [-td_error * trace for trace in self.critic_trace_tensors]
        critic_grads_and_vars = tuple(zip(critic_grads,
                self.critic_net.variables))
        self.critic_net.optimizer.apply_gradients(critic_grads_and_vars)


agent = EligibilityTraceActorCriticAgent(env)
In [4]:
def play_episode(env, agent, seed=None, mode=None, render=False):
    observation, _ = env.reset(seed=seed)
    reward, terminated, truncated = 0., False, False
    agent.reset(mode=mode)
    episode_reward, elapsed_steps = 0., 0
    while True:
        action = agent.step(observation, reward, terminated)
        if render:
            env.render()
        if terminated or truncated:
            break
        observation, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        elapsed_steps += 1
    agent.close()
    return episode_reward, elapsed_steps


logging.info('==== train ====')
episode_rewards = []
for episode in itertools.count():
    episode_reward, elapsed_steps = play_episode(env, agent, seed=episode,
            mode='train')
    episode_rewards.append(episode_reward)
    logging.info('train episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
    if np.mean(episode_rewards[-10:]) > -120:
        break
plt.plot(episode_rewards)


logging.info('==== test ====')
episode_rewards = []
for episode in range(100):
    episode_reward, elapsed_steps = play_episode(env, agent)
    episode_rewards.append(episode_reward)
    logging.info('test episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
logging.info('average episode reward = %.2f ± %.2f',
        np.mean(episode_rewards), np.std(episode_rewards))
14:32:48 [INFO] ==== train ====
14:34:49 [INFO] train episode 0: reward = -481.00, steps = 482
14:36:50 [INFO] train episode 1: reward = -500.00, steps = 500
14:38:55 [INFO] train episode 2: reward = -500.00, steps = 500
14:40:59 [INFO] train episode 3: reward = -500.00, steps = 500
14:43:03 [INFO] train episode 4: reward = -500.00, steps = 500
14:45:10 [INFO] train episode 5: reward = -500.00, steps = 500
14:47:00 [INFO] train episode 6: reward = -427.00, steps = 428
14:49:05 [INFO] train episode 7: reward = -500.00, steps = 500
14:50:53 [INFO] train episode 8: reward = -438.00, steps = 439
14:52:59 [INFO] train episode 9: reward = -500.00, steps = 500
14:54:56 [INFO] train episode 10: reward = -482.00, steps = 483
14:57:02 [INFO] train episode 11: reward = -500.00, steps = 500
14:59:07 [INFO] train episode 12: reward = -500.00, steps = 500
15:00:59 [INFO] train episode 13: reward = -459.00, steps = 460
15:03:00 [INFO] train episode 14: reward = -500.00, steps = 500
15:05:04 [INFO] train episode 15: reward = -500.00, steps = 500
15:06:55 [INFO] train episode 16: reward = -445.00, steps = 446
15:08:30 [INFO] train episode 17: reward = -399.00, steps = 400
15:09:34 [INFO] train episode 18: reward = -285.00, steps = 286
15:10:39 [INFO] train episode 19: reward = -299.00, steps = 300
15:12:21 [INFO] train episode 20: reward = -500.00, steps = 500
15:13:57 [INFO] train episode 21: reward = -472.00, steps = 473
15:15:07 [INFO] train episode 22: reward = -334.00, steps = 335
15:15:40 [INFO] train episode 23: reward = -160.00, steps = 161
15:16:29 [INFO] train episode 24: reward = -244.00, steps = 245
15:17:12 [INFO] train episode 25: reward = -214.00, steps = 215
15:18:07 [INFO] train episode 26: reward = -269.00, steps = 270
15:18:54 [INFO] train episode 27: reward = -233.00, steps = 234
15:19:46 [INFO] train episode 28: reward = -256.00, steps = 257
15:20:31 [INFO] train episode 29: reward = -219.00, steps = 220
15:21:27 [INFO] train episode 30: reward = -265.00, steps = 266
15:22:24 [INFO] train episode 31: reward = -268.00, steps = 269
15:23:02 [INFO] train episode 32: reward = -189.00, steps = 190
15:23:48 [INFO] train episode 33: reward = -229.00, steps = 230
15:24:30 [INFO] train episode 34: reward = -209.00, steps = 210
15:25:11 [INFO] train episode 35: reward = -206.00, steps = 207
15:25:44 [INFO] train episode 36: reward = -162.00, steps = 163
15:26:28 [INFO] train episode 37: reward = -219.00, steps = 220
15:27:22 [INFO] train episode 38: reward = -270.00, steps = 271
15:27:58 [INFO] train episode 39: reward = -179.00, steps = 180
15:28:33 [INFO] train episode 40: reward = -175.00, steps = 176
15:29:13 [INFO] train episode 41: reward = -201.00, steps = 202
15:29:43 [INFO] train episode 42: reward = -143.00, steps = 144
15:30:19 [INFO] train episode 43: reward = -174.00, steps = 175
15:30:46 [INFO] train episode 44: reward = -133.00, steps = 134
15:31:19 [INFO] train episode 45: reward = -159.00, steps = 160
15:32:10 [INFO] train episode 46: reward = -237.00, steps = 238
15:32:45 [INFO] train episode 47: reward = -171.00, steps = 172
15:33:24 [INFO] train episode 48: reward = -172.00, steps = 173
15:34:06 [INFO] train episode 49: reward = -199.00, steps = 200
15:34:45 [INFO] train episode 50: reward = -165.00, steps = 166
15:35:21 [INFO] train episode 51: reward = -162.00, steps = 163
15:36:00 [INFO] train episode 52: reward = -184.00, steps = 185
15:36:22 [INFO] train episode 53: reward = -103.00, steps = 104
15:36:56 [INFO] train episode 54: reward = -173.00, steps = 174
15:37:24 [INFO] train episode 55: reward = -140.00, steps = 141
15:37:54 [INFO] train episode 56: reward = -150.00, steps = 151
15:38:22 [INFO] train episode 57: reward = -142.00, steps = 143
15:38:47 [INFO] train episode 58: reward = -124.00, steps = 125
15:39:20 [INFO] train episode 59: reward = -164.00, steps = 165
15:39:56 [INFO] train episode 60: reward = -162.00, steps = 163
15:40:24 [INFO] train episode 61: reward = -126.00, steps = 127
15:40:58 [INFO] train episode 62: reward = -153.00, steps = 154
15:41:43 [INFO] train episode 63: reward = -203.00, steps = 204
15:42:23 [INFO] train episode 64: reward = -179.00, steps = 180
15:42:54 [INFO] train episode 65: reward = -141.00, steps = 142
15:43:27 [INFO] train episode 66: reward = -150.00, steps = 151
15:43:59 [INFO] train episode 67: reward = -139.00, steps = 140
15:44:47 [INFO] train episode 68: reward = -219.00, steps = 220
15:45:25 [INFO] train episode 69: reward = -178.00, steps = 179
15:45:58 [INFO] train episode 70: reward = -159.00, steps = 160
15:46:31 [INFO] train episode 71: reward = -154.00, steps = 155
15:47:00 [INFO] train episode 72: reward = -140.00, steps = 141
15:48:04 [INFO] train episode 73: reward = -299.00, steps = 300
15:48:25 [INFO] train episode 74: reward = -98.00, steps = 99
15:48:58 [INFO] train episode 75: reward = -159.00, steps = 160
15:49:26 [INFO] train episode 76: reward = -134.00, steps = 135
15:49:54 [INFO] train episode 77: reward = -125.00, steps = 126
15:50:19 [INFO] train episode 78: reward = -118.00, steps = 119
15:50:54 [INFO] train episode 79: reward = -163.00, steps = 164
15:51:22 [INFO] train episode 80: reward = -138.00, steps = 139
15:51:55 [INFO] train episode 81: reward = -160.00, steps = 161
15:52:34 [INFO] train episode 82: reward = -191.00, steps = 192
15:53:03 [INFO] train episode 83: reward = -142.00, steps = 143
15:53:30 [INFO] train episode 84: reward = -129.00, steps = 130
15:53:56 [INFO] train episode 85: reward = -126.00, steps = 127
15:54:22 [INFO] train episode 86: reward = -131.00, steps = 132
15:54:51 [INFO] train episode 87: reward = -140.00, steps = 141
15:55:20 [INFO] train episode 88: reward = -140.00, steps = 141
15:55:43 [INFO] train episode 89: reward = -112.00, steps = 113
15:56:19 [INFO] train episode 90: reward = -174.00, steps = 175
15:56:38 [INFO] train episode 91: reward = -94.00, steps = 95
15:57:02 [INFO] train episode 92: reward = -116.00, steps = 117
15:57:33 [INFO] train episode 93: reward = -147.00, steps = 148
15:58:04 [INFO] train episode 94: reward = -151.00, steps = 152
15:58:29 [INFO] train episode 95: reward = -126.00, steps = 127
15:58:57 [INFO] train episode 96: reward = -133.00, steps = 134
15:59:19 [INFO] train episode 97: reward = -114.00, steps = 115
15:59:44 [INFO] train episode 98: reward = -122.00, steps = 123
16:00:03 [INFO] train episode 99: reward = -88.00, steps = 89
16:00:28 [INFO] train episode 100: reward = -122.00, steps = 123
16:00:52 [INFO] train episode 101: reward = -119.00, steps = 120
16:01:19 [INFO] train episode 102: reward = -133.00, steps = 134
16:01:46 [INFO] train episode 103: reward = -133.00, steps = 134
16:02:04 [INFO] train episode 104: reward = -87.00, steps = 88
16:02:04 [INFO] ==== test ====
16:02:10 [INFO] test episode 0: reward = -93.00, steps = 94
16:02:18 [INFO] test episode 1: reward = -126.00, steps = 127
16:02:25 [INFO] test episode 2: reward = -107.00, steps = 108
16:02:31 [INFO] test episode 3: reward = -99.00, steps = 100
16:02:39 [INFO] test episode 4: reward = -118.00, steps = 119
16:02:47 [INFO] test episode 5: reward = -132.00, steps = 133
16:02:55 [INFO] test episode 6: reward = -121.00, steps = 122
16:03:02 [INFO] test episode 7: reward = -127.00, steps = 128
16:03:08 [INFO] test episode 8: reward = -95.00, steps = 96
16:03:16 [INFO] test episode 9: reward = -122.00, steps = 123
16:03:27 [INFO] test episode 10: reward = -186.00, steps = 187
16:03:39 [INFO] test episode 11: reward = -164.00, steps = 165
16:03:47 [INFO] test episode 12: reward = -108.00, steps = 109
16:03:54 [INFO] test episode 13: reward = -112.00, steps = 113
16:04:05 [INFO] test episode 14: reward = -136.00, steps = 137
16:04:13 [INFO] test episode 15: reward = -101.00, steps = 102
16:04:20 [INFO] test episode 16: reward = -102.00, steps = 103
16:04:26 [INFO] test episode 17: reward = -88.00, steps = 89
16:04:32 [INFO] test episode 18: reward = -87.00, steps = 88
16:04:40 [INFO] test episode 19: reward = -115.00, steps = 116
16:04:49 [INFO] test episode 20: reward = -130.00, steps = 131
16:04:58 [INFO] test episode 21: reward = -134.00, steps = 135
16:05:07 [INFO] test episode 22: reward = -139.00, steps = 140
16:05:16 [INFO] test episode 23: reward = -117.00, steps = 118
16:05:23 [INFO] test episode 24: reward = -104.00, steps = 105
16:05:36 [INFO] test episode 25: reward = -185.00, steps = 186
16:05:44 [INFO] test episode 26: reward = -118.00, steps = 119
16:05:52 [INFO] test episode 27: reward = -110.00, steps = 111
16:06:01 [INFO] test episode 28: reward = -128.00, steps = 129
16:06:07 [INFO] test episode 29: reward = -81.00, steps = 82
16:06:15 [INFO] test episode 30: reward = -108.00, steps = 109
16:06:22 [INFO] test episode 31: reward = -100.00, steps = 101
16:06:29 [INFO] test episode 32: reward = -102.00, steps = 103
16:06:36 [INFO] test episode 33: reward = -96.00, steps = 97
16:06:42 [INFO] test episode 34: reward = -91.00, steps = 92
16:06:52 [INFO] test episode 35: reward = -146.00, steps = 147
16:07:00 [INFO] test episode 36: reward = -119.00, steps = 120
16:07:07 [INFO] test episode 37: reward = -99.00, steps = 100
16:07:14 [INFO] test episode 38: reward = -103.00, steps = 104
16:07:21 [INFO] test episode 39: reward = -98.00, steps = 99
16:07:30 [INFO] test episode 40: reward = -122.00, steps = 123
16:07:36 [INFO] test episode 41: reward = -91.00, steps = 92
16:07:44 [INFO] test episode 42: reward = -120.00, steps = 121
16:07:53 [INFO] test episode 43: reward = -121.00, steps = 122
16:08:01 [INFO] test episode 44: reward = -116.00, steps = 117
16:08:07 [INFO] test episode 45: reward = -114.00, steps = 115
16:08:12 [INFO] test episode 46: reward = -114.00, steps = 115
16:08:18 [INFO] test episode 47: reward = -122.00, steps = 123
16:08:24 [INFO] test episode 48: reward = -121.00, steps = 122
16:08:28 [INFO] test episode 49: reward = -102.00, steps = 103
16:08:36 [INFO] test episode 50: reward = -185.00, steps = 186
16:08:41 [INFO] test episode 51: reward = -111.00, steps = 112
16:08:47 [INFO] test episode 52: reward = -135.00, steps = 136
16:08:53 [INFO] test episode 53: reward = -121.00, steps = 122
16:08:59 [INFO] test episode 54: reward = -135.00, steps = 136
16:09:03 [INFO] test episode 55: reward = -98.00, steps = 99
16:09:09 [INFO] test episode 56: reward = -144.00, steps = 145
16:09:15 [INFO] test episode 57: reward = -117.00, steps = 118
16:09:22 [INFO] test episode 58: reward = -163.00, steps = 164
16:09:26 [INFO] test episode 59: reward = -92.00, steps = 93
16:09:31 [INFO] test episode 60: reward = -126.00, steps = 127
16:09:38 [INFO] test episode 61: reward = -153.00, steps = 154
16:09:44 [INFO] test episode 62: reward = -132.00, steps = 133
16:09:48 [INFO] test episode 63: reward = -95.00, steps = 96
16:09:54 [INFO] test episode 64: reward = -126.00, steps = 127
16:09:59 [INFO] test episode 65: reward = -112.00, steps = 113
16:10:03 [INFO] test episode 66: reward = -102.00, steps = 103
16:10:08 [INFO] test episode 67: reward = -115.00, steps = 116
16:10:13 [INFO] test episode 68: reward = -110.00, steps = 111
16:10:18 [INFO] test episode 69: reward = -126.00, steps = 127
16:10:24 [INFO] test episode 70: reward = -121.00, steps = 122
16:10:28 [INFO] test episode 71: reward = -96.00, steps = 97
16:10:34 [INFO] test episode 72: reward = -137.00, steps = 138
16:10:40 [INFO] test episode 73: reward = -138.00, steps = 139
16:10:46 [INFO] test episode 74: reward = -136.00, steps = 137
16:10:50 [INFO] test episode 75: reward = -91.00, steps = 92
16:10:55 [INFO] test episode 76: reward = -115.00, steps = 116
16:11:06 [INFO] test episode 77: reward = -260.00, steps = 261
16:11:12 [INFO] test episode 78: reward = -138.00, steps = 139
16:11:16 [INFO] test episode 79: reward = -87.00, steps = 88
16:11:21 [INFO] test episode 80: reward = -116.00, steps = 117
16:11:26 [INFO] test episode 81: reward = -116.00, steps = 117
16:11:31 [INFO] test episode 82: reward = -112.00, steps = 113
16:11:36 [INFO] test episode 83: reward = -106.00, steps = 107
16:11:41 [INFO] test episode 84: reward = -134.00, steps = 135
16:11:45 [INFO] test episode 85: reward = -79.00, steps = 80
16:11:50 [INFO] test episode 86: reward = -112.00, steps = 113
16:11:55 [INFO] test episode 87: reward = -122.00, steps = 123
16:12:01 [INFO] test episode 88: reward = -141.00, steps = 142
16:12:05 [INFO] test episode 89: reward = -100.00, steps = 101
16:12:11 [INFO] test episode 90: reward = -136.00, steps = 137
16:12:16 [INFO] test episode 91: reward = -106.00, steps = 107
16:12:20 [INFO] test episode 92: reward = -100.00, steps = 101
16:12:24 [INFO] test episode 93: reward = -95.00, steps = 96
16:12:29 [INFO] test episode 94: reward = -95.00, steps = 96
16:12:37 [INFO] test episode 95: reward = -206.00, steps = 207
16:12:42 [INFO] test episode 96: reward = -111.00, steps = 112
16:12:47 [INFO] test episode 97: reward = -112.00, steps = 113
16:12:58 [INFO] test episode 98: reward = -249.00, steps = 250
16:13:02 [INFO] test episode 99: reward = -104.00, steps = 105
16:13:02 [INFO] average episode reward = -120.59 ± 29.65
In [5]:
env.close()