Use DDPG to Play Pendulum-v1

TensorFlow version

DDPG (deep deterministic policy gradient) learns a deterministic actor alongside a Q critic, stabilized by target networks and an experience replay buffer, and explores with temporally correlated Ornstein–Uhlenbeck noise; Pendulum-v1's single continuous torque action makes it a natural testbed.

In [1]:
%matplotlib inline

import sys
import logging
import itertools

import numpy as np
np.random.seed(0)
import pandas as pd
import gym
import matplotlib.pyplot as plt
import tensorflow.compat.v2 as tf
tf.random.set_seed(0)
from tensorflow import keras
from tensorflow import nn
from tensorflow import optimizers
from tensorflow import losses
from tensorflow.keras import layers
from tensorflow.keras import models

logging.basicConfig(level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        stream=sys.stdout, datefmt='%H:%M:%S')
In [2]:
env = gym.make('Pendulum-v1')
for key in vars(env.spec):
    logging.info('%s: %s', key, vars(env.spec)[key])
for key in vars(env.unwrapped):
    logging.info('%s: %s', key, vars(env.unwrapped)[key])
22:22:00 [INFO] id: Pendulum-v1
22:22:00 [INFO] entry_point: gym.envs.classic_control:PendulumEnv
22:22:00 [INFO] reward_threshold: None
22:22:00 [INFO] nondeterministic: False
22:22:00 [INFO] max_episode_steps: 200
22:22:00 [INFO] order_enforce: True
22:22:00 [INFO] _kwargs: {}
22:22:00 [INFO] _env_name: Pendulum
22:22:00 [INFO] max_speed: 8
22:22:00 [INFO] max_torque: 2.0
22:22:00 [INFO] dt: 0.05
22:22:00 [INFO] g: 10.0
22:22:00 [INFO] m: 1.0
22:22:00 [INFO] l: 1.0
22:22:00 [INFO] viewer: None
22:22:00 [INFO] action_space: Box([-2.], [2.], (1,), float32)
22:22:00 [INFO] observation_space: Box([-1. -1. -8.], [1. 1. 8.], (3,), float32)
22:22:00 [INFO] np_random: RandomState(MT19937)
22:22:00 [INFO] spec: EnvSpec(Pendulum-v1)
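As the logged spec shows, the observation is [cos θ, sin θ, θ̇] with |θ̇| ≤ 8 and the action is a single torque in [−2, 2]. Pendulum-v1's per-step reward is −(θ² + 0.1 θ̇² + 0.001 u²) with θ normalized to [−π, π], so returns are never positive; a policy that swings up and balances typically scores above −150 over a 200-step episode, which is the stopping criterion used in the training loop below. A quick bound check (the reward formula is quoted from the Gym documentation, not computed by this notebook):

np.pi ** 2 + 0.1 * 8 ** 2 + 0.001 * 2 ** 2  # worst single-step cost, ~16.27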
In [3]:
class DQNReplayer:
    def __init__(self, capacity):
        self.memory = pd.DataFrame(index=range(capacity),
                columns=['state', 'action', 'reward', 'next_state', 'terminated'])
        self.i = 0
        self.count = 0
        self.capacity = capacity

    def store(self, *args):
        self.memory.loc[self.i] = np.asarray(args, dtype=object)
        self.i = (self.i + 1) % self.capacity
        self.count = min(self.count + 1, self.capacity)

    def sample(self, size):
        indices = np.random.choice(self.count, size=size)
        return (np.stack(self.memory.loc[indices, field]) for field in
                self.memory.columns)
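DQNReplayer is a fixed-capacity circular buffer backed by a DataFrame: store overwrites the oldest slot once capacity is reached, and sample draws a minibatch with replacement. A hypothetical smoke test, not part of the recorded run:

replayer = DQNReplayer(capacity=4)
for t in range(6):  # the 5th and 6th stores overwrite the two oldest slots
    replayer.store(np.zeros(3), np.zeros(1), -1., np.ones(3), False)
states, actions, rewards, next_states, terminateds = replayer.sample(2)
assert states.shape == (2, 3)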
In [4]:
class OrnsteinUhlenbeckProcess:
    def __init__(self, x0):
        self.x = x0

    def __call__(self, mu=0., sigma=1., theta=.15, dt=.01):
        n = np.random.normal(size=self.x.shape)
        self.x += (theta * (mu - self.x) * dt + sigma * np.sqrt(dt) * n)
        return self.x
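Each call advances one Euler–Maruyama step of the Ornstein–Uhlenbeck SDE dx = θ(μ − x) dt + σ dW, so successive samples are temporally correlated and drift back toward μ; that correlation is why OU noise is the classic exploration choice for DDPG. An illustrative check (the .copy() matters because the process returns its own mutable state array):

noise = OrnsteinUhlenbeckProcess(np.zeros(1))
path = np.array([noise(sigma=0.1).copy() for _ in range(1000)])
path.mean(), path.std()  # hovers near mu = 0 with small dispersion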
In [5]:
class DDPGAgent:
    def __init__(self, env):
        state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
        self.action_low = env.action_space.low
        self.action_high = env.action_space.high
        self.gamma = 0.99

        self.replayer = DQNReplayer(20000)

        self.actor_evaluate_net = self.build_net(
                input_size=state_dim, hidden_sizes=[32, 64],
                output_size=self.action_dim, output_activation=nn.tanh,
                learning_rate=0.0001)
        self.actor_target_net = models.clone_model(self.actor_evaluate_net)
        self.actor_target_net.set_weights(self.actor_evaluate_net.get_weights())

        self.critic_evaluate_net = self.build_net(
                input_size=state_dim+self.action_dim, hidden_sizes=[64, 128],
                learning_rate=0.001)
        self.critic_target_net = models.clone_model(self.critic_evaluate_net)
        self.critic_target_net.set_weights(self.critic_evaluate_net.get_weights())

    def build_net(self, input_size=None, hidden_sizes=None, output_size=1,
                activation=nn.relu, output_activation=None,
                loss=losses.mse, learning_rate=0.001):
        model = keras.Sequential()
        for layer, hidden_size in enumerate(hidden_sizes):
            kwargs = {'input_shape' : (input_size,)} if layer == 0 else {}
            model.add(layers.Dense(units=hidden_size,
                    activation=activation, **kwargs))
        model.add(layers.Dense(units=output_size,
                activation=output_activation))
        optimizer = optimizers.Adam(learning_rate)
        model.compile(optimizer=optimizer, loss=loss)
        return model

    def reset(self, mode=None):
        self.mode = mode
        if self.mode == 'train':
            self.trajectory = []
            self.noise = OrnsteinUhlenbeckProcess(np.zeros((self.action_dim,)))

    def step(self, observation, reward, terminated):
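        # warm-up: act uniformly at random until the replay buffer holds
        # 3000 transitions, then switch to the deterministic policy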
        if self.mode == 'train' and self.replayer.count < 3000:
            action = np.random.uniform(self.action_low, self.action_high)
        else:
            action = self.actor_evaluate_net.predict(observation[np.newaxis],
                    verbose=0)[0]
        if self.mode == 'train':
            # noisy action
            noise = self.noise(sigma=0.1)
            action = (action + noise).clip(self.action_low, self.action_high)

            self.trajectory += [observation, reward, terminated, action]
            if len(self.trajectory) >= 8:
                state, _, _, act, next_state, reward, terminated, _ = \
                        self.trajectory[-8:]
                self.replayer.store(state, act, reward, next_state, terminated)

            if self.replayer.count >= 3000:
                self.learn()
        return action

    def close(self):
        pass

    def update_net(self, target_net, evaluate_net, learning_rate=0.005):
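        # Polyak soft update: move each target weight a small step toward the
        # corresponding evaluate weight,
        # theta_target <- (1 - learning_rate) * theta_target + learning_rate * theta_evaluate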
        average_weights = [(1. - learning_rate) * t + learning_rate * e for t, e
                in zip(target_net.get_weights(), evaluate_net.get_weights())]
        target_net.set_weights(average_weights)

    def learn(self):
        # replay
        states, actions, rewards, next_states, terminateds = \
                self.replayer.sample(64)
        state_tensor = tf.convert_to_tensor(states, dtype=tf.float32)

        # update critic
        next_actions = self.actor_target_net.predict(next_states, verbose=0)
        # perturb the target actions with clipped Gaussian noise, a smoothing
        # refinement in the spirit of TD3 rather than vanilla DDPG
        next_noises = np.random.normal(0, 0.2, size=next_actions.shape)
        next_actions = (next_actions + next_noises).clip(self.action_low,
                self.action_high)
        state_actions = np.hstack([states, actions])
        next_state_actions = np.hstack([next_states, next_actions])
        next_qs = self.critic_target_net.predict(next_state_actions,
                verbose=0)[:, 0]
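        # TD(0) target: y = r + gamma * (1 - terminated) * Q_target(s', a')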
        targets = rewards + (1. - terminateds) * self.gamma * next_qs
        self.critic_evaluate_net.fit(state_actions, targets[:, np.newaxis],
                verbose=0)

        # update actor
        with tf.GradientTape() as tape:
            action_tensor = self.actor_evaluate_net(state_tensor)
            state_action_tensor = tf.concat([state_tensor, action_tensor], axis=1)
            q_tensor = self.critic_evaluate_net(state_action_tensor)
            loss_tensor = -tf.reduce_mean(q_tensor)
        grad_tensors = tape.gradient(loss_tensor,
                self.actor_evaluate_net.variables)
        self.actor_evaluate_net.optimizer.apply_gradients(zip(
                grad_tensors, self.actor_evaluate_net.variables))

        self.update_net(self.critic_target_net, self.critic_evaluate_net)
        self.update_net(self.actor_target_net, self.actor_evaluate_net)


agent = DDPGAgent(env)
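A trained policy needs only the evaluate actor; the critic and target networks exist solely to drive learning. A minimal sketch for persisting and reusing it with the Keras API imported above (the file name is an illustrative assumption, not part of the recorded run):

agent.actor_evaluate_net.save('ddpg_pendulum_actor.h5')  # path is hypothetical
actor_net = models.load_model('ddpg_pendulum_actor.h5')
observation, _ = env.reset(seed=0)
action = actor_net.predict(observation[np.newaxis], verbose=0)[0]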
In [6]:
def play_episode(env, agent, seed=None, mode=None, render=False):
    observation, _ = env.reset(seed=seed)
    reward, terminated, truncated = 0., False, False
    agent.reset(mode=mode)
    episode_reward, elapsed_steps = 0., 0
    while True:
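        # the agent sees the current observation together with the reward and
        # termination flag produced by its previous action, letting it complete
        # the stored (state, action, reward, next_state, terminated) transition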
        action = agent.step(observation, reward, terminated)
        if render:
            env.render()
        if terminated or truncated:
            break
        observation, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        elapsed_steps += 1
    agent.close()
    return episode_reward, elapsed_steps


logging.info('==== train ====')
episode_rewards = []
for episode in itertools.count():
    episode_reward, elapsed_steps = play_episode(env, agent, seed=episode,
            mode='train')
    episode_rewards.append(episode_reward)
    logging.info('train episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
    if np.mean(episode_rewards[-10:]) > -150:
        break
plt.plot(episode_rewards)


logging.info('==== test ====')
episode_rewards = []
for episode in range(100):
    episode_reward, elapsed_steps = play_episode(env, agent)
    episode_rewards.append(episode_reward)
    logging.info('test episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
logging.info('average episode reward = %.2f ± %.2f',
        np.mean(episode_rewards), np.std(episode_rewards))
22:22:28 [INFO] ==== train ====
22:22:28 [INFO] train episode 0: reward = -1744.13, steps = 200
22:22:28 [INFO] train episode 1: reward = -1025.25, steps = 200
22:22:28 [INFO] train episode 2: reward = -1590.20, steps = 200
22:22:28 [INFO] train episode 3: reward = -1137.77, steps = 200
22:22:28 [INFO] train episode 4: reward = -1675.82, steps = 200
22:22:28 [INFO] train episode 5: reward = -1632.97, steps = 200
22:22:29 [INFO] train episode 6: reward = -753.85, steps = 200
22:22:29 [INFO] train episode 7: reward = -1833.66, steps = 200
22:22:29 [INFO] train episode 8: reward = -936.49, steps = 200
22:22:29 [INFO] train episode 9: reward = -1622.68, steps = 200
22:22:29 [INFO] train episode 10: reward = -1307.43, steps = 200
22:22:29 [INFO] train episode 11: reward = -908.99, steps = 200
22:22:29 [INFO] train episode 12: reward = -1504.19, steps = 200
22:22:29 [INFO] train episode 13: reward = -1003.41, steps = 200
22:22:29 [INFO] train episode 14: reward = -921.67, steps = 200
22:23:21 [INFO] train episode 15: reward = -1026.82, steps = 200
22:24:34 [INFO] train episode 16: reward = -1373.11, steps = 200
22:25:47 [INFO] train episode 17: reward = -1548.92, steps = 200
22:26:42 [INFO] train episode 18: reward = -1734.48, steps = 200
22:27:39 [INFO] train episode 19: reward = -1429.13, steps = 200
22:28:37 [INFO] train episode 20: reward = -1562.60, steps = 200
22:29:35 [INFO] train episode 21: reward = -1757.53, steps = 200
22:30:29 [INFO] train episode 22: reward = -1538.83, steps = 200
22:31:34 [INFO] train episode 23: reward = -1377.11, steps = 200
22:32:34 [INFO] train episode 24: reward = -1086.61, steps = 200
22:33:34 [INFO] train episode 25: reward = -1243.76, steps = 200
22:34:32 [INFO] train episode 26: reward = -1161.08, steps = 200
22:35:34 [INFO] train episode 27: reward = -980.70, steps = 200
22:36:45 [INFO] train episode 28: reward = -872.02, steps = 200
22:38:03 [INFO] train episode 29: reward = -1005.65, steps = 200
22:39:21 [INFO] train episode 30: reward = -849.75, steps = 200
22:40:51 [INFO] train episode 31: reward = -969.59, steps = 200
22:42:26 [INFO] train episode 32: reward = -382.24, steps = 200
22:44:05 [INFO] train episode 33: reward = -748.97, steps = 200
22:45:40 [INFO] train episode 34: reward = -614.85, steps = 200
22:47:14 [INFO] train episode 35: reward = -507.74, steps = 200
22:48:35 [INFO] train episode 36: reward = -750.61, steps = 200
22:50:04 [INFO] train episode 37: reward = -127.51, steps = 200
22:51:33 [INFO] train episode 38: reward = -125.73, steps = 200
22:53:01 [INFO] train episode 39: reward = -369.50, steps = 200
22:54:35 [INFO] train episode 40: reward = -535.08, steps = 200
22:56:05 [INFO] train episode 41: reward = -623.69, steps = 200
22:57:33 [INFO] train episode 42: reward = -124.19, steps = 200
22:59:02 [INFO] train episode 43: reward = -1.74, steps = 200
23:00:38 [INFO] train episode 44: reward = -126.13, steps = 200
23:02:20 [INFO] train episode 45: reward = -121.12, steps = 200
23:03:58 [INFO] train episode 46: reward = -238.68, steps = 200
23:05:33 [INFO] train episode 47: reward = -121.55, steps = 200
23:07:07 [INFO] train episode 48: reward = -128.42, steps = 200
23:08:39 [INFO] train episode 49: reward = -126.94, steps = 200
23:10:14 [INFO] train episode 50: reward = -488.97, steps = 200
23:12:01 [INFO] train episode 51: reward = -123.99, steps = 200
23:14:08 [INFO] train episode 52: reward = -240.86, steps = 200
23:16:12 [INFO] train episode 53: reward = -248.66, steps = 200
23:18:12 [INFO] train episode 54: reward = -248.74, steps = 200
23:20:13 [INFO] train episode 55: reward = -490.37, steps = 200
23:22:14 [INFO] train episode 56: reward = -125.50, steps = 200
23:24:19 [INFO] train episode 57: reward = -371.78, steps = 200
23:26:22 [INFO] train episode 58: reward = -121.67, steps = 200
23:28:23 [INFO] train episode 59: reward = -239.14, steps = 200
23:30:26 [INFO] train episode 60: reward = -125.34, steps = 200
23:32:29 [INFO] train episode 61: reward = -245.64, steps = 200
23:34:29 [INFO] train episode 62: reward = -239.81, steps = 200
23:36:31 [INFO] train episode 63: reward = -121.81, steps = 200
23:38:37 [INFO] train episode 64: reward = -240.38, steps = 200
23:40:41 [INFO] train episode 65: reward = -122.90, steps = 200
23:42:51 [INFO] train episode 66: reward = -127.64, steps = 200
23:45:05 [INFO] train episode 67: reward = -368.60, steps = 200
23:47:19 [INFO] train episode 68: reward = -480.96, steps = 200
23:49:36 [INFO] train episode 69: reward = -240.60, steps = 200
23:51:50 [INFO] train episode 70: reward = -121.73, steps = 200
23:54:04 [INFO] train episode 71: reward = -125.15, steps = 200
23:56:06 [INFO] train episode 72: reward = -474.45, steps = 200
23:58:08 [INFO] train episode 73: reward = -127.03, steps = 200
00:00:09 [INFO] train episode 74: reward = -234.61, steps = 200
00:02:11 [INFO] train episode 75: reward = -125.06, steps = 200
00:04:12 [INFO] train episode 76: reward = -360.36, steps = 200
00:06:13 [INFO] train episode 77: reward = -123.72, steps = 200
00:08:12 [INFO] train episode 78: reward = -120.88, steps = 200
00:10:13 [INFO] train episode 79: reward = -124.85, steps = 200
00:12:12 [INFO] train episode 80: reward = -510.35, steps = 200
00:14:11 [INFO] train episode 81: reward = -252.65, steps = 200
00:16:11 [INFO] train episode 82: reward = -598.00, steps = 200
00:18:11 [INFO] train episode 83: reward = -125.84, steps = 200
00:20:11 [INFO] train episode 84: reward = -122.68, steps = 200
00:22:12 [INFO] train episode 85: reward = -122.48, steps = 200
00:24:12 [INFO] train episode 86: reward = -596.71, steps = 200
00:26:14 [INFO] train episode 87: reward = -242.67, steps = 200
00:28:17 [INFO] train episode 88: reward = -124.79, steps = 200
00:30:17 [INFO] train episode 89: reward = -118.99, steps = 200
00:32:22 [INFO] train episode 90: reward = -122.25, steps = 200
00:34:22 [INFO] train episode 91: reward = -119.55, steps = 200
00:36:21 [INFO] train episode 92: reward = -237.18, steps = 200
00:38:22 [INFO] train episode 93: reward = -124.58, steps = 200
00:40:25 [INFO] train episode 94: reward = -124.54, steps = 200
00:42:26 [INFO] train episode 95: reward = -516.10, steps = 200
00:44:26 [INFO] train episode 96: reward = -122.73, steps = 200
00:46:25 [INFO] train episode 97: reward = -362.34, steps = 200
00:48:24 [INFO] train episode 98: reward = -120.02, steps = 200
00:50:23 [INFO] train episode 99: reward = -470.78, steps = 200
00:52:23 [INFO] train episode 100: reward = -240.18, steps = 200
00:54:21 [INFO] train episode 101: reward = -235.94, steps = 200
00:56:21 [INFO] train episode 102: reward = -496.23, steps = 200
00:58:22 [INFO] train episode 103: reward = -491.39, steps = 200
01:00:22 [INFO] train episode 104: reward = -494.63, steps = 200
01:02:20 [INFO] train episode 105: reward = -122.63, steps = 200
01:04:19 [INFO] train episode 106: reward = -243.02, steps = 200
01:06:17 [INFO] train episode 107: reward = -355.75, steps = 200
01:08:16 [INFO] train episode 108: reward = -118.91, steps = 200
01:10:15 [INFO] train episode 109: reward = -486.70, steps = 200
01:12:14 [INFO] train episode 110: reward = -124.55, steps = 200
01:14:13 [INFO] train episode 111: reward = -124.19, steps = 200
01:16:14 [INFO] train episode 112: reward = -353.29, steps = 200
01:18:12 [INFO] train episode 113: reward = -119.65, steps = 200
01:20:10 [INFO] train episode 114: reward = -117.90, steps = 200
01:22:09 [INFO] train episode 115: reward = -468.36, steps = 200
01:24:09 [INFO] train episode 116: reward = -369.00, steps = 200
01:26:09 [INFO] train episode 117: reward = -123.22, steps = 200
01:28:09 [INFO] train episode 118: reward = -237.21, steps = 200
01:30:10 [INFO] train episode 119: reward = -124.54, steps = 200
01:32:10 [INFO] train episode 120: reward = -363.51, steps = 200
01:34:10 [INFO] train episode 121: reward = -119.92, steps = 200
01:36:10 [INFO] train episode 122: reward = -241.55, steps = 200
01:38:12 [INFO] train episode 123: reward = -501.51, steps = 200
01:40:12 [INFO] train episode 124: reward = -121.89, steps = 200
01:42:11 [INFO] train episode 125: reward = -360.58, steps = 200
01:44:11 [INFO] train episode 126: reward = -124.94, steps = 200
01:46:11 [INFO] train episode 127: reward = -124.46, steps = 200
01:48:12 [INFO] train episode 128: reward = -120.90, steps = 200
01:50:12 [INFO] train episode 129: reward = -350.52, steps = 200
01:52:12 [INFO] train episode 130: reward = -1.16, steps = 200
01:54:14 [INFO] train episode 131: reward = -359.38, steps = 200
01:56:13 [INFO] train episode 132: reward = -123.24, steps = 200
01:58:12 [INFO] train episode 133: reward = -118.54, steps = 200
02:00:11 [INFO] train episode 134: reward = -493.59, steps = 200
02:02:11 [INFO] train episode 135: reward = -520.58, steps = 200
02:04:09 [INFO] train episode 136: reward = -240.74, steps = 200
02:06:09 [INFO] train episode 137: reward = -476.74, steps = 200
02:08:10 [INFO] train episode 138: reward = -240.46, steps = 200
02:10:09 [INFO] train episode 139: reward = -358.08, steps = 200
02:12:09 [INFO] train episode 140: reward = -362.48, steps = 200
02:14:09 [INFO] train episode 141: reward = -354.69, steps = 200
02:16:08 [INFO] train episode 142: reward = -519.00, steps = 200
02:18:08 [INFO] train episode 143: reward = -120.05, steps = 200
02:20:07 [INFO] train episode 144: reward = -119.95, steps = 200
02:22:07 [INFO] train episode 145: reward = -121.48, steps = 200
02:24:06 [INFO] train episode 146: reward = -125.48, steps = 200
02:26:06 [INFO] train episode 147: reward = -243.18, steps = 200
02:28:07 [INFO] train episode 148: reward = -251.35, steps = 200
02:30:05 [INFO] train episode 149: reward = -594.13, steps = 200
02:32:03 [INFO] train episode 150: reward = -614.17, steps = 200
02:34:01 [INFO] train episode 151: reward = -360.23, steps = 200
02:35:59 [INFO] train episode 152: reward = -2.58, steps = 200
02:37:59 [INFO] train episode 153: reward = -472.12, steps = 200
02:40:02 [INFO] train episode 154: reward = -495.22, steps = 200
02:42:01 [INFO] train episode 155: reward = -492.00, steps = 200
02:43:59 [INFO] train episode 156: reward = -239.79, steps = 200
02:45:57 [INFO] train episode 157: reward = -124.08, steps = 200
02:47:56 [INFO] train episode 158: reward = -236.19, steps = 200
02:49:54 [INFO] train episode 159: reward = -124.14, steps = 200
02:51:52 [INFO] train episode 160: reward = -351.57, steps = 200
02:53:51 [INFO] train episode 161: reward = -243.86, steps = 200
02:55:49 [INFO] train episode 162: reward = -592.03, steps = 200
02:57:39 [INFO] train episode 163: reward = -246.36, steps = 200
02:59:23 [INFO] train episode 164: reward = -122.62, steps = 200
03:01:08 [INFO] train episode 165: reward = -124.00, steps = 200
03:02:53 [INFO] train episode 166: reward = -244.09, steps = 200
03:04:39 [INFO] train episode 167: reward = -599.97, steps = 200
03:06:24 [INFO] train episode 168: reward = -243.81, steps = 200
03:08:11 [INFO] train episode 169: reward = -247.69, steps = 200
03:09:57 [INFO] train episode 170: reward = -588.21, steps = 200
03:11:43 [INFO] train episode 171: reward = -123.71, steps = 200
03:13:29 [INFO] train episode 172: reward = -123.24, steps = 200
03:15:15 [INFO] train episode 173: reward = -560.48, steps = 200
03:17:00 [INFO] train episode 174: reward = -239.93, steps = 200
03:18:46 [INFO] train episode 175: reward = -474.06, steps = 200
03:20:30 [INFO] train episode 176: reward = -125.17, steps = 200
03:22:13 [INFO] train episode 177: reward = -505.22, steps = 200
03:23:56 [INFO] train episode 178: reward = -123.46, steps = 200
03:25:39 [INFO] train episode 179: reward = -354.98, steps = 200
03:27:24 [INFO] train episode 180: reward = -606.40, steps = 200
03:29:07 [INFO] train episode 181: reward = -243.73, steps = 200
03:30:51 [INFO] train episode 182: reward = -237.82, steps = 200
03:32:33 [INFO] train episode 183: reward = -245.63, steps = 200
03:34:16 [INFO] train episode 184: reward = -357.65, steps = 200
03:35:59 [INFO] train episode 185: reward = -124.40, steps = 200
03:37:41 [INFO] train episode 186: reward = -121.28, steps = 200
03:39:24 [INFO] train episode 187: reward = -1.86, steps = 200
03:41:07 [INFO] train episode 188: reward = -356.27, steps = 200
03:42:51 [INFO] train episode 189: reward = -1.59, steps = 200
03:44:35 [INFO] train episode 190: reward = -239.10, steps = 200
03:46:18 [INFO] train episode 191: reward = -471.49, steps = 200
03:48:01 [INFO] train episode 192: reward = -241.57, steps = 200
03:49:43 [INFO] train episode 193: reward = -568.13, steps = 200
03:51:26 [INFO] train episode 194: reward = -125.65, steps = 200
03:53:10 [INFO] train episode 195: reward = -362.44, steps = 200
03:54:53 [INFO] train episode 196: reward = -241.77, steps = 200
03:56:36 [INFO] train episode 197: reward = -481.28, steps = 200
03:58:20 [INFO] train episode 198: reward = -363.06, steps = 200
04:00:02 [INFO] train episode 199: reward = -125.28, steps = 200
04:01:46 [INFO] train episode 200: reward = -492.51, steps = 200
04:03:28 [INFO] train episode 201: reward = -117.85, steps = 200
04:05:09 [INFO] train episode 202: reward = -124.04, steps = 200
04:06:51 [INFO] train episode 203: reward = -120.50, steps = 200
04:08:34 [INFO] train episode 204: reward = -246.64, steps = 200
04:10:26 [INFO] train episode 205: reward = -124.52, steps = 200
04:12:09 [INFO] train episode 206: reward = -124.92, steps = 200
04:13:51 [INFO] train episode 207: reward = -119.11, steps = 200
04:15:33 [INFO] train episode 208: reward = -118.05, steps = 200
04:17:15 [INFO] train episode 209: reward = -126.10, steps = 200
04:18:56 [INFO] train episode 210: reward = -121.87, steps = 200
04:18:56 [INFO] ==== test ====
04:19:19 [INFO] test episode 0: reward = -117.49, steps = 200
04:19:41 [INFO] test episode 1: reward = -120.82, steps = 200
04:20:03 [INFO] test episode 2: reward = -0.43, steps = 200
04:20:27 [INFO] test episode 3: reward = -609.62, steps = 200
04:20:49 [INFO] test episode 4: reward = -125.38, steps = 200
04:21:12 [INFO] test episode 5: reward = -126.21, steps = 200
04:21:35 [INFO] test episode 6: reward = -119.12, steps = 200
04:21:57 [INFO] test episode 7: reward = -489.59, steps = 200
04:22:20 [INFO] test episode 8: reward = -497.19, steps = 200
04:22:43 [INFO] test episode 9: reward = -119.96, steps = 200
04:23:05 [INFO] test episode 10: reward = -121.93, steps = 200
04:23:27 [INFO] test episode 11: reward = -0.18, steps = 200
04:23:50 [INFO] test episode 12: reward = -484.38, steps = 200
04:24:12 [INFO] test episode 13: reward = -357.51, steps = 200
04:24:34 [INFO] test episode 14: reward = -125.31, steps = 200
04:24:57 [INFO] test episode 15: reward = -636.04, steps = 200
04:25:19 [INFO] test episode 16: reward = -241.37, steps = 200
04:25:42 [INFO] test episode 17: reward = -496.39, steps = 200
04:26:04 [INFO] test episode 18: reward = -125.82, steps = 200
04:26:27 [INFO] test episode 19: reward = -233.57, steps = 200
04:26:51 [INFO] test episode 20: reward = -362.01, steps = 200
04:27:13 [INFO] test episode 21: reward = -123.02, steps = 200
04:27:36 [INFO] test episode 22: reward = -125.20, steps = 200
04:27:59 [INFO] test episode 23: reward = -366.01, steps = 200
04:28:21 [INFO] test episode 24: reward = -588.12, steps = 200
04:28:44 [INFO] test episode 25: reward = -119.44, steps = 200
04:29:07 [INFO] test episode 26: reward = -124.77, steps = 200
04:29:29 [INFO] test episode 27: reward = -237.18, steps = 200
04:29:52 [INFO] test episode 28: reward = -233.96, steps = 200
04:30:15 [INFO] test episode 29: reward = -121.85, steps = 200
04:30:37 [INFO] test episode 30: reward = -560.96, steps = 200
04:30:59 [INFO] test episode 31: reward = -124.91, steps = 200
04:31:22 [INFO] test episode 32: reward = -240.69, steps = 200
04:31:44 [INFO] test episode 33: reward = -634.98, steps = 200
04:32:05 [INFO] test episode 34: reward = -126.30, steps = 200
04:32:27 [INFO] test episode 35: reward = -495.60, steps = 200
04:32:49 [INFO] test episode 36: reward = -474.51, steps = 200
04:33:10 [INFO] test episode 37: reward = -551.25, steps = 200
04:33:32 [INFO] test episode 38: reward = -123.75, steps = 200
04:33:53 [INFO] test episode 39: reward = -121.79, steps = 200
04:34:15 [INFO] test episode 40: reward = -357.20, steps = 200
04:34:37 [INFO] test episode 41: reward = -124.20, steps = 200
04:34:58 [INFO] test episode 42: reward = -500.24, steps = 200
04:35:20 [INFO] test episode 43: reward = -122.91, steps = 200
04:35:41 [INFO] test episode 44: reward = -351.98, steps = 200
04:36:02 [INFO] test episode 45: reward = -235.28, steps = 200
04:36:24 [INFO] test episode 46: reward = -123.27, steps = 200
04:36:46 [INFO] test episode 47: reward = -121.95, steps = 200
04:37:06 [INFO] test episode 48: reward = -357.05, steps = 200
04:37:27 [INFO] test episode 49: reward = -121.16, steps = 200
04:37:48 [INFO] test episode 50: reward = -481.52, steps = 200
04:38:08 [INFO] test episode 51: reward = -123.90, steps = 200
04:38:29 [INFO] test episode 52: reward = -0.19, steps = 200
04:38:50 [INFO] test episode 53: reward = -241.87, steps = 200
04:39:11 [INFO] test episode 54: reward = -356.29, steps = 200
04:39:31 [INFO] test episode 55: reward = -355.98, steps = 200
04:39:52 [INFO] test episode 56: reward = -119.89, steps = 200
04:40:12 [INFO] test episode 57: reward = -123.31, steps = 200
04:40:33 [INFO] test episode 58: reward = -123.78, steps = 200
04:40:54 [INFO] test episode 59: reward = -589.42, steps = 200
04:41:15 [INFO] test episode 60: reward = -0.22, steps = 200
04:41:35 [INFO] test episode 61: reward = -123.29, steps = 200
04:41:56 [INFO] test episode 62: reward = -117.87, steps = 200
04:42:17 [INFO] test episode 63: reward = -362.99, steps = 200
04:42:38 [INFO] test episode 64: reward = -123.37, steps = 200
04:42:58 [INFO] test episode 65: reward = -234.81, steps = 200
04:43:19 [INFO] test episode 66: reward = -243.27, steps = 200
04:43:40 [INFO] test episode 67: reward = -620.50, steps = 200
04:44:01 [INFO] test episode 68: reward = -119.09, steps = 200
04:44:21 [INFO] test episode 69: reward = -122.85, steps = 200
04:44:42 [INFO] test episode 70: reward = -245.37, steps = 200
04:45:04 [INFO] test episode 71: reward = -123.43, steps = 200
04:45:24 [INFO] test episode 72: reward = -234.45, steps = 200
04:45:45 [INFO] test episode 73: reward = -244.40, steps = 200
04:46:06 [INFO] test episode 74: reward = -125.58, steps = 200
04:46:27 [INFO] test episode 75: reward = -118.40, steps = 200
04:46:47 [INFO] test episode 76: reward = -616.93, steps = 200
04:47:08 [INFO] test episode 77: reward = -124.10, steps = 200
04:47:29 [INFO] test episode 78: reward = -125.62, steps = 200
04:47:50 [INFO] test episode 79: reward = -119.06, steps = 200
04:48:10 [INFO] test episode 80: reward = -349.48, steps = 200
04:48:31 [INFO] test episode 81: reward = -231.84, steps = 200
04:48:52 [INFO] test episode 82: reward = -0.43, steps = 200
04:49:13 [INFO] test episode 83: reward = -124.12, steps = 200
04:49:34 [INFO] test episode 84: reward = -233.98, steps = 200
04:49:55 [INFO] test episode 85: reward = -0.10, steps = 200
04:50:15 [INFO] test episode 86: reward = -361.32, steps = 200
04:50:35 [INFO] test episode 87: reward = -360.94, steps = 200
04:50:54 [INFO] test episode 88: reward = -236.13, steps = 200
04:51:14 [INFO] test episode 89: reward = -121.61, steps = 200
04:51:34 [INFO] test episode 90: reward = -352.96, steps = 200
04:51:54 [INFO] test episode 91: reward = -235.01, steps = 200
04:52:14 [INFO] test episode 92: reward = -123.05, steps = 200
04:52:34 [INFO] test episode 93: reward = -495.73, steps = 200
04:52:54 [INFO] test episode 94: reward = -244.08, steps = 200
04:53:13 [INFO] test episode 95: reward = -123.29, steps = 200
04:53:33 [INFO] test episode 96: reward = -0.95, steps = 200
04:53:53 [INFO] test episode 97: reward = -242.85, steps = 200
04:54:13 [INFO] test episode 98: reward = -124.81, steps = 200
04:54:33 [INFO] test episode 99: reward = -122.80, steps = 200
04:54:33 [INFO] average episode reward = -241.67 ± 171.27
In [7]:
env.close()