Use a Closed-Form Policy to Play Pendulum-v1
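The observation of Pendulum-v1 is (cos θ, sin θ, angular velocity), where θ = 0 means the pendulum points straight up, and the action is a single torque in [-2, 2]; every episode is truncated after 200 steps (see max_episode_steps in the spec printed below). Rewards are negative costs that penalize deviation from upright, angular velocity, and applied torque, so scores closer to zero are better. Rather than learning anything, the agent below recovers the angle from the observation as θ = arcsin(sin θ), replaced by π − θ when cos θ < 0, and applies a fixed bang-bang rule: full torque +2 or -2 depending on which side of two hand-tuned switching curves in the (angle, angular velocity) plane the state falls, after mirroring states with sin θ < 0 to exploit the left-right symmetry of the problem.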

In [1]:
import sys
import logging

import numpy as np
np.random.seed(0)
import gym

logging.basicConfig(level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        stream=sys.stdout, datefmt='%H:%M:%S')
In [2]:
env = gym.make('Pendulum-v1')
for key in vars(env.spec):
    logging.info('%s: %s', key, vars(env.spec)[key])
for key in vars(env.unwrapped):
    logging.info('%s: %s', key, vars(env.unwrapped)[key])
00:00:00 [INFO] id: Pendulum-v1
00:00:00 [INFO] entry_point: gym.envs.classic_control:PendulumEnv
00:00:00 [INFO] reward_threshold: None
00:00:00 [INFO] nondeterministic: False
00:00:00 [INFO] max_episode_steps: 200
00:00:00 [INFO] order_enforce: True
00:00:00 [INFO] _kwargs: {}
00:00:00 [INFO] _env_name: Pendulum
00:00:00 [INFO] max_speed: 8
00:00:00 [INFO] max_torque: 2.0
00:00:00 [INFO] dt: 0.05
00:00:00 [INFO] g: 10.0
00:00:00 [INFO] m: 1.0
00:00:00 [INFO] l: 1.0
00:00:00 [INFO] viewer: None
00:00:00 [INFO] action_space: Box([-2.], [2.], (1,), float32)
00:00:00 [INFO] observation_space: Box([-1. -1. -8.], [1. 1. 8.], (3,), float32)
00:00:00 [INFO] np_random: RandomState(MT19937)
00:00:00 [INFO] spec: EnvSpec(Pendulum-v1)
In [3]:
class ClosedFormAgent:
    def __init__(self, _):
        pass  # the policy is fixed, so nothing is taken from the environment

    def reset(self, mode=None):
        pass

    def step(self, observation, reward, terminated):
        # observation = (cos(angle), sin(angle), angular velocity),
        # where angle = 0 means the pendulum points straight up
        x, y, angle_velocity = observation
        flip = (y < 0.)
        if flip:  # mirror the state to exploit left-right symmetry
            y *= -1. # now y >= 0
            angle_velocity *= -1.
        angle = np.arcsin(y)
        if x < 0.:
            angle = np.pi - angle
        # bang-bang control: full torque one way or the other, chosen by
        # hand-tuned switching curves in the (angle, angular velocity) plane
        if (angle < -0.3 * angle_velocity) or \
                (angle > 0.03 * (angle_velocity - 2.5) ** 2. + 1. and \
                angle < 0.15 * (angle_velocity + 3.) ** 2. + 2.):
            force = 2.
        else:
            force = -2.
        if flip:
            force *= -1. # mirror the action back
        action = np.array([force,])
        return action

    def close(self):
        pass


agent = ClosedFormAgent(env)
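As a quick sanity check (a sketch that assumes the cells above have already been run, so np and agent exist), the policy should command full positive torque when the pendulum hangs straight down at rest, i.e. for the observation (cos π, sin π, 0) = (-1, 0, 0):

In [ ]:
# Illustrative check: pendulum hanging straight down and motionless.
# The observation format is (cos(angle), sin(angle), angular velocity).
obs_down = np.array([-1., 0., 0.])
print(agent.step(obs_down, 0., False))  # full torque to start the swing-up: [2.]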
In [4]:
def play_episode(env, agent, seed=None, mode=None, render=False):
    observation, _ = env.reset(seed=seed)
    reward, terminated, truncated = 0., False, False
    agent.reset(mode=mode)
    episode_reward, elapsed_steps = 0., 0
    while True:
        # the agent is fed the final observation before the loop breaks
        action = agent.step(observation, reward, terminated)
        if render:
            env.render()
        if terminated or truncated:
            break
        observation, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        elapsed_steps += 1
    agent.close()
    return episode_reward, elapsed_steps


logging.info('==== test ====')
episode_rewards = []
for episode in range(100):
    episode_reward, elapsed_steps = play_episode(env, agent)
    episode_rewards.append(episode_reward)
    logging.info('test episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
logging.info('average episode reward = %.2f ± %.2f',
        np.mean(episode_rewards), np.std(episode_rewards))
00:00:00 [INFO] ==== test ====
00:00:00 [INFO] test episode 0: reward = -267.55, steps = 200
00:00:00 [INFO] test episode 1: reward = -126.17, steps = 200
00:00:00 [INFO] test episode 2: reward = -235.76, steps = 200
00:00:00 [INFO] test episode 3: reward = -127.66, steps = 200
00:00:00 [INFO] test episode 4: reward = -231.72, steps = 200
00:00:00 [INFO] test episode 5: reward = -283.73, steps = 200
00:00:01 [INFO] test episode 6: reward = -2.03, steps = 200
00:00:01 [INFO] test episode 7: reward = -225.50, steps = 200
00:00:01 [INFO] test episode 8: reward = -2.29, steps = 200
00:00:01 [INFO] test episode 9: reward = -283.62, steps = 200
00:00:01 [INFO] test episode 10: reward = -120.88, steps = 200
00:00:01 [INFO] test episode 11: reward = -3.70, steps = 200
00:00:01 [INFO] test episode 12: reward = -238.70, steps = 200
00:00:01 [INFO] test episode 13: reward = -119.87, steps = 200
00:00:01 [INFO] test episode 14: reward = -1.69, steps = 200
00:00:01 [INFO] test episode 15: reward = -120.17, steps = 200
00:00:01 [INFO] test episode 16: reward = -2.05, steps = 200
00:00:01 [INFO] test episode 17: reward = -126.85, steps = 200
00:00:01 [INFO] test episode 18: reward = -230.96, steps = 200
00:00:01 [INFO] test episode 19: reward = -1.69, steps = 200
00:00:01 [INFO] test episode 20: reward = -127.73, steps = 200
00:00:01 [INFO] test episode 21: reward = -338.72, steps = 200
00:00:01 [INFO] test episode 22: reward = -301.22, steps = 200
00:00:01 [INFO] test episode 23: reward = -221.75, steps = 200
00:00:01 [INFO] test episode 24: reward = -127.78, steps = 200
00:00:01 [INFO] test episode 25: reward = -221.79, steps = 200
00:00:01 [INFO] test episode 26: reward = -237.35, steps = 200
00:00:01 [INFO] test episode 27: reward = -120.55, steps = 200
00:00:01 [INFO] test episode 28: reward = -119.65, steps = 200
00:00:01 [INFO] test episode 29: reward = -336.56, steps = 200
00:00:01 [INFO] test episode 30: reward = -245.63, steps = 200
00:00:01 [INFO] test episode 31: reward = -238.51, steps = 200
00:00:01 [INFO] test episode 32: reward = -3.20, steps = 200
00:00:01 [INFO] test episode 33: reward = -118.62, steps = 200
00:00:01 [INFO] test episode 34: reward = -223.51, steps = 200
00:00:01 [INFO] test episode 35: reward = -232.35, steps = 200
00:00:01 [INFO] test episode 36: reward = -352.74, steps = 200
00:00:01 [INFO] test episode 37: reward = -4.81, steps = 200
00:00:01 [INFO] test episode 38: reward = -124.59, steps = 200
00:00:01 [INFO] test episode 39: reward = -116.71, steps = 200
00:00:02 [INFO] test episode 40: reward = -240.21, steps = 200
00:00:02 [INFO] test episode 41: reward = -349.29, steps = 200
00:00:02 [INFO] test episode 42: reward = -124.80, steps = 200
00:00:02 [INFO] test episode 43: reward = -2.00, steps = 200
00:00:02 [INFO] test episode 44: reward = -127.37, steps = 200
00:00:02 [INFO] test episode 45: reward = -120.40, steps = 200
00:00:02 [INFO] test episode 46: reward = -116.89, steps = 200
00:00:02 [INFO] test episode 47: reward = -120.74, steps = 200
00:00:02 [INFO] test episode 48: reward = -3.17, steps = 200
00:00:02 [INFO] test episode 49: reward = -1.92, steps = 200
00:00:02 [INFO] test episode 50: reward = -225.52, steps = 200
00:00:02 [INFO] test episode 51: reward = -123.44, steps = 200
00:00:02 [INFO] test episode 52: reward = -116.63, steps = 200
00:00:02 [INFO] test episode 53: reward = -119.64, steps = 200
00:00:02 [INFO] test episode 54: reward = -120.17, steps = 200
00:00:02 [INFO] test episode 55: reward = -229.39, steps = 200
00:00:02 [INFO] test episode 56: reward = -127.65, steps = 200
00:00:02 [INFO] test episode 57: reward = -122.00, steps = 200
00:00:02 [INFO] test episode 58: reward = -118.49, steps = 200
00:00:02 [INFO] test episode 59: reward = -115.77, steps = 200
00:00:02 [INFO] test episode 60: reward = -126.50, steps = 200
00:00:02 [INFO] test episode 61: reward = -119.58, steps = 200
00:00:02 [INFO] test episode 62: reward = -115.18, steps = 200
00:00:02 [INFO] test episode 63: reward = -118.86, steps = 200
00:00:02 [INFO] test episode 64: reward = -115.36, steps = 200
00:00:02 [INFO] test episode 65: reward = -123.53, steps = 200
00:00:02 [INFO] test episode 66: reward = -2.37, steps = 200
00:00:02 [INFO] test episode 67: reward = -239.47, steps = 200
00:00:02 [INFO] test episode 68: reward = -222.48, steps = 200
00:00:02 [INFO] test episode 69: reward = -117.72, steps = 200
00:00:02 [INFO] test episode 70: reward = -121.58, steps = 200
00:00:02 [INFO] test episode 71: reward = -128.51, steps = 200
00:00:03 [INFO] test episode 72: reward = -218.31, steps = 200
00:00:03 [INFO] test episode 73: reward = -2.90, steps = 200
00:00:03 [INFO] test episode 74: reward = -115.21, steps = 200
00:00:03 [INFO] test episode 75: reward = -3.35, steps = 200
00:00:03 [INFO] test episode 76: reward = -116.52, steps = 200
00:00:03 [INFO] test episode 77: reward = -125.39, steps = 200
00:00:03 [INFO] test episode 78: reward = -118.11, steps = 200
00:00:03 [INFO] test episode 79: reward = -127.57, steps = 200
00:00:03 [INFO] test episode 80: reward = -244.67, steps = 200
00:00:03 [INFO] test episode 81: reward = -3.62, steps = 200
00:00:03 [INFO] test episode 82: reward = -337.73, steps = 200
00:00:03 [INFO] test episode 83: reward = -133.64, steps = 200
00:00:03 [INFO] test episode 84: reward = -122.92, steps = 200
00:00:03 [INFO] test episode 85: reward = -124.27, steps = 200
00:00:03 [INFO] test episode 86: reward = -339.02, steps = 200
00:00:03 [INFO] test episode 87: reward = -117.75, steps = 200
00:00:03 [INFO] test episode 88: reward = -127.78, steps = 200
00:00:03 [INFO] test episode 89: reward = -118.56, steps = 200
00:00:03 [INFO] test episode 90: reward = -124.17, steps = 200
00:00:03 [INFO] test episode 91: reward = -120.17, steps = 200
00:00:03 [INFO] test episode 92: reward = -116.71, steps = 200
00:00:03 [INFO] test episode 93: reward = -127.08, steps = 200
00:00:03 [INFO] test episode 94: reward = -2.01, steps = 200
00:00:03 [INFO] test episode 95: reward = -238.65, steps = 200
00:00:03 [INFO] test episode 96: reward = -124.64, steps = 200
00:00:03 [INFO] test episode 97: reward = -118.74, steps = 200
00:00:03 [INFO] test episode 98: reward = -120.61, steps = 200
00:00:03 [INFO] test episode 99: reward = -219.02, steps = 200
00:00:03 [INFO] average episode reward = -145.52 ± 90.77
In [5]:
env.close()
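To actually watch the policy, the environment has to be created with a render mode. A minimal sketch, assuming gym >= 0.26 (the same API used above), where the render mode is fixed when the environment is constructed:

In [ ]:
# Optional: re-create the environment with on-screen rendering and play one episode.
env = gym.make('Pendulum-v1', render_mode='human')
episode_reward, elapsed_steps = play_episode(env, agent, render=True)
logging.info('rendered episode: reward = %.2f, steps = %d',
        episode_reward, elapsed_steps)
env.close()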