Eligibility-trace actor-critic on Acrobot-v1 (PyTorch version)
%matplotlib inline
import sys
import logging
import copy
import itertools
import numpy as np
np.random.seed(0)
import pandas as pd
import gym
import matplotlib.pyplot as plt
import torch
torch.manual_seed(0)
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.distributions as distributions
logging.basicConfig(level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(message)s',
stream=sys.stdout, datefmt='%H:%M:%S')
env = gym.make('Acrobot-v1')
for key in vars(env):
logging.info('%s: %s', key, vars(env)[key])
for key in vars(env.spec):
logging.info('%s: %s', key, vars(env.spec)[key])
14:21:37 [INFO] env: <AcrobotEnv<Acrobot-v1>>
14:21:37 [INFO] action_space: Discrete(3)
14:21:37 [INFO] observation_space: Box(-28.274333953857422, 28.274333953857422, (6,), float32)
14:21:37 [INFO] reward_range: (-inf, inf)
14:21:37 [INFO] metadata: {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 15}
14:21:37 [INFO] _max_episode_steps: 500
14:21:37 [INFO] _elapsed_steps: None
14:21:37 [INFO] id: Acrobot-v1
14:21:37 [INFO] entry_point: gym.envs.classic_control:AcrobotEnv
14:21:37 [INFO] reward_threshold: -100.0
14:21:37 [INFO] nondeterministic: False
14:21:37 [INFO] max_episode_steps: 500
14:21:37 [INFO] _kwargs: {}
14:21:37 [INFO] _env_name: Acrobot
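The class below implements actor-critic with eligibility traces. For reference, its updates follow the standard form (as in Sutton and Barto), where $I$ is the accumulated discount $\gamma^t$ kept in self.discount, and $\mathbf{w}$, $\boldsymbol{\theta}$ are the critic and actor weights:

$$
\begin{aligned}
\delta &\leftarrow R + \gamma \hat{v}(S', \mathbf{w}) - \hat{v}(S, \mathbf{w}) \\
\mathbf{z}^{\mathbf{w}} &\leftarrow \gamma \lambda^{\mathbf{w}} \mathbf{z}^{\mathbf{w}} + \nabla \hat{v}(S, \mathbf{w}),
&\mathbf{w} &\leftarrow \mathbf{w} + \alpha^{\mathbf{w}} \delta \mathbf{z}^{\mathbf{w}} \\
\mathbf{z}^{\boldsymbol{\theta}} &\leftarrow \gamma \lambda^{\boldsymbol{\theta}} \mathbf{z}^{\boldsymbol{\theta}} + I \nabla \ln \pi(A \mid S, \boldsymbol{\theta}),
&\boldsymbol{\theta} &\leftarrow \boldsymbol{\theta} + \alpha^{\boldsymbol{\theta}} \delta \mathbf{z}^{\boldsymbol{\theta}}, \quad I \leftarrow \gamma I
\end{aligned}
$$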
class EligibilityTraceActorCriticAgent:
def __init__(self, env):
self.action_n = env.action_space.n
self.gamma = 0.99
self.actor_lambda = 0.9
self.critic_lambda = 0.9
self.actor_net = self.build_net(
input_size=env.observation_space.shape[0],
hidden_sizes=[100,],
output_size=env.action_space.n, output_activator=nn.Softmax(1))
self.actor_optimizer = optim.Adam(self.actor_net.parameters(), 0.0001)
self.actor_trace = copy.deepcopy(self.actor_net)
        self.critic_net = self.build_net(
                input_size=env.observation_space.shape[0],
                hidden_sizes=[100,], output_size=1)  # the state value is a scalar
self.critic_optimizer = optim.Adam(self.critic_net.parameters(), 0.0002)
self.critic_loss = nn.MSELoss()
self.critic_trace = copy.deepcopy(self.critic_net)
def build_net(self, input_size, hidden_sizes, output_size,
output_activator=None):
layers = []
        for in_size, out_size in zip(
                [input_size,] + hidden_sizes, hidden_sizes + [output_size,]):
            layers.append(nn.Linear(in_size, out_size))
layers.append(nn.ReLU())
        layers = layers[:-1]  # drop the activation after the output layer
if output_activator:
layers.append(output_activator)
net = nn.Sequential(*layers)
return net
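    # For Acrobot-v1 (6-dimensional observation, 3 actions), build_net yields
    # for the actor: Sequential(Linear(6, 100), ReLU(), Linear(100, 3), Softmax(dim=1))
    # and for the critic: Sequential(Linear(6, 100), ReLU(), Linear(100, 1))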
def reset(self, mode=None):
self.mode = mode
if self.mode == 'train':
self.trajectory = []
self.discount = 1.
            # zero both eligibility traces at the start of a training episode
            def weights_init(m):
if isinstance(m, nn.Linear):
init.zeros_(m.weight)
init.zeros_(m.bias)
self.actor_trace.apply(weights_init)
self.critic_trace.apply(weights_init)
def step(self, observation, reward, terminated):
state_tensor = torch.as_tensor(observation, dtype=torch.float).unsqueeze(0)
prob_tensor = self.actor_net(state_tensor)
action_tensor = distributions.Categorical(prob_tensor).sample()
action = action_tensor.numpy()[0]
if self.mode == 'train':
self.trajectory += [observation, reward, terminated, action]
            if len(self.trajectory) >= 8:  # two consecutive steps recorded
self.learn()
self.discount *= self.gamma
return action
def close(self):
pass
def update_net(self, target_net, evaluate_net, target_weight, evaluate_weight):
for target_param, evaluate_param in zip(
target_net.parameters(), evaluate_net.parameters()):
target_param.data.copy_(evaluate_weight * evaluate_param.data
+ target_weight * target_param.data)
def learn(self):
        # trajectory holds (observation, reward, terminated, action) per step,
        # so the last 8 entries are two consecutive steps
        state, _, _, action, next_state, reward, terminated, next_action = \
                self.trajectory[-8:]
state_tensor = torch.as_tensor(state, dtype=torch.float).unsqueeze(0)
        next_state_tensor = torch.as_tensor(next_state,
                dtype=torch.float).unsqueeze(0)
pred_tensor = self.critic_net(state_tensor)
pred = pred_tensor.detach().numpy()[0, 0]
        next_v_tensor = self.critic_net(next_state_tensor)
        next_v = next_v_tensor.detach().numpy()[0, 0]
target = reward + (1. - terminated) * self.gamma * next_v
td_error = target - pred
# update actor
pi_tensor = self.actor_net(state_tensor)[0, action]
logpi_tensor = torch.log(torch.clamp(pi_tensor, 1e-6, 1.))
self.actor_optimizer.zero_grad()
logpi_tensor.backward(retain_graph=True)
        for param, trace in zip(self.actor_net.parameters(),
                self.actor_trace.parameters()):
            # z <- gamma * lambda * z + gamma^t * grad(log pi(a|s))
            trace.data.copy_(self.gamma * self.actor_lambda * trace.data +
                    self.discount * param.grad)
            # store -delta * z in .grad so the descent step ascends the objective
            param.grad.copy_(-td_error * trace.data)
self.actor_optimizer.step()
# update critic
v_tensor = self.critic_net(state_tensor)[0, 0]
self.critic_optimizer.zero_grad()
v_tensor.backward()
        for param, trace in zip(self.critic_net.parameters(),
                self.critic_trace.parameters()):
            # z <- gamma * lambda * z + grad(v(s))
            trace.data.copy_(self.gamma * self.critic_lambda * trace.data +
                    param.grad)
            # store -delta * z in .grad so the step performs w <- w + alpha*delta*z
            param.grad.copy_(-td_error * trace.data)
self.critic_optimizer.step()
agent = EligibilityTraceActorCriticAgent(env)
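A note on the sign trick in learn(): PyTorch optimizers minimize, stepping parameters along the negative of .grad, so writing $-\delta z$ into .grad makes the step perform the ascent update $\theta \leftarrow \theta + \alpha \delta z$. A minimal sketch with made-up values (not taken from the agent above):

import torch
import torch.optim as optim

w = torch.nn.Parameter(torch.tensor([1.0]))
opt = optim.SGD([w], lr=0.1)
td_error = 2.0                # hypothetical TD error delta
trace = torch.tensor([0.5])   # hypothetical eligibility trace z
opt.zero_grad()
w.grad = -td_error * trace    # grad := -delta * z
opt.step()                    # w <- w - lr * grad = 1.0 + 0.1 * 2.0 * 0.5
print(w.data)                 # tensor([1.1000])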
def play_episode(env, agent, seed=None, mode=None, render=False):
observation, _ = env.reset(seed=seed)
reward, terminated, truncated = 0., False, False
agent.reset(mode=mode)
episode_reward, elapsed_steps = 0., 0
while True:
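        # agent.step() selects an action and, in train mode, also learns from
        # the newest transition; it is called once more with the terminal
        # observation so the final transition is learned before the loop exits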
action = agent.step(observation, reward, terminated)
if render:
env.render()
if terminated or truncated:
break
observation, reward, terminated, truncated, _ = env.step(action)
episode_reward += reward
elapsed_steps += 1
agent.close()
return episode_reward, elapsed_steps
logging.info('==== train ====')
episode_rewards = []
for episode in itertools.count():
episode_reward, elapsed_steps = play_episode(env, agent, seed=episode,
mode='train')
episode_rewards.append(episode_reward)
logging.info('train episode %d: reward = %.2f, steps = %d',
episode, episode_reward, elapsed_steps)
if np.mean(episode_rewards[-10:]) > -120:
break
plt.plot(episode_rewards)
logging.info('==== test ====')
episode_rewards = []
for episode in range(100):
episode_reward, elapsed_steps = play_episode(env, agent)
episode_rewards.append(episode_reward)
logging.info('test episode %d: reward = %.2f, steps = %d',
episode, episode_reward, elapsed_steps)
logging.info('average episode reward = %.2f ± %.2f',
np.mean(episode_rewards), np.std(episode_rewards))
14:21:37 [INFO] ==== train ====
14:21:40 [INFO] train episode 0: reward = -500.00, steps = 500
14:21:43 [INFO] train episode 1: reward = -500.00, steps = 500
14:21:45 [INFO] train episode 2: reward = -500.00, steps = 500
14:21:47 [INFO] train episode 3: reward = -346.00, steps = 347
14:21:48 [INFO] train episode 4: reward = -149.00, steps = 150
14:21:50 [INFO] train episode 5: reward = -311.00, steps = 312
14:21:51 [INFO] train episode 6: reward = -323.00, steps = 324
14:21:52 [INFO] train episode 7: reward = -184.00, steps = 185
14:21:53 [INFO] train episode 8: reward = -143.00, steps = 144
14:21:54 [INFO] train episode 9: reward = -225.00, steps = 226
14:21:56 [INFO] train episode 10: reward = -207.00, steps = 208
14:21:56 [INFO] train episode 11: reward = -171.00, steps = 172
14:21:57 [INFO] train episode 12: reward = -123.00, steps = 124
14:21:58 [INFO] train episode 13: reward = -208.00, steps = 209
14:21:59 [INFO] train episode 14: reward = -188.00, steps = 189
14:22:00 [INFO] train episode 15: reward = -209.00, steps = 210
14:22:01 [INFO] train episode 16: reward = -123.00, steps = 124
14:22:02 [INFO] train episode 17: reward = -158.00, steps = 159
14:22:03 [INFO] train episode 18: reward = -143.00, steps = 144
14:22:04 [INFO] train episode 19: reward = -204.00, steps = 205
14:22:05 [INFO] train episode 20: reward = -165.00, steps = 166
14:22:06 [INFO] train episode 21: reward = -148.00, steps = 149
14:22:07 [INFO] train episode 22: reward = -145.00, steps = 146
14:22:08 [INFO] train episode 23: reward = -115.00, steps = 116
14:22:08 [INFO] train episode 24: reward = -159.00, steps = 160
14:22:09 [INFO] train episode 25: reward = -135.00, steps = 136
14:22:10 [INFO] train episode 26: reward = -117.00, steps = 118
14:22:11 [INFO] train episode 27: reward = -137.00, steps = 138
14:22:11 [INFO] train episode 28: reward = -129.00, steps = 130
14:22:12 [INFO] train episode 29: reward = -102.00, steps = 103
14:22:13 [INFO] train episode 30: reward = -119.00, steps = 120
14:22:13 [INFO] train episode 31: reward = -99.00, steps = 100
14:22:14 [INFO] train episode 32: reward = -122.00, steps = 123
14:22:15 [INFO] train episode 33: reward = -134.00, steps = 135
14:22:15 [INFO] train episode 34: reward = -108.00, steps = 109
14:22:16 [INFO] train episode 35: reward = -131.00, steps = 132
14:22:16 [INFO] ==== test ====
14:22:16 [INFO] test episode 0: reward = -130.00, steps = 131
14:22:16 [INFO] test episode 1: reward = -226.00, steps = 227
14:22:16 [INFO] test episode 2: reward = -133.00, steps = 134
14:22:17 [INFO] test episode 3: reward = -219.00, steps = 220
14:22:17 [INFO] test episode 4: reward = -189.00, steps = 190
14:22:17 [INFO] test episode 5: reward = -106.00, steps = 107
14:22:17 [INFO] test episode 6: reward = -107.00, steps = 108
14:22:17 [INFO] test episode 7: reward = -111.00, steps = 112
14:22:17 [INFO] test episode 8: reward = -162.00, steps = 163
14:22:17 [INFO] test episode 9: reward = -105.00, steps = 106
14:22:17 [INFO] test episode 10: reward = -133.00, steps = 134
14:22:17 [INFO] test episode 11: reward = -123.00, steps = 124
14:22:17 [INFO] test episode 12: reward = -96.00, steps = 97
14:22:18 [INFO] test episode 13: reward = -118.00, steps = 119
14:22:18 [INFO] test episode 14: reward = -105.00, steps = 106
14:22:18 [INFO] test episode 15: reward = -126.00, steps = 127
14:22:18 [INFO] test episode 16: reward = -182.00, steps = 183
14:22:18 [INFO] test episode 17: reward = -119.00, steps = 120
14:22:18 [INFO] test episode 18: reward = -137.00, steps = 138
14:22:18 [INFO] test episode 19: reward = -133.00, steps = 134
14:22:18 [INFO] test episode 20: reward = -287.00, steps = 288
14:22:19 [INFO] test episode 21: reward = -169.00, steps = 170
14:22:19 [INFO] test episode 22: reward = -217.00, steps = 218
14:22:19 [INFO] test episode 23: reward = -272.00, steps = 273
14:22:19 [INFO] test episode 24: reward = -105.00, steps = 106
14:22:19 [INFO] test episode 25: reward = -223.00, steps = 224
14:22:19 [INFO] test episode 26: reward = -142.00, steps = 143
14:22:19 [INFO] test episode 27: reward = -148.00, steps = 149
14:22:20 [INFO] test episode 28: reward = -158.00, steps = 159
14:22:20 [INFO] test episode 29: reward = -144.00, steps = 145
14:22:20 [INFO] test episode 30: reward = -153.00, steps = 154
14:22:20 [INFO] test episode 31: reward = -126.00, steps = 127
14:22:20 [INFO] test episode 32: reward = -210.00, steps = 211
14:22:20 [INFO] test episode 33: reward = -136.00, steps = 137
14:22:20 [INFO] test episode 34: reward = -140.00, steps = 141
14:22:20 [INFO] test episode 35: reward = -123.00, steps = 124
14:22:20 [INFO] test episode 36: reward = -107.00, steps = 108
14:22:21 [INFO] test episode 37: reward = -139.00, steps = 140
14:22:21 [INFO] test episode 38: reward = -118.00, steps = 119
14:22:21 [INFO] test episode 39: reward = -118.00, steps = 119
14:22:21 [INFO] test episode 40: reward = -118.00, steps = 119
14:22:21 [INFO] test episode 41: reward = -250.00, steps = 251
14:22:21 [INFO] test episode 42: reward = -140.00, steps = 141
14:22:21 [INFO] test episode 43: reward = -160.00, steps = 161
14:22:21 [INFO] test episode 44: reward = -115.00, steps = 116
14:22:21 [INFO] test episode 45: reward = -148.00, steps = 149
14:22:22 [INFO] test episode 46: reward = -128.00, steps = 129
14:22:22 [INFO] test episode 47: reward = -206.00, steps = 207
14:22:22 [INFO] test episode 48: reward = -172.00, steps = 173
14:22:22 [INFO] test episode 49: reward = -152.00, steps = 153
14:22:22 [INFO] test episode 50: reward = -142.00, steps = 143
14:22:22 [INFO] test episode 51: reward = -178.00, steps = 179
14:22:22 [INFO] test episode 52: reward = -84.00, steps = 85
14:22:22 [INFO] test episode 53: reward = -115.00, steps = 116
14:22:23 [INFO] test episode 54: reward = -229.00, steps = 230
14:22:23 [INFO] test episode 55: reward = -226.00, steps = 227
14:22:23 [INFO] test episode 56: reward = -104.00, steps = 105
14:22:23 [INFO] test episode 57: reward = -172.00, steps = 173
14:22:23 [INFO] test episode 58: reward = -115.00, steps = 116
14:22:23 [INFO] test episode 59: reward = -141.00, steps = 142
14:22:23 [INFO] test episode 60: reward = -139.00, steps = 140
14:22:23 [INFO] test episode 61: reward = -148.00, steps = 149
14:22:24 [INFO] test episode 62: reward = -256.00, steps = 257
14:22:24 [INFO] test episode 63: reward = -172.00, steps = 173
14:22:24 [INFO] test episode 64: reward = -161.00, steps = 162
14:22:24 [INFO] test episode 65: reward = -134.00, steps = 135
14:22:24 [INFO] test episode 66: reward = -201.00, steps = 202
14:22:24 [INFO] test episode 67: reward = -117.00, steps = 118
14:22:24 [INFO] test episode 68: reward = -125.00, steps = 126
14:22:24 [INFO] test episode 69: reward = -122.00, steps = 123
14:22:24 [INFO] test episode 70: reward = -197.00, steps = 198
14:22:25 [INFO] test episode 71: reward = -145.00, steps = 146
14:22:25 [INFO] test episode 72: reward = -124.00, steps = 125
14:22:25 [INFO] test episode 73: reward = -147.00, steps = 148
14:22:25 [INFO] test episode 74: reward = -134.00, steps = 135
14:22:25 [INFO] test episode 75: reward = -129.00, steps = 130
14:22:25 [INFO] test episode 76: reward = -107.00, steps = 108
14:22:25 [INFO] test episode 77: reward = -171.00, steps = 172
14:22:25 [INFO] test episode 78: reward = -82.00, steps = 83
14:22:25 [INFO] test episode 79: reward = -117.00, steps = 118
14:22:26 [INFO] test episode 80: reward = -131.00, steps = 132
14:22:26 [INFO] test episode 81: reward = -145.00, steps = 146
14:22:26 [INFO] test episode 82: reward = -111.00, steps = 112
14:22:26 [INFO] test episode 83: reward = -170.00, steps = 171
14:22:26 [INFO] test episode 84: reward = -161.00, steps = 162
14:22:26 [INFO] test episode 85: reward = -109.00, steps = 110
14:22:26 [INFO] test episode 86: reward = -111.00, steps = 112
14:22:26 [INFO] test episode 87: reward = -98.00, steps = 99
14:22:26 [INFO] test episode 88: reward = -142.00, steps = 143
14:22:26 [INFO] test episode 89: reward = -155.00, steps = 156
14:22:27 [INFO] test episode 90: reward = -85.00, steps = 86
14:22:27 [INFO] test episode 91: reward = -101.00, steps = 102
14:22:27 [INFO] test episode 92: reward = -165.00, steps = 166
14:22:27 [INFO] test episode 93: reward = -125.00, steps = 126
14:22:27 [INFO] test episode 94: reward = -114.00, steps = 115
14:22:27 [INFO] test episode 95: reward = -145.00, steps = 146
14:22:27 [INFO] test episode 96: reward = -131.00, steps = 132
14:22:27 [INFO] test episode 97: reward = -126.00, steps = 127
14:22:27 [INFO] test episode 98: reward = -173.00, steps = 174
14:22:27 [INFO] test episode 99: reward = -112.00, steps = 113
14:22:27 [INFO] average episode reward = -146.28 ± 41.48
env.close()