Use TRPO to Play Acrobot-v1

PyTorch version

In [1]:
%matplotlib inline

import sys
import logging
import itertools

import numpy as np
np.random.seed(0)
import pandas as pd
import scipy.signal as signal
import gym
import matplotlib.pyplot as plt
import torch
torch.manual_seed(0)
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.distributions as distributions

logging.basicConfig(level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        stream=sys.stdout, datefmt='%H:%M:%S')
In [2]:
env = gym.make('Acrobot-v1')
for key in vars(env):
    logging.info('%s: %s', key, vars(env)[key])
for key in vars(env.spec):
    logging.info('%s: %s', key, vars(env.spec)[key])
11:45:19 [INFO] env: <AcrobotEnv<Acrobot-v1>>
11:45:19 [INFO] action_space: Discrete(3)
11:45:19 [INFO] observation_space: Box(-28.274333953857422, 28.274333953857422, (6,), float32)
11:45:19 [INFO] reward_range: (-inf, inf)
11:45:19 [INFO] metadata: {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 15}
11:45:19 [INFO] _max_episode_steps: 500
11:45:19 [INFO] _elapsed_steps: None
11:45:19 [INFO] id: Acrobot-v1
11:45:19 [INFO] entry_point: gym.envs.classic_control:AcrobotEnv
11:45:19 [INFO] reward_threshold: -100.0
11:45:19 [INFO] nondeterministic: False
11:45:19 [INFO] max_episode_steps: 500
11:45:19 [INFO] _kwargs: {}
11:45:19 [INFO] _env_name: Acrobot
In [3]:
class PPOReplayer:
    def __init__(self):
        self.fields = ['state', 'action', 'prob', 'advantage', 'return']
        self.memory = pd.DataFrame(columns=self.fields)

    def store(self, df):
        if self.memory.empty:
            self.memory = df[self.fields]
        else:
            self.memory = pd.concat([self.memory, df[self.fields]], ignore_index=True)

    def sample(self, size):
        # draw a batch of transitions uniformly, with replacement
        indices = np.random.choice(self.memory.shape[0], size=size)
        return (np.stack(self.memory.loc[indices, field]) for field in
                self.fields)
In [4]:
def conjugate_gradient(f, b, iter_count=10, epsilon=1e-12, tol=1e-6):
    # solve f(x) = b for x, where f computes the product of a symmetric
    # positive-definite matrix with its argument
    x = b * 0.
    r = b.clone()
    p = b.clone()
    rho = torch.dot(r, r)
    for i in range(iter_count):
        z = f(p)
        alpha = rho / (torch.dot(p, z) + epsilon)
        x += alpha * p
        r -= alpha * z
        rho_new = torch.dot(r, r)
        p = r + (rho_new / rho) * p
        rho = rho_new
        if rho < tol:
            break
    return x, f(x)
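As a quick sanity check (again illustrative, not part of the training run), conjugate_gradient should solve a small symmetric positive-definite system; the matrix below is arbitrary:

A = torch.tensor([[4., 1.], [1., 3.]])
b = torch.tensor([1., 2.])
x, fx = conjugate_gradient(lambda v: A @ v, b)
assert torch.allclose(A @ x, b, atol=1e-4)  # x solves Ax = b
assert torch.allclose(fx, A @ x)  # the second return value is f evaluated at x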
In [5]:
class TRPOAgent:
    def __init__(self, env):
        self.gamma = 0.99

        self.replayer = PPOReplayer()
        self.trajectory = []

        self.actor_net = self.build_net(
                input_size=env.observation_space.shape[0],
                hidden_sizes=[100,],
                output_size=env.action_space.n, output_activator=nn.Softmax(1))
        self.max_kl = 0.01
        self.critic_net = self.build_net(
                input_size=env.observation_space.shape[0],
                hidden_sizes=[100,])
        self.critic_optimizer = optim.Adam(self.critic_net.parameters(), 0.002)
        self.critic_loss = nn.MSELoss()

    def build_net(self, input_size, hidden_sizes, output_size=1,
            output_activator=None):
        layers = []
        for in_size, out_size in zip(
                [input_size,] + hidden_sizes, hidden_sizes + [output_size,]):
            layers.append(nn.Linear(in_size, out_size))
            layers.append(nn.ReLU())
        layers = layers[:-1]  # drop the activation after the output layer
        if output_activator:
            layers.append(output_activator)
        net = nn.Sequential(*layers)
        return net

    def reset(self, mode=None):
        self.mode = mode
        if self.mode == 'train':
            self.trajectory = []

    def step(self, observation, reward, terminated):
        state_tensor = torch.as_tensor(observation, dtype=torch.float).unsqueeze(0)
        prob_tensor = self.actor_net(state_tensor)
        action_tensor = distributions.Categorical(prob_tensor).sample()
        action = action_tensor.numpy()[0]
        if self.mode == 'train':
            self.trajectory += [observation, reward, terminated, action]
        return action

    def close(self):
        if self.mode == 'train':
            self.save_trajectory_to_replayer()
            if len(self.replayer.memory) >= 1000:
                for batch in range(5):  # learn multiple times
                    self.learn()
                self.replayer = PPOReplayer()
                        # discard stored transitions once the policy has been
                        # updated, since they are now off-policy

    def save_trajectory_to_replayer(self):
        df = pd.DataFrame(
                np.array(self.trajectory, dtype=object).reshape(-1, 4),
                columns=['state', 'reward', 'terminated', 'action'])
        state_tensor = torch.as_tensor(np.stack(df['state']), dtype=torch.float)
        action_tensor = torch.as_tensor(df['action'], dtype=torch.long)
        v_tensor = self.critic_net(state_tensor)
        df['v'] = v_tensor.detach().numpy()
        prob_tensor = self.actor_net(state_tensor)
        pi_tensor = prob_tensor.gather(-1, action_tensor.unsqueeze(1)).squeeze(1)
        df['prob'] = pi_tensor.detach().numpy()
        df['next_v'] = df['v'].shift(-1).fillna(0.)  # bootstrap; 0 after the last row
        df['u'] = df['reward'] + self.gamma * df['next_v']  # TD target
        df['delta'] = df['u'] - df['v']  # TD error
        # discounted cumulative sums via an IIR filter run backwards in time:
        # y[t] = x[t] + gamma * y[t+1]
        df['advantage'] = signal.lfilter([1.,], [1., -self.gamma],
                df['delta'][::-1])[::-1]
        df['return'] = signal.lfilter([1.,], [1., -self.gamma],
                df['reward'][::-1])[::-1]
        self.replayer.store(df)

    def learn(self):
        states, actions, old_pis, advantages, returns = \
                self.replayer.sample(size=64)
        state_tensor = torch.as_tensor(states, dtype=torch.float)
        action_tensor = torch.as_tensor(actions, dtype=torch.long)
        old_pi_tensor = torch.as_tensor(old_pis, dtype=torch.float)
        advantage_tensor = torch.as_tensor(advantages, dtype=torch.float)
        return_tensor = torch.as_tensor(returns, dtype=torch.float).unsqueeze(1)

        # update actor
        # ... calculate the first-order gradient g of the surrogate objective
        all_pi_tensor = self.actor_net(state_tensor)
        pi_tensor = all_pi_tensor.gather(1, action_tensor.unsqueeze(1)).squeeze(1)
        surrogate_tensor = (pi_tensor / old_pi_tensor) * advantage_tensor
        loss_tensor = surrogate_tensor.mean()  # objective to maximize
        loss_grads = autograd.grad(loss_tensor, self.actor_net.parameters())
        loss_grad = torch.cat([grad.view(-1) for grad in loss_grads]).detach()
                # flatten for calculating the conjugate gradient

        # ... solve Fx = g by conjugate gradient, where F is the
        #     Fisher information matrix (the Hessian of the KL divergence)
        def f(x):  # calculate Fx as a Hessian-vector product, without forming F
            prob_tensor = self.actor_net(state_tensor)
            prob_old_tensor = prob_tensor.detach()
            kld_tensor = (prob_old_tensor * torch.log(
                    (prob_old_tensor / prob_tensor).clamp(1e-6, 1e6))).sum(axis=1)
            kld_loss_tensor = kld_tensor.mean()
            grads = autograd.grad(kld_loss_tensor, self.actor_net.parameters(),
                    create_graph=True)
            flatten_grad_tensor = torch.cat([grad.view(-1) for grad in grads])
            grad_matmul_x = torch.dot(flatten_grad_tensor, x)
            grad_grads = autograd.grad(grad_matmul_x, self.actor_net.parameters())
            flatten_grad_grad = torch.cat([grad.contiguous().view(-1) for grad
                    in grad_grads]).detach()
            fx = flatten_grad_grad + x * 0.01  # damping keeps F positive definite
            return fx
        x, fx = conjugate_gradient(f, loss_grad)

        # ... rescale to the natural-gradient step: the factor
        #     sqrt(2 * max_kl / x'Fx) makes the quadratic KL estimate
        #     0.5 * d'Fd equal max_kl at the full step
        natural_gradient_tensor = torch.sqrt(2 * self.max_kl /
                torch.dot(fx, x)) * x

        # ... line search
        def set_actor_net_params(flatten_params):
                # auxiliary function to overwrite actor_net
            begin = 0
            for param in self.actor_net.parameters():
                end = begin + param.numel()
                param.data.copy_(flatten_params[begin:end].view(param.size()))
                begin = end

        old_param = torch.cat([param.view(-1) for param in
                self.actor_net.parameters()])
        expected_improve = torch.dot(loss_grad, natural_gradient_tensor)
        for learning_step in [0.,] + [.5 ** j for j in range(10)]:
                # step 0. records the baseline objective; afterwards
                # backtrack from a full natural-gradient step
            new_param = old_param + learning_step * natural_gradient_tensor
            set_actor_net_params(new_param)
            all_pi_tensor = self.actor_net(state_tensor)
            new_pi_tensor = all_pi_tensor.gather(1,
                    action_tensor.unsqueeze(1)).squeeze(1)
            new_pi_tensor = new_pi_tensor.detach()
            surrogate_tensor = (new_pi_tensor / pi_tensor.detach()) * \
                    advantage_tensor  # ratio w.r.t. the pre-update policy
            objective = surrogate_tensor.mean().item()
            if np.isclose(learning_step, 0.):
                old_objective = objective
            else:
                if objective - old_objective > 0.1 * expected_improve * \
                        learning_step:
                    break  # success: keep the new weights
        else:
            set_actor_net_params(old_param)  # no step accepted, roll back

        # update critic
        pred_tensor = self.critic_net(state_tensor)
        critic_loss_tensor = self.critic_loss(pred_tensor, return_tensor)
        self.critic_optimizer.zero_grad()
        critic_loss_tensor.backward()
        self.critic_optimizer.step()


agent = TRPOAgent(env)
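In learn(), the direction x returned by the conjugate gradient is rescaled by sqrt(2 * max_kl / x'Fx), where fx = Fx is the second value returned by conjugate_gradient, so that the quadratic approximation of the KL divergence, 0.5 * d'Fd, equals max_kl at the full step; the line search then backtracks from that step. A small numeric check of this scaling (illustrative only), with an arbitrary positive-definite matrix standing in for the Fisher matrix F:

F = torch.tensor([[2., 0.], [0., 1.]])
g = torch.tensor([1., 1.])
x, fx = conjugate_gradient(lambda v: F @ v, g)  # x approximates F^-1 g
d = torch.sqrt(2 * agent.max_kl / torch.dot(fx, x)) * x
assert torch.isclose(0.5 * torch.dot(d, F @ d),
        torch.tensor(agent.max_kl), atol=1e-6)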
In [6]:
def play_episode(env, agent, seed=None, mode=None, render=False):
    observation, _ = env.reset(seed=seed)
    reward, terminated, truncated = 0., False, False
    agent.reset(mode=mode)
    episode_reward, elapsed_steps = 0., 0
    while True:
        action = agent.step(observation, reward, terminated)
        if render:
            env.render()
        if terminated or truncated:
            break
        observation, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        elapsed_steps += 1
    agent.close()
    return episode_reward, elapsed_steps


logging.info('==== train ====')
episode_rewards = []
for episode in itertools.count():
    episode_reward, elapsed_steps = play_episode(env, agent, seed=episode,
            mode='train')
    episode_rewards.append(episode_reward)
    logging.info('train episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
    if np.mean(episode_rewards[-10:]) > -120:  # stop on 10-episode average
        break
plt.plot(episode_rewards)


logging.info('==== test ====')
episode_rewards = []
for episode in range(100):
    episode_reward, elapsed_steps = play_episode(env, agent)
    episode_rewards.append(episode_reward)
    logging.info('test episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
logging.info('average episode reward = %.2f ± %.2f',
        np.mean(episode_rewards), np.std(episode_rewards))
11:45:19 [INFO] ==== train ====
11:45:19 [INFO] NumExpr defaulting to 8 threads.
11:45:19 [INFO] train episode 0: reward = -500.00, steps = 500
11:45:20 [INFO] train episode 1: reward = -500.00, steps = 500
11:45:20 [INFO] train episode 2: reward = -500.00, steps = 500
11:45:21 [INFO] train episode 3: reward = -500.00, steps = 500
11:45:21 [INFO] train episode 4: reward = -500.00, steps = 500
11:45:22 [INFO] train episode 5: reward = -500.00, steps = 500
11:45:22 [INFO] train episode 6: reward = -371.00, steps = 372
11:45:23 [INFO] train episode 7: reward = -344.00, steps = 345
11:45:23 [INFO] train episode 8: reward = -311.00, steps = 312
11:45:23 [INFO] train episode 9: reward = -286.00, steps = 287
11:45:24 [INFO] train episode 10: reward = -398.00, steps = 399
11:45:24 [INFO] train episode 11: reward = -282.00, steps = 283
11:45:25 [INFO] train episode 12: reward = -454.00, steps = 455
11:45:25 [INFO] train episode 13: reward = -467.00, steps = 468
11:45:25 [INFO] train episode 14: reward = -500.00, steps = 500
11:45:26 [INFO] train episode 15: reward = -369.00, steps = 370
11:45:26 [INFO] train episode 16: reward = -468.00, steps = 469
11:45:27 [INFO] train episode 17: reward = -483.00, steps = 484
11:45:27 [INFO] train episode 18: reward = -366.00, steps = 367
11:45:27 [INFO] train episode 19: reward = -447.00, steps = 448
11:45:28 [INFO] train episode 20: reward = -500.00, steps = 500
11:45:28 [INFO] train episode 21: reward = -330.00, steps = 331
11:45:29 [INFO] train episode 22: reward = -500.00, steps = 500
11:45:29 [INFO] train episode 23: reward = -500.00, steps = 500
11:45:30 [INFO] train episode 24: reward = -500.00, steps = 500
11:45:30 [INFO] train episode 25: reward = -274.00, steps = 275
11:45:30 [INFO] train episode 26: reward = -413.00, steps = 414
11:45:30 [INFO] train episode 27: reward = -232.00, steps = 233
11:45:31 [INFO] train episode 28: reward = -320.00, steps = 321
11:45:31 [INFO] train episode 29: reward = -233.00, steps = 234
11:45:31 [INFO] train episode 30: reward = -183.00, steps = 184
11:45:32 [INFO] train episode 31: reward = -380.00, steps = 381
11:45:32 [INFO] train episode 32: reward = -489.00, steps = 490
11:45:32 [INFO] train episode 33: reward = -500.00, steps = 500
11:45:33 [INFO] train episode 34: reward = -352.00, steps = 353
11:45:33 [INFO] train episode 35: reward = -333.00, steps = 334
11:45:33 [INFO] train episode 36: reward = -244.00, steps = 245
11:45:34 [INFO] train episode 37: reward = -271.00, steps = 272
11:45:34 [INFO] train episode 38: reward = -243.00, steps = 244
11:45:34 [INFO] train episode 39: reward = -368.00, steps = 369
11:45:35 [INFO] train episode 40: reward = -312.00, steps = 313
11:45:35 [INFO] train episode 41: reward = -487.00, steps = 488
11:45:35 [INFO] train episode 42: reward = -289.00, steps = 290
11:45:36 [INFO] train episode 43: reward = -341.00, steps = 342
11:45:36 [INFO] train episode 44: reward = -241.00, steps = 242
11:45:36 [INFO] train episode 45: reward = -289.00, steps = 290
11:45:37 [INFO] train episode 46: reward = -500.00, steps = 500
11:45:37 [INFO] train episode 47: reward = -500.00, steps = 500
11:45:38 [INFO] train episode 48: reward = -139.00, steps = 140
11:45:38 [INFO] train episode 49: reward = -215.00, steps = 216
11:45:38 [INFO] train episode 50: reward = -154.00, steps = 155
11:45:38 [INFO] train episode 51: reward = -111.00, steps = 112
11:45:38 [INFO] train episode 52: reward = -139.00, steps = 140
11:45:38 [INFO] train episode 53: reward = -138.00, steps = 139
11:45:39 [INFO] train episode 54: reward = -136.00, steps = 137
11:45:39 [INFO] train episode 55: reward = -248.00, steps = 249
11:45:39 [INFO] train episode 56: reward = -150.00, steps = 151
11:45:39 [INFO] train episode 57: reward = -298.00, steps = 299
11:45:39 [INFO] train episode 58: reward = -258.00, steps = 259
11:45:40 [INFO] train episode 59: reward = -232.00, steps = 233
11:45:40 [INFO] train episode 60: reward = -500.00, steps = 500
11:45:40 [INFO] train episode 61: reward = -362.00, steps = 363
11:45:41 [INFO] train episode 62: reward = -304.00, steps = 305
11:45:41 [INFO] train episode 63: reward = -380.00, steps = 381
11:45:41 [INFO] train episode 64: reward = -269.00, steps = 270
11:45:42 [INFO] train episode 65: reward = -234.00, steps = 235
11:45:42 [INFO] train episode 66: reward = -334.00, steps = 335
11:45:42 [INFO] train episode 67: reward = -180.00, steps = 181
11:45:42 [INFO] train episode 68: reward = -222.00, steps = 223
11:45:43 [INFO] train episode 69: reward = -195.00, steps = 196
11:45:43 [INFO] train episode 70: reward = -363.00, steps = 364
11:45:43 [INFO] train episode 71: reward = -172.00, steps = 173
11:45:44 [INFO] train episode 72: reward = -500.00, steps = 500
11:45:44 [INFO] train episode 73: reward = -500.00, steps = 500
11:45:45 [INFO] train episode 74: reward = -500.00, steps = 500
11:45:45 [INFO] train episode 75: reward = -500.00, steps = 500
11:45:46 [INFO] train episode 76: reward = -500.00, steps = 500
11:45:46 [INFO] train episode 77: reward = -500.00, steps = 500
11:45:47 [INFO] train episode 78: reward = -366.00, steps = 367
11:45:47 [INFO] train episode 79: reward = -344.00, steps = 345
11:45:48 [INFO] train episode 80: reward = -500.00, steps = 500
11:45:48 [INFO] train episode 81: reward = -172.00, steps = 173
11:45:48 [INFO] train episode 82: reward = -159.00, steps = 160
11:45:48 [INFO] train episode 83: reward = -255.00, steps = 256
11:45:48 [INFO] train episode 84: reward = -160.00, steps = 161
11:45:48 [INFO] train episode 85: reward = -162.00, steps = 163
11:45:49 [INFO] train episode 86: reward = -294.00, steps = 295
11:45:49 [INFO] train episode 87: reward = -222.00, steps = 223
11:45:49 [INFO] train episode 88: reward = -155.00, steps = 156
11:45:49 [INFO] train episode 89: reward = -172.00, steps = 173
11:45:49 [INFO] train episode 90: reward = -134.00, steps = 135
11:45:50 [INFO] train episode 91: reward = -159.00, steps = 160
11:45:50 [INFO] train episode 92: reward = -128.00, steps = 129
11:45:50 [INFO] train episode 93: reward = -147.00, steps = 148
11:45:50 [INFO] train episode 94: reward = -194.00, steps = 195
11:45:50 [INFO] train episode 95: reward = -155.00, steps = 156
11:45:50 [INFO] train episode 96: reward = -173.00, steps = 174
11:45:51 [INFO] train episode 97: reward = -192.00, steps = 193
11:45:51 [INFO] train episode 98: reward = -165.00, steps = 166
11:45:51 [INFO] train episode 99: reward = -182.00, steps = 183
11:45:51 [INFO] train episode 100: reward = -252.00, steps = 253
11:45:51 [INFO] train episode 101: reward = -157.00, steps = 158
11:45:52 [INFO] train episode 102: reward = -172.00, steps = 173
11:45:52 [INFO] train episode 103: reward = -176.00, steps = 177
11:45:52 [INFO] train episode 104: reward = -231.00, steps = 232
11:45:52 [INFO] train episode 105: reward = -209.00, steps = 210
11:45:52 [INFO] train episode 106: reward = -171.00, steps = 172
11:45:53 [INFO] train episode 107: reward = -159.00, steps = 160
11:45:53 [INFO] train episode 108: reward = -238.00, steps = 239
11:45:53 [INFO] train episode 109: reward = -152.00, steps = 153
11:45:53 [INFO] train episode 110: reward = -226.00, steps = 227
11:45:53 [INFO] train episode 111: reward = -210.00, steps = 211
11:45:54 [INFO] train episode 112: reward = -382.00, steps = 383
11:45:54 [INFO] train episode 113: reward = -205.00, steps = 206
11:45:54 [INFO] train episode 114: reward = -230.00, steps = 231
11:45:55 [INFO] train episode 115: reward = -311.00, steps = 312
11:45:55 [INFO] train episode 116: reward = -281.00, steps = 282
11:45:55 [INFO] train episode 117: reward = -310.00, steps = 311
11:45:55 [INFO] train episode 118: reward = -199.00, steps = 200
11:45:55 [INFO] train episode 119: reward = -178.00, steps = 179
11:45:56 [INFO] train episode 120: reward = -236.00, steps = 237
11:45:56 [INFO] train episode 121: reward = -206.00, steps = 207
11:45:56 [INFO] train episode 122: reward = -220.00, steps = 221
11:45:56 [INFO] train episode 123: reward = -195.00, steps = 196
11:45:56 [INFO] train episode 124: reward = -171.00, steps = 172
11:45:57 [INFO] train episode 125: reward = -246.00, steps = 247
11:45:57 [INFO] train episode 126: reward = -200.00, steps = 201
11:45:57 [INFO] train episode 127: reward = -163.00, steps = 164
11:45:57 [INFO] train episode 128: reward = -206.00, steps = 207
11:45:58 [INFO] train episode 129: reward = -177.00, steps = 178
11:45:58 [INFO] train episode 130: reward = -232.00, steps = 233
11:45:58 [INFO] train episode 131: reward = -151.00, steps = 152
11:45:58 [INFO] train episode 132: reward = -288.00, steps = 289
11:45:59 [INFO] train episode 133: reward = -301.00, steps = 302
11:45:59 [INFO] train episode 134: reward = -416.00, steps = 417
11:45:59 [INFO] train episode 135: reward = -304.00, steps = 305
11:46:00 [INFO] train episode 136: reward = -500.00, steps = 500
11:46:00 [INFO] train episode 137: reward = -370.00, steps = 371
11:46:01 [INFO] train episode 138: reward = -456.00, steps = 457
11:46:01 [INFO] train episode 139: reward = -237.00, steps = 238
11:46:01 [INFO] train episode 140: reward = -359.00, steps = 360
11:46:01 [INFO] train episode 141: reward = -227.00, steps = 228
11:46:02 [INFO] train episode 142: reward = -454.00, steps = 455
11:46:02 [INFO] train episode 143: reward = -293.00, steps = 294
11:46:03 [INFO] train episode 144: reward = -337.00, steps = 338
11:46:03 [INFO] train episode 145: reward = -246.00, steps = 247
11:46:03 [INFO] train episode 146: reward = -241.00, steps = 242
11:46:03 [INFO] train episode 147: reward = -294.00, steps = 295
11:46:04 [INFO] train episode 148: reward = -303.00, steps = 304
11:46:04 [INFO] train episode 149: reward = -211.00, steps = 212
11:46:04 [INFO] train episode 150: reward = -191.00, steps = 192
11:46:04 [INFO] train episode 151: reward = -180.00, steps = 181
11:46:04 [INFO] train episode 152: reward = -233.00, steps = 234
11:46:05 [INFO] train episode 153: reward = -282.00, steps = 283
11:46:05 [INFO] train episode 154: reward = -283.00, steps = 284
11:46:05 [INFO] train episode 155: reward = -255.00, steps = 256
11:46:06 [INFO] train episode 156: reward = -356.00, steps = 357
11:46:06 [INFO] train episode 157: reward = -203.00, steps = 204
11:46:06 [INFO] train episode 158: reward = -270.00, steps = 271
11:46:06 [INFO] train episode 159: reward = -361.00, steps = 362
11:46:07 [INFO] train episode 160: reward = -267.00, steps = 268
11:46:07 [INFO] train episode 161: reward = -232.00, steps = 233
11:46:07 [INFO] train episode 162: reward = -186.00, steps = 187
11:46:07 [INFO] train episode 163: reward = -178.00, steps = 179
11:46:08 [INFO] train episode 164: reward = -201.00, steps = 202
11:46:08 [INFO] train episode 165: reward = -253.00, steps = 254
11:46:08 [INFO] train episode 166: reward = -196.00, steps = 197
11:46:09 [INFO] train episode 167: reward = -419.00, steps = 420
11:46:09 [INFO] train episode 168: reward = -208.00, steps = 209
11:46:09 [INFO] train episode 169: reward = -277.00, steps = 278
11:46:09 [INFO] train episode 170: reward = -304.00, steps = 305
11:46:10 [INFO] train episode 171: reward = -224.00, steps = 225
11:46:10 [INFO] train episode 172: reward = -282.00, steps = 283
11:46:10 [INFO] train episode 173: reward = -296.00, steps = 297
11:46:10 [INFO] train episode 174: reward = -211.00, steps = 212
11:46:11 [INFO] train episode 175: reward = -216.00, steps = 217
11:46:11 [INFO] train episode 176: reward = -192.00, steps = 193
11:46:11 [INFO] train episode 177: reward = -248.00, steps = 249
11:46:12 [INFO] train episode 178: reward = -409.00, steps = 410
11:46:12 [INFO] train episode 179: reward = -231.00, steps = 232
11:46:12 [INFO] train episode 180: reward = -171.00, steps = 172
11:46:12 [INFO] train episode 181: reward = -209.00, steps = 210
11:46:12 [INFO] train episode 182: reward = -147.00, steps = 148
11:46:12 [INFO] train episode 183: reward = -217.00, steps = 218
11:46:13 [INFO] train episode 184: reward = -218.00, steps = 219
11:46:13 [INFO] train episode 185: reward = -214.00, steps = 215
11:46:13 [INFO] train episode 186: reward = -236.00, steps = 237
11:46:13 [INFO] train episode 187: reward = -236.00, steps = 237
11:46:14 [INFO] train episode 188: reward = -236.00, steps = 237
11:46:14 [INFO] train episode 189: reward = -203.00, steps = 204
11:46:14 [INFO] train episode 190: reward = -162.00, steps = 163
11:46:14 [INFO] train episode 191: reward = -180.00, steps = 181
11:46:14 [INFO] train episode 192: reward = -190.00, steps = 191
11:46:14 [INFO] train episode 193: reward = -168.00, steps = 169
11:46:15 [INFO] train episode 194: reward = -258.00, steps = 259
11:46:15 [INFO] train episode 195: reward = -179.00, steps = 180
11:46:15 [INFO] train episode 196: reward = -169.00, steps = 170
11:46:15 [INFO] train episode 197: reward = -147.00, steps = 148
11:46:15 [INFO] train episode 198: reward = -167.00, steps = 168
11:46:16 [INFO] train episode 199: reward = -226.00, steps = 227
11:46:16 [INFO] train episode 200: reward = -158.00, steps = 159
11:46:16 [INFO] train episode 201: reward = -137.00, steps = 138
11:46:16 [INFO] train episode 202: reward = -177.00, steps = 178
11:46:16 [INFO] train episode 203: reward = -126.00, steps = 127
11:46:17 [INFO] train episode 204: reward = -150.00, steps = 151
11:46:17 [INFO] train episode 205: reward = -132.00, steps = 133
11:46:17 [INFO] train episode 206: reward = -175.00, steps = 176
11:46:17 [INFO] train episode 207: reward = -171.00, steps = 172
11:46:17 [INFO] train episode 208: reward = -147.00, steps = 148
11:46:17 [INFO] train episode 209: reward = -160.00, steps = 161
11:46:18 [INFO] train episode 210: reward = -176.00, steps = 177
11:46:18 [INFO] train episode 211: reward = -153.00, steps = 154
11:46:18 [INFO] train episode 212: reward = -136.00, steps = 137
11:46:18 [INFO] train episode 213: reward = -150.00, steps = 151
11:46:18 [INFO] train episode 214: reward = -138.00, steps = 139
11:46:18 [INFO] train episode 215: reward = -137.00, steps = 138
11:46:19 [INFO] train episode 216: reward = -119.00, steps = 120
11:46:19 [INFO] train episode 217: reward = -127.00, steps = 128
11:46:19 [INFO] train episode 218: reward = -124.00, steps = 125
11:46:19 [INFO] train episode 219: reward = -164.00, steps = 165
11:46:19 [INFO] train episode 220: reward = -138.00, steps = 139
11:46:19 [INFO] train episode 221: reward = -136.00, steps = 137
11:46:19 [INFO] train episode 222: reward = -106.00, steps = 107
11:46:20 [INFO] train episode 223: reward = -116.00, steps = 117
11:46:20 [INFO] train episode 224: reward = -123.00, steps = 124
11:46:20 [INFO] train episode 225: reward = -128.00, steps = 129
11:46:20 [INFO] train episode 226: reward = -116.00, steps = 117
11:46:20 [INFO] train episode 227: reward = -145.00, steps = 146
11:46:20 [INFO] train episode 228: reward = -160.00, steps = 161
11:46:20 [INFO] train episode 229: reward = -131.00, steps = 132
11:46:21 [INFO] train episode 230: reward = -138.00, steps = 139
11:46:21 [INFO] train episode 231: reward = -104.00, steps = 105
11:46:21 [INFO] train episode 232: reward = -118.00, steps = 119
11:46:21 [INFO] train episode 233: reward = -103.00, steps = 104
11:46:21 [INFO] train episode 234: reward = -112.00, steps = 113
11:46:21 [INFO] train episode 235: reward = -124.00, steps = 125
11:46:21 [INFO] train episode 236: reward = -141.00, steps = 142
11:46:22 [INFO] train episode 237: reward = -141.00, steps = 142
11:46:22 [INFO] train episode 238: reward = -141.00, steps = 142
11:46:22 [INFO] train episode 239: reward = -127.00, steps = 128
11:46:22 [INFO] train episode 240: reward = -151.00, steps = 152
11:46:22 [INFO] train episode 241: reward = -107.00, steps = 108
11:46:22 [INFO] train episode 242: reward = -107.00, steps = 108
11:46:22 [INFO] train episode 243: reward = -131.00, steps = 132
11:46:23 [INFO] train episode 244: reward = -113.00, steps = 114
11:46:23 [INFO] train episode 245: reward = -140.00, steps = 141
11:46:23 [INFO] train episode 246: reward = -124.00, steps = 125
11:46:23 [INFO] train episode 247: reward = -143.00, steps = 144
11:46:23 [INFO] train episode 248: reward = -123.00, steps = 124
11:46:23 [INFO] train episode 249: reward = -126.00, steps = 127
11:46:23 [INFO] train episode 250: reward = -109.00, steps = 110
11:46:24 [INFO] train episode 251: reward = -117.00, steps = 118
11:46:24 [INFO] train episode 252: reward = -111.00, steps = 112
11:46:24 [INFO] train episode 253: reward = -124.00, steps = 125
11:46:24 [INFO] train episode 254: reward = -115.00, steps = 116
11:46:24 [INFO] train episode 255: reward = -106.00, steps = 107
11:46:24 [INFO] ==== test ====
11:46:24 [INFO] test episode 0: reward = -100.00, steps = 101
11:46:24 [INFO] test episode 1: reward = -108.00, steps = 109
11:46:24 [INFO] test episode 2: reward = -192.00, steps = 193
11:46:24 [INFO] test episode 3: reward = -128.00, steps = 129
11:46:25 [INFO] test episode 4: reward = -139.00, steps = 140
11:46:25 [INFO] test episode 5: reward = -110.00, steps = 111
11:46:25 [INFO] test episode 6: reward = -119.00, steps = 120
11:46:25 [INFO] test episode 7: reward = -124.00, steps = 125
11:46:25 [INFO] test episode 8: reward = -155.00, steps = 156
11:46:25 [INFO] test episode 9: reward = -124.00, steps = 125
11:46:25 [INFO] test episode 10: reward = -129.00, steps = 130
11:46:25 [INFO] test episode 11: reward = -127.00, steps = 128
11:46:26 [INFO] test episode 12: reward = -124.00, steps = 125
11:46:26 [INFO] test episode 13: reward = -135.00, steps = 136
11:46:26 [INFO] test episode 14: reward = -118.00, steps = 119
11:46:26 [INFO] test episode 15: reward = -150.00, steps = 151
11:46:26 [INFO] test episode 16: reward = -141.00, steps = 142
11:46:26 [INFO] test episode 17: reward = -114.00, steps = 115
11:46:26 [INFO] test episode 18: reward = -112.00, steps = 113
11:46:26 [INFO] test episode 19: reward = -126.00, steps = 127
11:46:27 [INFO] test episode 20: reward = -129.00, steps = 130
11:46:27 [INFO] test episode 21: reward = -106.00, steps = 107
11:46:27 [INFO] test episode 22: reward = -194.00, steps = 195
11:46:27 [INFO] test episode 23: reward = -114.00, steps = 115
11:46:27 [INFO] test episode 24: reward = -106.00, steps = 107
11:46:27 [INFO] test episode 25: reward = -110.00, steps = 111
11:46:27 [INFO] test episode 26: reward = -136.00, steps = 137
11:46:27 [INFO] test episode 27: reward = -140.00, steps = 141
11:46:27 [INFO] test episode 28: reward = -104.00, steps = 105
11:46:27 [INFO] test episode 29: reward = -103.00, steps = 104
11:46:28 [INFO] test episode 30: reward = -123.00, steps = 124
11:46:28 [INFO] test episode 31: reward = -119.00, steps = 120
11:46:28 [INFO] test episode 32: reward = -131.00, steps = 132
11:46:28 [INFO] test episode 33: reward = -130.00, steps = 131
11:46:28 [INFO] test episode 34: reward = -143.00, steps = 144
11:46:28 [INFO] test episode 35: reward = -114.00, steps = 115
11:46:28 [INFO] test episode 36: reward = -125.00, steps = 126
11:46:28 [INFO] test episode 37: reward = -123.00, steps = 124
11:46:28 [INFO] test episode 38: reward = -102.00, steps = 103
11:46:29 [INFO] test episode 39: reward = -119.00, steps = 120
11:46:29 [INFO] test episode 40: reward = -114.00, steps = 115
11:46:29 [INFO] test episode 41: reward = -127.00, steps = 128
11:46:29 [INFO] test episode 42: reward = -141.00, steps = 142
11:46:29 [INFO] test episode 43: reward = -109.00, steps = 110
11:46:29 [INFO] test episode 44: reward = -93.00, steps = 94
11:46:29 [INFO] test episode 45: reward = -120.00, steps = 121
11:46:29 [INFO] test episode 46: reward = -151.00, steps = 152
11:46:30 [INFO] test episode 47: reward = -160.00, steps = 161
11:46:30 [INFO] test episode 48: reward = -167.00, steps = 168
11:46:30 [INFO] test episode 49: reward = -123.00, steps = 124
11:46:30 [INFO] test episode 50: reward = -122.00, steps = 123
11:46:30 [INFO] test episode 51: reward = -135.00, steps = 136
11:46:30 [INFO] test episode 52: reward = -140.00, steps = 141
11:46:30 [INFO] test episode 53: reward = -145.00, steps = 146
11:46:30 [INFO] test episode 54: reward = -155.00, steps = 156
11:46:30 [INFO] test episode 55: reward = -110.00, steps = 111
11:46:31 [INFO] test episode 56: reward = -156.00, steps = 157
11:46:31 [INFO] test episode 57: reward = -140.00, steps = 141
11:46:31 [INFO] test episode 58: reward = -125.00, steps = 126
11:46:31 [INFO] test episode 59: reward = -102.00, steps = 103
11:46:31 [INFO] test episode 60: reward = -119.00, steps = 120
11:46:31 [INFO] test episode 61: reward = -127.00, steps = 128
11:46:31 [INFO] test episode 62: reward = -107.00, steps = 108
11:46:31 [INFO] test episode 63: reward = -146.00, steps = 147
11:46:32 [INFO] test episode 64: reward = -187.00, steps = 188
11:46:32 [INFO] test episode 65: reward = -101.00, steps = 102
11:46:32 [INFO] test episode 66: reward = -115.00, steps = 116
11:46:32 [INFO] test episode 67: reward = -159.00, steps = 160
11:46:32 [INFO] test episode 68: reward = -122.00, steps = 123
11:46:32 [INFO] test episode 69: reward = -122.00, steps = 123
11:46:32 [INFO] test episode 70: reward = -103.00, steps = 104
11:46:32 [INFO] test episode 71: reward = -126.00, steps = 127
11:46:32 [INFO] test episode 72: reward = -170.00, steps = 171
11:46:33 [INFO] test episode 73: reward = -119.00, steps = 120
11:46:33 [INFO] test episode 74: reward = -139.00, steps = 140
11:46:33 [INFO] test episode 75: reward = -126.00, steps = 127
11:46:33 [INFO] test episode 76: reward = -115.00, steps = 116
11:46:33 [INFO] test episode 77: reward = -141.00, steps = 142
11:46:33 [INFO] test episode 78: reward = -142.00, steps = 143
11:46:33 [INFO] test episode 79: reward = -124.00, steps = 125
11:46:33 [INFO] test episode 80: reward = -175.00, steps = 176
11:46:34 [INFO] test episode 81: reward = -113.00, steps = 114
11:46:34 [INFO] test episode 82: reward = -107.00, steps = 108
11:46:34 [INFO] test episode 83: reward = -119.00, steps = 120
11:46:34 [INFO] test episode 84: reward = -110.00, steps = 111
11:46:34 [INFO] test episode 85: reward = -123.00, steps = 124
11:46:34 [INFO] test episode 86: reward = -112.00, steps = 113
11:46:34 [INFO] test episode 87: reward = -166.00, steps = 167
11:46:34 [INFO] test episode 88: reward = -196.00, steps = 197
11:46:34 [INFO] test episode 89: reward = -134.00, steps = 135
11:46:35 [INFO] test episode 90: reward = -138.00, steps = 139
11:46:35 [INFO] test episode 91: reward = -120.00, steps = 121
11:46:35 [INFO] test episode 92: reward = -136.00, steps = 137
11:46:35 [INFO] test episode 93: reward = -114.00, steps = 115
11:46:35 [INFO] test episode 94: reward = -121.00, steps = 122
11:46:35 [INFO] test episode 95: reward = -119.00, steps = 120
11:46:35 [INFO] test episode 96: reward = -114.00, steps = 115
11:46:35 [INFO] test episode 97: reward = -118.00, steps = 119
11:46:35 [INFO] test episode 98: reward = -117.00, steps = 118
11:46:35 [INFO] test episode 99: reward = -126.00, steps = 127
11:46:35 [INFO] average episode reward = -128.69 ± 21.15
In [7]:
env.close()