stable-baselines3/tests/test_gae.py
Antonin RAFFIN 40e0b9d2c8
Add Gymnasium support (#1327)
* Fix failing set_env test

* Fix test failing due to deprecation of env.seed

* Adjust mean reward threshold in failing test

* Fix her test failing due to rng

* Change seed and revert reward threshold to 90

* Pin gym version

* Make VecEnv compatible with gym seeding change

* Revert change to VecEnv reset signature

* Change subprocenv seed cmd to call reset instead

* Fix type check

* Add backward compat

* Add `compat_gym_seed` helper

* Add goal env checks in env_checker

* Add docs on HER requirements for envs

* Capture user warning in test with inverted box space

* Update ale-py version

* Fix randint

* Allow noop_max to be zero

* Update changelog

* Update docker image

* Update doc conda env and dockerfile

* Custom envs should not have any warnings

* Fix test for numpy >= 1.21

* Add check for vectorized compute reward

* Bump to gym 0.24

* Fix gym default step docstring

* Test downgrading gym

* Revert "Test downgrading gym"

This reverts commit 0072b77156c006ada8a1d6e26ce347ed85a83eeb.

* Fix protobuf error

* Fix in dependencies

* Fix protobuf dep

* Use newest version of cartpole

* Update gym

* Fix warning

* Loosen required scipy version

* Scipy no longer needed

* Try gym 0.25

* Silence warnings from gym

* Filter warnings during tests

* Update doc

* Update requirements

* Add gym 26 compat in vec env

* Fixes in envs and tests for gym 0.26+

* Enforce gym 0.26 api

* format

* Fix formatting

* Fix dependencies

* Fix syntax

* Cleanup doc and warnings

* Faster tests

* Higher budget for HER perf test (revert prev change)

* Fixes and update doc

* Fix doc build

* Fix breaking change

* Fixes for rendering

* Rename variables in monitor

* update render method for gym 0.26 API

backwards compatible (mode argument is allowed) while using the gym 0.26 API (render mode is determined at environment creation)

* update tests and docs to new gym render API

* undo removal of render modes metadata check

* set rgb_array as default render mode for gym.make

* undo changes & raise warning if not 'rgb_array'

* Fix type check

* Remove recursion and fix type checking

* Remove hacks for protobuf and gym 0.24

* Fix type annotations

* reuse existing render_mode attribute

* return tiled images for 'human' render mode

* Allow to use opencv for human render, fix typos

* Add warning when using non-zero start with Discrete (fixes #1197)

* Fix type checking

* Bug fixes and handle more cases

* Throw proper warnings

* Update test

* Fix new metadata name

* Ignore numpy warnings

* Fixes in vec recorder

* Global ignore

* Filter local warning too

* Monkey patch not needed for gym 26

* Add doc of VecEnv vs Gym API

* Add render test

* Fix return type

* Update VecEnv vs Gym API doc

* Fix for custom render mode

* Fix return type

* Fix type checking

* check test env test_buffer

* skip render check

* check env test_dict_env

* test_env test_gae

* check envs in remaining tests

* Update tests

* Add warning for Discrete action space with non-zero (#1295)

* Fix atari annotation

* ignore get_action_meanings [attr-defined]

* Fix mypy issues

* Add patch for gym/gymnasium transition

* Switch to gymnasium

* Rely on signature instead of version

* More patches

* Type ignore because of https://github.com/Farama-Foundation/Gymnasium/pull/39

* Fix doc build

* Fix pytype errors

* Fix atari requirement

* Update env checker due to change in dtype for Discrete

* Fix type hint

* Convert spaces for saved models

* Ignore pytype

* Remove gitlab CI

* Disable pytype for convert space

* Fix undefined info

* Fix undefined info

* Upgrade shimmy

* Fix wrappers type annotation (need PR from Gymnasium)

* Fix gymnasium dependency

* Fix dependency declaration

* Cap pygame version for python 3.7

* Point to master branch (v0.28.0)

* Fix: use main not master branch

* Rename done to terminated

* Fix pygame dependency for python 3.7

* Rename gym to gymnasium

* Update Gymnasium

* Fix test

* Fix tests

* Forks don't have access to private variables

* Fix linter warnings

* Update read the doc env

* Fix env checker for GoalEnv

* Fix import

* Update env checker (more info) and fix dtype

* Use micromamab for Docker

* Update dependencies

* Clarify VecEnv doc

* Fix Gymnasium version

* Copy file only after mamba install

* [ci skip] Update docker doc

* Polish code

* Reformat

* Remove deprecated features

* Ignore warning

* Update doc

* Update examples and changelog

* Fix type annotation bundle (SAC, TD3, A2C, PPO, base class) (#1436)

* Fix SAC type hints, improve DQN ones

* Fix A2C and TD3 type hints

* Fix PPO type hints

* Fix on-policy type hints

* Fix base class type annotation, do not use defaults

* Update version

* Disable mypy for python 3.7

* Rename Gym26StepReturn

* Update continuous critic type annotation

* Fix pytype complain

---------

Co-authored-by: Carlos Luis <carlos.luisgonc@gmail.com>
Co-authored-by: Quentin Gallouédec <45557362+qgallouedec@users.noreply.github.com>
Co-authored-by: Thomas Lips <37955681+tlpss@users.noreply.github.com>
Co-authored-by: tlips <thomas.lips@ugent.be>
Co-authored-by: tlpss <thomas17.lips@gmail.com>
Co-authored-by: Quentin GALLOUÉDEC <gallouedec.quentin@gmail.com>
2023-04-14 13:13:59 +02:00

190 lines
6.6 KiB
Python

from typing import Dict, Optional
import gymnasium as gym
import numpy as np
import pytest
import torch as th
from gymnasium import spaces
from stable_baselines3 import A2C, PPO, SAC
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.policies import ActorCriticPolicy
class CustomEnv(gym.Env):
    """Continuous-action env whose episodes last exactly ``max_steps`` steps.

    A single reward of 1.0 is paid on the terminal step and zero everywhere
    else, which makes GAE advantages/returns analytically computable.
    """

    def __init__(self, max_steps=8):
        super().__init__()
        self.observation_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)
        self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)
        self.max_steps = max_steps
        self.n_steps = 0

    def seed(self, seed):
        # Legacy-style helper: only the observation-space RNG is seeded.
        self.observation_space.seed(seed)

    def reset(self, *, seed: Optional[int] = None, options: Optional[Dict] = None):
        if seed is not None:
            self.observation_space.seed(seed)
        self.n_steps = 0
        return self.observation_space.sample(), {}

    def step(self, action):
        self.n_steps += 1
        terminated = self.n_steps >= self.max_steps
        reward = 1.0 if terminated else 0.0
        # To simplify GAE computation checks, this env never truncates;
        # truncations are exercised by InfiniteHorizonEnv instead.
        return self.observation_space.sample(), reward, terminated, False, {}
class InfiniteHorizonEnv(gym.Env):
    """Never-ending env that cycles through ``n_states`` discrete states.

    Every step pays a reward of 1.0 and neither terminates nor truncates,
    so episode ends only come from an outer ``TimeLimit`` wrapper.
    """

    def __init__(self, n_states=4):
        super().__init__()
        self.n_states = n_states
        self.observation_space = spaces.Discrete(n_states)
        self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)
        self.current_state = 0

    def reset(self, *, seed: Optional[int] = None, options: Optional[Dict] = None):
        if seed is not None:
            super().reset(seed=seed)
        self.current_state = 0
        return self.current_state, {}

    def step(self, action):
        # Deterministically advance through the state cycle.
        next_state = (self.current_state + 1) % self.n_states
        self.current_state = next_state
        return next_state, 1.0, False, False, {}
class CheckGAECallback(BaseCallback):
    """Compare the GAE advantages/returns stored in the rollout buffer
    against a closed-form computation.

    Relies on ``CustomPolicy`` predicting the same constant value for every
    state and on ``CustomEnv`` paying a single reward of 1.0 on the last
    step of each episode, which makes the lambda-return tractable by hand.
    """

    def __init__(self):
        super().__init__(verbose=0)

    def _on_rollout_end(self):
        buffer = self.model.rollout_buffer
        rollout_size = buffer.size()
        # Episode length of the (single) wrapped CustomEnv
        max_steps = self.training_env.envs[0].max_steps
        gamma = self.model.gamma
        gae_lambda = self.model.gae_lambda
        # Constant value predicted by CustomPolicy for every observation
        value = self.model.policy.constant_value
        # We know in advance that the agent will get a single
        # reward at the very last timestep of the episode,
        # so we can pre-compute the lambda-return and advantage
        deltas = np.zeros((rollout_size,))
        advantages = np.zeros((rollout_size,))
        # Reward should be 1.0 on final timestep of episode
        rewards = np.zeros((rollout_size,))
        rewards[max_steps - 1 :: max_steps] = 1.0
        # Note that these are episode starts (+1 timestep from done)
        episode_starts = np.zeros((rollout_size,))
        episode_starts[::max_steps] = 1.0
        # Final step is always terminal (next would episode_start = 1)
        deltas[-1] = rewards[-1] - value
        advantages[-1] = deltas[-1]
        # Backward GAE recursion: A_t = delta_t + gamma * lambda * A_{t+1},
        # masked so the recursion does not propagate across episode boundaries.
        for n in reversed(range(rollout_size - 1)):
            # Values are constants
            episode_start_mask = 1.0 - episode_starts[n + 1]
            deltas[n] = rewards[n] + gamma * value * episode_start_mask - value
            advantages[n] = deltas[n] + gamma * gae_lambda * advantages[n + 1] * episode_start_mask
        # TD(lambda) estimate, see Github PR #375
        lambda_returns = advantages + value
        assert np.allclose(buffer.advantages.flatten(), advantages)
        assert np.allclose(buffer.returns.flatten(), lambda_returns)

    def _on_step(self):
        # Required by BaseCallback; nothing to check per environment step.
        return True
class CustomPolicy(ActorCriticPolicy):
    """Custom Policy with a constant value function"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Value predicted for every observation; tests mutate this attribute
        # directly to change the critic output between learning phases.
        self.constant_value = 0.0

    def forward(self, obs, deterministic=False):
        actions, values, log_prob = super().forward(obs, deterministic)
        # Replace the learned critic output with the constant,
        # preserving the original tensor's shape, dtype and device.
        constant_values = th.full_like(values, self.constant_value)
        return actions, constant_values, log_prob
@pytest.mark.parametrize("env_cls", [CustomEnv, InfiniteHorizonEnv])
def test_env(env_cls):
    """Sanity-check that the helper envs comply with the Gymnasium API."""
    env = env_cls()
    # These dummy envs do not implement rendering, so skip that check.
    check_env(env, skip_render_check=True)
@pytest.mark.parametrize("model_class", [A2C, PPO])
@pytest.mark.parametrize("gae_lambda", [1.0, 0.9])
@pytest.mark.parametrize("gamma", [1.0, 0.99])
@pytest.mark.parametrize("num_episodes", [1, 3])
def test_gae_computation(model_class, gae_lambda, gamma, num_episodes):
    """Check that the rollout buffer's GAE matches the analytic computation
    performed by ``CheckGAECallback``, for one and several episodes per rollout.
    """
    episode_length = 64
    rollout_size = episode_length * num_episodes
    model = model_class(
        CustomPolicy,
        CustomEnv(max_steps=episode_length),
        seed=1,
        gamma=gamma,
        n_steps=rollout_size,
        gae_lambda=gae_lambda,
    )
    model.learn(rollout_size, callback=CheckGAECallback())
    # With a non-zero constant value, advantages and returns now differ,
    # so both quantities are checked independently.
    model.policy.constant_value = 1.0
    model.learn(rollout_size, callback=CheckGAECallback())
@pytest.mark.parametrize("model_class", [A2C, SAC])
@pytest.mark.parametrize("handle_timeout_termination", [False, True])
def test_infinite_horizon(model_class, handle_timeout_termination):
    """Check value estimation on an infinite-horizon task cut by a TimeLimit.

    With proper timeout handling (bootstrapping on truncation), the learned
    value of the initial state should approach the true infinite-horizon
    value ``1 / (1 - gamma)``; without it, the estimate should be off.
    """
    # A2C always handles timeouts, so the "wrong estimation" branch cannot
    # be exercised for it: skip explicitly instead of silently passing.
    if model_class == A2C and not handle_timeout_termination:
        pytest.skip("A2C always handles timeouts")
    max_steps = 8
    gamma = 0.98
    env = gym.wrappers.TimeLimit(InfiniteHorizonEnv(n_states=4), max_steps)
    if model_class == SAC:
        policy_kwargs = dict(net_arch=[64], n_critics=1)
        # High tau/lr to speed up convergence in this tiny budget
        kwargs = dict(
            replay_buffer_kwargs=dict(handle_timeout_termination=handle_timeout_termination),
            tau=0.5,
            learning_rate=0.005,
        )
    else:
        policy_kwargs = dict(net_arch=[64])
        kwargs = dict(learning_rate=0.002)
    model = model_class("MlpPolicy", env, gamma=gamma, seed=1, policy_kwargs=policy_kwargs, **kwargs)
    model.learn(1500)
    # Value of the initial state (state 0)
    obs_tensor = model.policy.obs_to_tensor(0)[0]
    if model_class == A2C:
        value = model.policy.predict_values(obs_tensor).item()
    else:
        # SAC: evaluate the critic at the actor's chosen action
        value = model.critic(obs_tensor, model.actor(obs_tensor))[0].item()
    # True value (geometric series with a reward of one at each step)
    infinite_horizon_value = 1 / (1 - gamma)
    if handle_timeout_termination:
        # true value +/- 1
        assert abs(infinite_horizon_value - value) < 1.0
    else:
        # wrong estimation
        assert abs(infinite_horizon_value - value) > 1.0