from typing import Tuple, Union

import gymnasium as gym
import numpy as np
import pytest
import torch as th
import torch.nn as nn

from stable_baselines3 import A2C, DQN, PPO, SAC, TD3
from stable_baselines3.common.preprocessing import get_flattened_obs_dim
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor

MODEL_LIST = [
    PPO,
    A2C,
    TD3,
    SAC,
    DQN,
]

class FlattenBatchNormDropoutExtractor(BaseFeaturesExtractor):
    """
    Feature extractor that flattens the input and applies batch normalization and dropout.
    Used as a placeholder when feature extraction is not needed.

    :param observation_space:
    """

    def __init__(self, observation_space: gym.Space):
        super().__init__(
            observation_space,
            get_flattened_obs_dim(observation_space),
        )
        self.flatten = nn.Flatten()
        self.batch_norm = nn.BatchNorm1d(self._features_dim)
        self.dropout = nn.Dropout(0.5)

    def forward(self, observations: th.Tensor) -> th.Tensor:
        result = self.flatten(observations)
        result = self.batch_norm(result)
        result = self.dropout(result)
        return result

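
# Why this extractor works as a probe: nn.BatchNorm1d updates its running
# statistics (`running_mean`, `running_var`) only while the module is in
# training mode, and nn.Dropout only zeroes activations in training mode.
# Any drift in `running_mean` therefore reveals that a forward pass happened
# with the policy in training mode. A minimal standalone sketch of the idea
# (illustration only, not used by the tests below):
#
#     bn = nn.BatchNorm1d(3)
#     x = th.ones(8, 3)
#     bn.train()
#     bn(x)  # running_mean moves towards the batch mean
#     frozen = bn.running_mean.clone()
#     bn.eval()
#     bn(x)  # running_mean is left untouched in eval mode
#     assert th.isclose(frozen, bn.running_mean).all()
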
def clone_batch_norm_stats(batch_norm: nn.BatchNorm1d) -> Tuple[th.Tensor, th.Tensor]:
    """
    Clone the bias and running mean from the given batch norm layer.

    :param batch_norm:
    :return: the bias and running mean
    """
    return batch_norm.bias.clone(), batch_norm.running_mean.clone()

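
# Note: the `.clone()` calls in the helper above are essential. `bias` and
# `running_mean` are live tensors owned by the layer; without cloning, the
# "before" snapshots would alias the layer's storage and silently track any
# later update, making the before/after comparisons in the tests vacuous.
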
def clone_dqn_batch_norm_stats(model: DQN) -> Tuple[th.Tensor, th.Tensor, th.Tensor, th.Tensor]:
    """
    Clone the bias and running mean from the Q-network and target network.

    :param model:
    :return: the bias and running mean from the Q-network and target network
    """
    q_net_batch_norm = model.policy.q_net.features_extractor.batch_norm
    q_net_bias, q_net_running_mean = clone_batch_norm_stats(q_net_batch_norm)

    q_net_target_batch_norm = model.policy.q_net_target.features_extractor.batch_norm
    q_net_target_bias, q_net_target_running_mean = clone_batch_norm_stats(q_net_target_batch_norm)

    return q_net_bias, q_net_running_mean, q_net_target_bias, q_net_target_running_mean

def clone_td3_batch_norm_stats(
    model: TD3,
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor]:
    """
    Clone the bias and running mean from the actor and critic networks and actor-target and critic-target networks.

    :param model:
    :return: the bias and running mean from the actor and critic networks and actor-target and critic-target networks
    """
    actor_batch_norm = model.actor.features_extractor.batch_norm
    actor_bias, actor_running_mean = clone_batch_norm_stats(actor_batch_norm)

    critic_batch_norm = model.critic.features_extractor.batch_norm
    critic_bias, critic_running_mean = clone_batch_norm_stats(critic_batch_norm)

    actor_target_batch_norm = model.actor_target.features_extractor.batch_norm
    actor_target_bias, actor_target_running_mean = clone_batch_norm_stats(actor_target_batch_norm)

    critic_target_batch_norm = model.critic_target.features_extractor.batch_norm
    critic_target_bias, critic_target_running_mean = clone_batch_norm_stats(critic_target_batch_norm)

    return (
        actor_bias,
        actor_running_mean,
        critic_bias,
        critic_running_mean,
        actor_target_bias,
        actor_target_running_mean,
        critic_target_bias,
        critic_target_running_mean,
    )

def clone_sac_batch_norm_stats(
    model: SAC,
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor]:
    """
    Clone the bias and running mean from the actor and critic networks and critic-target networks.

    :param model:
    :return: the bias and running mean from the actor and critic networks and critic-target networks
    """
    actor_batch_norm = model.actor.features_extractor.batch_norm
    actor_bias, actor_running_mean = clone_batch_norm_stats(actor_batch_norm)

    critic_batch_norm = model.critic.features_extractor.batch_norm
    critic_bias, critic_running_mean = clone_batch_norm_stats(critic_batch_norm)

    critic_target_batch_norm = model.critic_target.features_extractor.batch_norm
    critic_target_bias, critic_target_running_mean = clone_batch_norm_stats(critic_target_batch_norm)

    return (actor_bias, actor_running_mean, critic_bias, critic_running_mean, critic_target_bias, critic_target_running_mean)

def clone_on_policy_batch_norm(model: Union[A2C, PPO]) -> Tuple[th.Tensor, th.Tensor]:
    return clone_batch_norm_stats(model.policy.features_extractor.batch_norm)

CLONE_HELPERS = {
    A2C: clone_on_policy_batch_norm,
    DQN: clone_dqn_batch_norm_stats,
    SAC: clone_sac_batch_norm_stats,
    TD3: clone_td3_batch_norm_stats,
    PPO: clone_on_policy_batch_norm,
}

def test_dqn_train_with_batch_norm():
    model = DQN(
        "MlpPolicy",
        "CartPole-v1",
        policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
        learning_starts=0,
        seed=1,
        tau=0.0,  # do not clone the target
        target_update_interval=100,  # Copy the stats to the target
    )

    (
        q_net_bias_before,
        q_net_running_mean_before,
        q_net_target_bias_before,
        q_net_target_running_mean_before,
    ) = clone_dqn_batch_norm_stats(model)

    model.learn(total_timesteps=200)
    # Force stats copy
    model.target_update_interval = 1
    model._on_step()

    (
        q_net_bias_after,
        q_net_running_mean_after,
        q_net_target_bias_after,
        q_net_target_running_mean_after,
    ) = clone_dqn_batch_norm_stats(model)

    # `~tensor.all()` asserts that at least one element changed
    assert ~th.isclose(q_net_bias_before, q_net_bias_after).all()
    assert ~th.isclose(q_net_running_mean_before, q_net_running_mean_after).all()

    # No weight update
    assert th.isclose(q_net_bias_before, q_net_target_bias_after).all()
    assert th.isclose(q_net_target_bias_before, q_net_target_bias_after).all()
    # Running stat should be copied even when tau=0
    assert th.isclose(q_net_running_mean_before, q_net_target_running_mean_before).all()
    assert th.isclose(q_net_running_mean_after, q_net_target_running_mean_after).all()

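
# Background for the assertions above (based on SB3's DQN implementation at
# the time of writing): `DQN._on_step()` blends online weights into the
# target via `polyak_update(params, target_params, tau)`, but copies the
# batch-norm running statistics with a separate hard update, roughly
# `polyak_update(batch_norm_stats, batch_norm_stats_target, 1.0)`. That is
# why, with tau=0, the target's weights stay frozen while its running
# statistics still track the online network.
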
def test_td3_train_with_batch_norm():
    model = TD3(
        "MlpPolicy",
        "Pendulum-v1",
        policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
        learning_starts=0,
        tau=0,  # do not copy the target
        seed=1,
    )

    (
        actor_bias_before,
        actor_running_mean_before,
        critic_bias_before,
        critic_running_mean_before,
        actor_target_bias_before,
        actor_target_running_mean_before,
        critic_target_bias_before,
        critic_target_running_mean_before,
    ) = clone_td3_batch_norm_stats(model)

    model.learn(total_timesteps=200)

    (
        actor_bias_after,
        actor_running_mean_after,
        critic_bias_after,
        critic_running_mean_after,
        actor_target_bias_after,
        actor_target_running_mean_after,
        critic_target_bias_after,
        critic_target_running_mean_after,
    ) = clone_td3_batch_norm_stats(model)

    assert ~th.isclose(actor_bias_before, actor_bias_after).all()
    assert ~th.isclose(actor_running_mean_before, actor_running_mean_after).all()

    assert ~th.isclose(critic_bias_before, critic_bias_after).all()
    assert ~th.isclose(critic_running_mean_before, critic_running_mean_after).all()

    assert th.isclose(actor_target_bias_before, actor_target_bias_after).all()
    # Running stat should be copied even when tau=0
    assert th.isclose(actor_running_mean_after, actor_target_running_mean_after).all()

    assert th.isclose(critic_target_bias_before, critic_target_bias_after).all()
    # Running stat should be copied even when tau=0
    assert th.isclose(critic_running_mean_after, critic_target_running_mean_after).all()

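
# TD3 (and SAC below) perform the analogous sync inside `train()`: weights
# use the configured tau, while batch-norm running statistics are hard-copied
# to the target networks, so the tau=0 assertions mirror the DQN test above.
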
def test_sac_train_with_batch_norm():
    model = SAC(
        "MlpPolicy",
        "Pendulum-v1",
        policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
        learning_starts=0,
        tau=0,  # do not copy the target
        seed=1,
    )

    (
        actor_bias_before,
        actor_running_mean_before,
        critic_bias_before,
        critic_running_mean_before,
        critic_target_bias_before,
        critic_target_running_mean_before,
    ) = clone_sac_batch_norm_stats(model)

    model.learn(total_timesteps=200)

    (
        actor_bias_after,
        actor_running_mean_after,
        critic_bias_after,
        critic_running_mean_after,
        critic_target_bias_after,
        critic_target_running_mean_after,
    ) = clone_sac_batch_norm_stats(model)

    assert ~th.isclose(actor_bias_before, actor_bias_after).all()
    assert ~th.isclose(actor_running_mean_before, actor_running_mean_after).all()

    assert ~th.isclose(critic_bias_before, critic_bias_after).all()
    # Running stat should be copied even when tau=0
    assert th.isclose(critic_running_mean_before, critic_target_running_mean_before).all()

    assert th.isclose(critic_target_bias_before, critic_target_bias_after).all()
    # Running stat should be copied even when tau=0
    assert th.isclose(critic_running_mean_after, critic_target_running_mean_after).all()

@pytest.mark.parametrize("model_class", [A2C, PPO])
|
|
@pytest.mark.parametrize("env_id", ["Pendulum-v1", "CartPole-v1"])
|
|
def test_a2c_ppo_train_with_batch_norm(model_class, env_id):
|
|
model = model_class(
|
|
"MlpPolicy",
|
|
env_id,
|
|
policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
|
|
seed=1,
|
|
)
|
|
|
|
bias_before, running_mean_before = clone_on_policy_batch_norm(model)
|
|
|
|
model.learn(total_timesteps=200)
|
|
|
|
bias_after, running_mean_after = clone_on_policy_batch_norm(model)
|
|
|
|
assert ~th.isclose(bias_before, bias_after).all()
|
|
assert ~th.isclose(running_mean_before, running_mean_after).all()
|
|
|
|
|
|
@pytest.mark.parametrize("model_class", [DQN, TD3, SAC])
|
|
def test_offpolicy_collect_rollout_batch_norm(model_class):
|
|
if model_class in [DQN]:
|
|
env_id = "CartPole-v1"
|
|
else:
|
|
env_id = "Pendulum-v1"
|
|
|
|
clone_helper = CLONE_HELPERS[model_class]
|
|
|
|
learning_starts = 10
|
|
model = model_class(
|
|
"MlpPolicy",
|
|
env_id,
|
|
policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
|
|
learning_starts=learning_starts,
|
|
seed=1,
|
|
gradient_steps=0,
|
|
train_freq=1,
|
|
)
|
|
|
|
batch_norm_stats_before = clone_helper(model)
|
|
|
|
model.learn(total_timesteps=100)
|
|
|
|
batch_norm_stats_after = clone_helper(model)
|
|
|
|
# No change in batch norm params
|
|
for param_before, param_after in zip(batch_norm_stats_before, batch_norm_stats_after):
|
|
assert th.isclose(param_before, param_after).all()
|
|
|
|
|
|
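
# The stats stay frozen because rollout collection runs the policy in eval
# mode: `OffPolicyAlgorithm.collect_rollouts()` calls
# `self.policy.set_training_mode(False)` before sampling actions, so batch
# norm uses (and does not update) its running statistics. With
# gradient_steps=0, no training step ever switches it back to training mode.
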
@pytest.mark.parametrize("model_class", [A2C, PPO])
|
|
@pytest.mark.parametrize("env_id", ["Pendulum-v1", "CartPole-v1"])
|
|
def test_a2c_ppo_collect_rollouts_with_batch_norm(model_class, env_id):
|
|
model = model_class(
|
|
"MlpPolicy",
|
|
env_id,
|
|
policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
|
|
seed=1,
|
|
n_steps=64,
|
|
)
|
|
|
|
bias_before, running_mean_before = clone_on_policy_batch_norm(model)
|
|
|
|
total_timesteps, callback = model._setup_learn(total_timesteps=2 * 64)
|
|
|
|
for _ in range(2):
|
|
model.collect_rollouts(model.get_env(), callback, model.rollout_buffer, n_rollout_steps=model.n_steps)
|
|
|
|
bias_after, running_mean_after = clone_on_policy_batch_norm(model)
|
|
|
|
assert th.isclose(bias_before, bias_after).all()
|
|
assert th.isclose(running_mean_before, running_mean_after).all()
|
|
|
|
|
|
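
# `_setup_learn()` is a private helper, used above only so that
# `collect_rollouts()` can be called directly: it resets the environment,
# initializes `self._last_obs`, and returns the (total_timesteps, callback)
# pair that `collect_rollouts()` expects. On-policy collection likewise runs
# the policy in eval mode, which is why the stats are unchanged here too.
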
@pytest.mark.parametrize("model_class", MODEL_LIST)
|
|
@pytest.mark.parametrize("env_id", ["Pendulum-v1", "CartPole-v1"])
|
|
def test_predict_with_dropout_batch_norm(model_class, env_id):
|
|
if env_id == "CartPole-v1":
|
|
if model_class in [SAC, TD3]:
|
|
return
|
|
elif model_class in [DQN]:
|
|
return
|
|
|
|
model_kwargs = dict(seed=1)
|
|
clone_helper = CLONE_HELPERS[model_class]
|
|
|
|
if model_class in [DQN, TD3, SAC]:
|
|
model_kwargs["learning_starts"] = 0
|
|
else:
|
|
model_kwargs["n_steps"] = 64
|
|
|
|
policy_kwargs = dict(
|
|
features_extractor_class=FlattenBatchNormDropoutExtractor,
|
|
net_arch=[16, 16],
|
|
)
|
|
model = model_class("MlpPolicy", env_id, policy_kwargs=policy_kwargs, verbose=1, **model_kwargs)
|
|
|
|
batch_norm_stats_before = clone_helper(model)
|
|
|
|
env = model.get_env()
|
|
observation = env.reset()
|
|
first_prediction, _ = model.predict(observation, deterministic=True)
|
|
for _ in range(5):
|
|
prediction, _ = model.predict(observation, deterministic=True)
|
|
np.testing.assert_allclose(first_prediction, prediction)
|
|
|
|
batch_norm_stats_after = clone_helper(model)
|
|
|
|
# No change in batch norm params
|
|
for param_before, param_after in zip(batch_norm_stats_before, batch_norm_stats_after):
|
|
assert th.isclose(param_before, param_after).all()
|
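
# The predictions above are repeatable because `predict()` switches the
# policy to eval mode (dropout disabled, batch norm frozen) before inference.
# A minimal usage sketch outside the test suite (illustration only; the env
# id is just an example):
#
#     model = PPO(
#         "MlpPolicy",
#         "CartPole-v1",
#         policy_kwargs=dict(features_extractor_class=FlattenBatchNormDropoutExtractor),
#     )
#     obs = model.get_env().reset()
#     action, _ = model.predict(obs, deterministic=True)  # eval mode inside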