mirror of
https://github.com/saymrwulf/stable-baselines3.git
synced 2026-05-16 21:10:08 +00:00
* make sure DQN policy is always in correct mode - train or eval * make set_training_mode an abstract method of the base policy - safer * update docstring of _build method to note that the target network is put into eval mode * use set_training_mode to put the dqn target network into eval mode * use set_training_mode to set the training model of the q-network * move set_training_mode abstract method from BasePolicy to BaseModel * set train and eval mode for TD3 * make sure critic is always in correct mode during train * set train and eval mode for SAC * add comment re batch norm and dropout * set train and eval mode for A2C and PPO * add tests for collect rollouts with batch norm * fix formatting * update change log * update version * remove Optional typing for batch size - causing type check to fail * Fix scipy dependency for toy text envs * implement set_training_mode method in BaseModel * move all tests of train/eval mode to test_train_eval_mode * call learn with learning_starts = total_timesteps to test that collect_rollouts does not update batch norm * remove extra calls to set_training_mode in train method of TD3 and SAC * Allow gradient_steps=0 * Refactor tests * Add comment + use aliases * Typos Co-authored-by: Antonin Raffin <antonin.raffin@ensta.org>
142 lines
4.3 KiB
Python
import os

from setuptools import find_packages, setup

# Read the package version from the single source of truth, version.txt
# (shipped with the package via package_data below).
# encoding is pinned to UTF-8 so the read does not depend on the locale's
# default encoding, which varies across build machines.
with open(os.path.join("stable_baselines3", "version.txt"), "r", encoding="utf-8") as file_handler:
    __version__ = file_handler.read().strip()
|
|
|
|
|
|
long_description = """
|
|
|
|
# Stable Baselines3
|
|
|
|
Stable Baselines3 is a set of reliable implementations of reinforcement learning algorithms in PyTorch. It is the next major version of [Stable Baselines](https://github.com/hill-a/stable-baselines).
|
|
|
|
These algorithms will make it easier for the research community and industry to replicate, refine, and identify new ideas, and will create good baselines to build projects on top of. We expect these tools will be used as a base around which new ideas can be added, and as a tool for comparing a new approach against existing ones. We also hope that the simplicity of these tools will allow beginners to experiment with a more advanced toolset, without being buried in implementation details.
|
|
|
|
|
|
## Links
|
|
|
|
Repository:
|
|
https://github.com/DLR-RM/stable-baselines3
|
|
|
|
Blog post:
|
|
https://araffin.github.io/post/sb3/
|
|
|
|
Documentation:
|
|
https://stable-baselines3.readthedocs.io/en/master/
|
|
|
|
RL Baselines3 Zoo:
|
|
https://github.com/DLR-RM/rl-baselines3-zoo
|
|
|
|
SB3 Contrib:
|
|
https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
|
|
|
|
## Quick example
|
|
|
|
Most of the library tries to follow a sklearn-like syntax for the Reinforcement Learning algorithms using Gym.
|
|
|
|
Here is a quick example of how to train and run PPO on a cartpole environment:
|
|
|
|
```python
|
|
import gym
|
|
|
|
from stable_baselines3 import PPO
|
|
|
|
env = gym.make('CartPole-v1')
|
|
|
|
model = PPO('MlpPolicy', env, verbose=1)
|
|
model.learn(total_timesteps=10000)
|
|
|
|
obs = env.reset()
|
|
for i in range(1000):
|
|
action, _states = model.predict(obs, deterministic=True)
|
|
obs, reward, done, info = env.step(action)
|
|
env.render()
|
|
if done:
|
|
obs = env.reset()
|
|
```
|
|
|
|
Or just train a model with a one liner if [the environment is registered in Gym](https://github.com/openai/gym/wiki/Environments) and if [the policy is registered](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html):
|
|
|
|
```python
|
|
from stable_baselines3 import PPO
|
|
|
|
model = PPO('MlpPolicy', 'CartPole-v1').learn(10000)
|
|
```
|
|
|
|
""" # noqa:E501
|
|
|
|
|
|
# Only ship packages that belong to stable_baselines3 itself; find_packages()
# would otherwise also pick up tests/docs helper packages at the repo root.
sb3_packages = [pkg for pkg in find_packages() if pkg.startswith("stable_baselines3")]

setup(
    # --- Identity ---
    name="stable_baselines3",
    version=__version__,
    packages=sb3_packages,
    # py.typed marks the package as type-annotated (PEP 561); version.txt is
    # the runtime source of the version string read at the top of this file.
    package_data={"stable_baselines3": ["py.typed", "version.txt"]},
    # --- Metadata shown on PyPI ---
    description="Pytorch version of Stable Baselines, implementations of reinforcement learning algorithms.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Antonin Raffin",
    author_email="antonin.raffin@dlr.de",
    url="https://github.com/DLR-RM/stable-baselines3",
    license="MIT",
    keywords="reinforcement-learning-algorithms reinforcement-learning machine-learning "
    "gym openai stable baselines toolbox python data-science",
    # --- Runtime dependencies ---
    install_requires=[
        "gym>=0.17",
        "numpy",
        "torch>=1.8.1",
        # Model (de)serialization
        "cloudpickle",
        # Log reading
        "pandas",
        # Learning-curve plots
        "matplotlib",
    ],
    # --- Optional dependency groups ---
    extras_require={
        "tests": [
            # Test runner and coverage
            "pytest",
            "pytest-cov",
            "pytest-env",
            "pytest-xdist",
            # Static type checking
            "pytype",
            # Linting
            "flake8>=3.8",
            # Likely-bug detection
            "flake8-bugbear",
            # Import ordering
            "isort>=5.0",
            # Code formatting
            "black",
            # Needed by the toy text Gym envs
            "scipy>=1.4.1",
        ],
        "docs": [
            "sphinx",
            "sphinx-autobuild",
            "sphinx-rtd-theme",
            # Spell checking of the docs
            "sphinxcontrib.spelling",
            # Render type hints in the docs
            "sphinx-autodoc-typehints",
        ],
        "extra": [
            # Env rendering
            "opencv-python",
            # Atari environments
            "atari_py~=0.2.0",
            "pillow",
            # Tensorboard logging
            "tensorboard>=2.2.0",
            # Replay-buffer memory usage reporting
            "psutil",
        ],
    },
)
|
|
|
|
# python setup.py sdist
|
|
# python setup.py bdist_wheel
|
|
# twine upload --repository-url https://test.pypi.org/legacy/ dist/*
|
|
# twine upload dist/*
|