diff --git a/docs/conf.py b/docs/conf.py
index 9d88f265..e8be9ab6 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -33,6 +33,9 @@
 
 # -- General configuration ---------------------------------------------------
 
+# organize members by source order
+autodoc_member_order = "bysource"
+
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
diff --git a/docs/contents/atari.md b/docs/contents/atari.md
index 0ac7c369..55d97c5b 100644
--- a/docs/contents/atari.md
+++ b/docs/contents/atari.md
@@ -1,5 +1,6 @@
 ## Atari Environments
+
 ### Installation
 ```
 pip install shimmy[atari]
 ```
diff --git a/docs/contents/dm_control.md b/docs/contents/dm_control.md
index a5b88897..6accca7a 100644
--- a/docs/contents/dm_control.md
+++ b/docs/contents/dm_control.md
@@ -1,4 +1,5 @@
-## DM Control (both single and multiagent environments)
+## dm-control (single agent)
+
 ### Installation
 ```
@@ -12,26 +13,7 @@
 import gymnasium as gym
 
 env = gym.make("dm_control/acrobot_swingup_sparse-v0")
 ```
 
-### Usage (Multi agent)
-```python
-from dm_control.locomotion import soccer as dm_soccer
-from shimmy.dm_control_multiagent_compatibility import (
-    DmControlMultiAgentCompatibilityV0,
-)
-
-walker_type = dm_soccer.WalkerType.BOXHEAD,
-
-env = dm_soccer.load(
-    team_size=2,
-    time_limit=10.0,
-    disable_walker_contacts=False,
-    enable_field_box=True,
-    terminate_on_goal=False,
-    walker_type=walker_type,
-)
-
-env = DmControlMultiAgentCompatibilityV0(env)
-```
-
-```{eval-rst}
-.. autoclass:: shimmy.dm_control_multiagent_compatibility.DmControlMultiAgentCompatibilityV0
-   :members:
-   :undoc-members:
-```
-
 ### Class Description
 ```{eval-rst}
 .. autoclass:: shimmy.dm_control_compatibility.DmControlCompatibilityV0
    :members:
diff --git a/docs/contents/dm_multi.md b/docs/contents/dm_multi.md
new file mode 100644
index 00000000..0e874cea
--- /dev/null
+++ b/docs/contents/dm_multi.md
@@ -0,0 +1,35 @@
+## dm-control soccer (multi-agent)
+
+### Installation
+```
+pip install shimmy[dm-control]
+```
+
+### Usage (Multi agent)
+```python
+from dm_control.locomotion import soccer as dm_soccer
+from shimmy.dm_control_multiagent_compatibility import (
+    DmControlMultiAgentCompatibilityV0,
+)
+
+walker_type = dm_soccer.WalkerType.BOXHEAD
+
+env = dm_soccer.load(
+    team_size=2,
+    time_limit=10.0,
+    disable_walker_contacts=False,
+    enable_field_box=True,
+    terminate_on_goal=False,
+    walker_type=walker_type,
+)
+
+env = DmControlMultiAgentCompatibilityV0(env)
+```
+
+### Class Description
+
+```{eval-rst}
+.. autoclass:: shimmy.dm_control_multiagent_compatibility.DmControlMultiAgentCompatibilityV0
+   :members:
+   :undoc-members:
+```
\ No newline at end of file
diff --git a/docs/contents/index.md b/docs/contents/index.md
index b89cdf1e..bbc7e145 100644
--- a/docs/contents/index.md
+++ b/docs/contents/index.md
@@ -8,6 +8,7 @@ lastpage:
 gym
 atari
 dm_control
+dm_multi
 open_spiel
 dm_lab
 ```
diff --git a/docs/index.md b/docs/index.md
index a85d2035..58f9d0ab 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -49,6 +49,16 @@ Out of the box, Shimmy doesn't install any of the dependencies required for the
 To install them, you'll have to install the optional extras.
 All single agent environments have registration under the Gymnasium API, while all multiagent environments must be wrapped using the corresponding compatibility wrappers.
 
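+For example, creating a registered single agent environment versus wrapping a multiagent one looks like this (a minimal sketch reusing the dm-control and OpenSpiel examples documented below, and assuming the matching extras are installed):
+
+```python
+import gymnasium as gym
+import pyspiel
+
+from shimmy.openspiel_compatibility import OpenspielCompatibilityV0
+
+# Single agent: dm-control tasks are registered with Gymnasium, so gym.make works directly.
+single_agent_env = gym.make("dm_control/acrobot_swingup_sparse-v0")
+
+# Multiagent: load the environment natively, then apply the compatibility wrapper.
+multi_agent_env = OpenspielCompatibilityV0(game=pyspiel.load_game("2048"), render_mode=None)
+```
+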
+### For Developers and Testing Only
+```
+pip install shimmy[testing]
+```
+
+### To install everything
+```
+pip install shimmy[all,testing]
+```
+
 ### OpenAI Gym
 
 #### Installation
@@ -56,6 +66,128 @@ All single agent environments have registration under the Gymnasium API, while a
 ```
 pip install shimmy[gym]
 ```
 
+#### Usage
+```python
+import gymnasium as gym
+
+env = gym.make("GymV22CompatibilityV0", env_name="...")
+```
+
+### Atari Environments
+
+#### Installation
+```
+pip install shimmy[atari]
+```
+
+#### Usage
+```python
+import gymnasium as gym
+
+env = gym.make("ALE/Pong-v5")
+```
+
+### DM Control
+
+#### Installation
+```
+pip install shimmy[dm-control]
+```
+
+#### Usage (Single agent)
+```python
+import gymnasium as gym
+
+env = gym.make("dm_control/acrobot_swingup_sparse-v0")
+```
+
+#### Usage (Multi agent)
+```python
+from dm_control.locomotion import soccer as dm_soccer
+from shimmy.dm_control_multiagent_compatibility import (
+    DmControlMultiAgentCompatibilityV0,
+)
+
+walker_type = dm_soccer.WalkerType.BOXHEAD
+
+env = dm_soccer.load(
+    team_size=2,
+    time_limit=10.0,
+    disable_walker_contacts=False,
+    enable_field_box=True,
+    terminate_on_goal=False,
+    walker_type=walker_type,
+)
+
+env = DmControlMultiAgentCompatibilityV0(env)
+```
+
+### DM Lab
+
+#### Installation
+
+Thanks to [Danijar Hafner](https://github.com/deepmind/lab/issues/242) for providing this install script.
+```bash
+#!/bin/sh
+set -eu
+
+# Dependencies
+apt-get update && apt-get install -y \
+  build-essential curl freeglut3 gettext git libffi-dev libglu1-mesa \
+  libglu1-mesa-dev libjpeg-dev liblua5.1-0-dev libosmesa6-dev \
+  libsdl2-dev lua5.1 pkg-config python-setuptools python3-dev \
+  software-properties-common unzip zip zlib1g-dev g++
+pip3 install numpy
+
+# Bazel
+apt-get install -y apt-transport-https curl gnupg
+curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor > bazel.gpg
+mv bazel.gpg /etc/apt/trusted.gpg.d/
+echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list
+apt-get update && apt-get install -y bazel
+
+# Build
+git clone https://github.com/deepmind/lab.git
+cd lab
+echo 'build --cxxopt=-std=c++17' > .bazelrc
+bazel build -c opt //python/pip_package:build_pip_package
+./bazel-bin/python/pip_package/build_pip_package /tmp/dmlab_pkg
+pip3 install --force-reinstall /tmp/dmlab_pkg/deepmind_lab-*.whl
+cd ..
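+
+# Optional sanity check (an addition, not part of the original script): confirm
+# that the freshly installed deepmind_lab package imports before the source
+# tree is deleted below.
+python3 -c "import deepmind_lab; print(deepmind_lab.__file__)"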
+rm -rf lab
+```
+
+#### Usage
+```python
+import deepmind_lab
+
+from shimmy.dm_lab_compatibility import DmLabCompatibilityV0
+
+observations = ["RGBD"]
+config = {"width": "640", "height": "480", "botCount": "2"}
+renderer = "hardware"
+
+env = deepmind_lab.Lab("lt_chasm", observations, config=config, renderer=renderer)
+env = DmLabCompatibilityV0(env)
+```
+
+### OpenSpiel
+
+#### Installation
+```
+pip install shimmy[pettingzoo]
+```
+
+#### Usage
+```python
+import pyspiel
+from shimmy.openspiel_compatibility import OpenspielCompatibilityV0
+
+env = pyspiel.load_game("2048")
+env = OpenspielCompatibilityV0(game=env, render_mode=None)
+```
+
+
 ## At a glance
 
 This is an example of using Shimmy to convert DM Control environments into a Gymnasium compatible environment:
@@ -90,7 +222,13 @@ For most usage, we recommend applying the `gym.wrappers.FlattenObservation(env)`
 If you use this in your research, please cite:
 ```
-TBD
+@software{shimmy2022github,
+  author = {Jordan Terry and Mark Towers and Jun Jet Tai},
+  title = {Shimmy: Gymnasium and PettingZoo Wrappers for Commonly Used Environments},
+  url = {http://github.com/Farama-Foundation/Shimmy},
+  version = {0.2.0},
+  year = {2022},
+}
 ```
 
 ```{toctree}