From 4a85589b92af3d5e128eca99895c7e0d56babebf Mon Sep 17 00:00:00 2001
From: Chen Tao <79069012+Tc511@users.noreply.github.com>
Date: Mon, 5 Aug 2024 17:18:21 +0800
Subject: [PATCH] Update vector_envs_tutorial.py (#1133)

---
 docs/tutorials/gymnasium_basics/vector_envs_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tutorials/gymnasium_basics/vector_envs_tutorial.py b/docs/tutorials/gymnasium_basics/vector_envs_tutorial.py
index 4b978221f..238cb07a0 100644
--- a/docs/tutorials/gymnasium_basics/vector_envs_tutorial.py
+++ b/docs/tutorials/gymnasium_basics/vector_envs_tutorial.py
@@ -417,7 +417,7 @@ def update_parameters(
 # For our training loop, we are using the `RecordEpisodeStatistics` wrapper to record the episode lengths and returns and we are also saving
 # the losses and entropies to plot them after the agent finished training.
 #
-# You may notice that the don't reset the vectorized envs at the start of each episode like we would usually do.
+# You may notice that we don't reset the vectorized envs at the start of each episode like we would usually do.
 # This is because each environment resets automatically once the episode finishes (each environment takes a different number of timesteps to finish
 # an episode because of the random seeds). As a result, we are also not collecting data in `episodes`, but rather just play a certain number of steps
 # (`n_steps_per_update`) in each environment (as an example, this could mean that we play 20 timesteps to finish an episode and then
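
The passage this patch touches describes collecting a fixed number of steps from autoresetting vectorized environments rather than collecting whole episodes. A minimal sketch of that pattern, assuming `CartPole-v1`, Gymnasium's `SyncVectorEnv`, and that `RecordEpisodeStatistics` is applied at the vector level (the wrapper's module path varies across Gymnasium versions):

    import gymnasium as gym

    n_envs = 3
    n_steps_per_update = 128

    # Each sub-environment gets a different seed, so episodes finish at
    # different timesteps across the batch.
    envs = gym.vector.SyncVectorEnv(
        [lambda: gym.make("CartPole-v1") for _ in range(n_envs)]
    )
    envs = gym.wrappers.vector.RecordEpisodeStatistics(envs)

    # Reset once at the start of training; after that, each sub-environment
    # resets itself automatically whenever its episode finishes, so the loop
    # below contains no per-episode reset.
    states, infos = envs.reset(seed=42)

    for _ in range(n_steps_per_update):
        # A random policy stands in for the agent here.
        actions = envs.action_space.sample()
        states, rewards, terminations, truncations, infos = envs.step(actions)
        # The wrapper reports statistics for any episode that just finished.
        if "episode" in infos:
            print(infos["episode"]["r"])  # episodic return(s)

    envs.close()

Because terminations are staggered across sub-environments, a fixed step budget per update keeps the batch in lockstep instead of waiting for the slowest episode to finish.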