# docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/dqn/#dqn_ataripy
import argparse
import os
import random
import time
from distutils.util import strtobool

import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from stable_baselines3.common.atari_wrappers import (
    ClipRewardEnv,
    EpisodicLifeEnv,
    FireResetEnv,
    MaxAndSkipEnv,
    NoopResetEnv,
)
from stable_baselines3.common.buffers import ReplayBuffer
from torch.utils.tensorboard import SummaryWriter


def parse_args():
    # fmt: off
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp-name", type=str, default=os.path.splitext(os.path.basename(__file__))[0],
        help="the name of this experiment")
    parser.add_argument("--seed", type=int, default=1,
        help="seed of the experiment")
    parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="if toggled, `torch.backends.cudnn.deterministic=True`")
    parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="if toggled, cuda will be enabled by default")
    parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
        help="if toggled, this experiment will be tracked with Weights and Biases")
    parser.add_argument("--wandb-project-name", type=str, default="cleanRL",
        help="the wandb's project name")
    parser.add_argument("--wandb-entity", type=str, default=None,
        help="the entity (team) of wandb's project")
    parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
        help="whether to capture videos of the agent performances (check out `videos` folder)")
    parser.add_argument("--save-model", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
        help="whether to save model into the `runs/{run_name}` folder")
    parser.add_argument("--upload-model", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
        help="whether to upload the saved model to huggingface")
    parser.add_argument("--hf-entity", type=str, default="",
        help="the user or org name of the model repository from the Hugging Face Hub")

    # Algorithm specific arguments
    parser.add_argument("--env-id", type=str, default="BreakoutNoFrameskip-v4",
        help="the id of the environment")
    parser.add_argument("--total-timesteps", type=int, default=10000000,
        help="total timesteps of the experiments")
    parser.add_argument("--learning-rate", type=float, default=1e-4,
        help="the learning rate of the optimizer")
    parser.add_argument("--buffer-size", type=int, default=1000000,
        help="the replay memory buffer size")
    parser.add_argument("--gamma", type=float, default=0.99,
        help="the discount factor gamma")
    parser.add_argument("--target-network-frequency", type=int, default=1000,
        help="the timesteps it takes to update the target network")
    parser.add_argument("--batch-size", type=int, default=32,
        help="the batch size of samples drawn from the replay memory")
    parser.add_argument("--start-e", type=float, default=1,
        help="the starting epsilon for exploration")
    parser.add_argument("--end-e", type=float, default=0.01,
        help="the ending epsilon for exploration")
    parser.add_argument("--exploration-fraction", type=float, default=0.10,
        help="the fraction of `total-timesteps` it takes to go from start-e to end-e")
    parser.add_argument("--learning-starts", type=int, default=80000,
        help="timestep to start learning")
    parser.add_argument("--train-frequency", type=int, default=4,
        help="the frequency of training")
    args = parser.parse_args()
    # fmt: on
    return args
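# A minimal usage sketch (the script filename here is hypothetical):
#   python dqn_atari_impalacnn.py --env-id BreakoutNoFrameskip-v4 --track
# Boolean flags accept an explicit value (`--capture-video True`) or can be
# passed bare (`--track`) thanks to `nargs="?", const=True`.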


def make_env(env_id, seed, idx, capture_video, run_name):
    def thunk():
        env = gym.make(env_id)
        env = gym.wrappers.RecordEpisodeStatistics(env)
        if capture_video:
            if idx == 0:
                env = gym.wrappers.RecordVideo(env, f"videos/{run_name}")
        env = NoopResetEnv(env, noop_max=30)
        env = MaxAndSkipEnv(env, skip=4)
        env = EpisodicLifeEnv(env)
        if "FIRE" in env.unwrapped.get_action_meanings():
            env = FireResetEnv(env)
        env = ClipRewardEnv(env)
        env = gym.wrappers.ResizeObservation(env, (84, 84))
        env = gym.wrappers.GrayScaleObservation(env)
        env = gym.wrappers.FrameStack(env, 4)
        env.seed(seed)
        env.action_space.seed(seed)
        env.observation_space.seed(seed)
        return env

    return thunk
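# With the wrappers above each observation is a stack of 4 grayscale 84x84
# frames (uint8), rewards are clipped to {-1, 0, +1}, and a life loss ends
# the episode, i.e. the standard DQN Atari preprocessing.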


# taken from https://github.com/AIcrowd/neurips2020-procgen-starter-kit/blob/142d09586d2272a17f44481a115c4bd817cf6a94/models/impala_cnn_torch.py
class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.conv0 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, padding=1)
        self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, padding=1)

    def forward(self, x):
        inputs = x
        x = nn.functional.relu(x)
        x = self.conv0(x)
        x = nn.functional.relu(x)
        x = self.conv1(x)
        return x + inputs
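# The block is pre-activation (ReLU before each conv) and shape-preserving:
# e.g. an input of shape (N, 16, 42, 42) comes out as (N, 16, 42, 42).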


class ConvSequence(nn.Module):
    def __init__(self, input_shape, out_channels):
        super().__init__()
        self._input_shape = input_shape
        self._out_channels = out_channels
        self.conv = nn.Conv2d(in_channels=self._input_shape[0], out_channels=self._out_channels, kernel_size=3, padding=1)
        self.res_block0 = ResidualBlock(self._out_channels)
        self.res_block1 = ResidualBlock(self._out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = self.res_block0(x)
        x = self.res_block1(x)
        assert x.shape[1:] == self.get_output_shape()
        return x

    def get_output_shape(self):
        _c, h, w = self._input_shape
        return (self._out_channels, (h + 1) // 2, (w + 1) // 2)
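# Each ConvSequence halves H and W (rounding up) via the stride-2 max pool,
# so an 84x84 frame shrinks 84 -> 42 -> 21 -> 11 across three sequences.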


# ALGO LOGIC: initialize agent here:
class QNetwork(nn.Module):
    def __init__(self, env):
        super().__init__()
        c, h, w = env.single_observation_space.shape
        shape = (c, h, w)
        conv_seqs = []
        for out_channels in [16, 32, 32]:
            conv_seq = ConvSequence(shape, out_channels)
            shape = conv_seq.get_output_shape()
            conv_seqs.append(conv_seq)
        conv_seqs += [
            nn.Flatten(),
            nn.ReLU(),
            nn.Linear(in_features=shape[0] * shape[1] * shape[2], out_features=256),
            nn.ReLU(),
            nn.Linear(in_features=256, out_features=env.single_action_space.n),
        ]
        self.network = nn.Sequential(*conv_seqs)

    def forward(self, x):
        return self.network(x / 255.0)
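# For a (4, 84, 84) observation the three ConvSequences yield (32, 11, 11),
# i.e. 32 * 11 * 11 = 3872 flattened features feeding the 256-unit hidden layer.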


def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
    slope = (end_e - start_e) / duration
    return max(slope * t + start_e, end_e)
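# e.g. with start_e=1.0, end_e=0.01, duration=1_000_000: epsilon is 1.0 at t=0,
# decays linearly to 0.01 at t=1_000_000, and is clamped at 0.01 afterwards.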


if __name__ == "__main__":
    args = parse_args()
    run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
    if args.track:
        import wandb

        wandb.init(
            project=args.wandb_project_name,
            entity=args.wandb_entity,
            sync_tensorboard=True,
            config=vars(args),
            name=run_name,
            monitor_gym=True,
            save_code=True,
        )
    writer = SummaryWriter(f"runs/{run_name}")
    writer.add_text(
        "hyperparameters",
        "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
    )

    # TRY NOT TO MODIFY: seeding
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic

    device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

    # env setup
    envs = gym.vector.SyncVectorEnv([make_env(args.env_id, args.seed, 0, args.capture_video, run_name)])
    assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

    q_network = QNetwork(envs).to(device)
    try:  # optional layer-by-layer summary; skipped if torchsummary is absent
        from torchsummary import summary
        summary(q_network, (4, 84, 84))
    except ImportError:
        pass
    optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
    target_network = QNetwork(envs).to(device)
    target_network.load_state_dict(q_network.state_dict())

    rb = ReplayBuffer(
        args.buffer_size,
        envs.single_observation_space,
        envs.single_action_space,
        device,
        optimize_memory_usage=True,
        handle_timeout_termination=True,
    )
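    # `optimize_memory_usage=True` stores each observation only once and reads
    # next_obs from the following buffer index, roughly halving replay RAM.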
    start_time = time.time()

    # TRY NOT TO MODIFY: start the game
    obs = envs.reset()
    for global_step in range(args.total_timesteps):
        # ALGO LOGIC: put action logic here
        epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction * args.total_timesteps, global_step)
        if random.random() < epsilon:
            actions = np.array([envs.single_action_space.sample() for _ in range(envs.num_envs)])
        else:
            q_values = q_network(torch.Tensor(obs).to(device))
            actions = torch.argmax(q_values, dim=1).cpu().numpy()
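        # the branch above is epsilon-greedy exploration: a uniform random
        # action with probability epsilon, otherwise the greedy action under
        # the online Q-network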
        # TRY NOT TO MODIFY: execute the game and log data.
        next_obs, rewards, dones, infos = envs.step(actions)

        # TRY NOT TO MODIFY: record rewards for plotting purposes
        for info in infos:
            if "episode" in info.keys():
                print(f"global_step={global_step}, episodic_return={info['episode']['r']}")
                writer.add_scalar("charts/episodic_return", info["episode"]["r"], global_step)
                writer.add_scalar("charts/episodic_length", info["episode"]["l"], global_step)
                writer.add_scalar("charts/epsilon", epsilon, global_step)
                break

        # TRY NOT TO MODIFY: save data to replay buffer; handle `terminal_observation`
        real_next_obs = next_obs.copy()
        for idx, d in enumerate(dones):
            if d:
                real_next_obs[idx] = infos[idx]["terminal_observation"]
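        # (vectorized envs auto-reset on `done`, so without this fix `next_obs`
        # would be the first frame of the new episode, not the terminal frame)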
        rb.add(obs, real_next_obs, actions, rewards, dones, infos)

        # TRY NOT TO MODIFY: CRUCIAL step easy to overlook
        obs = next_obs

        # ALGO LOGIC: training.
        if global_step > args.learning_starts:
            if global_step % args.train_frequency == 0:
                data = rb.sample(args.batch_size)
                with torch.no_grad():
                    target_max, _ = target_network(data.next_observations).max(dim=1)
                    td_target = data.rewards.flatten() + args.gamma * target_max * (1 - data.dones.flatten())
                old_val = q_network(data.observations).gather(1, data.actions).squeeze()
                loss = F.mse_loss(td_target, old_val)
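                # the lines above form the standard DQN TD target
                # y = r + gamma * max_a' Q_target(s', a'), with the bootstrap
                # term zeroed on terminal transitions by (1 - done)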

                if global_step % 100 == 0:
                    writer.add_scalar("losses/td_loss", loss, global_step)
                    writer.add_scalar("losses/q_values", old_val.mean().item(), global_step)
                    print("SPS:", int(global_step / (time.time() - start_time)))
                    writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)

                # optimize the model
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            # update the target network
            if global_step % args.target_network_frequency == 0:
                target_network.load_state_dict(q_network.state_dict())
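            # a periodic hard copy keeps the bootstrap targets fixed between
            # updates, the stabilizing trick from the original DQN paper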

    if args.save_model:
        model_path = f"runs/{run_name}/{args.exp_name}.cleanrl_model"
        torch.save(q_network.state_dict(), model_path)
        print(f"model saved to {model_path}")
        from cleanrl_utils.evals.dqn_eval import evaluate

        episodic_returns = evaluate(
            model_path,
            make_env,
            args.env_id,
            eval_episodes=10,
            run_name=f"{run_name}-eval",
            Model=QNetwork,
            device=device,
            epsilon=0.05,
        )
        for idx, episodic_return in enumerate(episodic_returns):
            writer.add_scalar("eval/episodic_return", episodic_return, idx)

        if args.upload_model:
            from cleanrl_utils.huggingface import push_to_hub

            repo_name = f"{args.env_id}-{args.exp_name}-seed{args.seed}"
            repo_id = f"{args.hf_entity}/{repo_name}" if args.hf_entity else repo_name
            push_to_hub(args, episodic_returns, repo_id, "DQN", f"runs/{run_name}", f"videos/{run_name}-eval")

    envs.close()
    writer.close()