Commit

Run black on tutorials
esantorella committed Feb 28, 2023
1 parent e5f4d14 commit 30fb0be
Showing 32 changed files with 19,084 additions and 18,689 deletions.
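The changes below are consistent with simply running the Black formatter over the tutorials directory. A minimal, hypothetical sketch of reproducing it (the commit does not record the exact command, and formatting .ipynb files requires Black's Jupyter extra, installed via pip install "black[jupyter]"):

    # Hypothetical sketch; assumes black[jupyter] is installed.
    # Equivalent to running `black tutorials/` from the repository root.
    import subprocess

    subprocess.run(["black", "tutorials/"], check=True)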
914 changes: 462 additions & 452 deletions tutorials/GIBBON_for_efficient_batch_entropy_search.ipynb

Large diffs are not rendered by default.

6,523 changes: 3,275 additions & 3,248 deletions tutorials/Multi_objective_multi_fidelity_BO.ipynb

Large diffs are not rendered by default.

630 changes: 315 additions & 315 deletions tutorials/batch_mode_cross_validation.ipynb

Large diffs are not rendered by default.

125 changes: 78 additions & 47 deletions tutorials/bo_with_warped_gp.ipynb
@@ -84,20 +84,20 @@
 "from torch.distributions import Kumaraswamy\n",
 "import matplotlib.pyplot as plt\n",
 "\n",
-"%matplotlib inline \n",
+"%matplotlib inline\n",
 "\n",
 "\n",
 "fontdict = {\"fontsize\": 15}\n",
 "torch.manual_seed(1234567890)\n",
 "c1 = torch.rand(6, dtype=dtype, device=device) * 3 + 0.1\n",
 "c0 = torch.rand(6, dtype=dtype, device=device) * 3 + 0.1\n",
-"x = torch.linspace(0,1,101, dtype=dtype, device=device)\n",
+"x = torch.linspace(0, 1, 101, dtype=dtype, device=device)\n",
 "k = Kumaraswamy(concentration1=c1, concentration0=c0)\n",
 "k_icdfs = k.icdf(x.unsqueeze(1).expand(101, 6))\n",
-"fig, ax = plt.subplots(1,1, figsize=(5,5))\n",
+"fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n",
 "\n",
 "for i in range(6):\n",
-"    ax.plot(x.cpu(), k_icdfs[:,i].cpu())\n",
+"    ax.plot(x.cpu(), k_icdfs[:, i].cpu())\n",
 "ax.set_xlabel(\"Raw Value\", **fontdict)\n",
 "ax.set_ylabel(\"Transformed Value\", **fontdict)"
 ]
@@ -112,10 +112,10 @@
 "\n",
 "neg_hartmann6 = Hartmann(negate=True)\n",
 "\n",
+"\n",
 "def obj(X):\n",
 "    X_warp = k.icdf(X)\n",
-"    return neg_hartmann6(X_warp)\n",
-"    "
+"    return neg_hartmann6(X_warp)"
 ]
 },
 {
@@ -147,11 +147,13 @@
 "\n",
 "def generate_initial_data(n=14):\n",
 "    # generate training data\n",
-"    train_x = draw_sobol_samples(bounds=bounds, n=n, q=1, seed=torch.randint(0,10000,(1,)).item()).squeeze(1)\n",
+"    train_x = draw_sobol_samples(\n",
+"        bounds=bounds, n=n, q=1, seed=torch.randint(0, 10000, (1,)).item()\n",
+"    ).squeeze(1)\n",
 "    exact_obj = obj(train_x).unsqueeze(-1)  # add output dimension\n",
-"    \n",
+"\n",
 "    best_observed_value = exact_obj.max().item()\n",
-"    train_obj = exact_obj + NOISE_SE * torch.randn_like(exact_obj)\n",
+"    train_obj = exact_obj + NOISE_SE * torch.randn_like(exact_obj)\n",
 "    return train_x, train_obj, best_observed_value"
 ]
 },
@@ -181,17 +183,17 @@
 "            indices=list(range(train_x.shape[-1])),\n",
 "            # use a prior with median at 1.\n",
 "            # when a=1 and b=1, the Kumaraswamy CDF is the identity function\n",
-"            concentration1_prior=LogNormalPrior(0.0, 0.75 ** 0.5),\n",
-"            concentration0_prior=LogNormalPrior(0.0, 0.75 ** 0.5),\n",
+"            concentration1_prior=LogNormalPrior(0.0, 0.75**0.5),\n",
+"            concentration0_prior=LogNormalPrior(0.0, 0.75**0.5),\n",
 "        )\n",
 "    else:\n",
 "        warp_tf = None\n",
-"    # define the model for objective \n",
+"    # define the model for objective\n",
 "    model = FixedNoiseGP(\n",
 "        train_x,\n",
 "        standardize(train_obj),\n",
 "        train_yvar.expand_as(train_obj),\n",
-"        input_transform=warp_tf\n",
+"        input_transform=warp_tf,\n",
 "    ).to(train_x)\n",
 "    mll = ExactMarginalLogLikelihood(model.likelihood, model)\n",
 "    return mll, model"
@@ -229,7 +231,7 @@
 "        raw_samples=raw_samples,  # used for intialization heuristic\n",
 "        options={\"batch_limit\": 5, \"maxiter\": 200},\n",
 "    )\n",
-"    # observe new values \n",
+"    # observe new values\n",
 "    new_x = candidates.detach()\n",
 "    exact_obj = obj(new_x).unsqueeze(-1)  # add output dimension\n",
 "    train_obj = exact_obj + NOISE_SE * torch.randn_like(exact_obj)\n",
@@ -242,7 +244,7 @@
 "    \"\"\"\n",
 "    rand_x = draw_sobol_samples(bounds=bounds, n=1, q=1).squeeze(1)\n",
 "    next_random_best = obj(rand_x).max().item()\n",
-"    best_random.append(max(best_random[-1], next_random_best)) \n",
+"    best_random.append(max(best_random[-1], next_random_best))\n",
 "    return best_random"
 ]
 },
@@ -271,8 +273,8 @@
 "import warnings\n",
 "\n",
 "\n",
-"warnings.filterwarnings('ignore', category=BadInitialCandidatesWarning)\n",
-"warnings.filterwarnings('ignore', category=RuntimeWarning)\n",
+"warnings.filterwarnings(\"ignore\", category=BadInitialCandidatesWarning)\n",
+"warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n",
 "\n",
 "\n",
 "N_TRIALS = 3 if not SMOKE_TEST else 2\n",
@@ -287,46 +289,53 @@
 "\n",
 "# average over multiple trials\n",
 "for trial in range(1, N_TRIALS + 1):\n",
-"    \n",
+"\n",
 "    print(f\"\\nTrial {trial:>2} of {N_TRIALS} \", end=\"\")\n",
 "    best_observed_ei, best_observed_warp, best_random = [], [], []\n",
-"    \n",
+"\n",
 "    # call helper functions to generate initial training data and initialize model\n",
 "    train_x_ei, train_obj_ei, best_observed_value_ei = generate_initial_data(n=14)\n",
-"    mll_ei, model_ei = initialize_model(train_x_ei, train_obj_ei, use_input_warping=False)\n",
-"    \n",
-"    train_x_warp, train_obj_warp, = train_x_ei, train_obj_ei\n",
+"    mll_ei, model_ei = initialize_model(\n",
+"        train_x_ei, train_obj_ei, use_input_warping=False\n",
+"    )\n",
+"\n",
+"    train_x_warp, train_obj_warp, = (\n",
+"        train_x_ei,\n",
+"        train_obj_ei,\n",
+"    )\n",
 "    best_observed_value_warp = best_observed_value_ei\n",
 "    # use input warping\n",
-"    mll_warp, model_warp = initialize_model(train_x_warp, train_obj_warp, use_input_warping=True)\n",
-"    \n",
+"    mll_warp, model_warp = initialize_model(\n",
+"        train_x_warp, train_obj_warp, use_input_warping=True\n",
+"    )\n",
+"\n",
 "    best_observed_ei.append(best_observed_value_ei)\n",
 "    best_observed_warp.append(best_observed_value_warp)\n",
 "    best_random.append(best_observed_value_ei)\n",
-"    \n",
+"\n",
 "    # run N_BATCH rounds of BayesOpt after the initial random batch\n",
-"    for iteration in range(1, N_BATCH + 1): \n",
-"        \n",
+"    for iteration in range(1, N_BATCH + 1):\n",
+"\n",
 "        t0 = time.monotonic()\n",
-"        \n",
+"\n",
 "        # fit the models\n",
 "        fit_gpytorch_mll(mll_ei)\n",
 "        fit_gpytorch_mll(mll_warp)\n",
-"        \n",
+"\n",
 "        ei = qNoisyExpectedImprovement(\n",
-"            model=model_ei, \n",
+"            model=model_ei,\n",
 "            X_baseline=train_x_ei,\n",
 "        )\n",
-"        \n",
+"\n",
 "        ei_warp = qNoisyExpectedImprovement(\n",
-"            model=model_warp, \n",
+"            model=model_warp,\n",
 "            X_baseline=train_x_warp,\n",
 "        )\n",
-"        \n",
+"\n",
 "        # optimize and get new observation\n",
 "        new_x_ei, new_obj_ei = optimize_acqf_and_get_observation(ei)\n",
 "        new_x_warp, new_obj_warp = optimize_acqf_and_get_observation(ei_warp)\n",
-"        \n",
+"\n",
 "        # update training points\n",
 "        train_x_ei = torch.cat([train_x_ei, new_x_ei])\n",
 "        train_obj_ei = torch.cat([train_obj_ei, new_obj_ei])\n",
@@ -341,20 +350,25 @@
 "        best_observed_ei.append(best_value_ei)\n",
 "        best_observed_warp.append(best_value_warp)\n",
 "\n",
-"        mll_ei, model_ei = initialize_model(train_x_ei, train_obj_ei, use_input_warping=False)\n",
-"        mll_warp, model_warp = initialize_model(train_x_warp, train_obj_warp, use_input_warping=True)\n",
-"        \n",
+"        mll_ei, model_ei = initialize_model(\n",
+"            train_x_ei, train_obj_ei, use_input_warping=False\n",
+"        )\n",
+"        mll_warp, model_warp = initialize_model(\n",
+"            train_x_warp, train_obj_warp, use_input_warping=True\n",
+"        )\n",
+"\n",
 "        t1 = time.monotonic()\n",
-"        \n",
+"\n",
 "        if verbose:\n",
 "            print(\n",
 "                f\"\\nBatch {iteration:>2}: best_value (random, ei, ei_warp) = \"\n",
 "                f\"({max(best_random):>4.2f}, {best_value_ei:>4.2f}, {best_value_warp:>4.2f}), \"\n",
-"                f\"time = {t1-t0:>4.2f}.\", end=\"\"\n",
+"                f\"time = {t1-t0:>4.2f}.\",\n",
+"                end=\"\",\n",
 "            )\n",
 "        else:\n",
 "            print(\".\", end=\"\")\n",
-"        \n",
+"\n",
 "    best_observed_all_ei.append(best_observed_ei)\n",
 "    best_observed_all_warp.append(best_observed_warp)\n",
 "    best_random_all.append(best_random)"
@@ -428,21 +442,38 @@
 "\n",
 "iters = np.arange(N_BATCH + 1)\n",
 "y_ei = np.log10(GLOBAL_MAXIMUM - np.asarray(best_observed_all_ei))\n",
-"y_ei_warp = np.log10(GLOBAL_MAXIMUM - np.asarray(best_observed_all_warp))\n",
-"y_rnd = np.log10(GLOBAL_MAXIMUM - np.asarray(best_random_all))\n",
+"y_ei_warp = np.log10(GLOBAL_MAXIMUM - np.asarray(best_observed_all_warp))\n",
+"y_rnd = np.log10(GLOBAL_MAXIMUM - np.asarray(best_random_all))\n",
 "\n",
 "fig, ax = plt.subplots(1, 1, figsize=(8, 6))\n",
 "ax.errorbar(\n",
-"    iters, y_rnd.mean(axis=0), yerr=ci(y_rnd), label=\"Sobol\", linewidth=1.5, capsize=3, alpha=0.6\n",
+"    iters,\n",
+"    y_rnd.mean(axis=0),\n",
+"    yerr=ci(y_rnd),\n",
+"    label=\"Sobol\",\n",
+"    linewidth=1.5,\n",
+"    capsize=3,\n",
+"    alpha=0.6,\n",
 ")\n",
 "ax.errorbar(\n",
-"    iters, y_ei.mean(axis=0), yerr=ci(y_ei), label=\"NEI\", linewidth=1.5, capsize=3, alpha=0.6,\n",
+"    iters,\n",
+"    y_ei.mean(axis=0),\n",
+"    yerr=ci(y_ei),\n",
+"    label=\"NEI\",\n",
+"    linewidth=1.5,\n",
+"    capsize=3,\n",
+"    alpha=0.6,\n",
 ")\n",
 "ax.errorbar(\n",
-"    iters, y_ei_warp.mean(axis=0), yerr=ci(y_ei_warp), label=\"NEI + Input Warping\",\n",
-"    linewidth=1.5, capsize=3, alpha=0.6,\n",
+"    iters,\n",
+"    y_ei_warp.mean(axis=0),\n",
+"    yerr=ci(y_ei_warp),\n",
+"    label=\"NEI + Input Warping\",\n",
+"    linewidth=1.5,\n",
+"    capsize=3,\n",
+"    alpha=0.6,\n",
 ")\n",
-"ax.set(xlabel='number of observations (beyond initial points)', ylabel='Log10 Regret')\n",
+"ax.set(xlabel=\"number of observations (beyond initial points)\", ylabel=\"Log10 Regret\")\n",
 "ax.legend(loc=\"lower left\")"
 ]
 }
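The hunks above are mechanical Black rewrites: strings normalized to double quotes, spaces added after commas, no space around ** between simple operands, trailing commas with one argument per line in exploded call sites, two blank lines around top-level definitions, and trailing whitespace stripped. A condensed, self-contained before/after illustration drawn from the changes above:

    import warnings

    # Before Black (as in the old notebook source):
    warnings.filterwarnings('ignore', category=RuntimeWarning)
    std = 0.75 ** 0.5

    # After Black: double quotes; no space around ** with simple operands.
    warnings.filterwarnings("ignore", category=RuntimeWarning)
    std = 0.75**0.5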