Commit

Merge pull request #5 from fmfn/master
update from main
bwheelz36 authored May 11, 2023
2 parents 3e55ac6 + 698ca60 commit 2c1308a
Showing 20 changed files with 316 additions and 237 deletions.
12 changes: 9 additions & 3 deletions .github/workflows/run_tests.yml
@@ -1,7 +1,6 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions


name: tests

on:
@@ -27,14 +26,21 @@ jobs:
uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
- name: Install test dependencies
run: |
python -m pip install --upgrade pip
pip install pytest
pip install pytest-cov
pip install coverage
- name: Install notebook dependencies
run: |
pip install nbformat
pip install nbconvert
pip install jupyter
pip install matplotlib
- name: Install package
run: |
pip install -e .
- name: Test with pytest
run: |
pytest --cov-report xml --cov=bayes_opt/
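The new steps above split dependency installation and add the notebook toolchain (nbformat, nbconvert, jupyter, matplotlib), presumably so the example notebooks can be executed as part of the test run. A minimal sketch of what that toolchain enables; the notebook path and timeout are illustrative, not the repository's actual test harness:

```python
# Execute an example notebook headlessly and fail on any cell error.
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

with open("examples/basic-tour.ipynb") as f:
    nb = nbformat.read(f, as_version=4)

# Runs every cell with a python3 kernel; raises CellExecutionError on failure.
ExecutePreprocessor(timeout=600, kernel_name="python3").preprocess(nb, resources={})
```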
2 changes: 1 addition & 1 deletion README.md
@@ -253,7 +253,7 @@ optimizer.maximize(
)
```

By default the previous data in the json file is removed. If you want to keep working with the same logger, the `reset` paremeter in `JSONLogger` should be set to False.
By default the previous data in the json file is removed. If you want to keep working with the same logger, the `reset` parameter in `JSONLogger` should be set to False.

### 4.2 Loading progress

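The `reset` parameter described above controls whether `JSONLogger` truncates an existing log file. A minimal round-trip sketch, assuming the `reset` keyword and the package's standard logger/loader imports (objective and paths are illustrative):

```python
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
from bayes_opt.util import load_logs

def f(x):
    return -x ** 2

optimizer = BayesianOptimization(f=f, pbounds={"x": (-2, 2)}, random_state=1)

# reset=False appends to an existing log instead of removing the previous data
logger = JSONLogger(path="./logs.log", reset=False)
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
optimizer.maximize(init_points=2, n_iter=3)

# Section 4.2: a fresh optimizer can be re-primed from the saved log
new_optimizer = BayesianOptimization(f=f, pbounds={"x": (-2, 2)})
load_logs(new_optimizer, logs=["./logs.log"])
```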
6 changes: 3 additions & 3 deletions bayes_opt/bayesian_optimization.py
@@ -95,7 +95,7 @@ class BayesianOptimization(Observable):
If True, the optimizer will allow duplicate points to be registered.
This behavior may be desired in high noise situations where repeatedly probing
the same point will give different answers. In other situations, the acquisition
may occasionaly generate a duplicate point.
may occasionally generate a duplicate point.
Methods
-------
@@ -226,7 +226,7 @@ def suggest(self, utility_function):
suggestion = acq_max(ac=utility_function.utility,
gp=self._gp,
constraint=self.constraint,
y_max=self._space.target.max(),
y_max=self._space._target_max(),
bounds=self._space.bounds,
random_state=self._random_state)

@@ -276,7 +276,7 @@ def maximize(self,
An instance of bayes_opt.util.UtilityFunction.
If nothing is passed, a default using ucb is used
All other parameters are unused, and are only available to ensure backwards compatability - these
All other parameters are unused, and are only available to ensure backwards compatibility - these
will be removed in a future release
"""
self._prime_subscriptions()
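Two of the changes above surface in the ask-and-tell workflow: `suggest()` now takes its incumbent from `_target_max()`, and the `allow_duplicate_points` flag documented in the docstring tolerates repeated probes. A sketch of that loop under those assumptions (the quadratic objective is illustrative):

```python
from bayes_opt import BayesianOptimization, UtilityFunction

optimizer = BayesianOptimization(
    f=None,  # no callable: we evaluate the target ourselves
    pbounds={"x": (-2, 2), "y": (-3, 3)},
    allow_duplicate_points=True,  # useful when noisy re-probes are expected
    random_state=1,
)
utility = UtilityFunction(kind="ucb", kappa=2.5)

for _ in range(5):
    next_point = optimizer.suggest(utility)  # internally uses the feasible maximum
    target = -next_point["x"] ** 2 - (next_point["y"] - 1) ** 2 + 1
    optimizer.register(params=next_point, target=target)

print(optimizer.max)
```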
8 changes: 4 additions & 4 deletions bayes_opt/domain_reduction.py
@@ -19,7 +19,7 @@ def transform(self, target_space: TargetSpace):

class SequentialDomainReductionTransformer(DomainTransformer):
"""
A sequential domain reduction transformer bassed on the work by Stander, N. and Craig, K:
A sequential domain reduction transformer based on the work by Stander, N. and Craig, K:
"On the robustness of a simple domain reduction scheme for simulation‐based optimization"
"""

@@ -68,8 +68,8 @@ def initialize(self, target_space: TargetSpace) -> None:

self.r = self.contraction_rate * self.r

# check if the minimum window fits in the orignal bounds
self._window_bounds_compatiblity(self.original_bounds)
# check if the minimum window fits in the original bounds
self._window_bounds_compatibility(self.original_bounds)

def _update(self, target_space: TargetSpace) -> None:

@@ -121,7 +121,7 @@ def _trim(self, new_bounds: np.array, global_bounds: np.array) -> np.array:
new_bounds[i, 1] += ddw_r
return new_bounds

def _window_bounds_compatiblity(self, global_bounds: np.array) -> bool:
def _window_bounds_compatibility(self, global_bounds: np.array) -> bool:
"""Checks if global bounds are compatible with the minimum window sizes."""
for i, entry in enumerate(global_bounds):
global_window_width = abs(entry[1] - entry[0])
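For context, a usage sketch of the transformer whose spelling fixes appear above, following the package's documented pattern (objective, bounds, and the `minimum_window` value are illustrative):

```python
from bayes_opt import BayesianOptimization, SequentialDomainReductionTransformer

def f(x, y):
    return -x ** 2 - (y - 1) ** 2 + 1

# _window_bounds_compatibility() verifies this minimum window fits the bounds
bounds_transformer = SequentialDomainReductionTransformer(minimum_window=0.5)

optimizer = BayesianOptimization(
    f=f,
    pbounds={"x": (-10, 10), "y": (-10, 10)},
    bounds_transformer=bounds_transformer,  # contracts the search domain each step
)
optimizer.maximize(init_points=2, n_iter=10)
```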
2 changes: 1 addition & 1 deletion bayes_opt/logger.py
@@ -106,7 +106,7 @@ def _header(self, instance):
return line + "\n" + ("-" * self._header_length)

def _is_new_max(self, instance):
if instance.max["target"] is None:
if instance.max is None:
# During constrained optimization, there might not be a maximum
# value since the optimizer might've not encountered any points
# that fulfill the constraints.
4 changes: 4 additions & 0 deletions bayes_opt/observer.py
@@ -23,8 +23,12 @@ def __init__(self):
def _update_tracker(self, event, instance):
if event == Events.OPTIMIZATION_STEP:
self._iterations += 1

if instance.max is None:
return

current_max = instance.max

if (self._previous_max is None
or current_max["target"] > self._previous_max):
self._previous_max = current_max["target"]
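Both this tracker and the logger change above guard against `instance.max` being `None`, which can happen in constrained runs before any feasible point is found. A sketch of a custom observer applying the same guard; the class name is illustrative, not part of the package:

```python
from bayes_opt.event import Events

class MaxWatcher:
    """Subscriber exposing the update(event, instance) signature observers use."""

    def __init__(self):
        self._previous_max = None

    def update(self, event, instance):
        if event != Events.OPTIMIZATION_STEP:
            return
        current_max = instance.max
        if current_max is None:  # no feasible observation yet
            return
        if self._previous_max is None or current_max["target"] > self._previous_max:
            self._previous_max = current_max["target"]
            print("new max:", current_max)

# usage: optimizer.subscribe(Events.OPTIMIZATION_STEP, MaxWatcher())
```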
72 changes: 36 additions & 36 deletions bayes_opt/target_space.py
@@ -1,5 +1,3 @@
import warnings

import numpy as np
from .util import ensure_rng, NotUniqueError
from .util import Colours
@@ -214,7 +212,7 @@ def register(self, params, target, constraint_value=None):

def probe(self, params):
"""
Evaulates a single point x, to obtain the value y and then records them
Evaluates a single point x, to obtain the value y and then records them
as observations.
Notes
@@ -265,44 +263,46 @@ def random_sample(self):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel()

def _target_max(self):
"""Get maximum target value found.
If there is a constraint present, the maximum value that fulfills the
constraint is returned."""
if len(self.target) == 0:
return None

if self._constraint is None:
return self.target.max()

allowed = self._constraint.allowed(self._constraint_values)
if allowed.any():
return self.target[allowed].max()

return None

def max(self):
"""Get maximum target value found and corresponding parameters.
If there is a constraint present, the maximum value that fulfills the
constraint is returned."""
if self._constraint is None:
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
else:
allowed = self._constraint.allowed(self._constraint_values)
if allowed.any():
# Getting of all points that fulfill the constraints, find the
# one with the maximum value for the target function.
sorted = np.argsort(self.target)
idx = sorted[allowed[sorted]][-1]
# there must be a better way to do this, right?
res = {
'target': self.target[idx],
'params': dict(
zip(self.keys, self.params[idx])
),
'constraint': self._constraint_values[idx]
}
else:
res = {
'target': None,
'params': None,
'constraint': None
}
return res
target_max = self._target_max()

if target_max is None:
return None

target_max_idx = np.where(self.target == target_max)[0][0]

res = {
'target': target_max,
'params': dict(
zip(self.keys, self.params[target_max_idx])
)
}

if self._constraint is not None:
res['constraint'] = self._constraint_values[target_max_idx]

return res

def res(self):
"""Get all target values and constraint fulfillment for all parameters.
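The refactored `max()` above now returns `None` (instead of an empty dict) when no feasible observation exists, and otherwise builds its result from `_target_max()`. A small behavior sketch, with an illustrative objective:

```python
from bayes_opt import BayesianOptimization

def f(x):
    return -x ** 2

optimizer = BayesianOptimization(f=f, pbounds={"x": (-2, 2)}, verbose=0, random_state=1)

print(optimizer.max)  # None: nothing has been registered yet

optimizer.maximize(init_points=2, n_iter=3)
print(optimizer.max)  # e.g. {'target': ..., 'params': {'x': ...}}
```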
57 changes: 36 additions & 21 deletions bayes_opt/util.py
@@ -44,38 +44,53 @@ def acq_max(ac, gp, y_max, bounds, random_state, constraint=None, n_warmup=10000
:return: x_max, The arg max of the acquisition function.
"""

# We need to adjust the acquisition function to deal with constraints when there is some
if constraint is not None:
def adjusted_ac(x):
"""Acquisition function adjusted to fulfill the constraint when necessary"""

# Transforms the problem in a minimization problem, this is necessary
# because the solver we are using later on is a minimizer
values = -ac(x.reshape(-1, bounds.shape[0]), gp=gp, y_max=y_max)
p_constraints = constraint.predict(x.reshape(-1, bounds.shape[0]))

# Slower fallback for the case where any values are negative
if np.any(values > 0):
# TODO: This is not exactly how Gardner et al do it.
# Their way would require the result of the acquisition function
# to be strictly positive, which is not the case here. For a
# positive target value, we use Gardner's version. If the target
# is negative, we instead slightly rescale the target depending
# on the probability estimate to fulfill the constraint.
return np.array(
[
value / (0.5 + 0.5 * p) if value > 0 else value * p
for value, p in zip(values, p_constraints)
]
)

# Faster, vectorized version of Gardner et al's method
return values * p_constraints

else:
# Transforms the problem in a minimization problem, this is necessary
# because the solver we are using later on is a minimizer
adjusted_ac = lambda x: -ac(x.reshape(-1, bounds.shape[0]), gp=gp, y_max=y_max)

# Warm up with random points
x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_warmup, bounds.shape[0]))
ys = ac(x_tries, gp=gp, y_max=y_max)
ys = -adjusted_ac(x_tries)
x_max = x_tries[ys.argmax()]
max_acq = ys.max()

# Explore the parameter space more throughly
# Explore the parameter space more thoroughly
x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_iter, bounds.shape[0]))

if constraint is not None:
def to_minimize(x):
target = -ac(x.reshape(1, -1), gp=gp, y_max=y_max)
p_constraint = constraint.predict(x.reshape(1, -1))

# TODO: This is not exactly how Gardner et al do it.
# Their way would require the result of the acquisition function
# to be strictly positive (or negative), which is not the case
# here. For a negative target value, we use Gardner's version. If
# the target is positive, we instead slightly rescale the target
# depending on the probability estimate to fulfill the constraint.
if target < 0:
return target * p_constraint
else:
return target / (0.5 + p_constraint)
else:
to_minimize = lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max)

for x_try in x_seeds:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: to_minimize(x),
res = minimize(adjusted_ac,
x_try,
bounds=bounds,
method="L-BFGS-B")
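The unified `adjusted_ac` above weights the negated acquisition value by the predicted probability of feasibility: Gardner et al.'s multiplication when the value is non-positive, and a `value / (0.5 + 0.5 * p)` rescaling otherwise. A toy numeric trace of that branch, with made-up values:

```python
import numpy as np

values = np.array([-1.2, 0.8])        # negated acquisition values, -ac(x)
p_constraints = np.array([0.9, 0.2])  # predicted probability each point is feasible

adjusted = np.array(
    [value / (0.5 + 0.5 * p) if value > 0 else value * p
     for value, p in zip(values, p_constraints)]
)
print(adjusted)  # [-1.08  1.333...]: -1.2 * 0.9 and 0.8 / 0.6
```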
5 changes: 3 additions & 2 deletions examples/advanced-tour.ipynb
@@ -33,7 +33,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Let's start by defining our function, bounds, and instanciating an optimization object.\n",
"# Let's start by defining our function, bounds, and instantiating an optimization object.\n",
"def black_box_function(x, y):\n",
" return -x ** 2 - (y - 1) ** 2 + 1"
]
@@ -347,12 +347,13 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3.3 Changing kernels\n",
"\n",
"By default this package uses the Mattern 2.5 kernel. Depending on your use case you may find that tunning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems."
"By default this package uses the Matern 2.5 kernel. Depending on your use case you may find that tunning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems."
]
},
{
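Section 3.3 above mentions swapping the default Matern 2.5 kernel. A hedged sketch, assuming `set_gp_params`, which forwards keyword arguments to the underlying scikit-learn regressor; the RBF choice and length scale are illustrative, not a recommendation:

```python
from bayes_opt import BayesianOptimization
from sklearn.gaussian_process.kernels import RBF

optimizer = BayesianOptimization(
    f=lambda x, y: -x ** 2 - (y - 1) ** 2 + 1,
    pbounds={"x": (-2, 2), "y": (-3, 3)},
)
# Replace the default Matern(nu=2.5) kernel on the internal GP
optimizer.set_gp_params(kernel=RBF(length_scale=1.0))
```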
16 changes: 10 additions & 6 deletions examples/basic-tour.ipynb
@@ -16,12 +16,13 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Specifying the function to be optimized\n",
"\n",
"This is a function optimization package, therefore the first and most important ingreedient is, of course, the function to be optimized.\n",
"This is a function optimization package, therefore the first and most important ingredient is, of course, the function to be optimized.\n",
"\n",
"**DISCLAIMER:** We know exactly how the output of the function below depends on its parameter. Obviously this is just an example, and you shouldn't expect to know it in a real scenario. However, it should be clear that you don't need to. All you need in order to use this package (and more generally, this technique) is a function `f` that takes a known set of parameters and outputs a real number."
]
@@ -43,12 +44,13 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Getting Started\n",
"\n",
"All we need to get started is to instanciate a `BayesianOptimization` object specifying a function to be optimized `f`, and its parameters with their corresponding bounds, `pbounds`. This is a constrained optimization technique, so you must specify the minimum and maximum values that can be probed for each parameter in order for it to work"
"All we need to get started is to instantiate a `BayesianOptimization` object specifying a function to be optimized `f`, and its parameters with their corresponding bounds, `pbounds`. This is a constrained optimization technique, so you must specify the minimum and maximum values that can be probed for each parameter in order for it to work"
]
},
{
@@ -306,12 +308,13 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## 4. Saving, loading and restarting\n",
"\n",
"By default you can follow the progress of your optimization by setting `verbose>0` when instanciating the `BayesianOptimization` object. If you need more control over logging/alerting you will need to use an observer. For more information about observers checkout the advanced tour notebook. Here we will only see how to use the native `JSONLogger` object to save to and load progress from files.\n",
"By default you can follow the progress of your optimization by setting `verbose>0` when instantiating the `BayesianOptimization` object. If you need more control over logging/alerting you will need to use an observer. For more information about observers checkout the advanced tour notebook. Here we will only see how to use the native `JSONLogger` object to save to and load progress from files.\n",
"\n",
"### 4.1 Saving progress"
]
@@ -327,14 +330,15 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"The observer paradigm works by:\n",
"1. Instantiating an observer object.\n",
"2. Tying the observer object to a particular event fired by an optimizer.\n",
"\n",
"The `BayesianOptimization` object fires a number of internal events during optimization, in particular, everytime it probes the function and obtains a new parameter-target combination it will fire an `Events.OPTIMIZATION_STEP` event, which our logger will listen to.\n",
"The `BayesianOptimization` object fires a number of internal events during optimization, in particular, every time it probes the function and obtains a new parameter-target combination it will fire an `Events.OPTIMIZATION_STEP` event, which our logger will listen to.\n",
"\n",
"**Caveat:** The logger will not look back at previously probed points."
]
@@ -487,7 +491,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -501,7 +505,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
"version": "3.9.6"
}
},
"nbformat": 4,
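Condensing the basic tour's sections 1 and 2 into one runnable sketch, with values taken from the notebook's own example:

```python
from bayes_opt import BayesianOptimization

def black_box_function(x, y):
    # In a real use case the internals are unknown; only the signature matters.
    return -x ** 2 - (y - 1) ** 2 + 1

pbounds = {"x": (2, 4), "y": (-3, 3)}  # min/max required for every parameter

optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds=pbounds,
    verbose=2,       # verbose > 0 prints progress, per section 4
    random_state=1,
)
optimizer.maximize(init_points=2, n_iter=3)
print(optimizer.max)
```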
