Fix warmup phase not considering constraints #416

Merged · 3 commits · May 10, 2023
2 changes: 0 additions & 2 deletions bayes_opt/target_space.py
@@ -1,5 +1,3 @@
-import warnings
-
 import numpy as np
 from .util import ensure_rng, NotUniqueError
 from .util import Colours
55 changes: 35 additions & 20 deletions bayes_opt/util.py
@@ -44,38 +44,53 @@ def acq_max(ac, gp, y_max, bounds, random_state, constraint=None, n_warmup=10000
     :return: x_max, The arg max of the acquisition function.
     """
 
+    # We need to adjust the acquisition function to deal with constraints when there are any
+    if constraint is not None:
+        def adjusted_ac(x):
+            """Acquisition function adjusted to fulfill the constraint when necessary"""
+
+            # Transform the problem into a minimization problem; this is
+            # necessary because the solver we use later on is a minimizer
+            values = -ac(x.reshape(-1, bounds.shape[0]), gp=gp, y_max=y_max)
+            p_constraints = constraint.predict(x.reshape(-1, bounds.shape[0]))
+
+            # Slower fallback for the case where any acquisition values are negative (i.e. any negated values are positive)
+            if np.any(values > 0):
+                # TODO: This is not exactly how Gardner et al do it.
+                # Their way would require the result of the acquisition function
+                # to be strictly positive, which is not the case here. For a
+                # positive acquisition value (a negative entry of `values`), we
+                # use Gardner's version; for a negative one, we instead rescale
+                # it slightly, depending on the estimated probability of fulfilling the constraint.
+                return np.array(
+                    [
+                        value / (0.5 + 0.5 * p) if value > 0 else value * p
+                        for value, p in zip(values, p_constraints)
+                    ]
+                )
+
+            # Faster, vectorized version of Gardner et al's method
+            return values * p_constraints
+
+    else:
+        # Transform the problem into a minimization problem; this is
+        # necessary because the solver we use later on is a minimizer
+        adjusted_ac = lambda x: -ac(x.reshape(-1, bounds.shape[0]), gp=gp, y_max=y_max)
+
     # Warm up with random points
     x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                    size=(n_warmup, bounds.shape[0]))
-    ys = ac(x_tries, gp=gp, y_max=y_max)
+    ys = -adjusted_ac(x_tries)
     x_max = x_tries[ys.argmax()]
     max_acq = ys.max()
 
     # Explore the parameter space more thoroughly
     x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                    size=(n_iter, bounds.shape[0]))
 
-    if constraint is not None:
-        def to_minimize(x):
-            target = -ac(x.reshape(1, -1), gp=gp, y_max=y_max)
-            p_constraint = constraint.predict(x.reshape(1, -1))
-
-            # TODO: This is not exactly how Gardner et al do it.
-            # Their way would require the result of the acquisition function
-            # to be strictly positive (or negative), which is not the case
-            # here. For a negative target value, we use Gardner's version. If
-            # the target is positive, we instead slightly rescale the target
-            # depending on the probability estimate to fulfill the constraint.
-            if target < 0:
-                return target * p_constraint
-            else:
-                return target / (0.5 + 0.5 * p_constraint)
-    else:
-        to_minimize = lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max)
-
     for x_try in x_seeds:
         # Find the minimum of minus the acquisition function
-        res = minimize(lambda x: to_minimize(x),
+        res = minimize(adjusted_ac,
                        x_try,
                        bounds=bounds,
                        method="L-BFGS-B")
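The gist of the fix: the constraint-aware weighting that previously applied only during the L-BFGS-B polish now also ranks the random warmup points. Here is a minimal standalone sketch of that weighting; the numbers and probabilities below are illustrative, not taken from the PR:

import numpy as np

# Illustrative negated acquisition values at five warmup points, as computed
# inside adjusted_ac, plus the constraint model's probability estimates.
values = np.array([-2.0, -0.5, 0.3, -1.2, 0.8])    # -ac(x)
p_constraints = np.array([0.9, 0.1, 0.5, 0.99, 0.2])

if np.any(values > 0):
    # Fallback branch: Gardner et al's weighting (value * p) where the
    # acquisition is positive (value <= 0), a mild rescaling otherwise.
    adjusted = np.array(
        [v / (0.5 + 0.5 * p) if v > 0 else v * p
         for v, p in zip(values, p_constraints)]
    )
else:
    # Vectorized Gardner et al: scale by the probability of feasibility.
    adjusted = values * p_constraints

ys = -adjusted       # back to "maximize" orientation, as in acq_max
print(ys.argmax())   # prints 0: high acquisition AND likely feasible wins

Before this change, the warmup line read ys = ac(x_tries, gp=gp, y_max=y_max), so all n_warmup random points were ranked purely by acquisition value, and the best warmup candidate could easily sit in an infeasible region.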
2 changes: 1 addition & 1 deletion tests/test_constraint.py
@@ -147,7 +147,7 @@ def constraint_function_2_dim(x, y):
     params = optimizer.res[0]["params"]
     x, y = params['x'], params['y']
 
-    assert constraint_function_2_dim(x, y) == approx(optimizer.constraint.approx(np.array([x, y])), rel=1e-5, abs=1e-5)
+    assert constraint_function_2_dim(x, y) == approx(optimizer.constraint.approx(np.array([x, y])), rel=1e-3, abs=1e-3)
 
 
 def test_kwargs_not_the_same():
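On the tolerance change: pytest's approx with both rel and abs treats two numbers as equal if either tolerance is satisfied, so loosening both to 1e-3 gives the GP-based optimizer.constraint.approx estimate slack both near zero and at larger magnitudes. A small illustrative check, with values invented here:

from pytest import approx

# With both tolerances given, approx passes when EITHER one is met.
assert 1.0005 == approx(1.0, rel=1e-3, abs=1e-3)   # relative tolerance covers it
assert 0.0004 == approx(0.0, rel=1e-3, abs=1e-3)   # absolute tolerance covers it near zero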
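For an end-to-end sanity check of the warmup fix, something along these lines should work. This is only a sketch, assuming a bayes_opt checkout with this PR applied: ucb and HalfSpaceConstraint are stand-ins invented here (the library ships its own acquisition helpers and constraint model), relying only on the acq_max signature shown in the diff above.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from bayes_opt.util import acq_max

def ucb(x, gp, y_max, kappa=2.576):
    """A simple UCB acquisition with the (x, gp, y_max) signature acq_max expects."""
    mean, std = gp.predict(x, return_std=True)
    return mean + kappa * std

class HalfSpaceConstraint:
    """Hypothetical constraint model: anything exposing .predict(x) that
    returns the estimated probability of the constraint holding at x."""
    def predict(self, x):
        x = np.atleast_2d(x)
        # Pretend the constraint is almost surely violated for x > 0.5.
        return np.where(x[:, 0] <= 0.5, 0.99, 0.01)

rng = np.random.RandomState(42)
X = rng.uniform(0, 1, size=(8, 1))
y = np.sin(6 * X[:, 0])
gp = GaussianProcessRegressor().fit(X, y)

bounds = np.array([[0.0, 1.0]])
x_max = acq_max(ucb, gp, y_max=y.max(), bounds=bounds,
                random_state=rng, constraint=HalfSpaceConstraint())

# With the patched warmup, the suggestion should favour the feasible
# region x <= 0.5 instead of chasing high acquisition values beyond it.
print(x_max)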