Even more fixes to unused kwargs
Summary:
X-link: pytorch/botorch#1985

See previous diff

Differential Revision: D48338443

fbshipit-source-id: 474b23d43b40421b3cc96bbea5cf95b329b46afc
esantorella authored and facebook-github-bot committed Aug 15, 2023
1 parent a42d8cd commit f2f81bf
Showing 6 changed files with 72 additions and 59 deletions.
1 change: 0 additions & 1 deletion ax/models/tests/test_botorch_moo_defaults.py
@@ -260,7 +260,6 @@ def test_get_ehvi(self, _) -> None:
                 X_pending=X_pending,
                 constraints=cons_tfs,
                 mc_samples=128,
-                qmc=True,
                 alpha=0.0,
                 seed=seed,
                 ref_point=new_obj_thresholds.tolist(),
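For context, once `qmc` is removed from the downstream signature, passing it raises a `TypeError` instead of being silently swallowed by `**kwargs` — which is why the test must stop sending it. A minimal sketch (illustrative, not Ax code):

```python
def get_acqf(mc_samples: int = 128) -> int:
    # Stand-in for the real constructor, which no longer takes `qmc`.
    return mc_samples

get_acqf(mc_samples=128)  # fine
try:
    get_acqf(qmc=True)  # removed keyword
except TypeError as err:
    print(err)  # got an unexpected keyword argument 'qmc'
```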
4 changes: 3 additions & 1 deletion ax/models/tests/test_botorch_moo_model.py
@@ -521,7 +521,9 @@ def test_BotorchMOOModel_with_qehvi(
                 "acquisition_function_kwargs": {
                     "cache_root": False,
                     "prune_baseline": False,
-                },
+                }
+                if use_qnehvi
+                else {},
             },
         )
         gen_results = model.gen(
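The test now sends `cache_root`/`prune_baseline` only when exercising qNEHVI, presumably because qEHVI accepts neither. The conditional-dict idiom in isolation (a sketch with hypothetical names):

```python
def acqf_kwargs(use_qnehvi: bool) -> dict:
    # Options only meaningful for the noisy variant; send nothing otherwise.
    return (
        {"cache_root": False, "prune_baseline": False}
        if use_qnehvi
        else {}
    )

print(acqf_kwargs(True))   # {'cache_root': False, 'prune_baseline': False}
print(acqf_kwargs(False))  # {}
```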
44 changes: 24 additions & 20 deletions ax/models/torch/botorch_defaults.py
@@ -12,7 +12,6 @@
 from ax.models.torch.utils import _to_inequality_constraints
 from ax.models.torch_base import TorchModel
 from ax.models.types import TConfig
-from ax.utils.common.constants import Keys
 from botorch.acquisition.acquisition import AcquisitionFunction
 from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
 from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective
@@ -63,6 +62,8 @@ def get_and_fit_model(
     use_input_warping: bool = False,
     use_loocv_pseudo_likelihood: bool = False,
     prior: Optional[Dict[str, Any]] = None,
+    *,
+    multitask_gp_ranks: Optional[Dict[str, Union[Prior, float]]] = None,
     **kwargs: Any,
 ) -> GPyTorchModel:
     r"""Instantiates and fits a botorch GPyTorchModel using the given data.
@@ -88,6 +89,7 @@
             - sd_prior: A scalar prior over nonnegative numbers, which is used for the
                 default LKJCovariancePrior task_covar_prior.
             - eta: The eta parameter on the default LKJ task_covar_prior.
+        kwargs: Passed to `_get_model`.
     Returns:
         A fitted GPyTorchModel.
@@ -160,7 +162,7 @@
             ]
     else:
         # use multi-task GP
-        mtgp_rank_dict = kwargs.pop("multitask_gp_ranks", {})
+        mtgp_rank_dict = {} if multitask_gp_ranks is None else multitask_gp_ranks
         # assembles list of ranks associated with each metric
         if len({len(Xs), len(Ys), len(Yvars), len(metric_names)}) > 1:
             raise ValueError(
@@ -237,7 +239,11 @@ def _get_acquisition_func(
     # pyre-fixme[24]: Generic type `dict` expects 2 type parameters, use
     #  `typing.Dict` to avoid runtime subscripting errors.
     mc_objective_kwargs: Optional[Dict] = None,
-    **kwargs: Any,
+    *,
+    chebyshev_scalarization: bool = False,
+    prune_baseline: bool = True,
+    mc_samples: int = 512,
+    marginalize_dim: Optional[int] = None,
 ) -> AcquisitionFunction:
     r"""Instantiates an acquisition function.
@@ -266,7 +272,6 @@
             For GenericMCObjective, leave it as None. For PenalizedMCObjective,
             it needs to be specified in the format of kwargs.
         mc_samples: The number of MC samples to use (default: 512).
-        qmc: If True, use qMC instead of MC (default: True).
         prune_baseline: If True, prune the baseline points for NEI (default: True).
         chebyshev_scalarization: Use augmented Chebyshev scalarization.
@@ -276,7 +281,7 @@
     if X_observed is None:
         raise ValueError(NO_FEASIBLE_POINTS_MESSAGE)
     # construct Objective module
-    if kwargs.get("chebyshev_scalarization", False):
+    if chebyshev_scalarization:
         with torch.no_grad():
             Y = model.posterior(X_observed).mean  # pyre-ignore [16]
         obj_tf = get_chebyshev_scalarization(weights=objective_weights, Y=Y)
@@ -312,13 +317,12 @@ def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
         objective=objective,
         X_observed=X_observed,
         X_pending=X_pending,
-        prune_baseline=kwargs.get("prune_baseline", True),
-        mc_samples=kwargs.get("mc_samples", 512),
-        qmc=kwargs.get("qmc", True),
+        prune_baseline=prune_baseline,
+        mc_samples=mc_samples,
         # pyre-fixme[6]: Expected `Optional[int]` for 9th param but got
         #  `Union[float, int]`.
         seed=torch.randint(1, 10000, (1,)).item(),
-        marginalize_dim=kwargs.get("marginalize_dim"),
+        marginalize_dim=marginalize_dim,
     )
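The same pattern inside the body: scattered `kwargs.get("name", default)` lookups become signature defaults, so the defaults live in one visible place and unknown names fail fast. A compact sketch (illustrative, not the real `_get_acquisition_func`):

```python
from typing import Any

def make_old(**kwargs: Any) -> dict:
    # Defaults buried in the body; `mc_sampels=256` would be silently ignored.
    return {
        "prune_baseline": kwargs.get("prune_baseline", True),
        "mc_samples": kwargs.get("mc_samples", 512),
    }

def make_new(*, prune_baseline: bool = True, mc_samples: int = 512) -> dict:
    # Defaults in the signature; misspelled keywords raise TypeError.
    return {"prune_baseline": prune_baseline, "mc_samples": mc_samples}

print(make_old(mc_sampels=256))  # {'prune_baseline': True, 'mc_samples': 512}
print(make_new(mc_samples=256))  # {'prune_baseline': True, 'mc_samples': 256}
```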


@@ -330,7 +334,11 @@ def scipy_optimizer(
     equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
     fixed_features: Optional[Dict[int, float]] = None,
     rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
-    **kwargs: Any,
+    *,
+    num_restarts: int = 20,
+    raw_samples: Optional[int] = None,
+    joint_optimization: bool = False,
+    options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
 ) -> Tuple[Tensor, Tensor]:
     r"""Optimizer using scipy's minimize module on a numpy-adaptor.
@@ -360,25 +368,21 @@
         values, where `i`-th element is the expected acquisition value
         conditional on having observed candidates `0,1,...,i-1`.
     """
-    num_restarts: int = kwargs.pop(Keys.NUM_RESTARTS, 20)
-    raw_samples: int = kwargs.pop(Keys.RAW_SAMPLES, 50 * num_restarts)
 
-    if kwargs.get("joint_optimization", False):
-        sequential = False
-    else:
-        sequential = True
-    options: Dict[str, Union[bool, float, int, str]] = {
+    sequential = not joint_optimization
+    optimize_acqf_options: Dict[str, Union[bool, float, int, str]] = {
         "batch_limit": 5,
         "init_batch_limit": 32,
     }
-    options.update(kwargs.get("options", {}))
+    if options is not None:
+        optimize_acqf_options.update(options)
     X, expected_acquisition_value = optimize_acqf(
         acq_function=acq_function,
         bounds=bounds,
         q=n,
         num_restarts=num_restarts,
-        raw_samples=raw_samples,
-        options=options,
+        raw_samples=50 * num_restarts if raw_samples is None else raw_samples,
+        options=optimize_acqf_options,
         inequality_constraints=inequality_constraints,
         equality_constraints=equality_constraints,
         fixed_features=fixed_features,
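One wrinkle above: the caller-facing `options` argument and the dict handed to `optimize_acqf` now have distinct names, with caller options layered over the defaults. The merge pattern on its own (hypothetical helper name):

```python
from typing import Dict, Optional, Union

OptionValue = Union[bool, float, int, str]

def merged_options(
    overrides: Optional[Dict[str, OptionValue]] = None,
) -> Dict[str, OptionValue]:
    # Defaults the optimizer always starts from...
    merged: Dict[str, OptionValue] = {"batch_limit": 5, "init_batch_limit": 32}
    # ...overridden by whatever the caller supplied, if anything.
    if overrides is not None:
        merged.update(overrides)
    return merged

print(merged_options({"batch_limit": 10}))
# {'batch_limit': 10, 'init_batch_limit': 32}
```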
13 changes: 11 additions & 2 deletions ax/models/torch/botorch_moo.py
@@ -316,13 +316,22 @@ def gen(
         )
         bounds_ = bounds_.transpose(0, 1)
         botorch_rounding_func = get_rounding_func(torch_opt_config.rounding_func)
-        if acf_options.get("random_scalarization", False) or acf_options.get(
+        if acf_options.pop("random_scalarization", False) or acf_options.get(
             "chebyshev_scalarization", False
         ):
             # If using a list of acquisition functions, the algorithm to generate
             # that list is configured by acquisition_function_kwargs.
+            if "random_scalarization_distribution" in acf_options:
+                randomize_weights_kws = {
+                    "random_scalarization_distribution": acf_options[
+                        "random_scalarization_distribution"
+                    ]
+                }
+                del acf_options["random_scalarization_distribution"]
+            else:
+                randomize_weights_kws = {}
             objective_weights_list = [
-                randomize_objective_weights(objective_weights, **acf_options)
+                randomize_objective_weights(objective_weights, **randomize_weights_kws)
                 for _ in range(n)
             ]
             acquisition_function_list = [
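Here `randomize_objective_weights` has been narrowed (see the utils.py hunk below) to accept only `random_scalarization_distribution`, so the caller extracts that one key instead of splatting all of `acf_options`. The extraction as a standalone sketch (hypothetical helper):

```python
from typing import Any, Dict

def pop_forwarded_kwargs(acf_options: Dict[str, Any]) -> Dict[str, Any]:
    # Keep only the key randomize_objective_weights understands;
    # everything else stays behind in acf_options.
    key = "random_scalarization_distribution"
    return {key: acf_options.pop(key)} if key in acf_options else {}

opts: Dict[str, Any] = {"random_scalarization_distribution": "simplex"}
print(pop_forwarded_kwargs(opts))  # {'random_scalarization_distribution': 'simplex'}
print(opts)                        # {}
```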
57 changes: 29 additions & 28 deletions ax/models/torch/botorch_moo_defaults.py
@@ -109,7 +109,12 @@ def get_NEHVI(
     outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     X_observed: Optional[Tensor] = None,
     X_pending: Optional[Tensor] = None,
-    **kwargs: Any,
+    *,
+    prune_baseline: bool = True,
+    mc_samples: int = DEFAULT_EHVI_MC_SAMPLES,
+    alpha: Optional[float] = None,
+    marginalize_dim: Optional[int] = None,
+    cache_root: bool = True,
 ) -> AcquisitionFunction:
     r"""Instantiates a qNoisyExpectedHyperVolumeImprovement acquisition function.
@@ -128,10 +133,8 @@
             that have been submitted for evaluation) present for all objective
             outcomes and outcomes that appear in the outcome constraints (if
             there are any).
-        mc_samples: The number of MC samples to use (default: 512).
-        qmc: If True, use qMC instead of MC (default: True).
         prune_baseline: If True, prune the baseline points for NEI (default: True).
-        chebyshev_scalarization: Use augmented Chebyshev scalarization.
+        mc_samples: The number of MC samples to use (default: 512).
     Returns:
         qNoisyExpectedHyperVolumeImprovement: The instantiated acquisition function.
@@ -151,26 +154,24 @@
     else:
         cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
     num_objectives = objective_thresholds.shape[0]
+    if alpha is None:
+        alpha = get_default_partitioning_alpha(num_objectives=num_objectives)
     return get_acquisition_function(
         acquisition_function_name="qNEHVI",
         model=model,
         objective=objective,
         X_observed=X_observed,
         X_pending=X_pending,
         constraints=cons_tfs,
-        prune_baseline=kwargs.get("prune_baseline", True),
-        mc_samples=kwargs.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES),
-        alpha=kwargs.get(
-            "alpha", get_default_partitioning_alpha(num_objectives=num_objectives)
-        ),
-        qmc=kwargs.get("qmc", True),
+        prune_baseline=prune_baseline,
+        mc_samples=mc_samples,
+        alpha=alpha,
         # pyre-fixme[6]: Expected `Optional[int]` for 11th param but got
         #  `Union[float, int]`.
         seed=torch.randint(1, 10000, (1,)).item(),
         ref_point=objective_thresholds.tolist(),
-        marginalize_dim=kwargs.get("marginalize_dim"),
-        match_right_most_batch_dim=kwargs.get("match_right_most_batch_dim", False),
-        cache_root=kwargs.get("cache_root", True),
+        marginalize_dim=marginalize_dim,
+        cache_root=cache_root,
     )
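`alpha` defaults to `None` rather than to the partitioning default because the real default depends on runtime data (the number of objectives), which is unknown at signature time. The resolution pattern, sketched with placeholder values that are not botorch's exact rule:

```python
from typing import Optional

def resolve_alpha(alpha: Optional[float], num_objectives: int) -> float:
    # None means "compute the default for this many objectives";
    # the cutoff and values below are illustrative only.
    if alpha is None:
        return 0.0 if num_objectives <= 4 else 1e-5
    return alpha

print(resolve_alpha(None, 2))   # 0.0 (default kicks in)
print(resolve_alpha(0.01, 6))   # 0.01 (caller's value wins)
```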


@@ -181,7 +182,9 @@ def get_EHVI(
     outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     X_observed: Optional[Tensor] = None,
     X_pending: Optional[Tensor] = None,
-    **kwargs: Any,
+    *,
+    mc_samples: int = DEFAULT_EHVI_MC_SAMPLES,
+    alpha: Optional[float] = None,
 ) -> AcquisitionFunction:
     r"""Instantiates a qExpectedHyperVolumeImprovement acquisition function.
@@ -204,7 +207,6 @@
             outcomes and outcomes that appear in the outcome constraints (if
             there are any).
         mc_samples: The number of MC samples to use (default: 512).
-        qmc: If True, use qMC instead of MC (default: True).
     Returns:
         qExpectedHypervolumeImprovement: The instantiated acquisition function.
@@ -233,11 +235,10 @@
         X_observed=X_observed,
         X_pending=X_pending,
         constraints=cons_tfs,
-        mc_samples=kwargs.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES),
-        qmc=kwargs.get("qmc", True),
-        alpha=kwargs.get(
-            "alpha", get_default_partitioning_alpha(num_objectives=num_objectives)
-        ),
+        mc_samples=mc_samples,
+        alpha=get_default_partitioning_alpha(num_objectives=num_objectives)
+        if alpha is None
+        else alpha,
         # pyre-fixme[6]: Expected `Optional[int]` for 10th param but got
         #  `Union[float, int]`.
         seed=torch.randint(1, 10000, (1,)).item(),
@@ -253,7 +254,9 @@ def scipy_optimizer_list(
     inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
     fixed_features: Optional[Dict[int, float]] = None,
     rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
-    **kwargs: Any,
+    num_restarts: int = 20,
+    raw_samples: Optional[int] = None,
+    options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
 ) -> Tuple[Tensor, Tensor]:
     r"""Sequential optimizer using scipy's minimize module on a numpy-adaptor.
@@ -281,22 +284,20 @@
         values, where `i`-th element is the expected acquisition value
         conditional on having observed candidates `0,1,...,i-1`.
     """
-    num_restarts: int = kwargs.pop(Keys.NUM_RESTARTS, 20)
-    raw_samples: int = kwargs.pop(Keys.RAW_SAMPLES, 50 * num_restarts)
-
     # Use SLSQP by default for small problems since it yields faster wall times.
-    options: Dict[str, Union[bool, float, int, str]] = {
+    optimize_options: Dict[str, Union[bool, float, int, str]] = {
         "batch_limit": 5,
         "init_batch_limit": 32,
         "method": "SLSQP",
     }
-    options.update(kwargs.get("options", {}))
+    if options is not None:
+        optimize_options.update(options)
     X, expected_acquisition_value = optimize_acqf_list(
         acq_function_list=acq_function_list,
         bounds=bounds,
         num_restarts=num_restarts,
-        raw_samples=raw_samples,
-        options=options,
+        raw_samples=50 * num_restarts if raw_samples is None else raw_samples,
+        options=optimize_options,
         inequality_constraints=inequality_constraints,
         fixed_features=fixed_features,
         post_processing_func=rounding_func,
12 changes: 5 additions & 7 deletions ax/models/torch/utils.py
@@ -644,22 +644,20 @@ def predict_from_model(model: Model, X: Tensor) -> Tuple[Tensor, Tensor]:
 
 # TODO(jej): Possibly refactor to use "objective_directions".
 def randomize_objective_weights(
-    objective_weights: Tensor, **acquisition_function_kwargs: Any
+    objective_weights: Tensor,
+    random_scalarization_distribution: str = SIMPLEX,
 ) -> Tensor:
     """Generate a random weighting based on acquisition function settings.
     Args:
-        objective_weights: Base weights to multiply by random values..
-        **acquisition_function_kwargs: Kwargs containing weight generation algorithm
-            options.
+        objective_weights: Base weights to multiply by random values.
+        random_scalarization_distribution: "simplex" or "hypersphere".
     Returns:
         A normalized list of indices such that each index is between `0` and `d-1`.
     """
     # Set distribution and sample weights.
-    distribution = acquisition_function_kwargs.get(
-        "random_scalarization_distribution", SIMPLEX
-    )
+    distribution = random_scalarization_distribution
     dtype = objective_weights.dtype
     device = objective_weights.device
     if distribution == SIMPLEX:
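For reference, the `SIMPLEX` branch draws a random direction on the probability simplex and rescales the base weights by it. A self-contained reimplementation of that idea (not the Ax source, which delegates the sampling to a library helper):

```python
import torch

def randomize_weights_simplex(objective_weights: torch.Tensor) -> torch.Tensor:
    # Uniform sample from the probability simplex via sorted-uniform gaps,
    # then elementwise rescaling of the base weights.
    d = objective_weights.numel()
    cuts = torch.sort(torch.rand(d - 1)).values
    padded = torch.cat([torch.zeros(1), cuts, torch.ones(1)])
    simplex_sample = padded[1:] - padded[:-1]  # nonnegative, sums to 1
    return objective_weights * simplex_sample

torch.manual_seed(0)
print(randomize_weights_simplex(torch.tensor([1.0, -1.0, 1.0])))
```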
