From f10b46b16b553cca3eff9b7be26d3f82c55201c6 Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Wed, 28 Aug 2024 10:19:19 -0700
Subject: [PATCH] Merge create_single_objective_problem_from_botorch and
 create_multi_objective_problem_from_botorch; support constrained MOO (#2722)

Summary:
Pull Request resolved: https://github.com/facebook/Ax/pull/2722

Context: These functions have a lot of overlapping functionality. Combining
them makes it easier to extend their functionality (for example, by supporting
constrained MOO).

This PR:
* Combines `create_single_objective_problem_from_botorch` and
  `create_multi_objective_problem_from_botorch` into `create_problem_from_botorch`
* Reads `lower_is_better` off the test problem -- BoTorch test problems assume
  minimization unless `negate` is set to True -- instead of requiring the user
  to pass it (multi-objective problems used to always have
  `lower_is_better=True`, so this is more accurate and prevents mismatches)
* Adds support for constrained MOO problems from BoTorch
* Removes a stub for a constrained MOO problem that only existed to test for
  an exception that is no longer raised

Differential Revision: D61877865
---
 ax/benchmark/benchmark_problem.py            | 214 +++++++++----------
 ax/benchmark/problems/registry.py            |  59 ++---
 ax/benchmark/tests/test_benchmark.py         |   5 +-
 ax/benchmark/tests/test_benchmark_problem.py |  84 +++++---
 ax/utils/testing/benchmark_stubs.py          |  23 +-
 5 files changed, 173 insertions(+), 212 deletions(-)

diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index 491bba1640d..0ba9998bbfb 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -28,14 +28,11 @@
 from ax.core.types import ComparisonOp
 from ax.service.utils.best_point_mixin import BestPointMixin
 from ax.utils.common.base import Base
-from ax.utils.common.typeutils import checked_cast
 from botorch.test_functions.base import (
     BaseTestProblem,
     ConstrainedBaseTestProblem,
     MultiObjectiveTestProblem,
 )
-from botorch.test_functions.synthetic import SyntheticTestFunction
-from pyre_extensions import assert_is_instance
 
 
 def _get_name(
@@ -132,7 +129,33 @@ def get_opt_trace(self, experiment: Experiment) -> np.ndarray:
     )
 
 
-# TODO: Support constrained MOO problems.
+def _get_constraints(
+    num_constraints: int, observe_noise_sd: bool
+) -> list[OutcomeConstraint]:
+    """
+    NOTE: Currently we don't support the case where only some of the
+    outcomes have noise levels observed.
+    """
+    outcome_constraints = []
+
+    for i in range(num_constraints):
+        outcome_name = f"constraint_slack_{i}"
+        outcome_constraints.append(
+            OutcomeConstraint(
+                metric=BenchmarkMetric(
+                    name=outcome_name,
+                    lower_is_better=False,  # positive slack = feasible
+                    observe_noise_sd=observe_noise_sd,
+                    outcome_index=i,
+                ),
+                op=ComparisonOp.GEQ,
+                bound=0.0,
+                relative=False,
+            )
+        )
+    return outcome_constraints
+
+
 def get_soo_config_and_outcome_names(
     *,
     num_constraints: int,
@@ -151,35 +174,57 @@ def get_soo_config_and_outcome_names(
         minimize=lower_is_better,
     )
 
-    outcome_names = [objective_name]
-    outcome_constraints = []
-
-    # NOTE: Currently we don't support the case where only some of the
-    # outcomes have noise levels observed.
-
-    for i in range(num_constraints):
-        outcome_name = f"constraint_slack_{i}"
-        outcome_constraints.append(
-            OutcomeConstraint(
-                metric=BenchmarkMetric(
-                    name=outcome_name,
-                    lower_is_better=False,  # positive slack = feasible
-                    observe_noise_sd=observe_noise_sd,
-                    outcome_index=i,
-                ),
-                op=ComparisonOp.GEQ,
-                bound=0.0,
-                relative=False,
-            )
-        )
-        outcome_names.append(outcome_name)
+    outcome_constraints = _get_constraints(
+        num_constraints=num_constraints, observe_noise_sd=observe_noise_sd
+    )
+    constraint_names = [oc.metric.name for oc in outcome_constraints]
 
     opt_config = OptimizationConfig(
         objective=objective, outcome_constraints=outcome_constraints
     )
+    outcome_names = [objective_name] + constraint_names
     return opt_config, outcome_names
 
 
+def get_moo_opt_config_and_outcome_names(
+    *,
+    num_constraints: int,
+    lower_is_better: bool,
+    observe_noise_sd: bool,
+    objective_names: list[str],
+    ref_point: list[float],
+) -> tuple[MultiObjectiveOptimizationConfig, list[str]]:
+    metrics = [
+        BenchmarkMetric(
+            name=objective_name,
+            lower_is_better=lower_is_better,
+            observe_noise_sd=observe_noise_sd,
+            outcome_index=i,
+        )
+        for i, objective_name in enumerate(objective_names)
+    ]
+    constraints = _get_constraints(
+        num_constraints=num_constraints, observe_noise_sd=observe_noise_sd
+    )
+    optimization_config = MultiObjectiveOptimizationConfig(
+        objective=MultiObjective(
+            objectives=[Objective(metric=metric, minimize=True) for metric in metrics]
+        ),
+        objective_thresholds=[
+            ObjectiveThreshold(
+                metric=metric,
+                bound=ref_point[i],
+                relative=False,
+                op=ComparisonOp.LEQ,
+            )
+            for i, metric in enumerate(metrics)
+        ],
+        outcome_constraints=constraints,
+    )
+    outcome_names = objective_names + [oc.metric.name for oc in constraints]
+    return optimization_config, outcome_names
+
+
 def get_continuous_search_space(bounds: list[tuple[float, float]]) -> SearchSpace:
     return SearchSpace(
         parameters=[
@@ -194,18 +239,19 @@ def get_continuous_search_space(bounds: list[tuple[float, float]]) -> SearchSpac
     )
 
 
-def create_single_objective_problem_from_botorch(
-    test_problem_class: type[SyntheticTestFunction],
+def create_problem_from_botorch(
+    *,
+    test_problem_class: type[BaseTestProblem],
     test_problem_kwargs: dict[str, Any],
-    lower_is_better: bool,
     num_trials: int,
     observe_noise_sd: bool = False,
 ) -> BenchmarkProblem:
     """
-    Create a `BenchmarkProblem` whose `optimization_config` is a
-    `SingleObjectiveOptimizationConfig` a BoTorch SyntheticTestFunction using
-    specialized Metrics and Runners for benchmarking. The test problem's result
-    will be computed on the Runner and retrieved by the Metric.
+    Create a `BenchmarkProblem` from a BoTorch `BaseTestProblem`.
+
+    Uses specialized Metrics and Runners for benchmarking. The test problem's
+    result will be computed by the Runner, `BotorchTestProblemRunner`, and
+    retrieved by the Metric(s), which are `BenchmarkMetric`s.
 
     Args:
         test_problem_class: The BoTorch test problem class which will be used
@@ -223,18 +269,31 @@ def create_single_objective_problem_from_botorch(
     is_constrained = isinstance(test_problem, ConstrainedBaseTestProblem)
 
     search_space = get_continuous_search_space(test_problem._bounds)
+    lower_is_better = not test_problem.negate
 
     dim = test_problem_kwargs.get("dim", None)
+
+    n_obj = test_problem.num_objectives
     name = _get_name(
         test_problem=test_problem, observe_noise_sd=observe_noise_sd, dim=dim
     )
 
-    optimization_config, outcome_names = get_soo_config_and_outcome_names(
-        num_constraints=test_problem.num_constraints if is_constrained else 0,
-        lower_is_better=lower_is_better,
-        observe_noise_sd=observe_noise_sd,
-        objective_name=name,
-    )
+    num_constraints = test_problem.num_constraints if is_constrained else 0
+    if isinstance(test_problem, MultiObjectiveTestProblem):
+        optimization_config, outcome_names = get_moo_opt_config_and_outcome_names(
+            num_constraints=num_constraints,
+            lower_is_better=lower_is_better,
+            observe_noise_sd=observe_noise_sd,
+            objective_names=[f"{name}_{i}" for i in range(n_obj)],
+            ref_point=test_problem._ref_point,
+        )
+    else:
+        optimization_config, outcome_names = get_soo_config_and_outcome_names(
+            num_constraints=num_constraints,
+            lower_is_better=lower_is_better,
+            observe_noise_sd=observe_noise_sd,
+            objective_name=name,
+        )
 
     optimal_value = (
         test_problem.max_hv
@@ -254,80 +313,3 @@ def create_single_objective_problem_from_botorch(
         observe_noise_stds=observe_noise_sd,
         optimal_value=optimal_value,
     )
-
-
-def create_multi_objective_problem_from_botorch(
-    test_problem_class: type[MultiObjectiveTestProblem],
-    test_problem_kwargs: dict[str, Any],
-    # TODO: Figure out whether we should use `lower_is_better` here.
-    num_trials: int,
-    observe_noise_sd: bool = False,
-) -> BenchmarkProblem:
-    """Create a BenchmarkProblem from a BoTorch BaseTestProblem using specialized
-    Metrics and Runners. The test problem's result will be computed on the Runner
-    once per trial and each Metric will retrieve its own result by index.
-    """
-    if issubclass(test_problem_class, ConstrainedBaseTestProblem):
-        raise NotImplementedError(
-            "Constrained multi-objective problems are not supported."
-        )
-
-    # pyre-fixme [45]: Invalid class instantiation
-    test_problem = test_problem_class(**test_problem_kwargs)
-
-    dim = test_problem_kwargs.get("dim", None)
-    name = _get_name(
-        test_problem=test_problem, observe_noise_sd=observe_noise_sd, dim=dim
-    )
-
-    n_obj = test_problem.num_objectives
-    if not observe_noise_sd:
-        noise_sds = [None] * n_obj
-    elif isinstance(test_problem.noise_std, list):
-        noise_sds = test_problem.noise_std
-    else:
-        noise_sds = [checked_cast(float, test_problem.noise_std or 0.0)] * n_obj
-
-    metrics = [
-        BenchmarkMetric(
-            name=f"{name}_{i}",
-            lower_is_better=True,
-            observe_noise_sd=observe_noise_sd,
-            outcome_index=i,
-        )
-        for i, noise_sd in enumerate(noise_sds)
-    ]
-    optimization_config = MultiObjectiveOptimizationConfig(
-        objective=MultiObjective(
-            objectives=[Objective(metric=metric, minimize=True) for metric in metrics]
-        ),
-        objective_thresholds=[
-            ObjectiveThreshold(
-                metric=metric,
-                bound=test_problem.ref_point[i].item(),
-                relative=False,
-                op=ComparisonOp.LEQ,
-            )
-            for i, metric in enumerate(metrics)
-        ],
-    )
-    runner = BotorchTestProblemRunner(
-        test_problem_class=test_problem_class,
-        test_problem_kwargs=test_problem_kwargs,
-        outcome_names=[
-            objective.metric.name
-            for objective in assert_is_instance(
-                optimization_config.objective, MultiObjective
-            ).objectives
-        ],
-    )
-
-    return BenchmarkProblem(
-        name=name,
-        search_space=get_continuous_search_space(test_problem._bounds),
-        optimization_config=optimization_config,
-        runner=runner,
-        num_trials=num_trials,
-        observe_noise_stds=observe_noise_sd,
-        optimal_value=test_problem.max_hv,
-    )
diff --git a/ax/benchmark/problems/registry.py b/ax/benchmark/problems/registry.py
index 651d2244a03..992dec980e4 100644
--- a/ax/benchmark/problems/registry.py
+++ b/ax/benchmark/problems/registry.py
@@ -9,11 +9,7 @@
 from dataclasses import dataclass
 from typing import Any, Callable
 
-from ax.benchmark.benchmark_problem import (
-    BenchmarkProblem,
-    create_multi_objective_problem_from_botorch,
-    create_single_objective_problem_from_botorch,
-)
+from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch
 from ax.benchmark.problems.hd_embedding import embed_higher_dimension
 from ax.benchmark.problems.hpo.torchvision import (
     get_pytorch_cnn_torchvision_benchmark_problem,
@@ -31,27 +27,25 @@ class BenchmarkProblemRegistryEntry:
 
 BENCHMARK_PROBLEM_REGISTRY = {
     "ackley4": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
        factory_kwargs={
             "test_problem_class": synthetic.Ackley,
             "test_problem_kwargs": {"dim": 4},
-            "lower_is_better": True,
             "num_trials": 40,
             "observe_noise_sd": False,
         },
     ),
     "branin": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Branin,
             "test_problem_kwargs": {},
-            "lower_is_better": True,
             "num_trials": 30,
             "observe_noise_sd": False,
         },
     ),
     "branin_currin": BenchmarkProblemRegistryEntry(
-        factory_fn=create_multi_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": BraninCurrin,
             "test_problem_kwargs": {},
@@ -61,7 +55,7 @@ class BenchmarkProblemRegistryEntry:
     ),
     "branin_currin30": BenchmarkProblemRegistryEntry(
         factory_fn=lambda n, num_trials: embed_higher_dimension(
-            problem=create_multi_objective_problem_from_botorch(
+            problem=create_problem_from_botorch(
                 test_problem_class=BraninCurrin,
                 test_problem_kwargs={},
                 num_trials=num_trials,
@@ -72,41 +66,37 @@ class BenchmarkProblemRegistryEntry:
         factory_kwargs={"n": 30, "num_trials": 30},
     ),
     "griewank4": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Griewank,
             "test_problem_kwargs": {"dim": 4},
-            "lower_is_better": True,
             "num_trials": 40,
             "observe_noise_sd": False,
         },
     ),
     "hartmann3": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Hartmann,
             "test_problem_kwargs": {"dim": 3},
-            "lower_is_better": True,
             "num_trials": 30,
             "observe_noise_sd": False,
         },
     ),
     "hartmann6": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Hartmann,
             "test_problem_kwargs": {"dim": 6},
-            "lower_is_better": True,
             "num_trials": 35,
             "observe_noise_sd": False,
         },
     ),
     "hartmann30": BenchmarkProblemRegistryEntry(
         factory_fn=lambda n, num_trials: embed_higher_dimension(
-            problem=create_single_objective_problem_from_botorch(
+            problem=create_problem_from_botorch(
                 test_problem_class=synthetic.Hartmann,
                 test_problem_kwargs={"dim": 6},
-                lower_is_better=True,
                 num_trials=num_trials,
                 observe_noise_sd=False,
             ),
@@ -133,68 +123,62 @@ class BenchmarkProblemRegistryEntry:
         factory_kwargs={"num_trials": 50, "observe_noise_sd": False},
     ),
     "levy4": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Levy,
             "test_problem_kwargs": {"dim": 4},
-            "lower_is_better": True,
             "num_trials": 40,
             "observe_noise_sd": False,
         },
     ),
     "powell4": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Powell,
             "test_problem_kwargs": {"dim": 4},
-            "lower_is_better": True,
             "num_trials": 40,
             "observe_noise_sd": False,
         },
     ),
     "rosenbrock4": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Rosenbrock,
             "test_problem_kwargs": {"dim": 4},
-            "lower_is_better": True,
             "num_trials": 40,
             "observe_noise_sd": False,
         },
     ),
     "six_hump_camel": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.SixHumpCamel,
             "test_problem_kwargs": {},
-            "lower_is_better": True,
             "num_trials": 30,
             "observe_noise_sd": False,
         },
     ),
     "three_hump_camel": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.ThreeHumpCamel,
             "test_problem_kwargs": {},
-            "lower_is_better": True,
             "num_trials": 30,
             "observe_noise_sd": False,
         },
     ),
     # Problems where we observe the noise level
     "branin_observed_noise": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Branin,
             "test_problem_kwargs": {},
-            "lower_is_better": True,
             "num_trials": 20,
             "observe_noise_sd": True,
         },
     ),
     "branin_currin_observed_noise": BenchmarkProblemRegistryEntry(
-        factory_fn=create_multi_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": BraninCurrin,
             "test_problem_kwargs": {},
@@ -204,7 +188,7 @@ class BenchmarkProblemRegistryEntry:
     ),
     "branin_currin30_observed_noise": BenchmarkProblemRegistryEntry(
         factory_fn=lambda n, num_trials: embed_higher_dimension(
-            problem=create_multi_objective_problem_from_botorch(
+            problem=create_problem_from_botorch(
                 test_problem_class=BraninCurrin,
                 test_problem_kwargs={},
                 num_trials=num_trials,
@@ -215,21 +199,19 @@ class BenchmarkProblemRegistryEntry:
         factory_kwargs={"n": 30, "num_trials": 30},
     ),
     "hartmann6_observed_noise": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.Hartmann,
             "test_problem_kwargs": {"dim": 6},
-            "lower_is_better": True,
             "num_trials": 50,
             "observe_noise_sd": True,
         },
     ),
     "hartmann30_observed_noise": BenchmarkProblemRegistryEntry(
         factory_fn=lambda n, num_trials: embed_higher_dimension(
-            problem=create_single_objective_problem_from_botorch(
+            problem=create_problem_from_botorch(
                 test_problem_class=synthetic.Hartmann,
                 test_problem_kwargs={"dim": 6},
-                lower_is_better=True,
                 num_trials=num_trials,
                 observe_noise_sd=True,
             ),
@@ -242,11 +224,10 @@ class BenchmarkProblemRegistryEntry:
         factory_kwargs={"num_trials": 25, "observe_noise_sd": True},
     ),
     "constrained_gramacy_observed_noise": BenchmarkProblemRegistryEntry(
-        factory_fn=create_single_objective_problem_from_botorch,
+        factory_fn=create_problem_from_botorch,
         factory_kwargs={
             "test_problem_class": synthetic.ConstrainedGramacy,
             "test_problem_kwargs": {},
-            "lower_is_better": True,
             "num_trials": 50,
             "observe_noise_sd": True,
         },
diff --git a/ax/benchmark/tests/test_benchmark.py b/ax/benchmark/tests/test_benchmark.py
index 8fd7f987204..edf51b88f3d 100644
--- a/ax/benchmark/tests/test_benchmark.py
+++ b/ax/benchmark/tests/test_benchmark.py
@@ -19,7 +19,7 @@
     BenchmarkMethod,
     get_benchmark_scheduler_options,
 )
-from ax.benchmark.benchmark_problem import create_single_objective_problem_from_botorch
+from ax.benchmark.benchmark_problem import create_problem_from_botorch
 from ax.benchmark.benchmark_result import BenchmarkResult
 from ax.benchmark.methods.modular_botorch import get_sobol_botorch_modular_acquisition
 from ax.benchmark.problems.registry import get_problem
@@ -368,10 +368,9 @@ def test_benchmark_multiple_problems_methods(self) -> None:
         self.assertTrue((agg.score_trace[col] <= 100).all())
 
     def test_timeout(self) -> None:
-        problem = create_single_objective_problem_from_botorch(
+        problem = create_problem_from_botorch(
             test_problem_class=Branin,
             test_problem_kwargs={},
-            lower_is_better=True,
             num_trials=1000,  # Unachievable num_trials
         )
 
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index 5c6fca66a53..10bd3c0a166 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -9,19 +9,15 @@
 
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 
-from ax.benchmark.benchmark_problem import (
-    create_multi_objective_problem_from_botorch,
-    create_single_objective_problem_from_botorch,
-)
+from ax.benchmark.benchmark_problem import create_problem_from_botorch
 from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
+from ax.core.objective import MultiObjective
 from ax.core.optimization_config import MultiObjectiveOptimizationConfig
 from ax.core.types import ComparisonOp
 from ax.utils.common.testutils import TestCase
 from ax.utils.common.typeutils import checked_cast
-from ax.utils.testing.benchmark_stubs import (
-    get_constrained_multi_objective_benchmark_problem,
-)
-from botorch.test_functions.multi_objective import BraninCurrin
+from botorch.test_functions.base import ConstrainedBaseTestProblem
+from botorch.test_functions.multi_objective import BraninCurrin, ConstrainedBraninCurrin
 from botorch.test_functions.synthetic import (
     Ackley,
     ConstrainedGramacy,
@@ -40,10 +36,9 @@ def setUp(self) -> None:
 
     def test_single_objective_from_botorch(self) -> None:
         for botorch_test_problem in [Ackley(), ConstrainedHartmann(dim=6)]:
-            test_problem = create_single_objective_problem_from_botorch(
+            test_problem = create_problem_from_botorch(
                 test_problem_class=botorch_test_problem.__class__,
                 test_problem_kwargs={},
-                lower_is_better=True,
                 num_trials=1,
             )
 
@@ -117,33 +112,25 @@ def test_single_objective_from_botorch(self) -> None:
 
         self.assertEqual(repr(test_problem), expected_repr)
 
-    # pyre-fixme[56]: Invalid decoration. Pyre was not able to infer the type of
-    # argument `hypothesis.strategies.booleans()` to decorator factory
-    # `hypothesis.given`.
-    @given(
-        st.booleans(),
-        st.one_of(st.none(), st.just(0.1)),
-        st.one_of(st.none(), st.just(0.2), st.just([0.3, 0.4])),
-    )
-    def test_constrained_from_botorch(
+    def _test_constrained_from_botorch(
         self,
         observe_noise_sd: bool,
         objective_noise_std: Optional[float],
         constraint_noise_std: Optional[Union[float, list[float]]],
+        test_problem_class: type[ConstrainedBaseTestProblem],
     ) -> None:
-        ax_problem = create_single_objective_problem_from_botorch(
-            test_problem_class=ConstrainedGramacy,
+        ax_problem = create_problem_from_botorch(
+            test_problem_class=test_problem_class,
             test_problem_kwargs={
                 "noise_std": objective_noise_std,
                 "constraint_noise_std": constraint_noise_std,
             },
-            lower_is_better=True,
             num_trials=1,
             observe_noise_sd=observe_noise_sd,
         )
         runner = checked_cast(BotorchTestProblemRunner, ax_problem.runner)
         self.assertTrue(runner._is_constrained)
-        botorch_problem = checked_cast(ConstrainedGramacy, runner.test_problem)
+        botorch_problem = checked_cast(ConstrainedBaseTestProblem, runner.test_problem)
         self.assertEqual(botorch_problem.noise_std, objective_noise_std)
         self.assertEqual(botorch_problem.constraint_noise_std, constraint_noise_std)
         opt_config = ax_problem.optimization_config
@@ -152,9 +139,15 @@ def test_constrained_from_botorch(
             [constraint.metric.name for constraint in outcome_constraints],
             [f"constraint_slack_{i}" for i in range(botorch_problem.num_constraints)],
         )
 
+        objective = opt_config.objective
+        metric = (
+            objective.metrics[0]
+            if isinstance(objective, MultiObjective)
+            else objective.metric
+        )
         self.assertEqual(
-            checked_cast(BenchmarkMetric, opt_config.objective.metric).observe_noise_sd,
+            checked_cast(BenchmarkMetric, metric).observe_noise_sd,
             observe_noise_sd,
         )
 
@@ -165,9 +158,38 @@ def test_constrained_from_botorch(
             observe_noise_sd,
         )
 
+    # pyre-fixme[56]: Invalid decoration. Pyre was not able to infer the type of
+    # argument `hypothesis.strategies.booleans()` to decorator factory
+    # `hypothesis.given`.
+    @given(
+        st.booleans(),
+        st.one_of(st.none(), st.just(0.1)),
+        st.one_of(st.none(), st.just(0.2), st.just([0.3, 0.4])),
+    )
+    def test_constrained_soo_from_botorch(
+        self,
+        observe_noise_sd: bool,
+        objective_noise_std: Optional[float],
+        constraint_noise_std: Optional[Union[float, list[float]]],
+    ) -> None:
+        self._test_constrained_from_botorch(
+            observe_noise_sd=observe_noise_sd,
+            objective_noise_std=objective_noise_std,
+            constraint_noise_std=constraint_noise_std,
+            test_problem_class=ConstrainedGramacy,
+        )
+
+    def test_constrained_moo_from_botorch(self) -> None:
+        self._test_constrained_from_botorch(
+            observe_noise_sd=False,
+            objective_noise_std=None,
+            constraint_noise_std=None,
+            test_problem_class=ConstrainedBraninCurrin,
+        )
+
     def test_moo_from_botorch(self) -> None:
         test_problem = BraninCurrin()
-        branin_currin_problem = create_multi_objective_problem_from_botorch(
+        branin_currin_problem = create_problem_from_botorch(
             test_problem_class=test_problem.__class__,
             test_problem_kwargs={},
             num_trials=1,
@@ -208,18 +230,10 @@ def test_moo_from_botorch(self) -> None:
         ]
         self.assertEqual(reference_point, test_problem._ref_point)
 
-    def test_moo_from_botorch_constrained(self) -> None:
-        with self.assertRaisesRegex(
-            NotImplementedError,
-            "Constrained multi-objective problems are not supported.",
-        ):
-            get_constrained_multi_objective_benchmark_problem()
-
     def test_maximization_problem(self) -> None:
-        test_problem = create_single_objective_problem_from_botorch(
+        test_problem = create_problem_from_botorch(
             test_problem_class=Cosine8,
-            lower_is_better=False,
             num_trials=1,
-            test_problem_kwargs={},
+            test_problem_kwargs={"negate": True},
         )
         self.assertFalse(test_problem.optimization_config.objective.minimize)
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index 4cbe94c5eee..211dcfbd698 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -12,11 +12,7 @@
 import torch
 from ax.benchmark.benchmark_method import BenchmarkMethod
 from ax.benchmark.benchmark_metric import BenchmarkMetric
-from ax.benchmark.benchmark_problem import (
-    BenchmarkProblem,
-    create_multi_objective_problem_from_botorch,
-    create_single_objective_problem_from_botorch,
-)
+from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch
 from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
 from ax.benchmark.problems.surrogate import SurrogateBenchmarkProblem
 from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
@@ -40,7 +36,7 @@
 )
 from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
 from botorch.models.gp_regression import SingleTaskGP
-from botorch.test_functions.multi_objective import BraninCurrin, ConstrainedBraninCurrin
+from botorch.test_functions.multi_objective import BraninCurrin
 from botorch.test_functions.synthetic import Branin
 from pyre_extensions import assert_is_instance
 from torch.utils.data import Dataset
@@ -51,10 +47,9 @@ def get_single_objective_benchmark_problem(
     num_trials: int = 4,
     test_problem_kwargs: Optional[dict[str, Any]] = None,
 ) -> BenchmarkProblem:
-    return create_single_objective_problem_from_botorch(
+    return create_problem_from_botorch(
         test_problem_class=Branin,
         test_problem_kwargs=test_problem_kwargs or {},
-        lower_is_better=True,
         num_trials=num_trials,
         observe_noise_sd=observe_noise_sd,
     )
@@ -65,7 +60,7 @@ def get_multi_objective_benchmark_problem(
     num_trials: int = 4,
     test_problem_class: type[BraninCurrin] = BraninCurrin,
 ) -> BenchmarkProblem:
-    return create_multi_objective_problem_from_botorch(
+    return create_problem_from_botorch(
         test_problem_class=test_problem_class,
         test_problem_kwargs={},
         num_trials=num_trials,
@@ -73,16 +68,6 @@ def get_multi_objective_benchmark_problem(
     )
 
 
-def get_constrained_multi_objective_benchmark_problem(
-    observe_noise_sd: bool = False, num_trials: int = 4
-) -> BenchmarkProblem:
-    return get_multi_objective_benchmark_problem(
-        observe_noise_sd=observe_noise_sd,
-        num_trials=num_trials,
-        test_problem_class=ConstrainedBraninCurrin,
-    )
-
-
 def get_sobol_benchmark_method() -> BenchmarkMethod:
     return BenchmarkMethod(
         name="SOBOL",
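
A minimal sketch of calling the unified factory after this change. It mirrors
the call sites in the updated tests and registry entries above; the
`num_trials` values are illustrative, not prescribed.

from ax.benchmark.benchmark_problem import create_problem_from_botorch
from botorch.test_functions.multi_objective import ConstrainedBraninCurrin
from botorch.test_functions.synthetic import Branin, Cosine8

# Single-objective problem. `lower_is_better` is no longer passed by the
# caller; it is read off the test problem (BoTorch problems are minimized
# unless negate=True).
soo_problem = create_problem_from_botorch(
    test_problem_class=Branin,
    test_problem_kwargs={},
    num_trials=30,
)

# Maximization is expressed on the BoTorch problem itself via negate=True.
max_problem = create_problem_from_botorch(
    test_problem_class=Cosine8,
    test_problem_kwargs={"negate": True},
    num_trials=1,
)

# Constrained multi-objective problems now go through the same factory
# instead of raising NotImplementedError.
moo_problem = create_problem_from_botorch(
    test_problem_class=ConstrainedBraninCurrin,
    test_problem_kwargs={},
    num_trials=4,
)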