From f609bafe27a6a380b986e9764338ccf669a51515 Mon Sep 17 00:00:00 2001
From: Carles Sala
Date: Wed, 12 Feb 2020 01:44:27 -0500
Subject: [PATCH 1/8] Fix BTBSession and integration tests

---
 btb/session.py                        |  82 +++++++------
 btb/tuning/__init__.py                |   2 +
 tests/integration/test_benchmark.py   |  13 +++
 tests/integration/test_integration.py |   5 +-
 tests/integration/test_session.py     | 161 ++++++++++++++++++++++++++
 tests/integration/test_tuning.py      |  24 ++++
 tests/test_session.py                 |   7 +-
 7 files changed, 248 insertions(+), 46 deletions(-)
 create mode 100644 tests/integration/test_benchmark.py
 create mode 100644 tests/integration/test_session.py
 create mode 100644 tests/integration/test_tuning.py

diff --git a/btb/session.py b/btb/session.py
index 22538af3..ad38796b 100644
--- a/btb/session.py
+++ b/btb/session.py
@@ -30,46 +30,40 @@ class BTBSession:
         the tunables that have reached as many errors as the user specified.
 
     Attributes:
-        _tunables (dict):
+        best_proposal (dict):
+            Best configuration found with the name of the tunable and the hyperparameters
+            and crossvalidated score obtained for it.
+        best_score (float):
+            Best score obtained for this session so far.
+        proposals (dict):
+            Dictionary containing all the proposals generated by the ``BTBSession``.
+        iterations (int):
+            Amount of iterations run.
+        errors (list):
+            A list with produced errors during the session.
+
+    Args:
+        tunables (dict):
             Python dictionary that has as keys the name of the tunable and as value a
             dictionary with the tunable hyperparameters or a ``btb.tuning.tunable.Tunable``
             instance.
-        _scorer (callable object / function):
+        scorer (callable object / function):
            A callable object or function with signature ``scorer(tunable_name, config)``
            which should return only a single value.
-        _tuner_class (btb.tuning.tuner.BaseTuner):
+        tuner_class (btb.tuning.tuner.BaseTuner):
            A tuner based on BTB ``BaseTuner`` class. This tuner will manage the new proposals.
            Defaults to ``btb.tuning.tuners.gaussian_process.GPTuner``
-        _selector_class (btb.selection.selector.Selector):
+        selector_class (btb.selection.selector.Selector):
            A selector based on BTB ``Selector`` class. This will determine which one of
            the tunables is performing better, and which one to test next. Defaults to
            ``btb.selection.selectors.ucb1.UCB1``
-        _maximize (bool):
+        maximize (bool):
            If ``True`` the scores are interpreted as bigger is better, if ``False`` then
            smaller is better; this should depend on the problem type (maximization or
            minimization). Defaults to ``True``.
-        _max_erors (int):
+        max_errors (int):
            Amount of errors allowed for a tunable to not generate a score. Once this
            amount of errors is reached, the tunable will be removed from the list.
            Defaults to 1.
-        best_proposal (dict):
-            Best configuration found with the name of the tunable and the hyperparameters
-            and crossvalidated score obtained for it.
-        best_score (float):
-            Best score obtained for this session so far.
-        proposals (dict):
-            Dictionary containing all the proposals generated by the ``BTBSession``.
-        iterations (int):
-            Amount of iterations run.
-        errors (list):
-            A list with produced errors during the session.
-        _best_normalized (float):
-            Best normalized score obtained.
-        _tunable_names (list):
-            A list that contains the tunables that still have proposals.
-        _normalized_scores (defaultdict):
-            Dictionary with the name of the tunables and the obtained normalized scores.
-        _tuners (dict):
-            The name of the tunable and the tuner instance to which this belongs.
         verbose (bool):
            If ``True`` a progress bar will be displayed for the ``run`` process.
     """

@@ -79,6 +73,11 @@ class BTBSession:
     _selector = None
     _maximize = None
     _max_errors = None
+    _best_normalized = None
+    _tunable_names = None
+    _normalized_scores = None
+    _tuners = None
+    _range = None

     best_proposal = None
     best_score = None
@@ -86,12 +85,6 @@ class BTBSession:
     iterations = None
     errors = None

-    _best_normalized = None
-    _tunable_names = None
-    _normalized_scores = None
-    _tuners = None
-    _range = None
-
     def _normalize(self, score):
         if score is not None:
             return score if self._maximize else -score
@@ -113,7 +106,7 @@ def __init__(self, tunables, scorer, tuner_class=GPTuner, selector_class=UCB1,
         self.errors = Counter()

         self.best_score = None
-        self._best_normalized = self._normalize(-np.inf)
+        self._best_normalized = -np.inf
         self._normalized_scores = defaultdict(list)
         self._tuners = dict()
         self._range = trange if verbose else range
@@ -149,6 +142,21 @@ def _make_id(self, name, config):

         return md5(hashable).hexdigest()

+    def _remove_tunable(self, tunable_name):
+        """Remove a tunable from the candidates list.
+
+        This is necessary when:
+            - Duplicates are not allowed and the tunable has exhausted all its
+              configurations.
+            - The tunable has failed more than ``max_errors`` times.
+
+        When this happens, the tunable is removed from the tunables dict
+        and its scores are removed from the normalized_scores dict used by
+        the selectors.
+        """
+        self._normalized_scores.pop(tunable_name, None)
+        self._tunables.pop(tunable_name, None)
+
     def propose(self):
         """Propose a new configuration to score.

@@ -178,11 +186,11 @@ def propose(self):
             StopTuning:
                 If the ``BTBSession`` has run out of proposals to generate.
         """
-        if not self._tunable_names:
+        if not self._tunables:
             raise StopTuning('There are no tunables left to try.')

         if len(self._tuners) < len(self._tunable_names):
-            tunable_name = self._tunable_names[len(self._normalized_scores)]
+            tunable_name = self._tunable_names[len(self._tuners)]
             tunable = self._tunables[tunable_name]

             if isinstance(tunable, dict):
@@ -206,8 +214,7 @@ def propose(self):

             except StopTuning:
                 LOGGER.info('%s has no more configs to propose.' % tunable_name)
-                self._normalized_scores.pop(tunable_name, None)
-                self._tunable_names.remove(tunable_name)
+                self._remove_tunable(tunable_name)
                 tunable_name, config = self.propose()

         proposal_id = self._make_id(tunable_name, config)
@@ -234,8 +241,7 @@ def handle_error(self, tunable_name):

         if errors >= self._max_errors:
             LOGGER.warning('Too many errors: %s. Removing tunable %s', errors, tunable_name)
-            self._normalized_scores.pop(tunable_name, None)
-            self._tunable_names.remove(tunable_name)
+            self._remove_tunable(tunable_name)

     def record(self, tunable_name, config, score):
         """Record the configuration and the obtained score to the tuner.
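For reviewers following the docstring reorganization above, this is a minimal sketch of how the ``tunables`` dict and the ``scorer`` callable documented in the new ``Args`` section fit together. The tunable name, hyperparameter name and scoring logic are made up for illustration; only the ``BTBSession`` API itself comes from this patch and the tests it adds::

    from btb.session import BTBSession

    # Hypothetical search space: one tunable with a single integer hyperparameter,
    # using the same spec format exercised by tests/integration/test_session.py.
    tunables = {
        'demo_pipeline': {
            'an_int_param': {'type': 'int', 'default': 0, 'range': [0, 10]}
        }
    }

    def scorer(tunable_name, config):
        # Must return a single numeric score; bigger is better unless
        # BTBSession is created with maximize=False.
        return config['an_int_param']

    session = BTBSession(tunables, scorer, max_errors=3)
    best = session.run(5)  # run five tuning iterations
    print(best['name'], best['config'])
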
diff --git a/btb/tuning/__init__.py b/btb/tuning/__init__.py index 54ec2461..41c47c5c 100644 --- a/btb/tuning/__init__.py +++ b/btb/tuning/__init__.py @@ -6,6 +6,7 @@ from btb.tuning.hyperparams.categorical import CategoricalHyperParam from btb.tuning.hyperparams.numerical import FloatHyperParam, IntHyperParam from btb.tuning.tunable import Tunable +from btb.tuning.tuners.base import StopTuning from btb.tuning.tuners.gaussian_process import GPEiTuner, GPTuner from btb.tuning.tuners.uniform import UniformTuner @@ -16,6 +17,7 @@ 'GPTuner', 'FloatHyperParam', 'IntHyperParam', + 'StopTuning', 'Tunable', 'UniformTuner', ) diff --git a/tests/integration/test_benchmark.py b/tests/integration/test_benchmark.py new file mode 100644 index 00000000..3342c398 --- /dev/null +++ b/tests/integration/test_benchmark.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- + +from btb.benchmark import benchmark +from btb.benchmark.challenges import Rosenbrock +from btb.benchmark.tuners.btb import make_tuning_function +from btb.tuning import GPTuner + + +def test_benchmark_rosenbrock(): + candidate = make_tuning_function(GPTuner) + benchmark(candidate, challenges=Rosenbrock(), iterations=1) + + # TODO: Add asserts diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py index 54b43818..dfd108ad 100644 --- a/tests/integration/test_integration.py +++ b/tests/integration/test_integration.py @@ -21,17 +21,14 @@ def test_benchmark_rosenbrock(): benchmark(candidate, challenges=Rosenbrock(), iterations=1) -def test_tunable_tuner(): - +def test_tuning(): hyperparams = { 'bhp': BooleanHyperParam(default=False), 'chp': CategoricalHyperParam(choices=['a', 'b', None], default=None), 'fhp': FloatHyperParam(min=0.1, max=1.0, default=0.5), 'ihp': IntHyperParam(min=-1, max=1) } - tunable = Tunable(hyperparams) - tuner = GPEiTuner(tunable) for _ in range(10): diff --git a/tests/integration/test_session.py b/tests/integration/test_session.py new file mode 100644 index 00000000..c556ac98 --- /dev/null +++ b/tests/integration/test_session.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- + +from unittest import TestCase + +import pytest + +from btb.session import BTBSession +from btb.tuning import StopTuning + + +class BTBSessionTest(TestCase): + + @staticmethod + def scorer(name, proposal): + """score = name length + parameter. 
+ + best proposal will be `a_tunable + a_parameter=0` + """ + return len(name) + proposal['a_parameter'] + + def test_stop(self): + tunables = { + 'a_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + } + } + + session = BTBSession(tunables, self.scorer) + + with pytest.raises(StopTuning): + session.run() + + def test_maximize(self): + tunables = { + 'a_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + } + } + + session = BTBSession(tunables, self.scorer) + + best = session.run(3) + + assert best == session.best_proposal + + assert best['name'] == 'a_tunable' + assert best['config'] == {'a_parameter': 2} + + def test_minimize(self): + tunables = { + 'a_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + } + } + + session = BTBSession(tunables, self.scorer, maximize=False) + + best = session.run(3) + + assert best == session.best_proposal + assert best['name'] == 'a_tunable' + assert best['config'] == {'a_parameter': 0} + + def test_multiple(self): + tunables = { + 'a_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + }, + 'another_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + } + } + + session = BTBSession(tunables, self.scorer) + + best = session.run(6) + + assert best['name'] == 'another_tunable' + assert best['config'] == {'a_parameter': 2} + + def test_errors(self): + tunables = { + 'a_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + }, + 'another_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + } + } + + def scorer(name, proposal): + if name == 'another_tunable': + raise Exception() + else: + return proposal['a_parameter'] + + session = BTBSession(tunables, scorer) + + best = session.run(4) + + assert best['name'] == 'a_tunable' + assert best['config'] == {'a_parameter': 2} + + # def test_accept_errors(self): + # tunables = { + # 'a_tunable': { + # 'a_parameter': { + # 'type': 'int', + # 'default': 0, + # 'range': [0, 2] + # } + # }, + # 'another_tunable': { + # 'a_parameter': { + # 'type': 'int', + # 'default': 0, + # 'range': [0, 2] + # } + # } + # } + + # def scorer(name, proposal): + # if name == 'another_tunable': + # raise Exception() + # else: + # return proposal['a_parameter'] + + # session = BTBSession(tunables, scorer) + + # best = session.run(6) + + # assert best['name'] == 'a_tunable' + # assert best['config'] == {'a_parameter': 2} diff --git a/tests/integration/test_tuning.py b/tests/integration/test_tuning.py new file mode 100644 index 00000000..4e866936 --- /dev/null +++ b/tests/integration/test_tuning.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +import random + +from btb.tuning import GPTuner, Tunable +from btb.tuning.hyperparams import ( + BooleanHyperParam, CategoricalHyperParam, FloatHyperParam, IntHyperParam) + + +def test_tuning(): + hyperparams = { + 'bhp': BooleanHyperParam(default=False), + 'chp': CategoricalHyperParam(choices=['a', 'b', None], default=None), + 'fhp': FloatHyperParam(min=0.1, max=1.0, default=0.5), + 'ihp': IntHyperParam(min=-1, max=1) + } + tunable = Tunable(hyperparams) + tuner = GPTuner(tunable) + + for _ in range(10): + proposed = tuner.propose(1) + tuner.record(proposed, random.random()) + + # TODO add asserts diff --git a/tests/test_session.py b/tests/test_session.py index 5d54e659..6bba5cf0 100644 --- a/tests/test_session.py +++ b/tests/test_session.py @@ -81,7 +81,7 @@ 
def test___init__custom(self): assert instance._scorer is scorer assert instance._tuner_class == 'my_tuner' assert instance._max_errors == 2 - assert instance._best_normalized == np.inf + assert instance._best_normalized == -np.inf assert instance._normalized_scores == defaultdict(list) assert instance._tuners == {} assert instance._tunable_names == ['my_test_tuner'] @@ -115,7 +115,7 @@ def test__make_dumpable(self): def test_propose_no_tunables(self): # setup instance = MagicMock(spec_set=BTBSession) - instance._tunable_names = None + instance._tunables = None # run with self.assertRaises(StopTuning): @@ -241,8 +241,7 @@ def test_handle_error_errors_gt_max_errors(self): BTBSession.handle_error(instance, 'test') # assert - instance._normalized_scores.pop.assert_called_once_with('test', None) - instance._tunable_names.remove.assert_called_once_with('test') + instance._remove_tunable.assert_called_once_with('test') def test_record_score_is_none(self): # setup From 6a08dc3cf1b68b35630cae6a87783aec4e2c9f83 Mon Sep 17 00:00:00 2001 From: Carles Sala Date: Wed, 12 Feb 2020 02:06:52 -0500 Subject: [PATCH 2/8] Add support for another edge case --- btb/session.py | 9 +++- tests/integration/test_session.py | 73 ++++++++++++++++++------------- 2 files changed, 51 insertions(+), 31 deletions(-) diff --git a/btb/session.py b/btb/session.py index ad38796b..43ff1876 100644 --- a/btb/session.py +++ b/btb/session.py @@ -206,7 +206,14 @@ def propose(self): self._tuners[tunable_name] = self._tuner_class(tunable) else: - tunable_name = self._selector.select(self._normalized_scores) + if self._normalized_scores: + tunable_name = self._selector.select(self._normalized_scores) + else: + # if _normalized_scores is still empty the selector crashes + # this happens when max_errors > 1, all tunables have tuners + # and all previous trials have crashed. 
+ tunable_name = np.random.choice(list(self._tuners.keys())) + tuner = self._tuners[tunable_name] try: LOGGER.info('Generating new proposal configuration for %s', tunable_name) diff --git a/tests/integration/test_session.py b/tests/integration/test_session.py index c556ac98..44371620 100644 --- a/tests/integration/test_session.py +++ b/tests/integration/test_session.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from unittest import TestCase +from unittest.mock import Mock import pytest @@ -129,33 +130,45 @@ def scorer(name, proposal): assert best['name'] == 'a_tunable' assert best['config'] == {'a_parameter': 2} - # def test_accept_errors(self): - # tunables = { - # 'a_tunable': { - # 'a_parameter': { - # 'type': 'int', - # 'default': 0, - # 'range': [0, 2] - # } - # }, - # 'another_tunable': { - # 'a_parameter': { - # 'type': 'int', - # 'default': 0, - # 'range': [0, 2] - # } - # } - # } - - # def scorer(name, proposal): - # if name == 'another_tunable': - # raise Exception() - # else: - # return proposal['a_parameter'] - - # session = BTBSession(tunables, scorer) - - # best = session.run(6) - - # assert best['name'] == 'a_tunable' - # assert best['config'] == {'a_parameter': 2} + @pytest.mark.skip(reason="This is not implemented yet") + def test_allow_duplicates(self): + tunables = { + 'a_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 2] + } + } + } + + session = BTBSession(tunables, self.scorer, allow_duplicates=True) + + best = session.run(10) + + assert best['name'] == 'another_tunable' + assert best['config'] == {'a_parameter': 2} + + def test_allow_errors(self): + tunables = { + 'a_tunable': { + 'a_parameter': { + 'type': 'int', + 'default': 0, + 'range': [0, 1] + } + } + } + + scorer = Mock() + scorer.side_effect = [ + Exception, + Exception, + 1 + ] + session = BTBSession(tunables, scorer, max_errors=3) + + best = session.run(3) + + assert best['name'] == 'a_tunable' + assert best['config'] == {'a_parameter': 1} From 3ebf4205259438c4a7f7804ceb3d8a9d4f9b9276 Mon Sep 17 00:00:00 2001 From: Carles Sala Date: Wed, 12 Feb 2020 02:18:09 -0500 Subject: [PATCH 3/8] Remove test randomness --- tests/integration/test_session.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tests/integration/test_session.py b/tests/integration/test_session.py index 44371620..4a6e092a 100644 --- a/tests/integration/test_session.py +++ b/tests/integration/test_session.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from unittest import TestCase -from unittest.mock import Mock import pytest @@ -160,15 +159,15 @@ def test_allow_errors(self): } } - scorer = Mock() - scorer.side_effect = [ - Exception, - Exception, - 1 - ] - session = BTBSession(tunables, scorer, max_errors=3) + def scorer(name, proposal): + if proposal['a_parameter'] == 0: + raise Exception() - best = session.run(3) + return 1 + + session = BTBSession(tunables, scorer, max_errors=10) + + best = session.run(10) assert best['name'] == 'a_tunable' assert best['config'] == {'a_parameter': 1} From b0bc4774081f914afe637666f3417c043ab5aad5 Mon Sep 17 00:00:00 2001 From: Plamen Valentinov Date: Wed, 12 Feb 2020 12:00:08 +0100 Subject: [PATCH 4/8] Add tuning assertions. 
--- tests/integration/test_tuning.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/integration/test_tuning.py b/tests/integration/test_tuning.py index 4e866936..c8f13f62 100644 --- a/tests/integration/test_tuning.py +++ b/tests/integration/test_tuning.py @@ -22,3 +22,30 @@ def test_tuning(): tuner.record(proposed, random.random()) # TODO add asserts + assert len(tuner.trials) == 10 + assert len(tuner._trials_set) == 10 + assert len(tuner.raw_scores) == 10 + assert len(tuner.scores) == 10 + assert all(tuner.raw_scores == tuner.scores) + + +def test_tuning_minimize(): + hyperparams = { + 'bhp': BooleanHyperParam(default=False), + 'chp': CategoricalHyperParam(choices=['a', 'b', None], default=None), + 'fhp': FloatHyperParam(min=0.1, max=1.0, default=0.5), + 'ihp': IntHyperParam(min=-1, max=1) + } + tunable = Tunable(hyperparams) + tuner = GPTuner(tunable, maximize=False) + + for _ in range(10): + proposed = tuner.propose(1) + tuner.record(proposed, random.random()) + + # TODO add asserts + assert len(tuner.trials) == 10 + assert len(tuner._trials_set) == 10 + assert len(tuner.raw_scores) == 10 + assert len(tuner.scores) == 10 + assert all(-tuner.raw_scores == tuner.scores) From 03cbd3f754ca658587ce72d58d434c2423f233cb Mon Sep 17 00:00:00 2001 From: Plamen Valentinov Date: Wed, 12 Feb 2020 12:21:27 +0100 Subject: [PATCH 5/8] Update asserts. --- tests/integration/test_benchmark.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_benchmark.py b/tests/integration/test_benchmark.py index 3342c398..82b36233 100644 --- a/tests/integration/test_benchmark.py +++ b/tests/integration/test_benchmark.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import numpy as np from btb.benchmark import benchmark from btb.benchmark.challenges import Rosenbrock @@ -7,7 +8,11 @@ def test_benchmark_rosenbrock(): + # run candidate = make_tuning_function(GPTuner) - benchmark(candidate, challenges=Rosenbrock(), iterations=1) + df = benchmark(candidate, challenges=Rosenbrock(), iterations=1) - # TODO: Add asserts + # Assert + np.testing.assert_equal(df.columns.values, ['Rosenbrock()', 'Mean', 'Std']) + np.testing.assert_equal(df.index.values, ['tuning_function']) + np.testing.assert_equal(df.dtypes.values, [np.int, np.float, np.float]) From 68e36f0f12e1a3a4c2ae3d15cab97f0f5ef0bbc0 Mon Sep 17 00:00:00 2001 From: Plamen Valentinov Date: Wed, 12 Feb 2020 13:17:47 +0100 Subject: [PATCH 6/8] Removed unnecessary test. 
--- tests/integration/test_integration.py | 109 -------------------------- tests/integration/test_tuning.py | 4 +- 2 files changed, 2 insertions(+), 111 deletions(-) delete mode 100644 tests/integration/test_integration.py diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py deleted file mode 100644 index dfd108ad..00000000 --- a/tests/integration/test_integration.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- - -import random - -from sklearn.datasets import load_boston as load_dataset -from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor -from sklearn.metrics import make_scorer, r2_score -from sklearn.model_selection import cross_val_score, train_test_split - -from btb.benchmark import benchmark -from btb.benchmark.challenges import Rosenbrock -from btb.benchmark.tuners.btb import make_tuning_function -from btb.session import BTBSession -from btb.tuning import GPEiTuner, GPTuner, Tunable -from btb.tuning.hyperparams import ( - BooleanHyperParam, CategoricalHyperParam, FloatHyperParam, IntHyperParam) - - -def test_benchmark_rosenbrock(): - candidate = make_tuning_function(GPTuner) - benchmark(candidate, challenges=Rosenbrock(), iterations=1) - - -def test_tuning(): - hyperparams = { - 'bhp': BooleanHyperParam(default=False), - 'chp': CategoricalHyperParam(choices=['a', 'b', None], default=None), - 'fhp': FloatHyperParam(min=0.1, max=1.0, default=0.5), - 'ihp': IntHyperParam(min=-1, max=1) - } - tunable = Tunable(hyperparams) - tuner = GPEiTuner(tunable) - - for _ in range(10): - proposed = tuner.propose(1) - tuner.record(proposed, random.random()) - - -def test_session(): - - def build_model(name, hyperparameters): - model_class = models[name] - return model_class(random_state=0, **hyperparameters) - - def score_model(name, hyperparameters): - model = build_model(name, hyperparameters) - r2_scorer = make_scorer(r2_score) - scores = cross_val_score(model, X_train, y_train, scoring=r2_scorer) - return scores.mean() - - dataset = load_dataset() - - X_train, X_test, y_train, y_test = train_test_split( - dataset.data, dataset.target, test_size=0.3, random_state=0) - - tunables = { - 'random_forest': { - 'n_estimators': { - 'type': 'int', - 'default': 2, - 'range': [1, 1000] - }, - 'max_features': { - 'type': 'str', - 'default': 'log2', - 'range': [None, 'auto', 'log2', 'sqrt'] - }, - 'min_samples_split': { - 'type': 'int', - 'default': 2, - 'range': [2, 20] - }, - 'min_samples_leaf': { - 'type': 'int', - 'default': 2, - 'range': [1, 20] - }, - }, - 'extra_trees': { - 'n_estimators': { - 'type': 'int', - 'default': 2, - 'range': [1, 1000] - }, - 'max_features': { - 'type': 'str', - 'default': 'log2', - 'range': [None, 'auto', 'log2', 'sqrt'] - }, - 'min_samples_split': { - 'type': 'int', - 'default': 2, - 'range': [2, 20] - }, - 'min_samples_leaf': { - 'type': 'int', - 'default': 2, - 'range': [1, 20] - }, - } - } - - models = { - 'random_forest': RandomForestRegressor, - 'extra_trees': ExtraTreesRegressor, - } - - session = BTBSession(tunables, score_model, verbose=True) - session.run(2) diff --git a/tests/integration/test_tuning.py b/tests/integration/test_tuning.py index c8f13f62..bfa898f4 100644 --- a/tests/integration/test_tuning.py +++ b/tests/integration/test_tuning.py @@ -21,7 +21,7 @@ def test_tuning(): proposed = tuner.propose(1) tuner.record(proposed, random.random()) - # TODO add asserts + # asserts assert len(tuner.trials) == 10 assert len(tuner._trials_set) == 10 assert len(tuner.raw_scores) == 10 @@ -43,7 +43,7 
@@ def test_tuning_minimize(): proposed = tuner.propose(1) tuner.record(proposed, random.random()) - # TODO add asserts + # asserts assert len(tuner.trials) == 10 assert len(tuner._trials_set) == 10 assert len(tuner.raw_scores) == 10 From 31c6349932accd6b168ad2d00af6b4110e8c4a66 Mon Sep 17 00:00:00 2001 From: Carles Sala Date: Sun, 16 Feb 2020 11:24:54 -0500 Subject: [PATCH 7/8] Skip tuner usage if tunable cardinality is 1 --- btb/session.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/btb/session.py b/btb/session.py index 43ff1876..072c104c 100644 --- a/btb/session.py +++ b/btb/session.py @@ -203,7 +203,14 @@ def propose(self): LOGGER.info('Obtaining default configuration for %s', tunable_name) config = tunable.get_defaults() - self._tuners[tunable_name] = self._tuner_class(tunable) + if tunable.cardinality == 1: + LOGGER.warn('Skipping tuner creation for Tunable %s with cardinality 1', + tunable_name) + tuner = None + else: + tuner = self._tuner_class(tunable) + + self._tuners[tunable_name] = tuner else: if self._normalized_scores: @@ -216,11 +223,14 @@ def propose(self): tuner = self._tuners[tunable_name] try: + if tuner is None: + raise StopTuning('Tunable %s has no tunable hyperparameters', tunable_name) + LOGGER.info('Generating new proposal configuration for %s', tunable_name) config = tuner.propose(1) except StopTuning: - LOGGER.info('%s has no more configs to propose.' % tunable_name) + LOGGER.info('%s has no more configs to propose.', tunable_name) self._remove_tunable(tunable_name) tunable_name, config = self.propose() @@ -281,9 +291,14 @@ def record(self, tunable_name, config, score): self._best_normalized = normalized try: tuner = self._tuners[tunable_name] - tuner.record(config, normalized) + if tuner is None: + LOGGER.warn('Skipping record for Tunable %s with cardinality 1', tunable_name) + else: + tuner.record(config, normalized) + except Exception: - LOGGER.exception('Could not record configuration and score to tuner.') + LOGGER.exception('Could not record configuration and score for tuner %s.', + tunable_name) def run(self, iterations=None): """Run the selection and tuning loop for the given number of iterations. From e3e02f34a78eea496aa87a969bbdd2d264ada4fd Mon Sep 17 00:00:00 2001 From: Plamen Valentinov Date: Mon, 17 Feb 2020 15:07:27 +0100 Subject: [PATCH 8/8] Update tests. 
--- tests/test_session.py | 60 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/tests/test_session.py b/tests/test_session.py index 6bba5cf0..220071ff 100644 --- a/tests/test_session.py +++ b/tests/test_session.py @@ -218,6 +218,43 @@ def test_propose_raise_error(self): with self.assertRaises(ValueError): BTBSession.propose(instance) + @patch('btb.session.isinstance') + @patch('btb.session.Tunable') + def test_propose_tunable_cardinality_eq_one(self, mock_tunable, mock_isinstance): + # setup + mock_tunable.from_dict.return_value.cardinality = 1 + mock_tunable.from_dict.return_value.get_defaults.return_value = 'parameters' + mock_isinstance.return_value = True + + instance = MagicMock(spec_set=BTBSession) + instance._tuners = {} + instance._tunable_names = ['test_tunable'] + instance.proposals = {} + + instance._make_id.return_value = 1 + + # run + tunable_name, config = BTBSession.propose(instance) + + # assert + instance._make_id.assert_called_once_with('test_tunable', 'parameters') + instance._tuner_class.assert_not_called() + + assert instance._tuners == {'test_tunable': None} + assert 'test_tunable' == tunable_name + assert 'parameters' == config + + def test_propose_tuner_is_none(self): + # setup + instance = MagicMock(spec_set=BTBSession) + instance._tuners = {'test_tunable': None} + instance._tunable_names = ['test_tunable'] + instance._normalized_scores = None + + # run + with self.assertRaises(ValueError): + BTBSession.propose(instance) + def test_handle_error_errors_lt_max_errors(self): # setup instance = MagicMock(spec_set=BTBSession) @@ -284,6 +321,29 @@ def test_record_score_gt_best(self): tuner.record.assert_called_once_with('config', 1) + def test_record_score_gt_best_tuner_none(self): + # setup + instance = MagicMock(spec_set=BTBSession) + instance._make_id.return_value = 0 + instance.proposals = [{'test': 'test'}] + instance._tuners = {'test': None} + instance.best_proposal = None + + instance._best_normalized = 0 + instance._normalize.return_value = 1 + instance._normalized_scores = defaultdict(list) + + # run + BTBSession.record(instance, 'test', 'config', 1) + + # assert + expected_normalized_scores = defaultdict(list) + expected_normalized_scores['test'].append(1) + + assert instance._normalized_scores == expected_normalized_scores + assert instance.best_proposal == {'test': 'test', 'score': 1} + assert instance._best_normalized == 1 + def test_record_score_lt_best(self): # setup tuner = MagicMock()