From df5d48e24c45e627ed26bf9b56f3583828dcc8f5 Mon Sep 17 00:00:00 2001
From: eddiebergman
Date: Thu, 9 Jun 2022 18:20:08 +0200
Subject: [PATCH] Rename `rval` to `return_value` or `run_value`

---
 autosklearn/automl.py                         |   8 +-
 autosklearn/estimators.py                     |  34 ++--
 autosklearn/evaluation/__init__.py            |   2 +-
 autosklearn/evaluation/abstract_evaluator.py  |   6 +-
 autosklearn/evaluation/util.py                |   8 +-
 autosklearn/experimental/selector.py          |  16 +-
 .../metalearning/kNearestDatasets/kND.py      |   6 +-
 autosklearn/pipeline/base.py                  |   6 +-
 scripts/2015_nips_paper/run/score_ensemble.py |   4 +-
 test/test_evaluation/test_test_evaluator.py   |  38 ++---
 test/test_evaluation/test_train_evaluator.py  | 154 +++++++++---------
 test/test_scripts/test_metadata_generation.py |  30 ++--
 12 files changed, 157 insertions(+), 155 deletions(-)

diff --git a/autosklearn/automl.py b/autosklearn/automl.py
index 12e80b8e4e..29ca3f2fdc 100644
--- a/autosklearn/automl.py
+++ b/autosklearn/automl.py
@@ -2138,10 +2138,10 @@ def has_key(rv, key):
             return rv.additional_info and key in rv.additional_info
 
         table_dict = {}
-        for rkey, rval in self.runhistory_.data.items():
-            if has_key(rval, "num_run"):
-                model_id = rval.additional_info["num_run"]
-                table_dict[model_id] = {"model_id": model_id, "cost": rval.cost}
+        for run_key, run_val in self.runhistory_.data.items():
+            if has_key(run_val, "num_run"):
+                model_id = run_val.additional_info["num_run"]
+                table_dict[model_id] = {"model_id": model_id, "cost": run_val.cost}
 
         # Checking if the dictionary is empty
         if not table_dict:
diff --git a/autosklearn/estimators.py b/autosklearn/estimators.py
index 7144fcc39c..e3253d54f1 100644
--- a/autosklearn/estimators.py
+++ b/autosklearn/estimators.py
@@ -1041,31 +1041,31 @@ def additional_info_has_key(rv, key):
             return rv.additional_info and key in rv.additional_info
 
         model_runs = {}
-        for rkey, rval in self.automl_.runhistory_.data.items():
-            if not additional_info_has_key(rval, "num_run"):
+        for run_key, run_val in self.automl_.runhistory_.data.items():
+            if not additional_info_has_key(run_val, "num_run"):
                 continue
             else:
-                model_key = rval.additional_info["num_run"]
+                model_key = run_val.additional_info["num_run"]
                 model_run = {
-                    "model_id": rval.additional_info["num_run"],
-                    "seed": rkey.seed,
-                    "budget": rkey.budget,
-                    "duration": rval.time,
-                    "config_id": rkey.config_id,
-                    "start_time": rval.starttime,
-                    "end_time": rval.endtime,
-                    "status": str(rval.status),
-                    "train_loss": rval.additional_info["train_loss"]
-                    if additional_info_has_key(rval, "train_loss")
+                    "model_id": run_val.additional_info["num_run"],
+                    "seed": run_key.seed,
+                    "budget": run_key.budget,
+                    "duration": run_val.time,
+                    "config_id": run_key.config_id,
+                    "start_time": run_val.starttime,
+                    "end_time": run_val.endtime,
+                    "status": str(run_val.status),
+                    "train_loss": run_val.additional_info["train_loss"]
+                    if additional_info_has_key(run_val, "train_loss")
                     else None,
-                    "config_origin": rval.additional_info["configuration_origin"]
-                    if additional_info_has_key(rval, "configuration_origin")
+                    "config_origin": run_val.additional_info["configuration_origin"]
+                    if additional_info_has_key(run_val, "configuration_origin")
                     else None,
                 }
                 if num_metrics == 1:
-                    model_run["cost"] = rval.cost
+                    model_run["cost"] = run_val.cost
                 else:
-                    for cost_idx, cost in enumerate(rval.cost):
+                    for cost_idx, cost in enumerate(run_val.cost):
                         model_run[f"cost_{cost_idx}"] = cost
                 model_runs[model_key] = model_run
 
diff --git a/autosklearn/evaluation/__init__.py b/autosklearn/evaluation/__init__.py
index aace158c00..9563f4ef8b 100644
--- a/autosklearn/evaluation/__init__.py
+++ b/autosklearn/evaluation/__init__.py
@@ -71,7 +71,7 @@ def fit_predict_try_except_decorator(
     # File "auto-sklearn/autosklearn/evaluation/train_evaluator.py", line 616, in fit_predict_and_loss,  # noqa E501
     #   status=status
     # File "auto-sklearn/autosklearn/evaluation/abstract_evaluator.py", line 320, in finish_up  # noqa E501
-    #   self.queue.put(rval_dict)
+    #   self.queue.put(return_value_dict)
     # File "miniconda/3-4.5.4/envs/autosklearn/lib/python3.7/multiprocessing/queues.py", line 87, in put  # noqa E501
     #   self._start_thread()
     # File "miniconda/3-4.5.4/envs/autosklearn/lib/python3.7/multiprocessing/queues.py", line 170, in _start_thread  # noqa E501
diff --git a/autosklearn/evaluation/abstract_evaluator.py b/autosklearn/evaluation/abstract_evaluator.py
index efd87c6cc3..6a189a86a0 100644
--- a/autosklearn/evaluation/abstract_evaluator.py
+++ b/autosklearn/evaluation/abstract_evaluator.py
@@ -429,15 +429,15 @@ def finish_up(
         if test_loss is not None:
             additional_run_info["test_loss"] = test_loss
 
-        rval_dict = {
+        return_value_dict = {
             "loss": loss,
             "additional_run_info": additional_run_info,
            "status": status,
         }
         if final_call:
-            rval_dict["final_queue_element"] = True
+            return_value_dict["final_queue_element"] = True
 
-        self.queue.put(rval_dict)
+        self.queue.put(return_value_dict)
         return self.duration, loss_, self.seed, additional_run_info_
 
     def calculate_auxiliary_losses(
diff --git a/autosklearn/evaluation/util.py b/autosklearn/evaluation/util.py
index c249c8be1c..158825786b 100644
--- a/autosklearn/evaluation/util.py
+++ b/autosklearn/evaluation/util.py
@@ -12,19 +12,19 @@ def read_queue(
     stack = []
     while True:
         try:
-            rval = queue_.get(timeout=1)
+            return_value = queue_.get(timeout=1)
         except queue.Empty:
            break
 
        # Check if there is a special placeholder value which tells us that
        # we don't have to wait until the queue times out in order to
        # retrieve the final value!
- if "final_queue_element" in rval: - del rval["final_queue_element"] + if "final_queue_element" in return_value: + del return_value["final_queue_element"] do_break = True else: do_break = False - stack.append(rval) + stack.append(return_value) if do_break: break diff --git a/autosklearn/experimental/selector.py b/autosklearn/experimental/selector.py index 125cba6125..b854c7b440 100644 --- a/autosklearn/experimental/selector.py +++ b/autosklearn/experimental/selector.py @@ -297,17 +297,17 @@ def _predict( wins = wins / np.sum(wins) predictions[X.index[x_idx]] = wins - rval = { + return_value = { task_id: { strategy: predictions[task_id][strategy_idx] for strategy_idx, strategy in enumerate(self.strategies_) } for task_id in X.index } - rval = pd.DataFrame(rval).transpose().astype(float) - rval = rval[self.strategies_] - rval = rval.fillna(0.0) - return rval + return_value = pd.DataFrame(return_value).transpose().astype(float) + return_value = return_value[self.strategies_] + return_value = return_value.fillna(0.0) + return return_value def fit_pairwise_model(self, X, y, weights, rng, configuration): raise NotImplementedError() @@ -346,14 +346,14 @@ def fit( ) -> None: self.X_ = X self.strategies_ = y.columns - self.rval_ = np.array( + self.return_value_ = np.array( [ (len(self.strategies_) - self.default_strategies.index(strategy) - 1) / (len(self.strategies_) - 1) for strategy in self.strategies_ ] ) - self.rval_ = self.rval_ / np.sum(self.rval_) + self.return_value_ = self.return_value_ / np.sum(self.return_value_) self.selector.fit(X, y, minima, maxima) def _predict( @@ -377,7 +377,7 @@ def _predict( prediction.loc[task_id] = pd.Series( { strategy: value - for strategy, value in zip(self.strategies_, self.rval_) + for strategy, value in zip(self.strategies_, self.return_value_) } ) diff --git a/autosklearn/metalearning/metalearning/kNearestDatasets/kND.py b/autosklearn/metalearning/metalearning/kNearestDatasets/kND.py index f6c10c95d2..f49ed8ccab 100644 --- a/autosklearn/metalearning/metalearning/kNearestDatasets/kND.py +++ b/autosklearn/metalearning/metalearning/kNearestDatasets/kND.py @@ -122,7 +122,7 @@ def kNearestDatasets(self, x, k=1, return_distance=False): assert k == neighbor_indices.shape[1] - rval = [ + return_value = [ self.metafeatures.index[i] # Neighbor indices is 2d, each row is the indices for one # dataset in x. 
@@ -130,9 +130,9 @@ def kNearestDatasets(self, x, k=1, return_distance=False): ] if return_distance is False: - return rval + return return_value else: - return rval, distances[0] + return return_value, distances[0] def kBestSuggestions(self, x, k=1, exclude_double_configurations=True): assert type(x) == pd.Series diff --git a/autosklearn/pipeline/base.py b/autosklearn/pipeline/base.py index 93c73b4716..3a13364ea6 100644 --- a/autosklearn/pipeline/base.py +++ b/autosklearn/pipeline/base.py @@ -495,15 +495,15 @@ def __repr__(self): dataset_properties_string.append("}") dataset_properties_string = "".join(dataset_properties_string) - rval = "%s(%s,\n%s)" % ( + return_value = "%s(%s,\n%s)" % ( class_name, configuration, dataset_properties_string, ) else: - rval = "%s(%s)" % (class_name, configuration_string) + return_value = "%s(%s)" % (class_name, configuration_string) - return rval + return return_value def _get_pipeline_steps(self, dataset_properties): raise NotImplementedError() diff --git a/scripts/2015_nips_paper/run/score_ensemble.py b/scripts/2015_nips_paper/run/score_ensemble.py index 1e873f01fd..9842359225 100644 --- a/scripts/2015_nips_paper/run/score_ensemble.py +++ b/scripts/2015_nips_paper/run/score_ensemble.py @@ -227,14 +227,14 @@ def evaluate(input_directory, validation_files, test_files, ensemble_size=50): ensemble_time = time.time() - start - rval = { + return_value = { "ensemble_time": ensemble_time, "time_function_evaluation": time_function_evaluation, "ensemble_error": ensemble_error, "ensemble_test_error": ensemble_test_error, } - return rval + return return_value if __name__ == "__main__": diff --git a/test/test_evaluation/test_test_evaluator.py b/test/test_evaluation/test_test_evaluator.py index 457661df03..02eedcca91 100644 --- a/test/test_evaluation/test_test_evaluator.py +++ b/test/test_evaluation/test_test_evaluator.py @@ -80,10 +80,10 @@ def test_datasets(self): ) evaluator.fit_predict_and_loss() - rval = read_queue(evaluator.queue) - self.assertEqual(len(rval), 1) - self.assertEqual(len(rval[0]), 3) - self.assertTrue(np.isfinite(rval[0]["loss"])) + return_value = read_queue(evaluator.queue) + self.assertEqual(len(return_value), 1) + self.assertEqual(len(return_value[0]), 3) + self.assertTrue(np.isfinite(return_value[0]["loss"])) class FunctionsTest(unittest.TestCase): @@ -124,11 +124,11 @@ def test_eval_test(self): port=self.port, additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) - self.assertAlmostEqual(rval[0]["loss"], 0.07999999999999996) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) - self.assertNotIn("bac_metric", rval[0]["additional_run_info"]) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 1) + self.assertAlmostEqual(return_value[0]["loss"], 0.07999999999999996) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) + self.assertNotIn("bac_metric", return_value[0]["additional_run_info"]) def test_eval_test_multi_objective(self): metrics = { @@ -151,12 +151,12 @@ def test_eval_test_multi_objective(self): port=self.port, additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 1) for metric, loss in metrics.items(): - self.assertAlmostEqual(rval[0]["loss"][metric.name], loss) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) - self.assertNotIn("bac_metric", rval[0]["additional_run_info"]) + 
self.assertAlmostEqual(return_value[0]["loss"][metric.name], loss) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) + self.assertNotIn("bac_metric", return_value[0]["additional_run_info"]) def test_eval_test_all_loss_functions(self): eval_t( @@ -175,8 +175,8 @@ def test_eval_test_all_loss_functions(self): port=self.port, additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 1) # Note: All metric here should be minimized fixture = { @@ -195,7 +195,7 @@ def test_eval_test_all_loss_functions(self): "num_run": -1, } - additional_run_info = rval[0]["additional_run_info"] + additional_run_info = return_value[0]["additional_run_info"] for key, value in fixture.items(): self.assertAlmostEqual(additional_run_info[key], fixture[key], msg=key) self.assertEqual( @@ -204,5 +204,5 @@ def test_eval_test_all_loss_functions(self): msg=sorted(additional_run_info.items()), ) self.assertIn("duration", additional_run_info) - self.assertAlmostEqual(rval[0]["loss"], 0.040000000000000036) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) + self.assertAlmostEqual(return_value[0]["loss"], 0.040000000000000036) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) diff --git a/test/test_evaluation/test_train_evaluator.py b/test/test_evaluation/test_train_evaluator.py index 9413af5509..14c36f2afc 100644 --- a/test/test_evaluation/test_train_evaluator.py +++ b/test/test_evaluation/test_train_evaluator.py @@ -150,10 +150,10 @@ def test_holdout(self, pipeline_mock): evaluator.fit_predict_and_loss() - rval = read_queue(evaluator.queue) - self.assertEqual(len(rval), 1) - result = rval[0]["loss"] - self.assertEqual(len(rval[0]), 3) + return_value = read_queue(evaluator.queue) + self.assertEqual(len(return_value), 1) + result = return_value[0]["loss"] + self.assertEqual(len(return_value[0]), 3) self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(evaluator.file_output.call_count, 1) @@ -294,15 +294,15 @@ def side_effect(self, *args, **kwargs): self.assertEqual(evaluator.file_output.call_count, 9) for i in range(1, 10): - rval = evaluator.queue.get(timeout=1) - result = rval["loss"] + return_value = evaluator.queue.get(timeout=1) + result = return_value["loss"] self.assertAlmostEqual(result, 1.0 - (0.1 * (i - 1))) if i < 9: - self.assertEqual(rval["status"], StatusType.DONOTADVANCE) - self.assertEqual(len(rval), 3) + self.assertEqual(return_value["status"], StatusType.DONOTADVANCE) + self.assertEqual(len(return_value), 3) else: - self.assertEqual(rval["status"], StatusType.SUCCESS) - self.assertEqual(len(rval), 4) + self.assertEqual(return_value["status"], StatusType.SUCCESS) + self.assertEqual(len(return_value), 4) self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(pipeline_mock.iterative_fit.call_count, 9) @@ -438,8 +438,8 @@ def side_effect(self, *args, **kwargs): self.assertEqual(evaluator.file_output.call_count, 2) for i in range(1, 3): - rval = evaluator.queue.get(timeout=1) - self.assertAlmostEqual(rval["loss"], 1.0 - (0.2 * i)) + return_value = evaluator.queue.get(timeout=1) + self.assertAlmostEqual(return_value["loss"], 1.0 - (0.2 * i)) self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(pipeline_mock.iterative_fit.call_count, 2) @@ -499,8 +499,8 @@ def test_iterative_holdout_not_iterative(self, pipeline_mock): evaluator.fit_predict_and_loss(iterative=True) 
self.assertEqual(evaluator.file_output.call_count, 1) - rval = evaluator.queue.get(timeout=1) - self.assertAlmostEqual(rval["loss"], 0.47826086956521741) + return_value = evaluator.queue.get(timeout=1) + self.assertAlmostEqual(return_value["loss"], 0.47826086956521741) self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(pipeline_mock.iterative_fit.call_count, 0) @@ -554,10 +554,10 @@ def test_cv(self, pipeline_mock): evaluator.fit_predict_and_loss() - rval = read_queue(evaluator.queue) - self.assertEqual(len(rval), 1) - result = rval[0]["loss"] - self.assertEqual(len(rval[0]), 3) + return_value = read_queue(evaluator.queue) + self.assertEqual(len(return_value), 1) + result = return_value[0]["loss"] + self.assertEqual(len(return_value[0]), 3) self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(evaluator.file_output.call_count, 1) @@ -623,11 +623,11 @@ def test_partial_cv(self, pipeline_mock): evaluator.partial_fit_predict_and_loss(fold=1) - rval = evaluator.queue.get(timeout=1) + return_value = evaluator.queue.get(timeout=1) self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(evaluator.file_output.call_count, 0) - self.assertEqual(rval["loss"], 0.5) + self.assertEqual(return_value["loss"], 0.5) self.assertEqual(pipeline_mock.fit.call_count, 1) self.assertEqual(pipeline_mock.predict_proba.call_count, 4) # The model prior to fitting is saved, this cannot be directly tested @@ -760,12 +760,12 @@ def side_effect(self, *args, **kwargs): self.assertEqual(evaluator.file_output.call_count, 0) for i in range(1, 10): - rval = evaluator.queue.get(timeout=1) - self.assertAlmostEqual(rval["loss"], 1.0 - (0.1 * (i - 1))) + return_value = evaluator.queue.get(timeout=1) + self.assertAlmostEqual(return_value["loss"], 1.0 - (0.1 * (i - 1))) if i < 9: - self.assertEqual(rval["status"], StatusType.DONOTADVANCE) + self.assertEqual(return_value["status"], StatusType.DONOTADVANCE) else: - self.assertEqual(rval["status"], StatusType.SUCCESS) + self.assertEqual(return_value["status"], StatusType.SUCCESS) self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(pipeline_mock.iterative_fit.call_count, 9) @@ -809,13 +809,13 @@ def test_file_output(self, loss_mock, model_mock): self.backend_mock.get_model_dir.return_value = True evaluator.model = "model" evaluator.Y_optimization = D.data["Y_train"] - rval = evaluator.file_output( + return_value = evaluator.file_output( D.data["Y_train"], D.data["Y_valid"], D.data["Y_test"], ) - self.assertEqual(rval, (None, {})) + self.assertEqual(return_value, (None, {})) self.assertEqual(self.backend_mock.save_additional_data.call_count, 2) self.assertEqual(self.backend_mock.save_numrun_to_dir.call_count, 1) self.assertEqual( @@ -839,12 +839,12 @@ def test_file_output(self, loss_mock, model_mock): ) evaluator.models = ["model2", "model2"] - rval = evaluator.file_output( + return_value = evaluator.file_output( D.data["Y_train"], D.data["Y_valid"], D.data["Y_test"], ) - self.assertEqual(rval, (None, {})) + self.assertEqual(return_value, (None, {})) self.assertEqual(self.backend_mock.save_additional_data.call_count, 4) self.assertEqual(self.backend_mock.save_numrun_to_dir.call_count, 2) self.assertEqual( @@ -870,26 +870,26 @@ def test_file_output(self, loss_mock, model_mock): # Check for not containing NaNs - that the models don't predict nonsense # for unseen data D.data["Y_valid"][0] = np.NaN - rval = evaluator.file_output( + return_value = evaluator.file_output( 
D.data["Y_train"], D.data["Y_valid"], D.data["Y_test"], ) self.assertEqual( - rval, + return_value, ( 1.0, {"error": "Model predictions for validation set contains NaNs."}, ), ) D.data["Y_train"][0] = np.NaN - rval = evaluator.file_output( + return_value = evaluator.file_output( D.data["Y_train"], D.data["Y_valid"], D.data["Y_test"], ) self.assertEqual( - rval, + return_value, ( 1.0, {"error": "Model predictions for optimization set contains NaNs."}, @@ -1112,8 +1112,8 @@ def test_fit_predict_and_loss_standard_additional_run_info( evaluator.X_targets[0] = np.array([1, 0] * 23) evaluator.Y_targets[0] = np.array([1] * 23) evaluator.Y_train_targets = np.array([1] * 69) - rval = evaluator.fit_predict_and_loss(iterative=False) - self.assertIsNone(rval) + return_value = evaluator.fit_predict_and_loss(iterative=False) + self.assertIsNone(return_value) element = queue_.get() self.assertEqual(element["status"], StatusType.SUCCESS) self.assertEqual(element["additional_run_info"]["a"], 5) @@ -1219,8 +1219,8 @@ def __call__(self): evaluator.file_output.return_value = (None, {}) evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1)) evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1)) - rval = evaluator.fit_predict_and_loss(iterative=True) - self.assertIsNone(rval) + return_value = evaluator.fit_predict_and_loss(iterative=True) + self.assertIsNone(return_value) self.assertEqual(finish_up_mock.call_count, 1) self.assertEqual(finish_up_mock.call_args[1]["additional_run_info"], 14678) @@ -1265,8 +1265,8 @@ def test_fit_predict_and_loss_iterative_noniterativemodel_additional_run_info( evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1)) evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1)) - rval = evaluator.fit_predict_and_loss(iterative=True) - self.assertIsNone(rval) + return_value = evaluator.fit_predict_and_loss(iterative=True) + self.assertIsNone(return_value) self.assertEqual(finish_up_mock.call_count, 1) self.assertEqual(finish_up_mock.call_args[1]["additional_run_info"], 14678) @@ -1326,8 +1326,8 @@ def __call__(self): evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1)) evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1)) - rval = evaluator.fit_predict_and_loss(iterative=False) - self.assertIsNone(rval) + return_value = evaluator.fit_predict_and_loss(iterative=False) + self.assertIsNone(return_value) self.assertEqual(finish_up_mock.call_count, 1) self.assertEqual( finish_up_mock.call_args[1]["additional_run_info"], {"val": 14678} @@ -1373,8 +1373,8 @@ def test_fit_predict_and_loss_budget_2_additional_run_info( evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1)) evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1)) - rval = evaluator.fit_predict_and_loss(iterative=False) - self.assertIsNone(rval) + return_value = evaluator.fit_predict_and_loss(iterative=False) + self.assertIsNone(return_value) self.assertEqual(finish_up_mock.call_count, 1) self.assertEqual( finish_up_mock.call_args[1]["additional_run_info"], {"val": 14678} @@ -1422,8 +1422,8 @@ def test_datasets(self): ) evaluator.fit_predict_and_loss() - rval = evaluator.queue.get(timeout=1) - self.assertTrue(np.isfinite(rval["loss"])) + return_value = evaluator.queue.get(timeout=1) + self.assertTrue(np.isfinite(return_value["loss"])) ############################################################################ # Test obtaining a splitter object from scikit-learn @@ -3053,8 +3053,8 @@ def test_eval_holdout_all_loss_functions(self): metrics=[accuracy], 
additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 1) fixture = { "accuracy": 0.030303030303030276, @@ -3075,7 +3075,7 @@ def test_eval_holdout_all_loss_functions(self): "train_loss": 0.0, } - additional_run_info = rval[0]["additional_run_info"] + additional_run_info = return_value[0]["additional_run_info"] for key, value in fixture.items(): self.assertAlmostEqual(additional_run_info[key], fixture[key], msg=key) self.assertIn("duration", additional_run_info) @@ -3085,8 +3085,8 @@ def test_eval_holdout_all_loss_functions(self): msg=sorted(additional_run_info.items()), ) - self.assertAlmostEqual(rval[0]["loss"], 0.030303030303030276, places=3) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) + self.assertAlmostEqual(return_value[0]["loss"], 0.030303030303030276, places=3) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) def test_eval_holdout_iterative_fit_no_timeout(self): eval_iterative_holdout( @@ -3107,11 +3107,11 @@ def test_eval_holdout_iterative_fit_no_timeout(self): metrics=[accuracy], additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 9) - self.assertAlmostEqual(rval[-1]["loss"], 0.030303030303030276) - self.assertEqual(rval[0]["status"], StatusType.DONOTADVANCE) - self.assertEqual(rval[-1]["status"], StatusType.SUCCESS) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 9) + self.assertAlmostEqual(return_value[-1]["loss"], 0.030303030303030276) + self.assertEqual(return_value[0]["status"], StatusType.DONOTADVANCE) + self.assertEqual(return_value[-1]["status"], StatusType.SUCCESS) def test_eval_holdout_iterative_fit_no_timeout_multi_objective(self): metrics = { @@ -3136,12 +3136,12 @@ def test_eval_holdout_iterative_fit_no_timeout_multi_objective(self): metrics=list(metrics.keys()), additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 9) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 9) for metric, loss in metrics.items(): - self.assertAlmostEqual(rval[-1]["loss"][metric.name], loss) - self.assertEqual(rval[0]["status"], StatusType.DONOTADVANCE) - self.assertEqual(rval[-1]["status"], StatusType.SUCCESS) + self.assertAlmostEqual(return_value[-1]["loss"][metric.name], loss) + self.assertEqual(return_value[0]["status"], StatusType.DONOTADVANCE) + self.assertEqual(return_value[-1]["status"], StatusType.SUCCESS) def test_eval_holdout_budget_iterations(self): eval_holdout( @@ -3405,11 +3405,11 @@ def test_eval_cv(self): metrics=[accuracy], additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) - self.assertAlmostEqual(rval[0]["loss"], 0.04999999999999997) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) - self.assertNotIn("bac_metric", rval[0]["additional_run_info"]) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 1) + self.assertAlmostEqual(return_value[0]["loss"], 0.04999999999999997) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) + self.assertNotIn("bac_metric", return_value[0]["additional_run_info"]) def test_eval_cv_all_loss_functions(self): eval_cv( @@ -3430,8 +3430,8 @@ def test_eval_cv_all_loss_functions(self): metrics=[accuracy], additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) + return_value = read_queue(self.queue) + 
self.assertEqual(len(return_value), 1) fixture = { "accuracy": 0.04999999999999997, @@ -3452,7 +3452,7 @@ def test_eval_cv_all_loss_functions(self): "train_loss": 0.0, } - additional_run_info = rval[0]["additional_run_info"] + additional_run_info = return_value[0]["additional_run_info"] for key, value in fixture.items(): self.assertAlmostEqual(additional_run_info[key], fixture[key], msg=key) self.assertIn("duration", additional_run_info) @@ -3462,8 +3462,8 @@ def test_eval_cv_all_loss_functions(self): msg=sorted(additional_run_info.items()), ) - self.assertAlmostEqual(rval[0]["loss"], 0.04999999999999997) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) + self.assertAlmostEqual(return_value[0]["loss"], 0.04999999999999997) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) # def test_eval_cv_on_subset(self): # backend_api = backend.create(self.tmp_dir, self.tmp_dir) @@ -3504,10 +3504,10 @@ def test_eval_partial_cv(self): metrics=[accuracy], additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) - self.assertAlmostEqual(rval[0]["loss"], results[fold]) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 1) + self.assertAlmostEqual(return_value[0]["loss"], results[fold]) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) def test_eval_partial_cv_multi_objective(self): metrics = { @@ -3547,8 +3547,8 @@ def test_eval_partial_cv_multi_objective(self): metrics=list(metrics.keys()), additional_components=dict(), ) - rval = read_queue(self.queue) - self.assertEqual(len(rval), 1) + return_value = read_queue(self.queue) + self.assertEqual(len(return_value), 1) for metric, loss in metrics.items(): - self.assertAlmostEqual(rval[0]["loss"][metric.name], loss[fold]) - self.assertEqual(rval[0]["status"], StatusType.SUCCESS) + self.assertAlmostEqual(return_value[0]["loss"][metric.name], loss[fold]) + self.assertEqual(return_value[0]["status"], StatusType.SUCCESS) diff --git a/test/test_scripts/test_metadata_generation.py b/test/test_scripts/test_metadata_generation.py index 929b90e029..89999d6be1 100644 --- a/test/test_scripts/test_metadata_generation.py +++ b/test/test_scripts/test_metadata_generation.py @@ -52,10 +52,10 @@ def test_metadata_generation(self): script_filename, self.working_directory, ) - rval = subprocess.run( + return_value = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - self.assertEqual(rval.returncode, 0, msg=str(rval)) + self.assertEqual(return_value.returncode, 0, msg=str(return_value)) # 4. run one of the commands to get some data commands_output_file = os.path.join( @@ -99,11 +99,11 @@ def test_metadata_generation(self): # for training. In production, it would use twice as much! 
cmd = cmd.replace("-s 1", "-s 1 --unittest") print("COMMAND: %s" % cmd) - rval = subprocess.run( + return_value = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - print("STDOUT: %s" % repr(rval.stdout), flush=True) - print("STDERR: %s" % repr(rval.stderr), flush=True) + print("STDOUT: %s" % repr(return_value.stdout), flush=True) + print("STDERR: %s" % repr(return_value.stderr), flush=True) self.print_files() @@ -123,7 +123,9 @@ def test_metadata_generation(self): ) with open(smac_log) as fh: smac_output = fh.read() - self.assertEqual(rval.returncode, 0, msg=str(rval) + "\n" + smac_output) + self.assertEqual( + return_value.returncode, 0, msg=str(return_value) + "\n" + smac_output + ) expected_validation_output = os.path.join( expected_output_directory, "..", "validation_trajectory_1.json" ) @@ -172,12 +174,12 @@ def test_metadata_generation(self): self.working_directory, ) print("COMMAND: %s" % cmd) - rval = subprocess.run( + return_value = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - print("STDOUT: %s" % repr(rval.stdout), flush=True) - print("STDERR: %s" % repr(rval.stderr), flush=True) - self.assertEqual(rval.returncode, 0, msg=str(rval)) + print("STDOUT: %s" % repr(return_value.stdout), flush=True) + print("STDERR: %s" % repr(return_value.stderr), flush=True) + self.assertEqual(return_value.returncode, 0, msg=str(return_value)) for file in [ "algorithm_runs.arff", @@ -215,10 +217,10 @@ def test_metadata_generation(self): script_filename, self.working_directory, ) - rval = subprocess.run( + return_value = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - self.assertEqual(rval.returncode, 0, msg=str(rval)) + self.assertEqual(return_value.returncode, 0, msg=str(return_value)) for task_type in ("classification", "regression"): for file in [ "calculation_times.csv", @@ -271,10 +273,10 @@ def test_metadata_generation(self): script_filename, self.working_directory, ) - rval = subprocess.run( + return_value = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - self.assertEqual(rval.returncode, 0, msg=str(rval)) + self.assertEqual(return_value.returncode, 0, msg=str(return_value)) for metric_, combination in ( (metric, "%s_binary.classification_dense" % metric),