[dask] fix mypy errors about padded eval_results (#5716)
jameslamb committed Feb 15, 2023
1 parent 7fd708d commit 6f0bc48
Showing 2 changed files with 4 additions and 5 deletions.
7 changes: 3 additions & 4 deletions python-package/lightgbm/dask.py
@@ -160,12 +160,11 @@ def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMModel:
     Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``.
     """
-    not_evaluated = 'not evaluated'
     for eval_name in required_names:
         if eval_name not in lgbm_model.evals_result_:
-            lgbm_model.evals_result_[eval_name] = not_evaluated
+            lgbm_model.evals_result_[eval_name] = {}
         if eval_name not in lgbm_model.best_score_:
-            lgbm_model.best_score_[eval_name] = not_evaluated
+            lgbm_model.best_score_[eval_name] = {}
 
     return lgbm_model
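
For context, a minimal sketch of the type mismatch this hunk resolves. It assumes the fitted attributes are annotated roughly as Dict[str, Dict[str, ...]], mirroring the scikit-learn API; that annotation is an assumption for illustration, not taken from this diff. Assigning the string sentinel violated that value type, while an empty dict satisfies it and still marks the eval set as never evaluated.

# Hypothetical reduction of the mypy complaint (not code from this repository).
from typing import Dict, List

# assumed annotation for the fitted attribute, similar to the scikit-learn API
evals_result_: Dict[str, Dict[str, List[float]]] = {}

# before this commit: a str sentinel was assigned where a dict is expected
# evals_result_['valid_0'] = 'not evaluated'   # mypy: incompatible types in assignment

# after this commit: an empty dict keeps the declared value type
evals_result_['valid_0'] = {}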

@@ -444,7 +443,7 @@ def _train(
         List of (X, y) tuple pairs to use as validation sets.
         Note, that not all workers may receive chunks of every eval set within ``eval_set``. When the returned
         lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component
-        of evals_result_ and best_score_ will be 'not_evaluated'.
+        of ``evals_result_`` and ``best_score_`` will be empty dictionaries.
     eval_names : list of str, or None, optional (default=None)
         Names of eval_set.
     eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
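
A brief caller-side sketch of what the documented contract means for users of the Dask estimators. Everything below is illustrative: the synthetic data, the 'valid_0' eval name, and the local cluster setup are assumptions, not part of this diff.

# Hypothetical usage sketch (assumes a local Dask cluster; data and names are illustrative).
import dask.array as da
import lightgbm as lgb
from dask.distributed import Client

if __name__ == '__main__':
    client = Client(n_workers=2)

    dX = da.random.random((1_000, 10), chunks=(100, 10))
    dy = da.random.random((1_000,), chunks=(100,))
    dX_valid = da.random.random((200, 10), chunks=(100, 10))
    dy_valid = da.random.random((200,), chunks=(100,))

    model = lgb.DaskLGBMRegressor(n_estimators=5).fit(
        dX, dy,
        eval_set=[(dX_valid, dy_valid)],
        eval_names=['valid_0'],
    )

    # with this commit, a padded component is an empty dict instead of the string
    # 'not evaluated', so a plain truthiness check is enough
    for eval_name, metrics in model.evals_result_.items():
        if not metrics:
            print(f"{eval_name}: not evaluated by the returned model")
        else:
            print(f"{eval_name}: metrics recorded -> {sorted(metrics)}")

    client.close()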
2 changes: 1 addition & 1 deletion tests/python_package_test/test_dask.py
@@ -1127,7 +1127,7 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix,
 
         # check that each eval_name and metric exists for all eval sets, allowing for the
         # case when a worker receives a fully-padded eval_set component which is not evaluated.
-        if evals_result[eval_name] != 'not evaluated':
+        if evals_result[eval_name] != {}:
             for metric in eval_metric_names:
                 assert metric in evals_result[eval_name]
                 assert metric in best_scores[eval_name]
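
As a usage note, one way to re-run the type check referenced in the commit title is through mypy's Python API; the file path and the project's actual mypy configuration are assumptions here, not shown in this diff.

# Hypothetical check of the commit's motivation via mypy's Python API.
from mypy import api

stdout, stderr, exit_status = api.run(['python-package/lightgbm/dask.py'])
print(stdout)
print('mypy exit status:', exit_status)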
