diff --git a/mmcv/runner/hooks/eval.py b/mmcv/runner/hooks/eval.py
index 114406aee62..440372b52b0 100644
--- a/mmcv/runner/hooks/eval.py
+++ b/mmcv/runner/hooks/eval.py
@@ -11,11 +11,14 @@ class EvalHook(Hook):
     """Non-Distributed evaluation hook.
+    Notes: If new arguments are added for EvalHook, tools/test.py, tools/eval_metric.py may be affected.
+
     This hook will regularly perform evaluation in a given interval when
     performing in non-distributed environment.
+
     Args:
         dataloader (DataLoader): A PyTorch dataloader.
         start (int | None, optional): Evaluation starting epoch. It enables
@@ -32,8 +35,9 @@ class EvalHook(Hook):
             Options are the evaluation metrics to the test dataset. e.g.,
             ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
             segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
-            ``auto``, the first key will be used. The interval of
-            ``CheckpointHook`` should device EvalHook. Default: None.
+            ``auto``, the first key of the returned ``OrderedDict`` result
+            will be used. The interval of ``CheckpointHook`` should depend on
+            ``EvalHook``. Default: None.
         rule (str | None, optional): Comparison rule for best score. If set to
             None, it will infer a reasonable rule. Keys such as 'acc', 'top'
             .etc will be inferred by 'greater' rule. Keys contain 'loss' will
@@ -78,7 +82,7 @@ def __init__(self,
         assert isinstance(save_best, str) or save_best is None
         self.save_best = save_best
         self.eval_kwargs = eval_kwargs
-        self.initial_epoch_flag = True
+        self.initial_flag = True
 
         if self.save_best is not None:
             self._init_rule(rule, self.save_best)
@@ -121,21 +125,21 @@ def before_train_iter(self, runner):
         """Evaluate the model only at the start of training by iteration."""
         if self.by_epoch:
             return
-        if not self.initial_epoch_flag:
+        if not self.initial_flag:
             return
         if self.start is not None and runner.iter >= self.start:
             self.after_train_iter(runner)
-        self.initial_epoch_flag = False
+        self.initial_flag = False
 
     def before_train_epoch(self, runner):
         """Evaluate the model only at the start of training by epoch."""
         if not self.by_epoch:
             return
-        if not self.initial_epoch_flag:
+        if not self.initial_flag:
             return
         if self.start is not None and runner.epoch >= self.start:
             self.after_train_epoch(runner)
-        self.initial_epoch_flag = False
+        self.initial_flag = False
 
     def after_train_iter(self, runner):
         """Called after every training iter to evaluate the results."""
@@ -178,7 +182,8 @@ def evaluation_flag(self, runner):
                 # No evaluation if start is larger than the current time.
                 return False
             else:
-                # Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
+                # Evaluation only at epochs/iters 3, 5, 7...
+                # if start==3 and interval==2
                 if (current + 1 - self.start) % self.interval:
                     return False
         return True
@@ -187,7 +192,7 @@ def _save_ckpt(self, runner, key_score):
         if self.by_epoch:
             current = f'epoch_{runner.epoch + 1}'
         else:
-            current = f'iter_{runner.epoch + 1}'
+            current = f'iter_{runner.iter + 1}'
 
         best_score = runner.meta['hook_msgs'].get(
             'best_score', self.init_value_map[self.rule])
@@ -226,8 +231,10 @@ def evaluate(self, runner, results):
 
 class DistEvalHook(EvalHook):
     """Distributed evaluation hook.
+
     This hook will regularly perform evaluation in a given interval when
     performing in distributed environment.
+
     Args:
         dataloader (DataLoader): A PyTorch dataloader.
         start (int | None, optional): Evaluation starting epoch. It enables
@@ -244,8 +251,9 @@ class DistEvalHook(EvalHook):
             Options are the evaluation metrics to the test dataset.
             e.g., ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
             segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
-            ``auto``, the first key will be used. The interval of
-            ``CheckpointHook`` should device EvalHook. Default: None.
+            ``auto``, the first key of the returned ``OrderedDict`` result
+            will be used. The interval of ``CheckpointHook`` should depend on
+            ``EvalHook``. Default: None.
         rule (str | None, optional): Comparison rule for best score. If set to
             None, it will infer a reasonable rule. Keys such as 'acc', 'top'
             .etc will be inferred by 'greater' rule. Keys contain 'loss' will
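
Reviewer note: a minimal standalone sketch of the two behaviours this patch touches, the `start`/`interval` schedule checked in `evaluation_flag` and the checkpoint naming fixed in `_save_ckpt`. The helpers `should_evaluate` and `ckpt_name` are illustrative only, written for this note; they are not mmcv APIs.

```python
# Illustrative sketch only: `should_evaluate` and `ckpt_name` are ad-hoc
# helpers mirroring the patched logic, not part of mmcv's API.


def should_evaluate(current, start, interval):
    """Mirror `evaluation_flag`; `current` is the runner's zero-based
    epoch or iteration counter."""
    if start is None:
        # No starting point set: plain interval-based evaluation.
        return (current + 1) % interval == 0
    if current + 1 < start:
        # No evaluation if start is larger than the current time.
        return False
    # Evaluation only at epochs/iters 3, 5, 7... if start==3 and interval==2.
    return (current + 1 - start) % interval == 0


def ckpt_name(epoch, it, by_epoch):
    """Mirror the `_save_ckpt` fix: iteration-based runs must read the
    runner's iter counter, not its epoch counter."""
    if by_epoch:
        return f'epoch_{epoch + 1}'
    return f'iter_{it + 1}'  # pre-fix code used the epoch counter here


# start=3, interval=2 evaluates at (1-based) epochs 3, 5, 7, ...
assert [e + 1 for e in range(8)
        if should_evaluate(e, start=3, interval=2)] == [3, 5, 7]
# An iteration-based run at iter 4999 now saves 'iter_5000'.
assert ckpt_name(epoch=0, it=4999, by_epoch=False) == 'iter_5000'
```

Before the fix, an iteration-based run named checkpoints after `runner.epoch`, so checkpoints from many different iterations could collide on the same `iter_*` filename.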