From 32d072732ff6f2c502920aa9d015812b6f5c5a25 Mon Sep 17 00:00:00 2001
From: PeganovAnton
Date: Thu, 17 Nov 2022 13:03:24 +0400
Subject: [PATCH] Fix setting up of `ReduceLROnPlateau` learning rate scheduler
 (#5444)

* Fix tests

Signed-off-by: PeganovAnton

* Add accidentally lost changes

Signed-off-by: PeganovAnton

Signed-off-by: PeganovAnton
Signed-off-by: Hainan Xu
---
 nemo/core/config/schedulers.py            |  1 +
 tests/core/test_optimizers_schedulers.py  | 45 ++++++++++++++++++++++++
 2 files changed, 46 insertions(+)

diff --git a/nemo/core/config/schedulers.py b/nemo/core/config/schedulers.py
index c24d738fdaa0..94554d90ebdf 100644
--- a/nemo/core/config/schedulers.py
+++ b/nemo/core/config/schedulers.py
@@ -284,4 +284,5 @@ def get_scheduler_config(name: str, **kwargs: Optional[Dict[str, Any]]) -> Sched
     'WarmupAnnealingParams': WarmupAnnealingParams,
     'PolynomialDecayAnnealingParams': PolynomialDecayAnnealingParams,
     'PolynomialHoldDecayAnnealingParams': PolynomialHoldDecayAnnealingParams,
+    'ReduceLROnPlateauParams': ReduceLROnPlateauParams,
 }
diff --git a/tests/core/test_optimizers_schedulers.py b/tests/core/test_optimizers_schedulers.py
index 1e3bf2896c99..2797964e3455 100644
--- a/tests/core/test_optimizers_schedulers.py
+++ b/tests/core/test_optimizers_schedulers.py
@@ -309,6 +309,51 @@ def test_sched_config_parse_from_cls(self):
         scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
         assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
 
+    @pytest.mark.unit
+    def test_sched_config_parse_reduce_on_plateau(self):
+        model = TempModel()
+        opt_cls = optim.get_optimizer('novograd')
+        opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
+        reduce_on_plateau_parameters = {
+            'mode': 'min',
+            'factor': 0.5,
+            'patience': 1,
+            'threshold': 1e-4,
+            'threshold_mode': 'rel',
+            'min_lr': 1e-6,
+            'eps': 1e-7,
+            'verbose': True,
+            'cooldown': 1,
+        }
+        basic_sched_config = {
+            'name': 'ReduceLROnPlateau',
+            'monitor': 'val_loss',
+            'reduce_on_plateau': True,
+            'max_steps': self.MAX_STEPS,
+        }
+        basic_sched_config.update(reduce_on_plateau_parameters)
+        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
+        assert isinstance(scheduler_setup['scheduler'], torch.optim.lr_scheduler.ReduceLROnPlateau)
+        for k, v in reduce_on_plateau_parameters.items():
+            if k == 'min_lr':
+                k += 's'
+                v = [v]
+            found_v = getattr(scheduler_setup['scheduler'], k)
+            assert (
+                found_v == v
+            ), f"Wrong value `{repr(found_v)}` for `ReduceLROnPlateau` parameter `{k}`. Expected `{repr(v)}`."
+        dict_config = omegaconf.OmegaConf.create(basic_sched_config)
+        scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
+        assert isinstance(scheduler_setup['scheduler'], torch.optim.lr_scheduler.ReduceLROnPlateau)
+        for k, v in reduce_on_plateau_parameters.items():
+            if k == 'min_lr':
+                k += 's'
+                v = [v]
+            found_v = getattr(scheduler_setup['scheduler'], k)
+            assert (
+                found_v == v
+            ), f"Wrong value `{repr(found_v)}` for `ReduceLROnPlateau` parameter `{k}`. Expected `{repr(v)}`."
+
     @pytest.mark.unit
     def test_WarmupPolicy(self):
         model = TempModel()
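
For context, the sketch below shows what the registered `ReduceLROnPlateauParams` entry enables: `prepare_lr_scheduler` can now resolve a `ReduceLROnPlateau` scheduler by name from a plain scheduler config. It is a minimal sketch that mirrors the unit test above and uses only calls that appear in it; the `from nemo.core import optim` import path and the small `DummyModel` module are assumptions made to keep the example self-contained, not part of the patch.

    # Minimal sketch, assuming NeMo with this patch applied.
    import torch
    import torch.nn as nn

    from nemo.core import optim  # assumed import path, as in NeMo's own tests


    class DummyModel(nn.Module):
        """Hypothetical stand-in for any torch.nn.Module with trainable parameters."""

        def __init__(self):
            super().__init__()
            self.layer = nn.Linear(8, 1)


    model = DummyModel()
    opt_cls = optim.get_optimizer('novograd')
    opt = opt_cls(model.parameters(), lr=0.01)

    # 'reduce_on_plateau': True marks the scheduler as one that steps on a
    # monitored metric ('val_loss') rather than on every optimizer step.
    sched_config = {
        'name': 'ReduceLROnPlateau',
        'monitor': 'val_loss',
        'reduce_on_plateau': True,
        'max_steps': 100,
        'mode': 'min',
        'factor': 0.5,
        'patience': 1,
        'min_lr': 1e-6,
    }

    scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, sched_config)
    assert isinstance(scheduler_setup['scheduler'], torch.optim.lr_scheduler.ReduceLROnPlateau)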