From 36424ec0e0563b52facbb5db65dea945e80cd89c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 25 Apr 2023 11:34:13 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../language_modeling/megatron_lm_ckpt_to_nemo.py |  2 +-
 nemo/collections/nlp/models/nlp_model.py          |  2 +-
 nemo/collections/nlp/parts/nlp_overrides.py       |  2 +-
 tests/core/test_exp_manager.py                    | 13 +++----------
 4 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py b/examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py
index 240586dbb05d..e06f52a55acd 100644
--- a/examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py
+++ b/examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py
@@ -42,11 +42,11 @@
 from typing import Any, Optional
 
 import torch
+from lightning_fabric.utilities.cloud_io import _load as pl_load
 from megatron.core import parallel_state
 from pytorch_lightning.core.saving import _load_state as ptl_load_state
 from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
 from pytorch_lightning.trainer.trainer import Trainer
-from lightning_fabric.utilities.cloud_io import _load as pl_load
 from pytorch_lightning.utilities.migration import pl_legacy_patch
 
 from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
diff --git a/nemo/collections/nlp/models/nlp_model.py b/nemo/collections/nlp/models/nlp_model.py
index cdb9755a5c00..10637f0fbbc6 100644
--- a/nemo/collections/nlp/models/nlp_model.py
+++ b/nemo/collections/nlp/models/nlp_model.py
@@ -18,12 +18,12 @@
 import os
 from typing import Any, Optional
 
+from lightning_fabric.utilities.cloud_io import _load as pl_load
 from omegaconf import DictConfig, OmegaConf
 from pytorch_lightning import Trainer
 from pytorch_lightning.core.saving import _load_state as ptl_load_state
 from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
 from pytorch_lightning.utilities import rank_zero_only
-from lightning_fabric.utilities.cloud_io import _load as pl_load
 from pytorch_lightning.utilities.migration import pl_legacy_patch
 from transformers import TRANSFORMERS_CACHE
 
diff --git a/nemo/collections/nlp/parts/nlp_overrides.py b/nemo/collections/nlp/parts/nlp_overrides.py
index 6ffceecc923a..5bf1cceedaad 100644
--- a/nemo/collections/nlp/parts/nlp_overrides.py
+++ b/nemo/collections/nlp/parts/nlp_overrides.py
@@ -24,6 +24,7 @@
 import pytorch_lightning as pl
 import torch
 from omegaconf import OmegaConf
+from pytorch_lightning.loops.fetchers import _DataFetcher
 from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
 from pytorch_lightning.plugins import ClusterEnvironment
 from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
@@ -31,7 +32,6 @@
 from pytorch_lightning.strategies.ddp import DDPStrategy
 from pytorch_lightning.trainer.trainer import Trainer
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from pytorch_lightning.loops.fetchers import _DataFetcher
 from torch.distributed.algorithms.ddp_comm_hooks.debugging_hooks import noop_hook
 from torch.nn.parallel import DistributedDataParallel
 
diff --git a/tests/core/test_exp_manager.py b/tests/core/test_exp_manager.py
index 46a030d5bc53..2694e86be82d 100644
--- a/tests/core/test_exp_manager.py
+++ b/tests/core/test_exp_manager.py
@@ -329,17 +329,13 @@ def test_resume(self, tmp_path):
             {"resume_if_exists": True, "explicit_log_dir": str(tmp_path / "test_resume" / "default" / "version_0")},
         )
         checkpoint = Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--last.ckpt")
-        assert (
-            Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()
-        )
+        assert Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()
 
         # Succeed again and make sure that run_0 exists and previous log files were moved
         test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
         exp_manager(test_trainer, {"resume_if_exists": True, "explicit_log_dir": str(log_dir)})
         checkpoint = Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--last.ckpt")
-        assert (
-            Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()
-        )
+        assert Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()
         prev_run_dir = Path(tmp_path / "test_resume" / "default" / "version_0" / "run_0")
         assert prev_run_dir.exists()
         prev_log = Path(tmp_path / "test_resume" / "default" / "version_0" / "run_0" / "lightning_logs.txt")
@@ -372,10 +368,7 @@ def test_resume(self, tmp_path):
                 "explicit_log_dir": str(dirpath_log_dir),
             },
         )
-        assert (
-            Path(test_trainer._checkpoint_connector._ckpt_path).resolve()
-            == dirpath_checkpoint.resolve()
-        )
+        assert Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == dirpath_checkpoint.resolve()
 
     @pytest.mark.unit
     def test_nemo_checkpoint_save_best_model_1(self, tmp_path):