diff --git a/pytorch_lightning/accelerators/ddp2_backend.py b/pytorch_lightning/accelerators/ddp2_backend.py
index 7414535350f64..1b55a983c894e 100644
--- a/pytorch_lightning/accelerators/ddp2_backend.py
+++ b/pytorch_lightning/accelerators/ddp2_backend.py
@@ -16,13 +16,9 @@

 import torch

-from pytorch_lightning import _logger as log
-from pytorch_lightning.utilities import AMPType
-from pytorch_lightning.utilities.distributed import rank_zero_only
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.core.step_result import Result
 from pytorch_lightning.accelerators.ddp_base_backend import DDPBase
-from pytorch_lightning.plugins.apex import ApexPlugin

 try:
     from hydra.utils import to_absolute_path, get_original_cwd
@@ -38,7 +34,6 @@ class DDP2Backend(DDPBase):
     def __init__(self, trainer):
         super().__init__(trainer)
         self.task_idx = None
-        self.precision_backend = None

     def setup(self, model):
         self._resolve_task_idx()
diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py
index 614d38bc4dc41..17739a605fa2b 100644
--- a/pytorch_lightning/accelerators/ddp_backend.py
+++ b/pytorch_lightning/accelerators/ddp_backend.py
@@ -22,9 +22,7 @@
 import numpy as np
 import torch

-from pytorch_lightning import _logger as log
-from pytorch_lightning.utilities import AMPType
-from pytorch_lightning.utilities.distributed import rank_zero_only, find_free_network_port
+from pytorch_lightning.utilities.distributed import find_free_network_port
 from pytorch_lightning.accelerators.ddp_base_backend import DDPBase

 try:
@@ -35,11 +33,6 @@
 else:
     HYDRA_AVAILABLE = True

-try:
-    from apex import amp
-except ImportError:
-    amp = None
-

 class DDPBackend(DDPBase):
diff --git a/pytorch_lightning/accelerators/ddp_base_backend.py b/pytorch_lightning/accelerators/ddp_base_backend.py
index 10e5cab3dcbb9..b0349ac6bbd4d 100644
--- a/pytorch_lightning/accelerators/ddp_base_backend.py
+++ b/pytorch_lightning/accelerators/ddp_base_backend.py
@@ -22,7 +22,6 @@
 from pytorch_lightning.utilities.cloud_io import atomic_save
 from pytorch_lightning.utilities.distributed import rank_zero_warn, rank_zero_only
 from pytorch_lightning import _logger as log
-from pytorch_lightning.plugins.apex import ApexPlugin

 try:
     from hydra.utils import to_absolute_path, get_original_cwd
@@ -37,7 +36,6 @@ class DDPBase(Accelerator):

     def __init__(self, trainer):
         super().__init__(trainer)
-        self.precision_backend = None

     def training_step(self, args):
         if self.trainer.amp_backend == AMPType.NATIVE:
@@ -151,9 +149,7 @@ def ddp_train_tmp(self, process_idx, mp_queue, model, is_master=False, proc_offs

         # AMP
-        # run through amp wrapper before going to distributed DP
-        if self.trainer.amp_backend == AMPType.APEX:
-            self.precision_backend = ApexPlugin(self.trainer)
-            model, optimizers = self.precision_backend._init(model)
+        model, optimizers = self.trainer.precision_connector.connect(model, optimizers)

         # device ids change depending on the DDP setup
         device_ids = self.get_device_ids()
diff --git a/pytorch_lightning/accelerators/ddp_spawn_backend.py b/pytorch_lightning/accelerators/ddp_spawn_backend.py
index 467712a121fbb..fc2fc88563e1d 100644
--- a/pytorch_lightning/accelerators/ddp_spawn_backend.py
+++ b/pytorch_lightning/accelerators/ddp_spawn_backend.py
@@ -19,11 +19,6 @@
 from pytorch_lightning.utilities.distributed import find_free_network_port
 from pytorch_lightning.accelerators.ddp_base_backend import DDPBase

-try:
-    from apex import amp
-except ImportError:
-    amp = None
-

 class DDPSpawnBackend(DDPBase):
diff --git a/pytorch_lightning/accelerators/dp_backend.py b/pytorch_lightning/accelerators/dp_backend.py
index 09a373c3d4ef0..4bced57e9458c 100644
--- a/pytorch_lightning/accelerators/dp_backend.py
+++ b/pytorch_lightning/accelerators/dp_backend.py
@@ -20,12 +20,6 @@
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.core.step_result import Result
 from pytorch_lightning.accelerators.base_backend import Accelerator
-from pytorch_lightning.plugins.apex import ApexPlugin
-
-try:
-    from apex import amp
-except ImportError:
-    amp = None


 class DataParallelBackend(Accelerator):
@@ -33,7 +27,6 @@ class DataParallelBackend(Accelerator):
     def __init__(self, trainer):
         super().__init__(trainer)
         self.model_autocast_original_forward = None
-        self.precision_backend = None

     def setup(self, model):
         # call setup after the ddp process has connected
@@ -91,8 +84,7 @@ def __init_nvidia_apex(self, model):
                 f' See this note from NVIDIA for more info: https://github.com/NVIDIA/apex/issues/227.'
                 f' We recommend you switch to ddp if you want to use amp')
         else:
-            self.precision_backend = ApexPlugin(self.trainer)
-            model, optimizers = self.precision_backend._init(model)
+            model, optimizers = self.trainer.precision_connector.connect(model, self.trainer.optimizers)

         return model
diff --git a/pytorch_lightning/accelerators/gpu_backend.py b/pytorch_lightning/accelerators/gpu_backend.py
index ec3e84e840b02..9a00d34e05293 100644
--- a/pytorch_lightning/accelerators/gpu_backend.py
+++ b/pytorch_lightning/accelerators/gpu_backend.py
@@ -15,7 +15,6 @@
 import torch
 from pytorch_lightning.utilities import AMPType
 from pytorch_lightning.accelerators.base_backend import Accelerator
-from pytorch_lightning.plugins.apex import ApexPlugin


 class GPUBackend(Accelerator):
@@ -23,7 +22,6 @@ class GPUBackend(Accelerator):

     def __init__(self, trainer):
         super().__init__(trainer)
-        self.precision_backend = None

     def setup(self, model):

@@ -40,9 +38,8 @@ def setup(self, model):
         self.trainer.lr_schedulers = lr_schedulers
         self.trainer.optimizer_frequencies = optimizer_frequencies

-        if self.trainer.amp_backend == AMPType.APEX:
-            self.precision_backend = ApexPlugin(self.trainer)
-            model, optimizers = self.precision_backend._init(model)
+        # init precision
+        model, optimizers = self.trainer.precision_connector.connect(model, optimizers)

         self.trainer.model = model
diff --git a/pytorch_lightning/accelerators/horovod_backend.py b/pytorch_lightning/accelerators/horovod_backend.py
index 76887a6f87214..ba4e927a4ce13 100644
--- a/pytorch_lightning/accelerators/horovod_backend.py
+++ b/pytorch_lightning/accelerators/horovod_backend.py
@@ -13,12 +13,10 @@
 # limitations under the License.
 from contextlib import ExitStack
 import torch
-from pytorch_lightning.core import LightningModule
 from pytorch_lightning.utilities import AMPType
 from pytorch_lightning.accelerators.base_backend import Accelerator
 from pytorch_lightning.utilities.distributed import rank_zero_only
 from torch.optim.lr_scheduler import _LRScheduler
-from pytorch_lightning.plugins.apex import ApexPlugin

 try:
     import horovod.torch as hvd
@@ -33,7 +31,6 @@ class HorovodBackend(Accelerator):

     def __init__(self, trainer):
         super().__init__(trainer)
-        self.precision_backend = None

     def setup(self, model):
         # call setup after the ddp process has connected
@@ -83,9 +80,8 @@ def filter_named_parameters(model, optimizer):
             for optimizer in self.trainer.optimizers
         ]

-        if self.trainer.amp_backend == AMPType.APEX:
-            self.precision_backend = ApexPlugin(self.trainer)
-            model, optimizers = self.precision_backend._init(model)
+        # 16-bit
+        model, self.trainer.optimizers = self.trainer.precision_connector.connect(model, self.trainer.optimizers)

         # Update logger rank info from Horovod to avoid race conditions from different ranks
         # creating directories / writing files in the same locations.
diff --git a/pytorch_lightning/plugins/apex.py b/pytorch_lightning/plugins/apex.py
index 27ccff5592520..9b51a8a0ab1c5 100644
--- a/pytorch_lightning/plugins/apex.py
+++ b/pytorch_lightning/plugins/apex.py
@@ -23,8 +23,8 @@ class ApexPlugin:
     def __init__(self, trainer):
         self.trainer = trainer

-    def _init(self, model):
-        model, optimizers = self.configure_apex(model, self.trainer.optimizers, self.trainer.amp_level)
+    def connect(self, model, optimizers):
+        model, optimizers = self.configure_apex(model, optimizers, self.trainer.amp_level)
         self.trainer.optimizers = optimizers
         self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
         return model, optimizers
diff --git a/pytorch_lightning/plugins/native_amp.py b/pytorch_lightning/plugins/native_amp.py
new file mode 100644
index 0000000000000..3089a0558f0a8
--- /dev/null
+++ b/pytorch_lightning/plugins/native_amp.py
@@ -0,0 +1,29 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+
+
+class NativeAMP:
+
+    def __init__(self, trainer):
+        self.trainer = trainer
+
+    def connect(self, model, optimizers):
+        self.trainer.optimizers = optimizers
+        return model, optimizers
+
+    def training_step(self, fx, args):
+        with torch.cuda.amp.autocast():
+            output = fx(*args)
+        return output
diff --git a/pytorch_lightning/trainer/connectors/precision_connector.py b/pytorch_lightning/trainer/connectors/precision_connector.py
index 55fb945caf09e..296b9a178ab43 100644
--- a/pytorch_lightning/trainer/connectors/precision_connector.py
+++ b/pytorch_lightning/trainer/connectors/precision_connector.py
@@ -13,12 +13,15 @@
 # limitations under the License.
 from pytorch_lightning import _logger as log
 from pytorch_lightning.utilities import APEX_AVAILABLE, NATIVE_AMP_AVALAIBLE, rank_zero_warn, AMPType
+from pytorch_lightning.plugins.native_amp import NativeAMP
+from pytorch_lightning.plugins.apex import ApexPlugin


 class PrecisionConnector:

     def __init__(self, trainer):
         self.trainer = trainer
+        self.backend = None

     def on_trainer_init(self, precision, amp_level, amp_backend):
         # AMP init
@@ -52,6 +55,8 @@ def _setup_amp_backend(self, amp_type: str):
             else:
                 log.info('Using native 16bit precision.')
                 self.trainer.amp_backend = AMPType.NATIVE
+                self.backend = NativeAMP(self.trainer)
+
         if amp_type == 'apex':
             if not APEX_AVAILABLE:
                 rank_zero_warn('You have asked for Apex AMP but you have not installed it yet.'
@@ -59,8 +64,16 @@
             else:
                 log.info('Using APEX 16bit precision.')
                 self.trainer.amp_backend = AMPType.APEX
+                self.backend = ApexPlugin(self.trainer)
+
         if not self.trainer.amp_backend:
             raise ModuleNotFoundError(
                 f'You have asked for AMP support {amp_type}, but there is no support on your side yet.'
                 f' Consider installing torch >= 1.6 or NVIDIA Apex.'
             )
+
+    def connect(self, model, optimizers):
+        if self.backend:
+            model, optimizers = self.backend.connect(model, optimizers)
+
+        return model, optimizers
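
The net effect of the diff above is a single precision plugin interface: PrecisionConnector._setup_amp_backend stores either NativeAMP or ApexPlugin on self.backend, and each accelerator now calls self.trainer.precision_connector.connect(model, optimizers) instead of constructing ApexPlugin itself. Below is a minimal, self-contained sketch of that delegation pattern; the stub trainer and stub backend are hypothetical stand-ins for illustration, not Lightning code.

class _StubTrainer:
    """Hypothetical stand-in for the Trainer attributes used by the plugins."""
    def __init__(self):
        self.optimizers = []


class _StubBackend:
    """Stand-in for NativeAMP / ApexPlugin: both expose connect(model, optimizers)."""
    def __init__(self, trainer):
        self.trainer = trainer

    def connect(self, model, optimizers):
        # a real backend would wrap the model/optimizers here (amp.initialize, autocast, ...)
        self.trainer.optimizers = optimizers
        return model, optimizers


class PrecisionConnector:
    def __init__(self, trainer):
        self.trainer = trainer
        self.backend = None  # set to NativeAMP(...) or ApexPlugin(...) when 16-bit is requested

    def connect(self, model, optimizers):
        # 32-bit training: no backend configured, pass everything through unchanged
        if self.backend:
            model, optimizers = self.backend.connect(model, optimizers)
        return model, optimizers


trainer = _StubTrainer()
connector = PrecisionConnector(trainer)
connector.backend = _StubBackend(trainer)  # roughly what _setup_amp_backend does for 16-bit
model, optimizers = connector.connect(model=object(), optimizers=[])

With this shape, supporting another precision backend only requires implementing connect(model, optimizers) and wiring it up in _setup_amp_backend.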