diff --git a/CHANGELOG.md b/CHANGELOG.md
index fbb8eb59cc6aa6..9bea3060682fa5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,6 +42,33 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Removed
 
+- Removed deprecated classification metrics from `pytorch_lightning.metrics.functional.classification` ([#7499](https://github.com/PyTorchLightning/pytorch-lightning/pull/7499))
+
+
+- Removed deprecated data parallel classes `LightningDataParallel` and `LightningDistributedDataParallel` from `pytorch_lightning.overrides.data_parallel` ([#7510](https://github.com/PyTorchLightning/pytorch-lightning/pull/7510))
+
+
+- Removed deprecated trainer attributes `get_model` and `accelerator_backend` ([#7502](https://github.com/PyTorchLightning/pytorch-lightning/pull/7502))
+
+
+- Removed deprecated utility modules `model_utils`, `warning_utils`, `xla_device_utils` and partially `argparse_utils` ([#7503](https://github.com/PyTorchLightning/pytorch-lightning/pull/7503))
+
+
+- Removed deprecated trainer attributes `on_cpu`, `on_tpu`, `use_tpu`, `on_gpu`, `use_dp`, `use_ddp`, `use_ddp2`, `use_horovod`, `use_single_gpu` ([#7501](https://github.com/PyTorchLightning/pytorch-lightning/pull/7501))
+
+
+### Fixed
+
+
+- Fixed parsing of multiple training dataloaders ([#7433](https://github.com/PyTorchLightning/pytorch-lightning/pull/7433))
+
+
+- Fixed recursive passing of the `wrong_dtype` keyword argument in `pytorch_lightning.utilities.apply_to_collection` ([#7433](https://github.com/PyTorchLightning/pytorch-lightning/pull/7433))
+
+
+- Fixed setting the correct `DistributedType` for the `ddp_cpu` (spawn) backend ([#7492](https://github.com/PyTorchLightning/pytorch-lightning/pull/7492))
+
+
 ## [1.3.1] - 2021-05-11
 
 ### Fixed
diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index a8a72c1831600c..d826de1047851c 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -522,7 +522,7 @@ def set_distributed_mode(self, distributed_backend: Optional[str] = None):
 
         # special case with DDP on CPUs
         if self.distributed_backend == "ddp_cpu":
-            self._distrib_type = DistributedType.DDP
+            self._distrib_type = DistributedType.DDP_SPAWN
             if self.num_gpus > 0:
                 rank_zero_warn(
                     'You requested one or more GPUs, but set the backend to `ddp_cpu`. Training will not use GPUs.'
diff --git a/tests/accelerators/test_accelerator_connector.py b/tests/accelerators/test_accelerator_connector.py
index a57fbb4afcbdcc..50ea624dbec9d4 100644
--- a/tests/accelerators/test_accelerator_connector.py
+++ b/tests/accelerators/test_accelerator_connector.py
@@ -437,13 +437,15 @@ def test_ipython_incompatible_backend_error(*_):
     with pytest.raises(MisconfigurationException, match="backend ddp is not compatible"):
         Trainer(accelerator="ddp", gpus=2)
 
-    with pytest.raises(MisconfigurationException, match="backend ddp is not compatible"):
-        Trainer(accelerator="ddp_cpu", num_processes=2)
-
     with pytest.raises(MisconfigurationException, match="backend ddp2 is not compatible"):
         Trainer(accelerator="ddp2", gpus=2)
 
 
+@mock.patch("pytorch_lightning.utilities._IS_INTERACTIVE", return_value=True)
+def test_ipython_compatible_backend(*_):
+    Trainer(accelerator="ddp_cpu", num_processes=2)
+
+
 @pytest.mark.parametrize(
     ["accelerator", "plugin"],
     [('ddp_spawn', 'ddp_sharded'), (None, 'ddp_sharded')],
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index b717302adf31fc..d234ba1b36fd09 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -1190,15 +1190,7 @@ def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
         ),
         (
             dict(accelerator="ddp_cpu", num_processes=2, gpus=None),
-            dict(
-                use_dp=False,
-                use_ddp=True,
-                use_ddp2=False,
-                num_gpus=0,
-                on_gpu=False,
-                use_single_gpu=False,
-                num_processes=2,
-            ),
+            dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
         ),
         (
             dict(accelerator="ddp2", gpus=None),
@@ -1250,15 +1242,7 @@ def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
         ),
         (
             dict(accelerator="ddp_cpu", num_processes=2, gpus=1),
-            dict(
-                use_dp=False,
-                use_ddp=True,
-                use_ddp2=False,
-                num_gpus=0,
-                on_gpu=False,
-                use_single_gpu=False,
-                num_processes=2,
-            ),
+            dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
         ),
         (
             dict(accelerator="ddp2", gpus=1),
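For reviewers, a minimal sketch of the behavior this patch pins down. It assumes the PL 1.3-era API exercised by the tests above: `DistributedType` and `DeviceType` exported from `pytorch_lightning.utilities`, and the private `_distrib_type`/`_device_type` attributes that the parametrized test reads off the `Trainer`.

```python
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import DeviceType, DistributedType

# `ddp_cpu` launches its worker processes via torch.multiprocessing.spawn,
# so after this patch the accelerator connector reports DDP_SPAWN rather
# than plain DDP. The connector runs inside Trainer.__init__, so simply
# constructing the Trainer is enough to exercise set_distributed_mode().
trainer = Trainer(accelerator="ddp_cpu", num_processes=2)
assert trainer._distrib_type is DistributedType.DDP_SPAWN
assert trainer._device_type is DeviceType.CPU
```

Before the fix, the first assertion failed with `_distrib_type == DistributedType.DDP`, even though `ddp_cpu` uses the spawn launcher under the hood; misreporting the type also tripped the interactive-environment check removed from `test_ipython_incompatible_backend_error` above, since spawn-based backends are allowed in IPython.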