diff --git a/otx/algorithms/classification/adapters/mmcls/configurer.py b/otx/algorithms/classification/adapters/mmcls/configurer.py
index df1984ac53e..ecdd706ca74 100644
--- a/otx/algorithms/classification/adapters/mmcls/configurer.py
+++ b/otx/algorithms/classification/adapters/mmcls/configurer.py
@@ -387,12 +387,7 @@ def configure_device(self, cfg, training):
             cfg.distributed = True
             self.configure_distributed(cfg)
         elif "gpu_ids" not in cfg:
-            gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES")
-            logger.info(f"CUDA_VISIBLE_DEVICES = {gpu_ids}")
-            if gpu_ids is not None:
-                cfg.gpu_ids = range(len(gpu_ids.split(",")))
-            else:
-                cfg.gpu_ids = range(1)
+            cfg.gpu_ids = range(1)
 
         # consider "cuda" and "cpu" device only
         if not torch.cuda.is_available():
diff --git a/otx/algorithms/detection/adapters/mmdet/configurer.py b/otx/algorithms/detection/adapters/mmdet/configurer.py
index 54831bfac5c..47fb07ed418 100644
--- a/otx/algorithms/detection/adapters/mmdet/configurer.py
+++ b/otx/algorithms/detection/adapters/mmdet/configurer.py
@@ -549,12 +549,7 @@ def configure_device(self, cfg, training):
             cfg.distributed = True
             self.configure_distributed(cfg)
         elif "gpu_ids" not in cfg:
-            gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES")
-            logger.info(f"CUDA_VISIBLE_DEVICES = {gpu_ids}")
-            if gpu_ids is not None:
-                cfg.gpu_ids = range(len(gpu_ids.split(",")))
-            else:
-                cfg.gpu_ids = range(1)
+            cfg.gpu_ids = range(1)
 
         # consider "cuda" and "cpu" device only
         if not torch.cuda.is_available():
diff --git a/otx/algorithms/segmentation/adapters/mmseg/configurer.py b/otx/algorithms/segmentation/adapters/mmseg/configurer.py
index 0fc24cd5f49..2444c23c519 100644
--- a/otx/algorithms/segmentation/adapters/mmseg/configurer.py
+++ b/otx/algorithms/segmentation/adapters/mmseg/configurer.py
@@ -381,12 +381,7 @@ def configure_device(self, cfg: Config, training: bool) -> None:
             cfg.distributed = True
             self.configure_distributed(cfg)
         elif "gpu_ids" not in cfg:
-            gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES")
-            logger.info(f"CUDA_VISIBLE_DEVICES = {gpu_ids}")
-            if gpu_ids is not None:
-                cfg.gpu_ids = range(len(gpu_ids.split(",")))
-            else:
-                cfg.gpu_ids = range(1)
+            cfg.gpu_ids = range(1)
 
         # consider "cuda" and "cpu" device only
         if not torch.cuda.is_available():