From dca6f7427b2c1c19a28d1023dcc5a1d789f523ea Mon Sep 17 00:00:00 2001
From: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com>
Date: Mon, 15 Apr 2024 09:30:43 -0700
Subject: [PATCH] Remove precision args in trainer due to PTL update (#8908)

* Fix precision args in trainer due to PTL update

Signed-off-by: yaoyu-33

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* roll back one change

Signed-off-by: yaoyu-33

---------

Signed-off-by: yaoyu-33
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Abhishree Thittenamane <47577437+athitten@users.noreply.github.com>
Co-authored-by: Pablo Garay
---
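Note: with PyTorch Lightning >= 2.1, Trainer raises a ValueError when it
receives both a precision plugin (via `plugins`) and the `precision`
argument, so the conversion scripts below keep only the plugin. A minimal
sketch of the resulting pattern (the plugin class and the sample
`precision` value are assumptions for illustration, not code from this
patch):

    from pytorch_lightning import Trainer
    from pytorch_lightning.plugins.precision import MixedPrecision

    plugins = []
    precision = 'bf16-mixed'  # hypothetical value; normally parsed from args
    if precision in ('16-mixed', 'bf16-mixed'):
        # Precision is carried entirely by the plugin; passing
        # precision=... to Trainer as well would raise on PTL >= 2.1.
        plugins.append(MixedPrecision(precision=precision, device='cpu'))

    trainer = Trainer(plugins=plugins, accelerator='cpu')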
 .../multimodal_llm/neva/convert_hf_llava_to_neva.py      |  2 +-
 .../megatron_change_num_partitions.py                    | 10 +++-------
 .../convert_baichuan2_hf_to_nemo.py                      |  2 +-
 .../convert_chatglm_hf_to_nemo.py                        |  2 +-
 .../convert_mistral_7b_hf_to_nemo.py                     |  2 +-
 .../convert_mixtral_hf_to_nemo.py                        |  2 +-
 .../convert_starcoder2_hf_to_nemo.py                     |  2 +-
 7 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/examples/multimodal/multimodal_llm/neva/convert_hf_llava_to_neva.py b/examples/multimodal/multimodal_llm/neva/convert_hf_llava_to_neva.py
index c9263ea85bbf..2cbb4c2b3b82 100644
--- a/examples/multimodal/multimodal_llm/neva/convert_hf_llava_to_neva.py
+++ b/examples/multimodal/multimodal_llm/neva/convert_hf_llava_to_neva.py
@@ -205,7 +205,7 @@ def convert(args):
     nemo_config.precision = precision
     print(f"nemo_config: {nemo_config}")
 
-    trainer = Trainer(plugins=plugins, accelerator='cpu', precision=precision, strategy=NLPDDPStrategy())
+    trainer = Trainer(plugins=plugins, accelerator='cpu', strategy=NLPDDPStrategy())
 
     hidden_size = hf_config["hidden_size"]
     head_num = hf_config["num_attention_heads"]
diff --git a/examples/nlp/language_modeling/megatron_change_num_partitions.py b/examples/nlp/language_modeling/megatron_change_num_partitions.py
index 436661e01b5d..c035346e3bf1 100644
--- a/examples/nlp/language_modeling/megatron_change_num_partitions.py
+++ b/examples/nlp/language_modeling/megatron_change_num_partitions.py
@@ -938,7 +938,7 @@ def main():
     # Set precision None after precision plugins are created as PTL >= 2.1 does not allow both
     # precision plugins and precision to exist
     precision = None
-    trainer = Trainer(plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=precision)
+    trainer = Trainer(plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu")
 
     if tp_size < 0 or pp_size < 0:
         logging.info(f"Loading model config from {args.model_file} to get TP and PP size")
@@ -1205,9 +1205,7 @@ def main():
         if vp_size > 1:
             set_virtual_parallel_rank_safely(None)
 
-        trainer = Trainer(
-            plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=precision
-        )
+        trainer = Trainer(plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu")
 
         with open_dict(model.cfg):
             if args.tokenizer_model_path is not None:
@@ -1413,9 +1411,7 @@ def main():
                 app_state.pipeline_model_parallel_size * app_state.tensor_model_parallel_size
             )
 
-            trainer = Trainer(
-                plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu", precision=precision
-            )
+            trainer = Trainer(plugins=plugins, devices=1, strategy=NLPDDPStrategy(), accelerator="cpu")
             if args.tokenizer_model_path is not None:
                 with open_dict(model.cfg):
                     model.cfg.tokenizer.model = args.tokenizer_model_path
diff --git a/scripts/checkpoint_converters/convert_baichuan2_hf_to_nemo.py b/scripts/checkpoint_converters/convert_baichuan2_hf_to_nemo.py
index 585741de9b9a..b87f7e028cdb 100644
--- a/scripts/checkpoint_converters/convert_baichuan2_hf_to_nemo.py
+++ b/scripts/checkpoint_converters/convert_baichuan2_hf_to_nemo.py
@@ -175,7 +175,7 @@ def convert(args):
     nemo_config.precision = precision
     print(f"nemo_config: {nemo_config}")
 
-    trainer = Trainer(plugins=plugins, accelerator='cpu', precision=precision, strategy=NLPDDPStrategy())
+    trainer = Trainer(plugins=plugins, accelerator='cpu', strategy=NLPDDPStrategy())
 
     hidden_size = hf_config["hidden_size"]
     head_num = hf_config["num_attention_heads"]
diff --git a/scripts/checkpoint_converters/convert_chatglm_hf_to_nemo.py b/scripts/checkpoint_converters/convert_chatglm_hf_to_nemo.py
index c3f210deefac..363e4de09ef7 100644
--- a/scripts/checkpoint_converters/convert_chatglm_hf_to_nemo.py
+++ b/scripts/checkpoint_converters/convert_chatglm_hf_to_nemo.py
@@ -142,7 +142,7 @@ def convert(args):
 
     nemo_config.precision = precision
 
-    trainer = Trainer(plugins=plugins, accelerator='cpu', precision=precision, strategy=NLPDDPStrategy())
+    trainer = Trainer(plugins=plugins, accelerator='cpu', strategy=NLPDDPStrategy())
 
     hidden_size = hf_config["hidden_size"]
     head_num = hf_config["num_attention_heads"]
diff --git a/scripts/checkpoint_converters/convert_mistral_7b_hf_to_nemo.py b/scripts/checkpoint_converters/convert_mistral_7b_hf_to_nemo.py
index db0fe28cbf73..cb11bb5da564 100644
--- a/scripts/checkpoint_converters/convert_mistral_7b_hf_to_nemo.py
+++ b/scripts/checkpoint_converters/convert_mistral_7b_hf_to_nemo.py
@@ -193,7 +193,7 @@ def convert(args):
     nemo_config.precision = precision
     logging.info(f"nemo_config: {nemo_config}")
 
-    trainer = Trainer(plugins=plugins, accelerator='cpu', precision=precision, strategy=NLPDDPStrategy())
+    trainer = Trainer(plugins=plugins, accelerator='cpu', strategy=NLPDDPStrategy())
 
     hidden_size = nemo_config.hidden_size
     head_num = nemo_config.num_attention_heads
diff --git a/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py b/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py
index d8ad9d5030b8..ac323757a2f6 100644
--- a/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py
+++ b/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py
@@ -194,7 +194,7 @@ def convert(args):
     nemo_config.precision = precision
     print(f"nemo_config: {nemo_config}")
 
-    trainer = Trainer(plugins=plugins, accelerator='cpu', precision=precision, strategy=NLPDDPStrategy())
+    trainer = Trainer(plugins=plugins, accelerator='cpu', strategy=NLPDDPStrategy())
 
     hidden_size = nemo_config.hidden_size
     head_num = nemo_config.num_attention_heads
diff --git a/scripts/checkpoint_converters/convert_starcoder2_hf_to_nemo.py b/scripts/checkpoint_converters/convert_starcoder2_hf_to_nemo.py
index eccca3a04621..fc898c797a9e 100644
--- a/scripts/checkpoint_converters/convert_starcoder2_hf_to_nemo.py
+++ b/scripts/checkpoint_converters/convert_starcoder2_hf_to_nemo.py
@@ -194,7 +194,7 @@ def convert(args):
     nemo_config.precision = precision
     logging.info(f"nemo_config: {nemo_config}")
 
-    trainer = Trainer(plugins=plugins, accelerator='cpu', precision=precision, strategy=NLPDDPStrategy())
+    trainer = Trainer(plugins=plugins, accelerator='cpu', strategy=NLPDDPStrategy())
 
     hidden_size = nemo_config.hidden_size
     head_num = nemo_config.num_attention_heads