From 9a7c518ab11ec9eac734b2a1ce110f339d34022c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 3 Oct 2022 12:41:23 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 nemo/collections/asr/models/label_models.py                 | 2 +-
 .../language_modeling/megatron_gpt_prompt_learning_model.py | 2 +-
 nemo/collections/nlp/modules/common/__init__.py             | 2 +-
 nemo/collections/nlp/modules/common/prompt_encoder.py       | 2 +-
 .../text_normalization/zh/data/char/__init__.py             | 2 +-
 tests/core/test_exp_manager.py                              | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/nemo/collections/asr/models/label_models.py b/nemo/collections/asr/models/label_models.py
index 2a5dcf1eae35..5859dbfa40e0 100644
--- a/nemo/collections/asr/models/label_models.py
+++ b/nemo/collections/asr/models/label_models.py
@@ -502,4 +502,4 @@ def get_batch_embeddings(speaker_model, manifest_filepath, batch_size=32, sample
 
     all_logits, true_labels, all_embs = np.asarray(all_logits), np.asarray(all_labels), np.asarray(all_embs)
 
-    return all_embs, all_logits, true_labels, dataset.id2label
\ No newline at end of file
+    return all_embs, all_logits, true_labels, dataset.id2label
diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py
index 16da81195865..3cf532e86415 100644
--- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py
+++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py
@@ -1013,4 +1013,4 @@ def get_pseudo_tokens(num_virtual_tokens):
         for i in range(num_virtual_tokens)
     ]
 
-    return pseudo_tokens
\ No newline at end of file
+    return pseudo_tokens
diff --git a/nemo/collections/nlp/modules/common/__init__.py b/nemo/collections/nlp/modules/common/__init__.py
index db40b0e0cfa6..cc5bb7a3f480 100644
--- a/nemo/collections/nlp/modules/common/__init__.py
+++ b/nemo/collections/nlp/modules/common/__init__.py
@@ -39,4 +39,4 @@
 from nemo.collections.nlp.modules.common.sequence_regression import SequenceRegression
 from nemo.collections.nlp.modules.common.sequence_token_classifier import SequenceTokenClassifier
 from nemo.collections.nlp.modules.common.token_classifier import BertPretrainingTokenClassifier, TokenClassifier
-from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer, get_tokenizer_list
\ No newline at end of file
+from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer, get_tokenizer_list
diff --git a/nemo/collections/nlp/modules/common/prompt_encoder.py b/nemo/collections/nlp/modules/common/prompt_encoder.py
index 7e6e2198f4cf..ef2f84dfaaba 100644
--- a/nemo/collections/nlp/modules/common/prompt_encoder.py
+++ b/nemo/collections/nlp/modules/common/prompt_encoder.py
@@ -295,4 +295,4 @@ def forward(self, taskname_embeddings) -> torch.Tensor:
         else:
             raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")
 
-        return output_embeds
\ No newline at end of file
+        return output_embeds
diff --git a/nemo_text_processing/text_normalization/zh/data/char/__init__.py b/nemo_text_processing/text_normalization/zh/data/char/__init__.py
index 854406a27271..a1cf281f0908 100644
--- a/nemo_text_processing/text_normalization/zh/data/char/__init__.py
+++ b/nemo_text_processing/text_normalization/zh/data/char/__init__.py
@@ -10,4 +10,4 @@
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
-# limitations under the License.
\ No newline at end of file
+# limitations under the License.
diff --git a/tests/core/test_exp_manager.py b/tests/core/test_exp_manager.py
index 9eb4d2eecbb4..6d84337958b0 100644
--- a/tests/core/test_exp_manager.py
+++ b/tests/core/test_exp_manager.py
@@ -571,4 +571,4 @@ class CustomLoop(TrainingEpochLoop):
     loop.trainer = trainer
     trainer.fit_loop.epoch_loop = loop
     with pytest.warns(UserWarning, match="Detected custom epoch loop"):
-        exp_manager(trainer, {"explicit_log_dir": str(tmp_path)})
\ No newline at end of file
+        exp_manager(trainer, {"explicit_log_dir": str(tmp_path)})