From 722e9364916e527e8d46cbd828a1516bf6aaebd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gema=20Parre=C3=B1o?= Date: Sun, 29 Oct 2023 19:22:26 +0100 Subject: [PATCH 001/268] [Typo fix] flag config in WANDB (#27130) typo fix flag config --- examples/pytorch/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/README.md b/examples/pytorch/README.md index 60d762c69e10..2a00db08d2ae 100644 --- a/examples/pytorch/README.md +++ b/examples/pytorch/README.md @@ -223,7 +223,7 @@ import wandb wandb.login() ``` -To enable logging to W&B, include `"wandb"` in the `report_to` of your `TrainingArguments` or script. Or just pass along `--report_to all` if you have `wandb` installed. +To enable logging to W&B, include `"wandb"` in the `report_to` of your `TrainingArguments` or script. Or just pass along `--report_to_all` if you have `wandb` installed. Whenever you use `Trainer` or `TFTrainer` classes, your losses, evaluation metrics, model topology and gradients (for `Trainer` only) will automatically be logged. From 211ad4c9cc1c0882c4a22eaca7b4d7d1e2f264b3 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 30 Oct 2023 10:48:24 +0100 Subject: [PATCH 002/268] Fix slack report failing for doctest (#27042) * fix slack report for doctest * separate reports * style --------- Co-authored-by: ydshieh --- utils/notification_service.py | 2 +- utils/notification_service_doc_tests.py | 67 ++++++++++++++++--------- 2 files changed, 45 insertions(+), 24 deletions(-) diff --git a/utils/notification_service.py b/utils/notification_service.py index 650d76b28c37..610d597d2307 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -314,7 +314,7 @@ def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_rep return entries_changed @property - def model_failures(self) -> Dict: + def model_failures(self) -> List[Dict]: # Obtain per-model failures def per_model_sum(model_category_dict): return dicts_to_sum(model_category_dict["failed"].values()) diff --git a/utils/notification_service_doc_tests.py b/utils/notification_service_doc_tests.py index aac768fb3943..c516963be1d7 100644 --- a/utils/notification_service_doc_tests.py +++ b/utils/notification_service_doc_tests.py @@ -19,7 +19,7 @@ import re import time from fnmatch import fnmatch -from typing import Dict +from typing import Dict, List import requests from slack_sdk import WebClient @@ -132,30 +132,42 @@ def failures(self) -> Dict: } @property - def category_failures(self) -> Dict: + def category_failures(self) -> List[Dict]: + failure_blocks = [] + + MAX_ERROR_TEXT = 3000 - len("The following examples had failures:\n\n\n\n") - len("[Truncated]\n") line_length = 40 category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)} - report = "" - for category, failures in category_failures.items(): + def single_category_failures(category, failures): + text = "" if len(failures) == 0: - continue + return "" + text += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" - if report != "": - report += "\n\n" + for idx, failure in enumerate(failures): + new_text = text + f"`{failure}`\n" + if len(new_text) > MAX_ERROR_TEXT: + text = text + "[Truncated]\n" + break + text = new_text - report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" - report += "`" - report += "`\n`".join(failures) - report += "`" + return text - return { - "type": "section", - "text": { - 
"type": "mrkdwn", - "text": f"The following examples had failures:\n\n\n{report}\n", - }, - } + for category, failures in category_failures.items(): + report = single_category_failures(category, failures) + if len(report) == 0: + continue + block = { + "type": "section", + "text": { + "type": "mrkdwn", + "text": f"The following examples had failures:\n\n\n{report}\n", + }, + } + failure_blocks.append(block) + + return failure_blocks @property def payload(self) -> str: @@ -165,7 +177,7 @@ def payload(self) -> str: blocks.append(self.failures) if self.n_failures > 0: - blocks.extend([self.category_failures]) + blocks.extend(self.category_failures) if self.n_failures == 0: blocks.append(self.no_failures) @@ -211,10 +223,19 @@ def post(self): ) def get_reply_blocks(self, job_name, job_link, failures, text): - failures_text = "" + # `text` must be less than 3001 characters in Slack SDK + # keep some room for adding "[Truncated]" when necessary + MAX_ERROR_TEXT = 3000 - len("[Truncated]") + + failure_text = "" for key, value in failures.items(): - value = value[:200] + " [Truncated]" if len(value) > 250 else value - failures_text += f"*{key}*\n_{value}_\n\n" + new_text = failure_text + f"*{key}*\n_{value}_\n\n" + if len(new_text) > MAX_ERROR_TEXT: + # `failure_text` here has length <= 3000 + failure_text = failure_text + "[Truncated]" + break + # `failure_text` here has length <= MAX_ERROR_TEXT + failure_text = new_text title = job_name content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} @@ -229,7 +250,7 @@ def get_reply_blocks(self, job_name, job_link, failures, text): return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, - {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, + {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}, ] def post_reply(self): From 160432110c116003705fd3740e5b1d6690d9d18e Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:04:50 +0100 Subject: [PATCH 003/268] [`FA2`/ `Mistral`] Revert previous behavior with right padding + forward (#27125) Update modeling_mistral.py --- src/transformers/models/mistral/modeling_mistral.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 0eeff04185d4..7cdea6f053aa 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -820,6 +820,7 @@ def forward( attention_mask is not None and hasattr(self.config, "_flash_attn_2_enabled") and self.config._flash_attn_2_enabled + and past_key_values is not None ): is_padding_right = attention_mask[:, -1].sum().item() != batch_size if is_padding_right: From e830495c1ca7fa12653b6dec6ffe0b244ac4dc1d Mon Sep 17 00:00:00 2001 From: Thien Tran Date: Mon, 30 Oct 2023 18:52:24 +0800 Subject: [PATCH 004/268] Fix data2vec-audio note about attention mask (#27116) fix data2vec audio note about attention mask --- .../models/data2vec/modeling_data2vec_audio.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/data2vec/modeling_data2vec_audio.py b/src/transformers/models/data2vec/modeling_data2vec_audio.py index cf15d8508d51..47cf2d6245ef 100755 --- a/src/transformers/models/data2vec/modeling_data2vec_audio.py +++ b/src/transformers/models/data2vec/modeling_data2vec_audio.py @@ -786,12 +786,11 @@ def 
_get_feature_vector_attention_mask( - `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == - True`. For all models whose processor has `config.return_attention_mask == False`, such as - [data2vec-audio-base](https://huggingface.co/facebook/data2vec-audio-base-960h), `attention_mask` should - **not** be passed to avoid degraded performance when doing batched inference. For such models - `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these - models also yield slightly different results depending on whether `input_values` is padded or not. + `attention_mask` should be passed if the corresponding processor has `config.return_attention_mask == + True`, which is the case for all pre-trained Data2Vec Audio models. Be aware that that even with + `attention_mask`, zero-padded inputs will have slightly different outputs compared to non-padded inputs + because there are more than one convolutional layer in the positional encodings. For a more detailed + explanation, see [here](https://github.com/huggingface/transformers/issues/25621#issuecomment-1713759349). From 5fbed2d7ca6c8690f18b5c33f7b166de1c14fd26 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:41:48 +0100 Subject: [PATCH 005/268] [`Trainer` / `GC`] Add `gradient_checkpointing_kwargs` in trainer and training arguments (#27068) * add `gradient_checkpointing_kwargs` in trainer and training arguments * add comment * add test - currently failing * now tests pass --- src/transformers/trainer.py | 7 +++- src/transformers/training_args.py | 8 +++++ tests/trainer/test_trainer.py | 59 ++++++++++++++++++++++++++++++- 3 files changed, 72 insertions(+), 2 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 25941ff0c7d2..06879cbce722 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1616,7 +1616,12 @@ def _inner_training_loop( # Activate gradient checkpointing if needed if args.gradient_checkpointing: - self.model.gradient_checkpointing_enable() + if args.gradient_checkpointing_kwargs is None: + gradient_checkpointing_kwargs = {} + else: + gradient_checkpointing_kwargs = args.gradient_checkpointing_kwargs + + self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model = self._wrap_model(self.model_wrapped) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index da7fe2b61ee5..cc8a3de56b21 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -572,6 +572,8 @@ class TrainingArguments: Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished. gradient_checkpointing (`bool`, *optional*, defaults to `False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. + gradient_checkpointing_args (`dict`, *optional*, defaults to `None`): + Key word arguments to be passed to the `gradient_checkpointing_enable` method. include_inputs_for_metrics (`bool`, *optional*, defaults to `False`): Whether or not the inputs will be passed to the `compute_metrics` function. This is intended for metrics that need inputs, predictions and references for scoring calculation in Metric class. @@ -1119,6 +1121,12 @@ class TrainingArguments: "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." 
}, ) + gradient_checkpointing_kwargs: dict = field( + default=None, + metadata={ + "help": "Gradient checkpointing key word arguments such as `use_reentrant`. Will be passed to `torch.utils.checkpoint.checkpoint` through `model.gradient_checkpointing_enable`." + }, + ) include_inputs_for_metrics: bool = field( default=False, metadata={"help": "Whether or not the inputs will be passed to the `compute_metrics` function."} ) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 6400852e6211..624d3833f4f5 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -283,6 +283,38 @@ def forward(self, input_x, labels=None, **kwargs): loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) + class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel): + config_class = RegressionModelConfig + base_model_prefix = "regression" + supports_gradient_checkpointing = True + + def __init__(self, config): + super().__init__(config) + self.layers = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)]) + self.head = nn.Linear(config.hidden_size, 1) + self.gradient_checkpointing = False + self.double_output = config.double_output + + def forward(self, input_x, labels=None, **kwargs): + y = input_x.unsqueeze(0) + + for layer in self.layers: + if self.training and self.gradient_checkpointing: + outputs = self._gradient_checkpointing_func(layer.__call__, y) + else: + outputs = layer(y) + + y = outputs * 3 + + logits = self.head(y) + + if labels is None: + return (logits, logits) if self.double_output else (logits,) + + loss = nn.functional.mse_loss(logits, labels) + + return (loss, y, y) if self.double_output else (loss, y) + class RegressionRandomPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" @@ -327,6 +359,7 @@ def get_regression_trainer( a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, keep_report_to=False, **kwargs ): label_names = kwargs.get("label_names", None) + gradient_checkpointing = kwargs.get("gradient_checkpointing", False) train_dataset = RegressionDataset(length=train_len, label_names=label_names) eval_dataset = RegressionDataset(length=eval_len, label_names=label_names) @@ -336,7 +369,13 @@ def get_regression_trainer( else: if pretrained: config = RegressionModelConfig(a=a, b=b, double_output=double_output) - model = RegressionPreTrainedModel(config) + # We infer the correct model class if one uses gradient_checkpointing or not + target_cls = ( + RegressionPreTrainedModel + if not gradient_checkpointing + else RegressionPreTrainedModelWithGradientCheckpointing + ) + model = target_cls(config) else: model = RegressionModel(a=a, b=b, double_output=double_output) @@ -548,6 +587,24 @@ def test_gradient_accumulation(self): trainer.train() self.check_trained_model(trainer.model) + def test_gradient_checkpointing(self): + trainer = get_regression_trainer( + per_device_train_batch_size=1, + learning_rate=0.1, + gradient_checkpointing=True, + gradient_checkpointing_kwargs={"use_reentrant": False}, + ) + previous_params = {k: v.detach().clone() for k, v in trainer.model.named_parameters()} + + trainer.train() + + # Check if model weights have been updated + for k, v in trainer.model.named_parameters(): + self.assertFalse( + torch.allclose(previous_params[k], v, rtol=1e-4, atol=1e-4), + f"Model weights for {k} have not been updated", + ) + def test_training_loss(self): n_gpus = max(1, 
get_gpu_count()) From d751dbecb267e4e3ae77352e97a6470da93a0094 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Mon, 30 Oct 2023 19:55:03 +0800 Subject: [PATCH 006/268] remove the obsolete code related to fairscale FSDP (#26651) * remove the obsolete code related to fairscale FSDP * apple review suggestion --- src/transformers/trainer.py | 6 ------ src/transformers/training_args.py | 5 +---- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 06879cbce722..945b557021c7 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1850,12 +1850,6 @@ def _inner_training_loop( if is_sagemaker_mp_enabled() and args.fp16: self.optimizer.clip_master_grads(args.max_grad_norm) - elif hasattr(self.optimizer, "clip_grad_norm"): - # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping - self.optimizer.clip_grad_norm(args.max_grad_norm) - elif hasattr(model, "clip_grad_norm_"): - # Some models (like FullyShardedDDP) have a specific way to do gradient clipping - model.clip_grad_norm_(args.max_grad_norm) elif self.use_apex: # Revert to normal clipping otherwise, handling Apex or full precision nn.utils.clip_grad_norm_( diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index cc8a3de56b21..507515c696af 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1384,10 +1384,7 @@ def __post_init__(self): if self.bf16: if self.half_precision_backend == "apex": - raise ValueError( - " `--half_precision_backend apex`: GPU bf16 is not supported by apex. Use" - " `--half_precision_backend cuda_amp` instead" - ) + raise ValueError(" `--half_precision_backend apex`: GPU bf16 is not supported by apex.") if self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU: if self.evaluation_strategy == IntervalStrategy.NO: From 691fd8fdded34497451bc638716eb8fbd484883a Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 30 Oct 2023 13:32:17 +0100 Subject: [PATCH 007/268] Add `Kosmos-2` model (#24709) * Add KOSMOS-2 model * update * update * update * address review comment - 001 * address review comment - 002 * address review comment - 003 * style * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * fix * address review comment - 004 * address review comment - 005 * address review comment - 006 * address review comment - 007 * address review comment - 008 * address review comment - 009 * address review comment - 010 * address review comment - 011 * update readme * fix * fix * fix * [skip ci] fix * revert the change in _decode * fix docstring * fix docstring * Update docs/source/en/model_doc/kosmos-2.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * no more Kosmos2Tokenizer * style * remove "returned when being computed by the model" * Apply suggestions from code review Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * UTM5 Atten * fix attn mask * use present_key_value_states instead of next_decoder_cache * style * conversion scripts * conversion scripts * conversion scripts * Add _reorder_cache * fix doctest and copies * rename 1 * rename 2 * rename 3 * make fixup * fix table * fix docstring * rename 4 * change repo_id * remove tip * update md file * make style * update md file * put docs/source/en/model_doc/kosmos-2.md to slow * update conversion script * Use CLIPImageProcessor in 
Kosmos2Processor * Remove Kosmos2ImageProcessor * Remove to_dict in Kosmos2Config * Remove files * fix import * Update conversion * normalized=False * Not using hardcoded values like * elt --> element * Apply suggestion * Not using hardcoded values like * No assert * No nested functions * Fix md file * copy * update doc * fix docstring * fix name * Remove _add_remove_spaces_around_tag_tokens * Remove dummy docstring of _preprocess_single_example * Use `BatchEncoding` * temp * temp * temp * Update * Update * Make Kosmos2ProcessorTest a bit pretty * Update gradient checkpointing * Fix gradient checkpointing test * Remove one liner remove_special_fields * Simplify conversion script * fix add_eos_token * update readme * update tests * Change to microsoft/kosmos-2-patch14-224 * style * Fix doc --------- Co-authored-by: ydshieh Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- README.md | 1 + README_es.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + docs/source/en/model_doc/kosmos-2.md | 94 + src/transformers/__init__.py | 24 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 4 + src/transformers/models/auto/modeling_auto.py | 2 + .../models/auto/processing_auto.py | 1 + .../models/auto/tokenization_auto.py | 7 + src/transformers/models/kosmos2/__init__.py | 64 + .../models/kosmos2/configuration_kosmos2.py | 297 +++ ..._original_pytorch_checkpoint_to_pytorch.py | 77 + .../models/kosmos2/modeling_kosmos2.py | 2064 +++++++++++++++++ .../models/kosmos2/processing_kosmos2.py | 663 ++++++ src/transformers/utils/dummy_pt_objects.py | 24 + tests/models/kosmos2/__init__.py | 0 tests/models/kosmos2/test_modeling_kosmos2.py | 732 ++++++ .../models/kosmos2/test_processor_kosmos2.py | 471 ++++ utils/check_repo.py | 3 + utils/not_doctested.txt | 1 + utils/slow_documentation_tests.txt | 2 + 28 files changed, 4541 insertions(+) create mode 100644 docs/source/en/model_doc/kosmos-2.md create mode 100644 src/transformers/models/kosmos2/__init__.py create mode 100644 src/transformers/models/kosmos2/configuration_kosmos2.py create mode 100644 src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py create mode 100644 src/transformers/models/kosmos2/modeling_kosmos2.py create mode 100644 src/transformers/models/kosmos2/processing_kosmos2.py create mode 100644 tests/models/kosmos2/__init__.py create mode 100644 tests/models/kosmos2/test_modeling_kosmos2.py create mode 100644 tests/models/kosmos2/test_processor_kosmos2.py diff --git a/README.md b/README.md index 6d390e364ad7..8f4496059fee 100644 --- a/README.md +++ b/README.md @@ -386,6 +386,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. 
**[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. diff --git a/README_es.md b/README_es.md index 4f0bb9e8e594..eeb0990fe58b 100644 --- a/README_es.md +++ b/README_es.md @@ -361,6 +361,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. +1. 
**[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. diff --git a/README_hd.md b/README_hd.md index 2a0a30b2237e..bbc6b45f43e8 100644 --- a/README_hd.md +++ b/README_hd.md @@ -335,6 +335,7 @@ conda install -c huggingface transformers 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (Salesforce से) Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. द्वाराअनुसंधान पत्र [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) के साथ जारी किया गया 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. 
**[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (माइक्रोसॉफ्ट रिसर्च एशिया से) साथ देने वाला पेपर [लेआउटएलएमवी3: यूनिफाइड टेक्स्ट और इमेज मास्किंग के साथ दस्तावेज़ एआई के लिए पूर्व-प्रशिक्षण](https://arxiv.org/abs/2204.08387) युपन हुआंग, टेंगचाओ लव, लेई कुई, युटोंग लू, फुरु वेई द्वारा पोस्ट किया गया। diff --git a/README_ja.md b/README_ja.md index 0e13c7292e30..b37098c491bb 100644 --- a/README_ja.md +++ b/README_ja.md @@ -395,6 +395,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (Salesforce から) Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. から公開された研究論文 [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (OpenAI から) Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever から公開された研究論文: [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (Microsoft Research Asia から) Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou から公開された研究論文: [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (Microsoft Research Asia から) Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou から公開された研究論文: [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (Microsoft Research Asia から) Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei から公開された研究論文: [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) diff --git a/README_ko.md b/README_ko.md index 1ca116e7d081..ed67748854c4 100644 --- a/README_ko.md +++ b/README_ko.md @@ -310,6 +310,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. 
**[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (Salesforce 에서 제공)은 Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.의 [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500)논문과 함께 발표했습니다. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (OpenAI 에서) Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever 의 [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) 논문과 함께 발표했습니다. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (Microsoft Research Asia 에서) Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou 의 [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 논문과 함께 발표했습니다. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (Microsoft Research Asia 에서) Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou 의 [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 논문과 함께 발표했습니다. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (Microsoft Research Asia 에서) Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei 의 [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) 논문과 함께 발표했습니다. diff --git a/README_zh-hans.md b/README_zh-hans.md index 7ea09edc91c9..b9ffb8ae8416 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -334,6 +334,7 @@ conda install -c huggingface transformers 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (来自 Salesforce) 伴随论文 [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) 由 Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi 发布。 1. 
**[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 由 Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou 发布。 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 由 Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou 发布。 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) 由 Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index aced5a5b22ae..8d47ef4ef072 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -346,6 +346,7 @@ conda install -c huggingface transformers 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. 
**[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 84e306a786f9..141398c02e4e 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -368,6 +368,8 @@ title: I-BERT - local: model_doc/jukebox title: Jukebox + - local: model_doc/kosmos-2 + title: KOSMOS-2 - local: model_doc/led title: LED - local: model_doc/llama diff --git a/docs/source/en/index.md b/docs/source/en/index.md index 9a9692f35d9b..5a76935b71c7 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -158,6 +158,7 @@ Flax), PyTorch, and/or TensorFlow. | [Informer](model_doc/informer) | ✅ | ❌ | ❌ | | [InstructBLIP](model_doc/instructblip) | ✅ | ❌ | ❌ | | [Jukebox](model_doc/jukebox) | ✅ | ❌ | ❌ | +| [KOSMOS-2](model_doc/kosmos-2) | ✅ | ❌ | ❌ | | [LayoutLM](model_doc/layoutlm) | ✅ | ✅ | ❌ | | [LayoutLMv2](model_doc/layoutlmv2) | ✅ | ❌ | ❌ | | [LayoutLMv3](model_doc/layoutlmv3) | ✅ | ✅ | ❌ | diff --git a/docs/source/en/model_doc/kosmos-2.md b/docs/source/en/model_doc/kosmos-2.md new file mode 100644 index 000000000000..8153ee300924 --- /dev/null +++ b/docs/source/en/model_doc/kosmos-2.md @@ -0,0 +1,94 @@ + + +# KOSMOS-2 + +## Overview + +The KOSMOS-2 model was proposed in [Kosmos-2: Grounding Multimodal Large Language Models to the World] +(https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei + +KOSMOS-2 is a Transformer-based causal language model and is trained using the next-word prediction task on a web-scale +dataset of grounded image-text pairs [GRIT](https://huggingface.co/datasets/zzliang/GRIT). The spatial coordinates of +the bounding boxes in the dataset are converted to a sequence of location tokens, which are appended to their respective +entity text spans (for example, `a snowman` followed by ``). The data format is +similar to “hyperlinks” that connect the object regions in an image to their text span in the corresponding caption. + +The abstract from the paper is the following: + +*We introduce Kosmos-2, a Multimodal Large Language Model (MLLM), enabling new capabilities of perceiving object descriptions (e.g., bounding boxes) and grounding text to the visual world. Specifically, we represent refer expressions as links in Markdown, i.e., ``[text span](bounding boxes)'', where object descriptions are sequences of location tokens. Together with multimodal corpora, we construct large-scale data of grounded image-text pairs (called GrIT) to train the model. In addition to the existing capabilities of MLLMs (e.g., perceiving general modalities, following instructions, and performing in-context learning), Kosmos-2 integrates the grounding capability into downstream applications. 
We evaluate Kosmos-2 on a wide range of tasks, including (i) multimodal grounding, such as referring expression comprehension, and phrase grounding, (ii) multimodal referring, such as referring expression generation, (iii) perception-language tasks, and (iv) language understanding and generation. This work lays out the foundation for the development of Embodiment AI and sheds light on the big convergence of language, multimodal perception, action, and world modeling, which is a key step toward artificial general intelligence. Code and pretrained models are available at https://aka.ms/kosmos-2.* + +## Example + +```python +>>> from PIL import Image +>>> import requests +>>> from transformers import AutoProcessor, Kosmos2ForConditionalGeneration + +>>> model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224") +>>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") + +>>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" +>>> image = Image.open(requests.get(url, stream=True).raw) + +>>> prompt = " An image of" + +>>> inputs = processor(text=prompt, images=image, return_tensors="pt") + +>>> generated_ids = model.generate( +... pixel_values=inputs["pixel_values"], +... input_ids=inputs["input_ids"], +... attention_mask=inputs["attention_mask"], +... image_embeds=None, +... image_embeds_position_mask=inputs["image_embeds_position_mask"], +... use_cache=True, +... max_new_tokens=64, +... ) +>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] +>>> processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False) +>>> processed_text +' An image of a snowman warming himself by a fire.' + +>>> caption, entities = processor.post_process_generation(generated_text) +>>> caption +'An image of a snowman warming himself by a fire.' + +>>> entities +[('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])] +``` + +This model was contributed by [Yih-Dar SHIEH](https://huggingface.co/ydshieh). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/kosmos-2). 
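
The `entities` returned by `post_process_generation` pair each grounded phrase with its character span in the caption and a list of bounding boxes whose coordinates are normalized to the `[0, 1]` range, as the values in the example above suggest. Below is a minimal sketch of mapping those normalized boxes back to pixel coordinates and drawing them on the input image; the `(x_min, y_min, x_max, y_max)` ordering and the use of Pillow here are illustrative assumptions rather than part of the processor API.

```python
from PIL import ImageDraw


def draw_entity_boxes(image, entities):
    # Draw on a copy so the original PIL image stays untouched.
    annotated = image.copy()
    draw = ImageDraw.Draw(annotated)
    width, height = annotated.size
    for entity_name, _, boxes in entities:
        for x_min, y_min, x_max, y_max in boxes:
            # Scale the normalized coordinates back to pixel space.
            box = (x_min * width, y_min * height, x_max * width, y_max * height)
            draw.rectangle(box, outline="red", width=3)
            draw.text((box[0], box[1]), entity_name, fill="red")
    return annotated


# Using `image` and `entities` from the example above:
# draw_entity_boxes(image, entities).save("snowman_annotated.png")
```

Because the coordinates are fractions of the image size, the same `entities` remain valid if the image is later resized.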
+ +## Kosmos2Config + +[[autodoc]] Kosmos2Config + +## Kosmos2ImageProcessor + +## Kosmos2Processor + +[[autodoc]] Kosmos2Processor + - __call__ + +## Kosmos2Model + +[[autodoc]] Kosmos2Model + - forward + +## Kosmos2ForConditionalGeneration + +[[autodoc]] Kosmos2ForConditionalGeneration + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 90147df41f26..1fc1ff38d06d 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -388,6 +388,11 @@ "JukeboxTokenizer", "JukeboxVQVAEConfig", ], + "models.kosmos2": [ + "KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "Kosmos2Config", + "Kosmos2Processor", + ], "models.layoutlm": ["LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMTokenizer"], "models.layoutlmv2": [ "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", @@ -2051,6 +2056,14 @@ "JukeboxVQVAE", ] ) + _import_structure["models.kosmos2"].extend( + [ + "KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Kosmos2ForConditionalGeneration", + "Kosmos2Model", + "Kosmos2PreTrainedModel", + ] + ) _import_structure["models.layoutlm"].extend( [ "LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -4561,6 +4574,11 @@ JukeboxTokenizer, JukeboxVQVAEConfig, ) + from .models.kosmos2 import ( + KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP, + Kosmos2Config, + Kosmos2Processor, + ) from .models.layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer from .models.layoutlmv2 import ( LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -5986,6 +6004,12 @@ JukeboxPrior, JukeboxVQVAE, ) + from .models.kosmos2 import ( + KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST, + Kosmos2ForConditionalGeneration, + Kosmos2Model, + Kosmos2PreTrainedModel, + ) from .models.layoutlm import ( LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 4093ff819e80..81e71500a5cc 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -109,6 +109,7 @@ informer, instructblip, jukebox, + kosmos2, layoutlm, layoutlmv2, layoutlmv3, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index b9fd0022045b..c3baabea56a2 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -117,6 +117,7 @@ ("informer", "InformerConfig"), ("instructblip", "InstructBlipConfig"), ("jukebox", "JukeboxConfig"), + ("kosmos-2", "Kosmos2Config"), ("layoutlm", "LayoutLMConfig"), ("layoutlmv2", "LayoutLMv2Config"), ("layoutlmv3", "LayoutLMv3Config"), @@ -331,6 +332,7 @@ ("informer", "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("instructblip", "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("jukebox", "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("kosmos-2", "KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("layoutlm", "LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("layoutlmv2", "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("layoutlmv3", "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -546,6 +548,7 @@ ("informer", "Informer"), ("instructblip", "InstructBLIP"), ("jukebox", "Jukebox"), + ("kosmos-2", "KOSMOS-2"), ("layoutlm", "LayoutLM"), ("layoutlmv2", "LayoutLMv2"), ("layoutlmv3", "LayoutLMv3"), @@ -709,6 +712,7 @@ ("data2vec-text", "data2vec"), ("data2vec-vision", "data2vec"), ("donut-swin", "donut"), + ("kosmos-2", "kosmos2"), ("maskformer-swin", "maskformer"), ("xclip", "x_clip"), ] diff --git a/src/transformers/models/auto/modeling_auto.py 
b/src/transformers/models/auto/modeling_auto.py index f236b8fc5f58..3c622c815827 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -112,6 +112,7 @@ ("imagegpt", "ImageGPTModel"), ("informer", "InformerModel"), ("jukebox", "JukeboxModel"), + ("kosmos-2", "Kosmos2Model"), ("layoutlm", "LayoutLMModel"), ("layoutlmv2", "LayoutLMv2Model"), ("layoutlmv3", "LayoutLMv3Model"), @@ -570,6 +571,7 @@ ("blip-2", "Blip2ForConditionalGeneration"), ("git", "GitForCausalLM"), ("instructblip", "InstructBlipForConditionalGeneration"), + ("kosmos-2", "Kosmos2ForConditionalGeneration"), ("pix2struct", "Pix2StructForConditionalGeneration"), ("vision-encoder-decoder", "VisionEncoderDecoderModel"), ] diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index 9f795e3bb6ea..c0b4f49893b9 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -60,6 +60,7 @@ ("hubert", "Wav2Vec2Processor"), ("idefics", "IdeficsProcessor"), ("instructblip", "InstructBlipProcessor"), + ("kosmos-2", "Kosmos2Processor"), ("layoutlmv2", "LayoutLMv2Processor"), ("layoutlmv3", "LayoutLMv3Processor"), ("markuplm", "MarkupLMProcessor"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index afd0d30ad12e..80d2581882ce 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -181,6 +181,13 @@ ("idefics", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)), ("instructblip", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), ("jukebox", ("JukeboxTokenizer", None)), + ( + "kosmos-2", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), ("layoutlm", ("LayoutLMTokenizer", "LayoutLMTokenizerFast" if is_tokenizers_available() else None)), ("layoutlmv2", ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" if is_tokenizers_available() else None)), ("layoutlmv3", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)), diff --git a/src/transformers/models/kosmos2/__init__.py b/src/transformers/models/kosmos2/__init__.py new file mode 100644 index 000000000000..8d26304c72e1 --- /dev/null +++ b/src/transformers/models/kosmos2/__init__.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
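
The `auto` mappings extended above are what let the new checkpoint be loaded without naming the KOSMOS-2 classes directly: the config, tokenizer, processor, and model classes are all looked up from the `"kosmos-2"` model type. A rough sketch of the resulting usage, assuming (as the vision-to-text mapping extended above suggests) that `AutoModelForVision2Seq` is the public entry point for that mapping:

```python
from transformers import AutoConfig, AutoModelForVision2Seq, AutoProcessor

checkpoint = "microsoft/kosmos-2-patch14-224"

# Each call resolves through the mappings registered in this patch.
config = AutoConfig.from_pretrained(checkpoint)             # -> Kosmos2Config
processor = AutoProcessor.from_pretrained(checkpoint)       # -> Kosmos2Processor (wrapping an XLM-R tokenizer)
model = AutoModelForVision2Seq.from_pretrained(checkpoint)  # -> Kosmos2ForConditionalGeneration
```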
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_vision_available, +) + + +_import_structure = { + "configuration_kosmos2": ["KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Kosmos2Config"], + "processing_kosmos2": ["Kosmos2Processor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_kosmos2"] = [ + "KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST", + "Kosmos2ForConditionalGeneration", + "Kosmos2Model", + "Kosmos2PreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_kosmos2 import KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP, Kosmos2Config + from .processing_kosmos2 import Kosmos2Processor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_kosmos2 import ( + KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST, + Kosmos2ForConditionalGeneration, + Kosmos2Model, + Kosmos2PreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/src/transformers/models/kosmos2/configuration_kosmos2.py b/src/transformers/models/kosmos2/configuration_kosmos2.py new file mode 100644 index 000000000000..d97269733ff7 --- /dev/null +++ b/src/transformers/models/kosmos2/configuration_kosmos2.py @@ -0,0 +1,297 @@ +# coding=utf-8 +# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" KOSMOS-2 model configuration""" + +import os +from typing import Union + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "microsoft/kosmos-2-patch14-224": ( + "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/config.json" + ), + # See all KOSMOS-2 models at https://huggingface.co/models?filter=kosmos-2 +} + + +class Kosmos2TextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a + KOSMOS-2 text decoder according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2 + [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 65037): + Vocabulary size of the Kosmos2 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`Kosmos2Model`]. 
+ max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + embed_dim (`int`, *optional*, defaults to 2048): + Dimensionality of the layers and the pooler layer. + layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer encoder. + ffn_dim (`int`, *optional*, defaults to 8192): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + scale_embedding (`bool`, *optional*, defaults to `True`): + Scale embeddings by diving by sqrt(embed_dim). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). 
+ ```""" + model_type = "kosmos_2_text_model" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "num_attention_heads": "attention_heads", + "hidden_size": "embed_dim", + "num_hidden_layers": "layers", + } + + def __init__( + self, + vocab_size=65037, + max_position_embeddings=2048, + embed_dim=2048, + layers=24, + ffn_dim=8192, + attention_heads=32, + activation_function="gelu", + dropout=0.1, + attention_dropout=0.1, + activation_dropout=0.0, + layerdrop=0.0, + layer_norm_eps=1e-5, + init_std=0.02, + scale_embedding=True, + use_cache=True, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + **kwargs, + ): + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + **kwargs, + ) + + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.embed_dim = embed_dim + self.layers = layers + self.ffn_dim = ffn_dim + self.attention_heads = attention_heads + self.activation_function = activation_function + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.layerdrop = layerdrop + self.layer_norm_eps = layer_norm_eps + self.init_std = init_std + self.scale_embedding = scale_embedding + self.use_cache = use_cache + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the text config dict if we are loading from Kosmos2Config + if config_dict.get("model_type") == "kosmos-2": + config_dict = config_dict["text_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class Kosmos2VisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a + KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2 + [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. 
+ patch_size (`int`, *optional*, defaults to 14): + The size (resolution) of each patch. + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + ```""" + + model_type = "kosmos_2_vision_model" + + def __init__( + self, + hidden_size=1024, + intermediate_size=4096, + num_hidden_layers=24, + num_attention_heads=16, + num_channels=3, + image_size=224, + patch_size=14, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from Kosmos2Config + if config_dict.get("model_type") == "kosmos-2": + config_dict = config_dict["vision_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class Kosmos2Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Kosmos2Model`]. It is used to instantiate a + KOSMOS-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the KOSMOS-2 + [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. + + Args: + text_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`Kosmos2TextConfig`]. + vision_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`Kosmos2VisionConfig`]. + latent_query_num (`int`, *optional*, defaults to 64): + The number of latent query tokens that represent the image features used in the text decoder component. + kwargs (*optional*): + Dictionary of keyword arguments. 
+ + Example: + + ```python + >>> from transformers import Kosmos2Config, Kosmos2Model + + >>> # Initializing a Kosmos-2 kosmos-2-patch14-224 style configuration + >>> configuration = Kosmos2Config() + + >>> # Initializing a model (with random weights) from the kosmos-2-patch14-224 style configuration + >>> model = Kosmos2Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "kosmos-2" + is_composition = True + + def __init__( + self, + text_config=None, + vision_config=None, + latent_query_num=64, + **kwargs, + ): + super().__init__(**kwargs) + + if text_config is None: + text_config = {} + logger.info("`text_config` is `None`. Initializing the `Kosmos2TextConfig` with default values.") + + if vision_config is None: + vision_config = {} + logger.info("`vision_config` is `None`. Initializing the `Kosmos2VisionConfig` with default values.") + + self.text_config = Kosmos2TextConfig(**text_config) + self.vision_config = Kosmos2VisionConfig(**vision_config) + + self.latent_query_num = latent_query_num diff --git a/src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py new file mode 100644 index 000000000000..04c7712aa846 --- /dev/null +++ b/src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py @@ -0,0 +1,77 @@ +import argparse + +from fairseq.checkpoint_utils import load_checkpoint_to_cpu + +from transformers import Kosmos2Config, Kosmos2ForConditionalGeneration + + +KEYS_TO_MODIFY_MAPPING = { + "gpt_model.decoder.output_projection": "text_model.lm_head", + "gpt_model.decoder": "text_model.model", + "img_connector": "image_to_text_projection", + "img_model.visual.class_embedding": "vision_model.model.embeddings.class_embedding", + "img_model.visual.positional_embedding": "vision_model.model.embeddings.position_embedding.weight", + "img_model.visual.conv1": "vision_model.model.embeddings.patch_embedding", + "img_model.visual": "vision_model.model", + "ln_pre": "pre_layrnorm", + "ln_post": "post_layernorm", + "transformer.resblocks": "encoder.layers", + "ts_attn": "self_attn", + "ln_1": "layer_norm1", + "ln_2": "layer_norm2", + "c_fc": "fc1", + "c_proj": "fc2", +} + + +KEYS_TO_IGNORE = [ + # this buffer in the original code is only used to send weights to the desired device + "gpt_model.decoder.embed_positions._float_tensor", + # this weight is never used in the forward in the original KOSMOS-2) + "gpt_model.decoder.self_attn_sope.scale", +] + + +def rename_key(key): + for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): + if key_to_modify in key: + key = key.replace(key_to_modify, new_key) + + return key + + +def convert_kosmos2_checkpoint_to_pytorch(checkpoint_path, pytorch_dump_folder_path): + state = load_checkpoint_to_cpu(checkpoint_path) + state_dict = state["model"] + state_dict_keys = list(state_dict.keys()) + + config = Kosmos2Config() + # This is necessary to match the results given by the original demo + config.text_config.no_repeat_ngram_size = 3 + model = Kosmos2ForConditionalGeneration(config) + + # convert (by renaming keys) + converted_state_dict = {} + for key in state_dict_keys: + if key in KEYS_TO_IGNORE: + continue + renamed_key = rename_key(key) + converted_state_dict[renamed_key] = state_dict[key] + + # check weight loading + model.load_state_dict(converted_state_dict, strict=True) + # save the result + 
model.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--kosmos2_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + args = parser.parse_args() + convert_kosmos2_checkpoint_to_pytorch(args.kosmos2_checkpoint_path, args.pytorch_dump_folder_path) diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py new file mode 100644 index 000000000000..600fda750e80 --- /dev/null +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -0,0 +1,2064 @@ +# coding=utf-8 +# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch KOSMOS-2 model.""" + + +import math +from dataclasses import dataclass +from typing import Any, List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPooling, + CausalLMOutputWithCrossAttentions, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_kosmos2 import Kosmos2Config, Kosmos2TextConfig, Kosmos2VisionConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = Kosmos2Config + +KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/kosmos-2-patch14-224", + # See all KOSMOS-2 models at https://huggingface.co/models?filter=kosmos-2 +] + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. 
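+    The returned mask has shape `(bsz, 1, tgt_len, tgt_len + past_key_values_length)`: allowed positions hold `0`
+    and future positions hold the minimum value of `dtype`, so each token can only attend to itself, to earlier
+    positions, and to any cached past key/values.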
+ """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids +def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask + return incremental_indices.long() + padding_idx + + +KOSMOS2_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Kosmos2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +KOSMOS2_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`CLIPImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +KOSMOS2_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + image_embeds: (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): + Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. + image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0, + 1]`: + + - 1 for places where to put the image features, + - 0 for places that are not for image features (i.e. for text tokens). + + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +KOSMOS2_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`CLIPImageProcessor.__call__`] for details. + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0, + 1]`: + + - 1 for places where to put the image features, + - 0 for places that are not for image features (i.e. for text tokens). + + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + image_embeds: (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): + Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. 
+ output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@dataclass +class Kosmos2ModelOutput(ModelOutput): + """ + Base class for text model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): + Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. + projection_attentions (`tuple(torch.FloatTensor)`, *optional*): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute + the weighted average in the self-attention heads. + vision_model_output(`BaseModelOutputWithPooling`, *optional*): + The output of the [`Kosmos2VisionModel`]. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. 
+ """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + image_embeds: Optional[torch.FloatTensor] = None + projection_attentions: Optional[Tuple[torch.FloatTensor]] = None + vision_model_output: BaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +@dataclass +class Kosmos2ForConditionalGenerationModelOutput(ModelOutput): + """ + Model output class for `Kosmos2ForConditionalGeneration`. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): + Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. + projection_attentions (`tuple(torch.FloatTensor)`, *optional*): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute + the weighted average in the self-attention heads. + vision_model_output(`BaseModelOutputWithPooling`, *optional*): + The output of the [`Kosmos2VisionModel`]. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if + `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` + input) to speed up sequential decoding. 
+ """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + image_embeds: Optional[torch.FloatTensor] = None + projection_attentions: Optional[Tuple[torch.FloatTensor]] = None + vision_model_output: BaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Kosmos2 +class Kosmos2VisionEmbeddings(nn.Module): + def __init__(self, config: Kosmos2VisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) + + self.patch_embedding = nn.Conv2d( + in_channels=config.num_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->Kosmos2Vision +class Kosmos2VisionAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + # apply the causal_attention_mask first + if causal_attention_mask is not None: + if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {causal_attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit akward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Kosmos2Vision +class Kosmos2VisionMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Kosmos2Vision +class Kosmos2VisionEncoderLayer(nn.Module): + def __init__(self, config: Kosmos2VisionConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = Kosmos2VisionAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + self.mlp = Kosmos2VisionMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + causal_attention_mask: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(config.encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Kosmos2Vision +class Kosmos2VisionEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`Kosmos2VisionEncoderLayer`]. + + Args: + config: Kosmos2VisionConfig + """ + + def __init__(self, config: Kosmos2VisionConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Causal mask for the text model. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = self.gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +# Similar to `transformers.models.clip.modeling_clip.CLIPVisionTransformer` but without docstring for `forward` +class Kosmos2VisionTransformer(nn.Module): + # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPVision->Kosmos2Vision,CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2Vision + def __init__(self, config: Kosmos2VisionConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + + self.embeddings = Kosmos2VisionEmbeddings(config) + self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = Kosmos2VisionEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layrnorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +# Similar to 
`transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding` but allowing to pass `position_ids` +class Kosmos2TextSinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length.""" + + # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.__init__ + def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): + super().__init__() + self.offset = 2 + self.embedding_dim = embedding_dim + self.padding_idx = padding_idx + self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) + + # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.make_weights + def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): + emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) + if hasattr(self, "weights"): + # in forward put the weights on the correct dtype and device of the param + emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) + + self.register_buffer("weights", emb_weights, persistent=False) + + @staticmethod + # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.get_embedding + def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): + """ + Build sinusoidal embeddings. + + This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of + "Attention Is All You Need". + """ + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) + emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) + if embedding_dim % 2 == 1: + # zero pad + emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) + if padding_idx is not None: + emb[padding_idx, :] = 0 + + return emb.to(torch.get_default_dtype()) + + @torch.no_grad() + def forward( + self, + input_ids: torch.Tensor = None, + inputs_embeds: torch.Tensor = None, + past_key_values_length: int = 0, + position_ids: torch.Tensor = None, + ): + if input_ids is not None: + bsz, seq_len = input_ids.size() + if position_ids is None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = create_position_ids_from_input_ids( + input_ids, self.padding_idx, past_key_values_length + ).to(input_ids.device) + else: + bsz, seq_len = inputs_embeds.size()[:-1] + if position_ids is None: + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length) + + # expand embeddings if needed + max_pos = self.padding_idx + 1 + seq_len + past_key_values_length + if max_pos > self.weights.size(0): + self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) + + return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() + + # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds + def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
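+        Positions are generated sequentially starting at `padding_idx + 1` and are shifted by
+        `past_key_values_length` when cached key/values are used.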
+ + Args: + inputs_embeds: torch.Tensor + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length + + +class KosmosTextAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + # Similar to transformers.models.bart.modeling_bart.BartAttention.__init__ except an additional `inner_attn_ln`. + def __init__( + self, + config, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + add_inner_attn_layernorm: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + # End opy + self.inner_attn_ln = None + if add_inner_attn_layernorm: + self.inner_attn_ln = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + def _shape(self, projection: torch.Tensor) -> torch.Tensor: + new_projection_shape = projection.size()[:-1] + (self.num_heads, self.head_dim) + # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D) + new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) + return new_projection + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = encoder_hidden_states is not None + batch_size, seq_length = hidden_states.shape[:2] + + # use encoder_hidden_states if cross attention + current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + # checking that the `sequence_length` of the `past_key_value` is the same as the he provided + # `encoder_hidden_states` to support prefix tuning + if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + else: + key_states = self._shape(self.k_proj(current_states)) + value_states = self._shape(self.v_proj(current_states)) + if past_key_value is not None and not is_cross_attention: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + query_states = self._shape(self.q_proj(hidden_states) * self.scaling) + attn_weights = torch.matmul(query_states, 
key_states.transpose(-1, -2)) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + src_len = key_states.size(2) + + if attention_mask is not None: + if attention_mask.size() != (batch_size, 1, seq_length, src_len): + raise ValueError( + f"Attention mask should be of size {(batch_size, 1, seq_length, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + # Mask heads if we want to + if layer_head_mask is not None: + attn_weights = attn_weights * layer_head_mask + + attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + # attn_output = torch.bmm(attn_probs, value_states) ? + context_states = torch.matmul(attn_weights, value_states) + # attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) ? + context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1) + + if self.inner_attn_ln is not None: + context_states = self.inner_attn_ln(context_states) + + attn_output = self.out_proj(context_states) + + return attn_output, attn_weights, past_key_value + + +class Kosmos2TextFFN(nn.Module): + def __init__(self, config: Kosmos2TextConfig): + super().__init__() + + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.fc1 = nn.Linear(config.embed_dim, config.ffn_dim) + self.fc2 = nn.Linear(config.ffn_dim, config.embed_dim) + + self.ffn_layernorm = nn.LayerNorm(config.ffn_dim, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.ffn_layernorm(hidden_states) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + return hidden_states + + +class Kosmos2TextBlock(nn.Module): + def __init__(self, config: Kosmos2TextConfig): + super().__init__() + self.embed_dim = config.embed_dim + + self.self_attn = KosmosTextAttention( + config, + embed_dim=self.embed_dim, + num_heads=config.attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + add_inner_attn_layernorm=True, + ) + self.dropout = config.dropout + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + if config.add_cross_attention: + self.encoder_attn = KosmosTextAttention( + config, + embed_dim=self.embed_dim, + num_heads=config.attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + add_inner_attn_layernorm=False, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + self.ffn = Kosmos2TextFFN(config) + self.final_layer_norm = nn.LayerNorm(self.embed_dim, 
eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + + hidden_states = self.self_attn_layer_norm(hidden_states) + + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + if not hasattr(self, "encoder_attn"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + residual = hidden_states + + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + + hidden_states = self.final_layer_norm(hidden_states) + + # FFN + hidden_states = self.ffn(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class Kosmos2TextTransformer(nn.Module): + """ + Transformer decoder consisting of `config.layers` layers. Each layer is a [`Kosmos2TextBlock`]. 
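+    On top of a standard decoder, `forward_embedding` lets image features (the output of
+    `Kosmos2ImageToTextProjection`) be written into the token embedding sequence at the positions indicated by
+    `image_embeds_position_mask`.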
+ + Args: + config: Kosmos2TextConfig + """ + + def __init__(self, config: Kosmos2TextConfig): + super().__init__() + self.config = config + self.dropout = config.dropout + self.layerdrop = config.layerdrop + + self.embed_scale = math.sqrt(config.embed_dim) if config.scale_embedding else 1.0 + self.embed_tokens = nn.Embedding(config.vocab_size, config.embed_dim, padding_idx=config.pad_token_id) + + self.embed_positions = Kosmos2TextSinusoidalPositionalEmbedding( + num_positions=config.max_position_embeddings, + embedding_dim=config.embed_dim, + padding_idx=config.pad_token_id, + ) + + self.layers = nn.ModuleList([Kosmos2TextBlock(config) for _ in range(config.layers)]) + self.layer_norm = nn.LayerNorm(config.embed_dim, config.layer_norm_eps) + + self.gradient_checkpointing = False + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward_embedding( + self, + input_ids, + inputs_embeds: torch.Tensor = None, + image_embeds: torch.Tensor = None, + img_input_mask: torch.Tensor = None, + past_key_values_length: int = 0, + position_ids: torch.Tensor = None, + ): + # The argument `inputs_embeds` should be the one without being multiplied by `self.embed_scale`. 
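+        # Outline of the steps below: token ids are embedded, image features (when provided) overwrite the
+        # embeddings at the positions flagged by `img_input_mask`, the result is scaled by `self.embed_scale`,
+        # and sinusoidal position embeddings are added before the final dropout.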
+ if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if image_embeds is not None: + inputs_embeds[img_input_mask.to(dtype=torch.bool)] = image_embeds.view(-1, image_embeds.size(-1)) + + inputs_embeds = inputs_embeds * self.embed_scale + + # embed positions + positions = self.embed_positions( + input_ids=input_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + position_ids=position_ids, + ) + positions = positions.to(inputs_embeds.device) + + hidden_states = inputs_embeds + positions + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + return hidden_states + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + image_embeds: Optional[torch.Tensor] = None, + image_embeds_position_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.shape + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + # We don't need img info. when `past_key_values_length` > 0 + if past_key_values_length > 0: + image_embeds = None + image_embeds_position_mask = None + + hidden_states = self.forward_embedding( + input_ids=input_ids, + inputs_embeds=inputs_embeds, + image_embeds=image_embeds, + img_input_mask=image_embeds_position_mask, + past_key_values_length=past_key_values_length, + position_ids=position_ids, + ) + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_shape, hidden_states, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + present_key_value_states = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + if attn_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self.gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + head_mask[idx] if head_mask is not None else None, + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + None, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + present_key_value_states += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add final layer norm + hidden_states = self.layer_norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + present_key_value_states, + all_hidden_states, + all_self_attns, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=present_key_value_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +class Kosmos2PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = Kosmos2Config + supports_gradient_checkpointing = True + _no_split_modules = ["Kosmos2VisionEncoderLayer, Kosmos2TextBlock"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(self, Kosmos2VisionModel): + factor = self.config.initializer_factor + elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)): + factor = self.config.vision_config.initializer_factor + + if isinstance(self, (Kosmos2TextModel, Kosmos2TextForCausalLM)): + std = self.config.init_std + elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)): + std = self.config.text_config.init_std + + if isinstance(module, Kosmos2VisionEmbeddings): + nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) + nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) + nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) + elif isinstance(module, Kosmos2VisionAttention): + in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (module.embed_dim**-0.5) * factor + nn.init.normal_(module.q_proj.weight, std=in_proj_std) + nn.init.normal_(module.k_proj.weight, std=in_proj_std) + nn.init.normal_(module.v_proj.weight, std=in_proj_std) + nn.init.normal_(module.out_proj.weight, std=out_proj_std) + if module.q_proj.bias is not None: + module.q_proj.bias.data.zero_() + if module.k_proj.bias is not None: + module.k_proj.bias.data.zero_() + if module.v_proj.bias is not None: + module.v_proj.bias.data.zero_() + if module.out_proj.bias is not None: + module.out_proj.bias.data.zero_() + elif isinstance(module, Kosmos2VisionMLP): + in_proj_std = ( + (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + ) + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + if module.fc1.bias is not None: + module.fc1.bias.data.zero_() + if module.fc2.bias is not None: + module.fc2.bias.data.zero_() + elif isinstance(module, Kosmos2VisionEncoderLayer): + module.layer_norm1.bias.data.zero_() + module.layer_norm1.weight.data.fill_(1.0) + module.layer_norm2.bias.data.zero_() + module.layer_norm2.weight.data.fill_(1.0) + elif isinstance(module, Kosmos2VisionTransformer): + module.pre_layrnorm.bias.data.zero_() + module.pre_layrnorm.weight.data.fill_(1.0) + module.post_layernorm.bias.data.zero_() + module.post_layernorm.weight.data.fill_(1.0) + elif isinstance(module, KosmosTextAttention): + nn.init.normal_(module.q_proj.weight, std=std) + nn.init.normal_(module.k_proj.weight, std=std) + nn.init.normal_(module.v_proj.weight, std=std) + nn.init.normal_(module.out_proj.weight, std=std) + if module.q_proj.bias is not None: + module.q_proj.bias.data.zero_() + if module.k_proj.bias is not None: + module.k_proj.bias.data.zero_() + if module.v_proj.bias is not None: + module.v_proj.bias.data.zero_() + if module.out_proj.bias is not None: + module.out_proj.bias.data.zero_() + elif isinstance(module, Kosmos2TextFFN): + nn.init.normal_(module.fc1.weight, std=std) + nn.init.normal_(module.fc2.weight, std=std) + if module.fc1.bias is not None: + module.fc1.bias.data.zero_() + if module.fc2.bias is not None: + module.fc2.bias.data.zero_() + elif isinstance(module, Kosmos2TextForCausalLM): + nn.init.normal_(module.lm_head.weight, std=std) + if module.lm_head.bias is not None: + 
module.lm_head.bias.data.zero_() + elif isinstance(module, Kosmos2ImageToTextProjection): + nn.init.normal_(module.dense.weight, std=std) + if module.dense.bias is not None: + module.dense.bias.data.zero_() + elif isinstance(module, Kosmos2TextTransformer): + module.embed_tokens.weight.data.normal_(mean=0.0, std=std) + if module.embed_tokens.padding_idx is not None: + module.embed_tokens.weight.data[module.embed_tokens.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, gradient_checkpointing_func=None): + if isinstance(module, (Kosmos2TextTransformer, Kosmos2VisionEncoder)): + module.gradient_checkpointing_func = gradient_checkpointing_func + module.gradient_checkpointing = gradient_checkpointing_func is not None + + +class Kosmos2VisionModel(Kosmos2PreTrainedModel): + config_class = Kosmos2VisionConfig + main_input_name = "pixel_values" + + # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model + def __init__(self, config: Kosmos2VisionConfig): + super().__init__(config) + self.model = Kosmos2VisionTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.get_input_embeddings with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model + def get_input_embeddings(self) -> nn.Module: + return self.model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(KOSMOS2_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Kosmos2VisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + return self.model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class Kosmos2TextModel(Kosmos2PreTrainedModel): + config_class = Kosmos2TextConfig + + def __init__(self, config: Kosmos2TextConfig): + super().__init__(config) + self.model = Kosmos2TextTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=Kosmos2TextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + image_embeds: Optional[torch.Tensor] = None, + image_embeds_position_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, 
BaseModelOutputWithPastAndCrossAttentions]: + r""" + Returns: + + """ + return self.model( + input_ids=input_ids, + attention_mask=attention_mask, + image_embeds=image_embeds, + image_embeds_position_mask=image_embeds_position_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + position_ids=position_ids, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +@add_start_docstrings( + """ + The text model from KOSMOS-2 with a language modeling head on top (linear layer with weights tied to the input + embeddings). + """, + KOSMOS2_START_DOCSTRING, +) +class Kosmos2TextForCausalLM(Kosmos2PreTrainedModel): + config_class = Kosmos2TextConfig + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config: Kosmos2TextConfig): + super().__init__(config) + + self.model = Kosmos2TextTransformer(config) + self.lm_head = nn.Linear(in_features=config.embed_dim, out_features=config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self) -> nn.Module: + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=Kosmos2TextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + image_embeds: Optional[torch.Tensor] = None, + image_embeds_position_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + + Returns: + + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + image_embeds=image_embeds, + image_embeds_position_mask=image_embeds_position_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + position_ids=position_ids, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + lm_logits = self.lm_head(outputs[0]) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(lm_logits.device) + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + batch_size, seq_length, vocab_size = shift_logits.shape + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct( + shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length) + ) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + image_embeds=None, + image_embeds_position_mask=None, + past_key_values=None, + attention_mask=None, + use_cache=None, + **model_kwargs, + ): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + position_ids = None + + # cut input_ids if past_key_values is used + if past_key_values is not None: + position_ids = create_position_ids_from_input_ids( + input_ids, + padding_idx=self.config.pad_token_id, + past_key_values_length=0, + )[:, -1:] + + input_ids = input_ids[:, -1:] + # the image info. 
is already encoded into the past keys/values + image_embeds = None + image_embeds_position_mask = None + elif image_embeds_position_mask is not None: + # appending `False` to `image_embeds_position_mask` (because `input_ids` grows during generation) + batch_size, seq_len = input_ids.size() + mask_len = image_embeds_position_mask.size()[-1] + image_embeds_position_mask = torch.cat( + ( + image_embeds_position_mask, + torch.zeros(size=(batch_size, seq_len - mask_len), dtype=torch.bool, device=input_ids.device), + ), + dim=1, + ) + + return { + "input_ids": input_ids, + "image_embeds": image_embeds, + "image_embeds_position_mask": image_embeds_position_mask, + "past_key_values": past_key_values, + "attention_mask": attention_mask, + "position_ids": position_ids, + "use_cache": use_cache, + } + + @staticmethod + # Copied from transformers.models.umt5.modeling_umt5.UMT5ForConditionalGeneration._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +class Kosmos2ImageToTextProjection(nn.Module): + """The layer that transforms the image model's output to part of the text model's input (namely, image features)""" + + def __init__(self, config: Kosmos2Config): + super().__init__() + self.dense = nn.Linear(config.vision_config.hidden_size, config.text_config.embed_dim) + self.latent_query = nn.Parameter(torch.randn(config.latent_query_num, config.text_config.embed_dim)) + + self.x_attn = KosmosTextAttention( + config.text_config, + config.text_config.embed_dim, + config.text_config.attention_heads, + dropout=config.text_config.attention_dropout, + is_decoder=False, + add_inner_attn_layernorm=False, + ) + + def forward(self, features): + hidden_states = self.dense(features) + + # shape = [batch, latent_query_num, h_dim] + latent_query = self.latent_query.unsqueeze(0).expand(hidden_states.size(0), -1, -1) + key_value_states = torch.cat([hidden_states, latent_query], dim=1) + + hidden_states, attn_weights, _ = self.x_attn( + hidden_states=latent_query, + encoder_hidden_states=key_value_states, + past_key_value=None, + attention_mask=None, + output_attentions=None, + ) + + return hidden_states, attn_weights + + +@add_start_docstrings( + """ + KOSMOS-2 Model for generating text and image features. The model consists of a vision encoder and a language model. 
+ """, + KOSMOS2_START_DOCSTRING, +) +class Kosmos2Model(Kosmos2PreTrainedModel): + config_class = Kosmos2Config + main_input_name = "pixel_values" + + def __init__(self, config: Kosmos2Config): + super().__init__(config) + + self.text_model = Kosmos2TextModel(config.text_config) + self.vision_model = Kosmos2VisionModel(config.vision_config) + self.image_to_text_projection = Kosmos2ImageToTextProjection(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.text_model.model.embed_tokens + + def set_input_embeddings(self, value): + self.text_model.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Kosmos2ModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + input_ids: Optional[torch.Tensor] = None, + image_embeds_position_mask: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + image_embeds: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Kosmos2ModelOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, Kosmos2Model + + >>> model = Kosmos2Model.from_pretrained("microsoft/kosmos-2-patch14-224") + >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") + + >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> text = ( + ... " An image of a snowman" + ... " warming himself by a fire" + ... "" + ... ) + + >>> inputs = processor(text=text, images=image, return_tensors="pt", add_eos_token=True) + + >>> last_hidden_state = model( + ... pixel_values=inputs["pixel_values"], + ... input_ids=inputs["input_ids"], + ... attention_mask=inputs["attention_mask"], + ... image_embeds_position_mask=inputs["image_embeds_position_mask"], + ... ).last_hidden_state + >>> list(last_hidden_state.shape) + [1, 91, 2048] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_model_output = None + projection_attentions = None + if image_embeds is None: + if pixel_values is None: + raise ValueError("You have to specify either `pixel_values` or `image_embeds`.") + + vision_model_output = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. 
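+            # Image features are prepared in three steps: apply the vision model's `post_layernorm` to the full
+            # `last_hidden_state`, L2-normalize the result, and resample it to `latent_query_num` tokens with
+            # `Kosmos2ImageToTextProjection` so it can be passed to the text model as `image_embeds`.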
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) + # normalized features + image_embeds = nn.functional.normalize(image_embeds, dim=-1) + image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) + + outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + image_embeds=image_embeds, + image_embeds_position_mask=image_embeds_position_mask, + head_mask=head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + position_ids=position_ids, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + outputs = outputs + (image_embeds, projection_attentions, vision_model_output) + return tuple(output for output in outputs if output is not None) + + return Kosmos2ModelOutput( + last_hidden_state=outputs.last_hidden_state, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + image_embeds=image_embeds, + projection_attentions=projection_attentions, + vision_model_output=vision_model_output, + ) + + +@add_start_docstrings( + """ + KOSMOS-2 Model for generating text and bounding boxes given an image. The model consists of a vision encoder and a + language model. + """, + KOSMOS2_START_DOCSTRING, +) +class Kosmos2ForConditionalGeneration(Kosmos2PreTrainedModel): + config_class = Kosmos2Config + main_input_name = "pixel_values" + _tied_weights_keys = ["text_model.lm_head.weight"] + + def __init__(self, config: Kosmos2Config): + super().__init__(config) + + self.text_model = Kosmos2TextForCausalLM(config.text_config) + self.vision_model = Kosmos2VisionModel(config.vision_config) + + self.image_to_text_projection = Kosmos2ImageToTextProjection(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.text_model.model.embed_tokens + + def set_input_embeddings(self, value): + self.text_model.model.embed_tokens = value + + def get_output_embeddings(self) -> nn.Module: + return self.text_model.get_output_embeddings() + + def set_output_embeddings(self, new_embeddings): + self.text_model.set_output_embeddings(new_embeddings) + + @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Kosmos2ForConditionalGenerationModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + input_ids: Optional[torch.Tensor] = None, + image_embeds_position_mask: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + image_embeds: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Kosmos2ForConditionalGenerationModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, Kosmos2ForConditionalGeneration + + >>> model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224") + >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") + + >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> prompt = " An image of" + + >>> inputs = processor(text=prompt, images=image, return_tensors="pt") + + >>> generated_ids = model.generate( + ... pixel_values=inputs["pixel_values"], + ... input_ids=inputs["input_ids"], + ... attention_mask=inputs["attention_mask"], + ... image_embeds=None, + ... image_embeds_position_mask=inputs["image_embeds_position_mask"], + ... use_cache=True, + ... max_new_tokens=64, + ... ) + >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + >>> processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False) + >>> processed_text + ' An image of a snowman warming himself by a fire.' + + >>> caption, entities = processor.post_process_generation(generated_text) + >>> caption + 'An image of a snowman warming himself by a fire.' + + >>> entities + [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_model_output = None + projection_attentions = None + if image_embeds is None: + if pixel_values is None: + raise ValueError("You have to specify either `pixel_values` or `image_embeds`.") + + vision_model_output = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. 
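+            # Same image-feature pipeline as in `Kosmos2Model.forward`: post layer norm, L2-normalization, then
+            # projection into the text embedding space.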
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) + # normalized features + image_embeds = nn.functional.normalize(image_embeds, dim=-1) + image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) + + lm_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + image_embeds=image_embeds, + image_embeds_position_mask=image_embeds_position_mask, + head_mask=head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + position_ids=position_ids, + labels=labels, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + outputs = lm_outputs + (image_embeds, projection_attentions, vision_model_output) + return tuple(output for output in outputs if output is not None) + + return Kosmos2ForConditionalGenerationModelOutput( + loss=lm_outputs.loss, + logits=lm_outputs.logits, + past_key_values=lm_outputs.past_key_values, + hidden_states=lm_outputs.hidden_states, + attentions=lm_outputs.attentions, + image_embeds=image_embeds, + projection_attentions=projection_attentions, + vision_model_output=vision_model_output, + ) + + def generate( + self, + pixel_values: Optional[torch.Tensor] = None, + image_embeds_position_mask: Optional[torch.Tensor] = None, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + image_embeds: Optional[torch.Tensor] = None, + **kwargs, + ): + # in order to allow `inputs` argument (as in `GenerationMixin`) + inputs = kwargs.pop("inputs", None) + if pixel_values is not None and inputs is not None: + raise ValueError( + f"`inputs`: {inputs} were passed alongside `pixel_values` which is not allowed." + f"Make sure to either pass `inputs` or pixel_values=..." + ) + if pixel_values is None and inputs is not None: + pixel_values = inputs + + if image_embeds is None: + vision_model_output = self.vision_model(pixel_values) + # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. + image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) + # normalized features + image_embeds = nn.functional.normalize(image_embeds, dim=-1) + image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) + + output = self.text_model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + image_embeds=image_embeds, + image_embeds_position_mask=image_embeds_position_mask, + **kwargs, + ) + + return output diff --git a/src/transformers/models/kosmos2/processing_kosmos2.py b/src/transformers/models/kosmos2/processing_kosmos2.py new file mode 100644 index 000000000000..5dc0fad0de01 --- /dev/null +++ b/src/transformers/models/kosmos2/processing_kosmos2.py @@ -0,0 +1,663 @@ +# coding=utf-8 +# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Processor class for KOSMOS-2.""" + +import copy +import math +import re +from typing import List, Optional, Tuple, Union + +from ...image_processing_utils import BatchFeature +from ...image_utils import ImageInput, is_batched +from ...processing_utils import ProcessorMixin +from ...tokenization_utils import AddedToken +from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy +from ...utils import TensorType + + +BboxInput = Union[ + List[Tuple[int, int]], + List[Tuple[float, float, float, float]], + List[List[Tuple[int, int]]], + List[List[Tuple[float, float, float]]], +] + + +class Kosmos2Processor(ProcessorMixin): + r""" + Constructs an KOSMOS-2 processor which wraps a KOSMOS-2 image processor and a KOSMOS-2 tokenizer into a single + processor. + + [`Kosmos2Processor`] offers all the functionalities of [`CLIPImageProcessor`] and some functionalities of + [`XLMRobertaTokenizerFast`]. See the docstring of [`~Kosmos2Processor.__call__`] and [`~Kosmos2Processor.decode`] + for more information. + + Args: + image_processor (`CLIPImageProcessor`): + An instance of [`CLIPImageProcessor`]. The image processor is a required input. + tokenizer (`XLMRobertaTokenizerFast`): + An instance of ['XLMRobertaTokenizerFast`]. The tokenizer is a required input. + num_patch_index_tokens (`int`, *optional*, defaults to 1024): + The number of tokens that represent patch indices. + """ + attributes = ["image_processor", "tokenizer"] + image_processor_class = "CLIPImageProcessor" + tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") + + def __init__(self, image_processor, tokenizer, num_patch_index_tokens=1024): + tokenizer.return_token_type_ids = False + + self.eod_token = "" + + self.boi_token = "" + self.eoi_token = "" + + self.eoc_token = "" + self.eol_token = "" + + self.bop_token = "" + self.eop_token = "" + + self.boo_token = "" + self.eoo_token = "" + + self.dom_token = "" + + self.grd_token = "" + + self.tag_tokens = [ + self.eod_token, + self.boi_token, + self.eoi_token, + self.eoc_token, + self.eol_token, + self.bop_token, + self.eop_token, + self.boo_token, + self.eoo_token, + self.dom_token, + self.grd_token, + ] + + self.num_patch_index_tokens = num_patch_index_tokens + patch_index_tokens = [f"" for x in range(self.num_patch_index_tokens)] + + tokens_to_add = [] + for token in self.tag_tokens + patch_index_tokens: + tokens_to_add.append(AddedToken(token, lstrip=True, rstrip=False, normalized=False)) + tokenizer.add_tokens(tokens_to_add) + + super().__init__(image_processor, tokenizer) + + def __call__( + self, + images: ImageInput = None, + text: Union[TextInput, List[TextInput]] = None, + bboxes: BboxInput = None, + num_image_tokens: Optional[int] = 64, + first_image_token_id: Optional[int] = None, + add_special_tokens: bool = True, + add_eos_token: bool = False, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + return_length: bool = False, + verbose: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> BatchFeature: + """ + This method uses [`CLIPImageProcessor.__call__`] method to prepare image(s) for the model, and + [`XLMRobertaTokenizerFast.__call__`] to prepare text for the model. + + Please refer to the docstring of the above two methods for more information. 
+ + The rest of this documentation shows the arguments specific to `Kosmos2Processor`. + + Args: + bboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*): + The bounding bboxes associated to `texts`. + num_image_tokens (`int`, defaults to 64): + The number of (consecutive) places that are used to mark the placeholders to store image information. + This should be the same as `latent_query_num` in the instance of `Kosmos2Config` you are using. + first_image_token_id (`int`, *optional*): + The token id that will be used for the first place of the subsequence that is reserved to store image + information. If unset, will default to `self.tokenizer.unk_token_id + 1`. + add_eos_token (`bool`, defaults to `False`): + Whether or not to include `EOS` token id in the encoding when `add_special_tokens=True`. + """ + if images is None and text is None: + raise ValueError("You have to specify either images or text.") + + encoding = BatchFeature() + + if images is not None: + image_encoding = self.image_processor(images, return_tensors=return_tensors) + encoding.update(image_encoding) + + if text is not None: + text = self.preprocess_examples(text, images, bboxes, num_image_tokens=num_image_tokens) + + if add_special_tokens and not add_eos_token: + if isinstance(text, str): + text = f"{self.tokenizer.bos_token}{text}" + elif isinstance(text, list): + text = [f"{self.tokenizer.bos_token}{s}" for s in text] + + text_encoding = self.tokenizer( + text=text, + add_special_tokens=(add_special_tokens and add_eos_token), + padding=padding and images is None, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of if images is None else pad_to_multiple_of, + return_attention_mask=return_attention_mask, + verbose=verbose, + return_tensors=return_tensors if images is None else None, + **kwargs, + ) + encoding.update(text_encoding) + + if text is not None and images is not None: + # Use the id of the first token after + if first_image_token_id is None: + first_image_token_id = self.tokenizer.unk_token_id + 1 + + # To see if we need one more `0` (for ``) at the beginning of `image_embeds_position_mask`. + with_bos = add_special_tokens + + # The first (actual) `` token is always at the 1st or 2nd place (after `` if any). Here we look + # for the second `` token (which indicate the first image token). + start_index = int(with_bos) + 1 + + # Add `image_embeds_position_mask`: the leading and trailing `0` are for `boi` and `eoi` tokens. The `1` indicates + # the places of image tokens. 
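+            # For example, with `num_image_tokens=4` and `add_special_tokens=True`, the mask built below for one
+            # example is `[0, 0, 1, 1, 1, 1, 0, 0, ...]`: zeros for the `bos`, `boi`, `eoi` and text positions,
+            # ones for the places that will be filled with image features.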
+ image_token_ids = list(range(first_image_token_id, first_image_token_id + num_image_tokens)) + base_image_embeds_position_mask = [0] + [1] * num_image_tokens + [0] + + # loop over `encoding["input_ids"]` + input_ids = [] + image_embeds_position_mask = [] + all_input_ids = encoding["input_ids"] + # not batched -> (changed to) batch of size 1 + if isinstance(text, str): + all_input_ids = [all_input_ids] + encoding["attention_mask"] = [encoding["attention_mask"]] + for text_ids in all_input_ids: + # change the ids for the fake `` tokens in `input_ids` + text_ids = text_ids[:start_index] + image_token_ids + text_ids[start_index + num_image_tokens :] + input_ids.append(text_ids) + + mask = copy.copy(base_image_embeds_position_mask) + if with_bos: + # for `` + mask = [0] + mask + # trailing part (which are not related to the image) + mask += [0] * (len(text_ids) - len(mask)) + image_embeds_position_mask.append(mask) + + if isinstance(text, list): + sorted_length = sorted([(idx, len(x)) for idx, x in enumerate(text_encoding.input_ids)]) + _, min_len_not_padded = sorted_length[0] + idx, _ = sorted_length[-1] + + text_encoding = self.tokenizer( + text=[text[idx]], + add_special_tokens=(add_special_tokens and add_eos_token), + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + return_tensors=None, + **kwargs, + ) + max_len_padded = len(text_encoding.input_ids[0]) + + if min_len_not_padded != max_len_padded: + if self.tokenizer.padding_side == "right": + input_ids = [x + [self.tokenizer.pad_token_id] * (max_len_padded - len(x)) for x in input_ids] + image_embeds_position_mask = [ + x + [0] * (max_len_padded - len(x)) for x in image_embeds_position_mask + ] + encoding["attention_mask"] = [ + x + [0] * (max_len_padded - len(x)) for x in encoding["attention_mask"] + ] + elif self.tokenizer.padding_side == "left": + input_ids = [[self.tokenizer.pad_token_id] * (max_len_padded - len(x)) + x for x in input_ids] + image_embeds_position_mask = [ + [0] * (max_len_padded - len(x)) + x for x in image_embeds_position_mask + ] + encoding["attention_mask"] = [ + [0] * (max_len_padded - len(x)) + x for x in encoding["attention_mask"] + ] + + # un-batch if necessary + if isinstance(text, str) and return_tensors is None: + input_ids = input_ids[0] + encoding["attention_mask"] = encoding["attention_mask"][0] + image_embeds_position_mask = image_embeds_position_mask[0] + + # update (with the target tensor type if specified) + encoding.update( + BatchEncoding( + data={ + "input_ids": input_ids, + "attention_mask": encoding["attention_mask"], + "image_embeds_position_mask": image_embeds_position_mask, + }, + tensor_type=return_tensors, + ) + ) + + return encoding + + def _check_bboxes_for_single_text(self, bboxes): + """ + Check `bboxes` for a single text example. It could be + - `None`: no bounding box associated to a text. + - A list with each element being the bounding boxes associated to one ` ... ` pair found + in a text. This could be: + - `None`: no bounding box associated to a ` ... ` pair. + - A tuple of 2 integers: A single bounding box specified by patch indices. + - A tuple of 4 float point number: A single bounding box specified by (normalized) coordinates. + - A list containing the above 2 tuple types: Multiple bounding boxes for a + ` ... ` pair. 
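+
+        For example, `[None, [(44, 863)], [(0.1, 0.2, 0.8, 0.9)]]` describes three pairs found in the text: the
+        first has no bounding box, the second has a single box given as patch indices, and the third has a single
+        box given as normalized coordinates.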
+ """ + if bboxes is None: + return + elif not isinstance(bboxes, list): + raise ValueError("`bboxes` (for a single text example) should be `None` or a list.") + + # `bbox` is the bounding boxes for a single pair + for bbox in bboxes: + if bbox is None: + continue + elif not isinstance(bbox, list): + bbox = [bbox] + for element in bbox: + if not isinstance(element, tuple) or not ( + (len(element) == 2 and all(isinstance(x, int) for x in element)) + or (len(element) == 4 and all(isinstance(x, float) for x in element)) + ): + raise ValueError( + "Each element in `bboxes` (for a single text example) should be either `None`, a tuple containing " + "2 integers or 4 float point numbers, or a list containing such tuples. Also " + "make sure the arguments `texts` and `bboxes` passed to `preprocess_text` are both in " + "batches or both for a single example." + ) + + def _preprocess_single_example(self, text, image, bboxes, img_info_tokens): + text = text.strip() + if image is not None: + # Add ` ... (fake) image tokens ... ` + text = f"{img_info_tokens} {text}" + + # Add ` ` after ` phrase text ` + text = self._insert_patch_index_tokens(text, bboxes) + return text + + def preprocess_examples( + self, + texts: Union[TextInput, List[TextInput]], + images: ImageInput = None, + bboxes: BboxInput = None, + num_image_tokens: Optional[int] = 64, + ) -> Union[str, List[str]]: + """Add image and bounding box information to `texts` as image and patch index tokens. + + Args: + texts (`Union[TextInput, List[TextInput]]`): The texts to be processed. + images (`ImageInput`, *optional*): The images associated to `texts`. + bboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*): + The bounding bboxes associated to `texts`. + num_image_tokens (`int`, *optional*, defaults to 64): + The number of image tokens (used as latent queries). This should corresponds to the `latent_query_num` + attribute in `Kosmos2Config`. + + Returns: + `Union[TextInput, List[TextInput]]`: The processed texts with image and patch index tokens. + """ + # These are fake `` tokens enclosed between (the actual) `` token and ``. + img_tokens = [self.boi_token] * num_image_tokens + img_info_tokens = " ".join([self.boi_token] + img_tokens + [self.eoi_token]) + + # make batch to simplify processing logic + batched = True + if isinstance(texts, str): + batched = False + texts = [texts] + + if images is None: + images = [None] * len(texts) + elif not is_batched(images): + images = [images] + if len(texts) != len(images): + raise ValueError( + f"The number of examples in `texts` and `images` should be the same. Got {len(texts)} v.s. {len(images)} instead." + ) + + if not batched: + self._check_bboxes_for_single_text(bboxes) + bboxes = [bboxes] + elif bboxes is not None: + if not isinstance(bboxes, list): + raise ValueError("`bboxes` should be `None` or a list (as a batch) when `texts` is passed as a batch.") + for x in bboxes: + self._check_bboxes_for_single_text(x) + else: + bboxes = [None] * len(texts) + + if len(bboxes) != len(texts): + raise ValueError( + f"The number of examples in `texts` and `bboxes` should be the same. Got {len(texts)} v.s. {len(bboxes)} instead." 
+ ) + + result = [ + self._preprocess_single_example(text, image, bbox, img_info_tokens) + for text, image, bbox in zip(texts, images, bboxes) + ] + # un-batch if necessary + if not batched: + result = result[0] + + return result + + # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer + to the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + def post_process_generation(self, text, cleanup_and_extract=True): + caption = text.split(self.eoi_token)[-1] + if cleanup_and_extract: + return clean_text_and_extract_entities_with_bboxes(caption) + return caption + + @property + # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + def _insert_patch_index_tokens(self, text: str, bboxes: Union[List[Tuple[int]], List[Tuple[float]]]) -> str: + if bboxes is None or len(bboxes) == 0: + return text + + matched_phrases = list(re.finditer(r".+?", string=text)) + if len(matched_phrases) != len(bboxes): + raise ValueError( + f"The number of elements in `bboxes` should be the same as the number of ` ... ` pairs in `text`. Got {len(matched_phrases)} v.s. {len(bboxes)} instead." + ) + + # insert object's patch index tokens + # the found ` ... ` pairs. + curr_pos = 0 + buffer = [] + for matched, bbox in zip(matched_phrases, bboxes): + _, end = matched.span() + buffer.append(text[curr_pos:end]) + curr_pos = end + # A phrase without bbox + if bbox is None: + continue + # A phrase with a single bbox + if isinstance(bbox, tuple): + bbox = [bbox] + patch_index_strings = [] + # A phrase could have multiple bboxes + if not all(box is not None for box in bbox): + raise ValueError( + "The multiple bounding boxes for a single phrase should not contain any `None` value." 
+ ) + for box in bbox: + patch_index_1, patch_index_2 = self._convert_bbox_to_patch_index_tokens(box) + patch_index_strings.append(f"{patch_index_1} {patch_index_2}") + # `bbox` being an empty list + if len(patch_index_strings) == 0: + continue + position_str = " ".join(patch_index_strings) + buffer.append(f" {position_str} ") + # remaining + if curr_pos < len(text): + buffer.append(text[curr_pos:]) + + text = "".join(buffer) + return text + + def _convert_bbox_to_patch_index_tokens( + self, bbox: Union[Tuple[int, int], Tuple[float, float, float, float]] + ) -> Tuple[str, str]: + # already computed patch indices + if len(bbox) == 2: + idx_1, idx_2 = bbox + # bbox specified with (normalized) coordinates + else: + # use `self.tokenizer` to get `num_patches_per_side` + num_patches_per_side = int(math.sqrt(self.num_patch_index_tokens)) + idx_1, idx_2 = coordinate_to_patch_index(bbox, num_patches_per_side) + + token_1 = f"" + token_2 = f"" + + return token_1, token_2 + + +def coordinate_to_patch_index(bbox: Tuple[float, float, float, float], num_patches_per_side: int) -> Tuple[int, int]: + """Convert a bounding box to a pair of patch indices. + + Args: + bbox (`Tuple[float, float, float, float]`): + The 4 coordinates of the bounding box, with the format being (x1, y1, x2, y2) specifying the upper-left and + lower-right corners of the box. It should have x2 > x1 and y2 > y1. + num_patches_per_side (`int`): the number of patches along each side. + + Returns: + `Tuple[int, int]`: A pair of patch indices representing the upper-left patch and lower-right patch. + """ + (x1, y1, x2, y2) = bbox + + if not (x2 > x1 and y2 > y1): + raise ValueError("The coordinates in `bbox` should be `(x1, y1, x2, y2)` with `x2 > x1` and `y2 > y1`.") + + ul_x = math.floor(x1 * num_patches_per_side) + ul_y = math.floor(y1 * num_patches_per_side) + + lr_x = math.ceil(x2 * num_patches_per_side - 1) + lr_y = math.ceil(y2 * num_patches_per_side - 1) + + ul_idx = ul_y * num_patches_per_side + ul_x + lr_idx = lr_y * num_patches_per_side + lr_x + + return ul_idx, lr_idx + + +# copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L35C1-L75C38 +# (with format modifications) +def patch_index_to_coordinate(ul_idx: int, lr_idx: int, num_patches_per_side: int): + """ + Given a grid of length `num_patches_per_side` and the indices of the upper-left and lower-right corners of a + bounding box, returns the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2). + + Args: + ul_idx (`int`): the index of the grid cell that corresponds to the upper-left corner of the bounding box. + lr_idx (`int`): the index of the grid cell that corresponds to the lower-right corner of the bounding box. + num_patches_per_side (`int`): the number of patches along each side. + + Returns: + `Tuple[float]`: the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2). 
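+
+    Example (using the `(44, 863)` snowman box from the doctests elsewhere in this file, with a 32-patch grid):
+
+    ```python
+    >>> patch_index_to_coordinate(44, 863, num_patches_per_side=32)
+    (0.390625, 0.046875, 0.984375, 0.828125)
+    ```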
+ """ + # Compute the size of each cell in the grid + cell_size = 1.0 / num_patches_per_side + + # Compute the x and y indices of the upper-left and lower-right corners of the bounding box + ul_x = ul_idx % num_patches_per_side + ul_y = ul_idx // num_patches_per_side + + lr_x = lr_idx % num_patches_per_side + lr_y = lr_idx // num_patches_per_side + + # Compute the normalized coordinates of the bounding box + if ul_idx == lr_idx: + x1 = ul_x * cell_size + y1 = ul_y * cell_size + x2 = lr_x * cell_size + cell_size + y2 = lr_y * cell_size + cell_size + elif ul_x == lr_x or ul_y == lr_y: + x1 = ul_x * cell_size + y1 = ul_y * cell_size + x2 = lr_x * cell_size + cell_size + y2 = lr_y * cell_size + cell_size + else: + x1 = ul_x * cell_size + cell_size / 2 + y1 = ul_y * cell_size + cell_size / 2 + x2 = lr_x * cell_size + cell_size / 2 + y2 = lr_y * cell_size + cell_size / 2 + + return x1, y1, x2, y2 + + +# copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L4-L33 +# (with format modifications) +def extract_entities_with_patch_indices(text): + """Extract entities contained in `text`. The bounding bboxes is given in the form of patch indices. + + This functioin is only intended to be used within `clean_text_and_extract_entities_with_bboxes` where further + processing happens, including converting to normalized coordinates and whitespace character cleaning up. + + Examples: + + ```python + >>> text = " An image of a snowman warming himself by a fire." + >>> entities = extract_entities_with_patch_indices(text) + >>> entities + [(' a snowman', (31, 41), [(44, 863)]), (' a fire', (130, 137), [(5, 911)])] + ```""" + # The regular expression pattern for matching the required formats + pattern = r"(?:(([^<]+)))?((?:)*)" + + # Find all matches in the given string + matches = re.finditer(pattern, text) + + # Initialize an empty list to store the valid patch_index combinations + entities_with_patch_indices = [] + + for match in matches: + # span of a `phrase` that is between and + span = match.span(2) + phrase_tag, phrase, match_content = match.groups() + if not phrase_tag: + phrase = None + # We take the starting position of `` + span = (match.span(0)[0], match.span(0)[0]) + + # Split the match_content by the delimiter to get individual patch_index pairs + patch_index_pairs = match_content.split("") + + entity_bboxes = [] + for pair in patch_index_pairs: + # Extract the xxxx and yyyy values from the patch_index pair + x = re.search(r"", pair) + y = re.search(r"", pair[1:]) + + if x and y: + if phrase: + entity_bboxes.append((int(x.group(1)), int(y.group(1)))) + else: + entity_bboxes.append((int(x.group(1)), int(y.group(1)))) + + if phrase: + entities_with_patch_indices.append((phrase, span, entity_bboxes)) + else: + for bbox in entity_bboxes: + # fake entity name + entity = f"" + entities_with_patch_indices.append((entity, span, [bbox])) + + return entities_with_patch_indices + + +def adjust_entity_positions(entity, text): + """Adjust the positions of the entities in `text` to be relative to the text with special fields removed.""" + entity_name, (start, end) = entity + # computed the length of strings with special fields (tag tokens, patch index tokens, etc.) 
removed + adjusted_start = len(re.sub("<.*?>", "", text[:start])) + adjusted_end = len(re.sub("<.*?>", "", text[:end])) + adjusted_entity = (entity_name, (adjusted_start, adjusted_end)) + return adjusted_entity + + +def _cleanup_spaces(text, entities): + """Remove the spaces around the text and the entities in it.""" + new_text = text.strip() + leading_spaces = len(text) - len(text.lstrip()) + + new_entities = [] + for entity_name, (start, end), bboxes in entities: + entity_name_leading_spaces = len(entity_name) - len(entity_name.lstrip()) + entity_name_trailing_spaces = len(entity_name) - len(entity_name.rstrip()) + + start = start - leading_spaces + entity_name_leading_spaces + end = end - leading_spaces - entity_name_trailing_spaces + entity_name = entity_name.strip() + + new_entities.append((entity_name, (start, end), bboxes)) + + return new_text, new_entities + + +# copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L77-L87 +# (with format modifications) +def clean_text_and_extract_entities_with_bboxes(text, num_patches_per_side=32): + """Remove the tag tokens from `text`, extract entities in it with some cleaning up of white characters. + + Examples: + + ```python + >>> text = " An image of a snowman warming himself by a fire." + >>> clean_text, entities = clean_text_and_extract_entities_with_bboxes(text) + >>> clean_text + 'An image of a snowman warming himself by a fire.' + + >>> entities + [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])] + ```""" + # remove special fields (tag tokens, patch index tokens, etc.) + processed_text = re.sub("<.*?>", "", text) + + entities_with_patch_indices = extract_entities_with_patch_indices(text) + entities = [] + for item in entities_with_patch_indices: + entity, bboxes = item[0:2], item[2] + adjusted_entity = adjust_entity_positions(entity, text) + bboxes_in_coords = [patch_index_to_coordinate(bbox[0], bbox[1], num_patches_per_side) for bbox in bboxes] + + entities.append(adjusted_entity + (bboxes_in_coords,)) + + return _cleanup_spaces(processed_text, entities) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 18a2abb11f13..1310312519cc 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -4261,6 +4261,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Kosmos2ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Kosmos2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Kosmos2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/kosmos2/__init__.py b/tests/models/kosmos2/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/kosmos2/test_modeling_kosmos2.py b/tests/models/kosmos2/test_modeling_kosmos2.py new file mode 100644 index 000000000000..2649b8f41d66 --- /dev/null +++ b/tests/models/kosmos2/test_modeling_kosmos2.py @@ -0,0 +1,732 @@ +# coding=utf-8 +# Copyright 2023 Microsoft Research and The 
HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch KOSMOS-2 model. """ + + +import copy +import inspect +import os +import tempfile +import unittest + +import numpy as np +import requests + +from transformers import AutoModelForVision2Seq, AutoProcessor, Kosmos2Config +from transformers.models.kosmos2.configuration_kosmos2 import Kosmos2TextConfig, Kosmos2VisionConfig +from transformers.testing_utils import require_torch, require_vision, slow, torch_device +from transformers.utils import is_torch_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ( + ModelTesterMixin, + _config_zero_init, + floats_tensor, + ids_tensor, + random_attention_mask, +) + + +if is_torch_available(): + import torch + + from transformers import Kosmos2ForConditionalGeneration, Kosmos2Model + from transformers.models.kosmos2.modeling_kosmos2 import KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from PIL import Image + + +class Kosmos2VisionModelTester: + def __init__( + self, + parent, + batch_size=12, + image_size=32, + patch_size=4, + num_channels=3, + is_training=True, + hidden_size=32, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=37, + dropout=0.1, + attention_dropout=0.1, + initializer_range=1e-10, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.is_training = is_training + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.initializer_range = initializer_range + self.scope = scope + + # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) + num_patches = (image_size // patch_size) ** 2 + self.seq_length = num_patches + 1 + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + config = self.get_config() + + return config, pixel_values + + def get_config(self): + return Kosmos2VisionConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + initializer_range=self.initializer_range, + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +class Kosmos2TextModelTester: + def __init__( + self, + parent, + batch_size=12, + seq_length=7, 
+ is_training=True, + use_input_mask=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=37, + dropout=0.1, + attention_dropout=0.1, + max_position_embeddings=512, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.max_position_embeddings = max_position_embeddings + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + if input_mask is not None: + batch_size, seq_length = input_mask.shape + rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) + for batch_idx, start_index in enumerate(rnd_start_indices): + input_mask[batch_idx, :start_index] = 1 + input_mask[batch_idx, start_index:] = 0 + + config = self.get_config() + + return config, input_ids, input_mask + + def get_config(self): + return Kosmos2TextConfig( + vocab_size=self.vocab_size, + embed_dim=self.hidden_size, + layers=self.num_hidden_layers, + attention_heads=self.num_attention_heads, + ffn_dim=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + max_position_embeddings=self.max_position_embeddings, + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, input_mask = config_and_inputs + inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} + return config, inputs_dict + + +class Kosmos2ModelTester: + def __init__(self, parent, text_kwargs=None, vision_kwargs=None, latent_query_num=3, is_training=True): + if text_kwargs is None: + text_kwargs = {} + if vision_kwargs is None: + vision_kwargs = {} + + self.parent = parent + self.text_model_tester = Kosmos2TextModelTester(parent, **text_kwargs) + self.vision_model_tester = Kosmos2VisionModelTester(parent, **vision_kwargs) + self.latent_query_num = latent_query_num + self.is_training = is_training + + def prepare_config_and_inputs(self): + text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() + vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() + + # build `image_embeds_position_mask` + image_embeds_position_mask = torch.zeros_like(input_ids) + image_embeds_position_mask[:, 1 : 1 + self.latent_query_num :] = 1 + + config = self.get_config() + + return config, input_ids, attention_mask, image_embeds_position_mask, pixel_values + + def get_config(self): + return Kosmos2Config( + self.text_model_tester.get_config().to_dict(), + self.vision_model_tester.get_config().to_dict(), + latent_query_num=self.latent_query_num, + ) + + def create_and_check_model(self, config, input_ids, attention_mask, image_embeds_position_mask, pixel_values): + model = Kosmos2Model(config).to(torch_device).eval() + with torch.no_grad(): + result = model(pixel_values, input_ids, image_embeds_position_mask, attention_mask) + self.parent.assertEqual( + result.last_hidden_state.shape, + 
(self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size), + ) + self.parent.assertEqual( + result.image_embeds.shape, + (self.text_model_tester.batch_size, self.latent_query_num, self.text_model_tester.hidden_size), + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, attention_mask, image_embeds_position_mask, pixel_values = config_and_inputs + inputs_dict = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "image_embeds_position_mask": image_embeds_position_mask, + "pixel_values": pixel_values, + } + return config, inputs_dict + + +@require_torch +class Kosmos2ModelTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = (Kosmos2Model, Kosmos2ForConditionalGeneration) if is_torch_available() else () + all_generative_model_classes = (Kosmos2ForConditionalGeneration,) if is_torch_available() else () + fx_compatible = False + test_head_masking = False + test_pruning = False + test_resize_embeddings = False + test_attention_outputs = False + + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = copy.deepcopy(inputs_dict) + + if return_labels: + if model_class.__name__ == "Kosmos2ForConditionalGeneration": + inputs_dict["labels"] = torch.zeros( + (self.model_tester.text_model_tester.batch_size, self.model_tester.text_model_tester.seq_length), + dtype=torch.long, + device=torch_device, + ) + + return inputs_dict + + def setUp(self): + self.model_tester = Kosmos2ModelTester(self) + self.config_tester = ConfigTester(self, config_class=Kosmos2Config, hidden_size=37) + + # overwrite from common to skip `image_to_text_projection.latent_query` + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if param.requires_grad: + if name == "image_to_text_projection.latent_query": + # The original code use ` nn.Parameter(torch.randn(...))` for which this test won't pass. 
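+                        # `torch.randn` samples from a standard normal and is not scaled by the (zeroed-out)
+                        # `initializer_range`, so the rounded-mean check below does not apply to this parameter.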
+ continue + self.assertIn( + ((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + # overwrite from common in order to use `self.model_tester.text_model_tester.num_hidden_layers` + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, + "expected_num_hidden_layers", + self.model_tester.text_model_tester.num_hidden_layers + 1, + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + seq_length = self.model_tester.text_model_tester.seq_length + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [seq_length, self.model_tester.text_model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + # overwrite from common in order to use `config.text_config.vocab_size` instead of `config.vocab_size` + def test_tie_model_weights(self): + if not self.test_torchscript: + return + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + def check_same_values(layer_1, layer_2): + equal = True + for p1, p2 in zip(layer_1.weight, layer_2.weight): + if p1.data.ne(p2.data).sum() > 0: + equal = False + return equal + + for model_class in self.all_model_classes: + config.torchscript = True + model_not_tied = model_class(config) + if model_not_tied.get_output_embeddings() is None: + continue + + config_tied = copy.deepcopy(config) + config_tied.torchscript = False + model_tied = model_class(config_tied) + params_tied = list(model_tied.parameters()) + # Check that the embedding layer and decoding layer are the same in size and in value + # self.assertTrue(check_same_values(embeddings, decoding)) + + # # Check that after modification, they remain the same. + # embeddings.weight.data.div_(2) + # # Check that the embedding layer and decoding layer are the same in size and in value + # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) + # self.assertTrue(check_same_values(embeddings, decoding)) + + # # Check that after modification, they remain the same. 
+ # decoding.weight.data.div_(4) + # # Check that the embedding layer and decoding layer are the same in size and in value + # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) + # self.assertTrue(check_same_values(embeddings, decoding)) + + # Check that after resize they remain tied. + model_tied.resize_token_embeddings(config.text_config.vocab_size + 10) + params_tied_2 = list(model_tied.parameters()) + self.assertEqual(len(params_tied_2), len(params_tied)) + + # decoding.weight.data.mul_(20) + # # Check that the embedding layer and decoding layer are the same in size and in value + # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape) + # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head)) + + @slow + def test_model_from_pretrained(self): + for model_name in KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = Kosmos2Model.from_pretrained(model_name) + self.assertIsNotNone(model) + + def _create_and_check_torchscript(self, config, inputs_dict): + if not self.test_torchscript: + return + + configs_no_init = _config_zero_init(config) # To be sure we have no Nan + configs_no_init.torchscript = True + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + model.to(torch_device) + model.eval() + inputs = self._prepare_for_class(inputs_dict, model_class) + + main_input_name = model_class.main_input_name + + try: + main_input = inputs[main_input_name] + model(main_input, inputs["input_ids"], inputs["image_embeds_position_mask"]) + traced_model = torch.jit.trace( + model, (main_input, inputs["input_ids"], inputs["image_embeds_position_mask"]) + ) + except RuntimeError: + self.fail("Couldn't trace module.") + + with tempfile.TemporaryDirectory() as tmp_dir_name: + pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") + + try: + torch.jit.save(traced_model, pt_file_name) + except Exception: + self.fail("Couldn't save module.") + + try: + loaded_model = torch.jit.load(pt_file_name) + except Exception: + self.fail("Couldn't load module.") + + model.to(torch_device) + model.eval() + + loaded_model.to(torch_device) + loaded_model.eval() + + model_state_dict = model.state_dict() + loaded_model_state_dict = loaded_model.state_dict() + + non_persistent_buffers = {} + for key in loaded_model_state_dict.keys(): + if key not in model_state_dict.keys(): + non_persistent_buffers[key] = loaded_model_state_dict[key] + + loaded_model_state_dict = { + key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers + } + + self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) + + model_buffers = list(model.buffers()) + for non_persistent_buffer in non_persistent_buffers.values(): + found_buffer = False + for i, model_buffer in enumerate(model_buffers): + if torch.equal(non_persistent_buffer, model_buffer): + found_buffer = True + break + + self.assertTrue(found_buffer) + model_buffers.pop(i) + + models_equal = True + for layer_name, p1 in model_state_dict.items(): + if layer_name in loaded_model_state_dict: + p2 = loaded_model_state_dict[layer_name] + if p1.data.ne(p2.data).sum() > 0: + models_equal = False + + self.assertTrue(models_equal) + + # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
+ # (Even with this call, there are still memory leak by ~0.04MB) + self.clear_torch_jit_class_registry() + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "https://huggingface.co/hf-internal-testing/Kosmos2-test-image/resolve/main/demo.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +@require_vision +@require_torch +@slow +class Kosmos2ModelIntegrationTest(unittest.TestCase): + def run_example(self, prompt, image, model, processor): + inputs = processor(text=prompt, images=image, return_tensors="pt", padding=True).to(torch_device) + + generation_outputs = model.generate( + pixel_values=inputs["pixel_values"], + input_ids=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + image_embeds=None, + image_embeds_position_mask=inputs["image_embeds_position_mask"], + use_cache=True, + max_new_tokens=128, + output_scores=True, + return_dict_in_generate=True, + ) + + scores = generation_outputs.scores + generated_ids = generation_outputs.sequences + generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) + # Specify `cleanup_and_extract=False` in order to see the raw model generation. + processed_text = [processor.post_process_generation(x, cleanup_and_extract=False) for x in generated_text] + # By default, the generated text is cleanup and the entities are extracted. + final_text_with_entities = [processor.post_process_generation(x) for x in generated_text] + + return scores, generated_ids, generated_text, processed_text, final_text_with_entities + + def test_snowman_image_captioning(self): + url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png" + + image = Image.open(requests.get(url, stream=True).raw) + image.save("new_image.jpg") + image = Image.open("new_image.jpg") + + model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device) + processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") + + prompt = "An image of" + scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( + prompt, image, model, processor + ) + processed_text = processed_text[0] + final_text, entities = final_text_with_entities[0] + + np.testing.assert_allclose( + torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(), + np.array( + [ + [-1.5672581195831299, -5.007406711578369, 4.36448860168457], + [-2.147017002105713, -4.966302871704102, 4.592559337615967], + [-0.9352350831031799, -4.688288688659668, 6.240612983703613], + ] + ), + atol=1e-5, + ) + np.testing.assert_allclose( + torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(), + np.array( + [ + [2.9916205406188965, 2.481820583343506, 4.646594524383545], + [-2.8381078243255615, -2.9687185287475586, -2.6926779747009277], + [-2.8909168243408203, -3.2228589057922363, -1.7056822776794434], + ] + ), + atol=1e-5, + ) + + # fmt: off + EXPECTED_IDS = [ + [ + 0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 712, 1648, 9, 64007, 10, 43867, 64008, + 64009, 64057, 64876, 64010, 5950, 597, 32, 64007, 10, 646, 64008, 64009, 64018, 64924, 64010, 4, 2 + ] + ] + # fmt: on + self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS) + + EXPECTED_PROCESSED_TEXT = ( + " An image of a snowman " + "warming himself by a fire." 
+ ) + self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT) + + self.assertEqual(final_text, "An image of a snowman warming himself by a fire.") + + EXPECTED_ENTITIES = [ + ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), + ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), + ] + self.assertListEqual(entities, EXPECTED_ENTITIES) + + # test with the detail caption generation + + prompt = "Describe this image in detail:" + scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( + prompt, image, model, processor + ) + processed_text = processed_text[0] + final_text, entities = final_text_with_entities[0] + + np.testing.assert_allclose( + torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(), + np.array( + [ + [-0.9093570113182068, -4.578373908996582, 5.96360969543457], + [2.452126979827881, -4.090598106384277, 8.738677024841309], + [-0.7624598741531372, -4.771658897399902, 6.576295852661133], + ] + ), + atol=1e-5, + ) + np.testing.assert_allclose( + torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(), + np.array( + [ + [-1.673659086227417, -2.162452220916748, -1.95430588722229], + [-2.006824493408203, -2.2038745880126953, -1.24686861038208], + [-3.2783470153808594, -2.814181089401245, -1.390632152557373], + ] + ), + atol=1e-5, + ) + + # fmt: off + EXPECTED_IDS_LONG = [ + [ + 0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 34645, 247, 38, 1648, 12, 3391, 55, + 24, 1648, 1338, 10, 43867, 1280, 32, 64007, 10, 30879, 64008, 64009, 64018, 65020, 64010, 12, 5, 1842, + 4, 71, 17, 1679, 64007, 10, 3958, 64008, 64009, 64061, 64263, 64010, 6, 64007, 15719, 64008, 64009, + 64253, 64617, 64010, 6, 8, 64007, 9626, 64008, 64009, 64413, 64545, 64010, 6, 23, 64007, 10, 4363, + 64008, 64009, 64623, 64885, 64010, 2255, 8, 64007, 10, 3486, 64008, 64009, 64809, 65036, 64010, 1560, + 2255, 4, 24, 43867, 1684, 7, 27, 3774, 5, 10356, 9, 5, 646, 6, 8, 22, 1684, 7, 30, 10, 2007, 8, 16239, + 4337, 4, 2 + ] + ] + # fmt: on + self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS_LONG) + + EXPECTED_PROCESSED_TEXT_LONG = ( + " Describe this image in detail: The image features a snowman sitting by a campfire" + " in the snow. He is wearing a hat" + ", scarf" + ", and gloves" + ", with a pot" + " nearby and a cup placed " + "nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy " + "atmosphere." + ) + self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT_LONG) + + EXPECTED_FINAL_TEXT_LONG = ( + "Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is " + "wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be " + "enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere." 
+ ) + self.assertEqual(final_text, EXPECTED_FINAL_TEXT_LONG) + + EXPECTED_ENTITIES_LONG = [ + ("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), + ("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), + ("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), + ("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]), + ("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]), + ("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]), + ] + self.assertListEqual(entities, EXPECTED_ENTITIES_LONG) + + def test_snowman_image_captioning_batch(self): + url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png" + + image = Image.open(requests.get(url, stream=True).raw) + image.save("new_image.jpg") + image = Image.open("new_image.jpg") + + model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device) + + prompt = ["An image of", "Describe this image in detail:"] + + # left padding + processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") + + scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( + prompt, [image] * len(prompt), model, processor + ) + all_final_text = [x[0] for x in final_text_with_entities] + all_entities = [x[1] for x in final_text_with_entities] + + # left padding gives identical results as non-padding + EXPECTED_PROCESSED_TEXT_0 = ( + " An image of a snowman " + "warming himself by a fire." + ) + EXPECTED_PROCESSED_TEXT_1 = ( + " Describe this image in detail: The image features a snowman sitting by a campfire" + " in the snow. He is wearing a hat" + ", scarf" + ", and gloves" + ", with a pot" + " nearby and a cup placed " + "nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy " + "atmosphere." + ) + self.assertListEqual(processed_text, [EXPECTED_PROCESSED_TEXT_0, EXPECTED_PROCESSED_TEXT_1]) + + EXPECTED_FINAL_TEXT_0 = "An image of a snowman warming himself by a fire." + EXPECTED_FINAL_TEXT_1 = ( + "Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is " + "wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be " + "enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere." 
+ ) + self.assertListEqual(all_final_text, [EXPECTED_FINAL_TEXT_0, EXPECTED_FINAL_TEXT_1]) + + EXPECTED_ENTITIES_0 = [ + ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), + ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), + ] + EXPECTED_ENTITIES_1 = [ + ("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), + ("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), + ("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), + ("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]), + ("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]), + ("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]), + ] + self.assertListEqual(all_entities, [EXPECTED_ENTITIES_0, EXPECTED_ENTITIES_1]) + + # right padding + processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") + + scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( + prompt, [image] * len(prompt), model, processor + ) + all_final_text = [x[0] for x in final_text_with_entities] + all_entities = [x[1] for x in final_text_with_entities] + + # For right padding, only the non-padded sequences will give the same results as non-padding + self.assertEqual(processed_text[1], EXPECTED_PROCESSED_TEXT_1) + self.assertEqual(all_final_text[1], EXPECTED_FINAL_TEXT_1) + self.assertListEqual(all_entities[1], EXPECTED_ENTITIES_1) diff --git a/tests/models/kosmos2/test_processor_kosmos2.py b/tests/models/kosmos2/test_processor_kosmos2.py new file mode 100644 index 000000000000..e2147ee06085 --- /dev/null +++ b/tests/models/kosmos2/test_processor_kosmos2.py @@ -0,0 +1,471 @@ +# coding=utf-8 +# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
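+""" Testing suite for the Kosmos-2 processor. """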
+ +import os +import shutil +import tempfile +import unittest + +import numpy as np +import pytest +import requests + +from transformers.testing_utils import ( + get_tests_dir, + require_sentencepiece, + require_tokenizers, + require_torch, + require_vision, +) +from transformers.utils import is_vision_available + + +if is_vision_available(): + from PIL import Image + + from transformers import ( + AutoProcessor, + CLIPImageProcessor, + Kosmos2Processor, + PreTrainedTokenizerFast, + XLMRobertaTokenizer, + XLMRobertaTokenizerFast, + ) + + +SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") + + +@require_sentencepiece +@require_tokenizers +@require_vision +class Kosmos2ProcessorTest(unittest.TestCase): + def setUp(self): + self.tmpdirname = tempfile.mkdtemp() + + image_processor = CLIPImageProcessor(use_square_size=True) + + # We have a SentencePiece fixture for testing + slow_tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB) + fast_tokenizer = XLMRobertaTokenizerFast(__slow_tokenizer=slow_tokenizer) + + processor = Kosmos2Processor(image_processor, fast_tokenizer) + processor.save_pretrained(self.tmpdirname) + + def get_tokenizer(self, **kwargs): + return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer + + def get_image_processor(self, **kwargs): + return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor + + def tearDown(self): + shutil.rmtree(self.tmpdirname) + + def prepare_image_inputs(self): + """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, + or a list of PyTorch tensors if one specifies torchify=True. + """ + + image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] + + image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] + + return image_inputs + + def test_save_load_pretrained_additional_features(self): + processor = Kosmos2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) + processor.save_pretrained(self.tmpdirname) + + tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") + image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) + + processor = Kosmos2Processor.from_pretrained( + self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 + ) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) + self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) + + self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) + self.assertIsInstance(processor.image_processor, CLIPImageProcessor) + + def test_image_processor(self): + image_processor = self.get_image_processor() + tokenizer = self.get_tokenizer() + + processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) + + image_input = self.prepare_image_inputs() + + input_image_processor = image_processor(image_input, return_tensors="np") + input_processor = processor(images=image_input, return_tensors="np") + + for key in input_image_processor.keys(): + self.assertAlmostEqual(input_image_processor[key].sum(), input_processor[key].sum(), delta=1e-2) + + def test_tokenizer(self): + image_processor = self.get_image_processor() + tokenizer = self.get_tokenizer() + + processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) + + input_str = "This is a test" + + encoded_processor = processor(text=input_str, 
add_eos_token=True) + + encoded_tok = tokenizer(input_str, return_token_type_ids=False) + + for key in encoded_tok.keys(): + self.assertListEqual(encoded_tok[key], encoded_processor[key]) + + def test_processor(self): + image_processor = self.get_image_processor() + tokenizer = self.get_tokenizer() + + processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) + + input_str = "This is a test" + image_input = self.prepare_image_inputs() + + inputs = processor(text=input_str, images=image_input) + + self.assertListEqual( + list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] + ) + + # test if it raises when no input is passed + with pytest.raises(ValueError): + processor() + + def test_tokenizer_decode(self): + image_processor = self.get_image_processor() + tokenizer = self.get_tokenizer() + + processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) + + predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] + + decoded_processor = processor.batch_decode(predicted_ids) + decoded_tok = tokenizer.batch_decode(predicted_ids) + + self.assertListEqual(decoded_tok, decoded_processor) + + def test_model_input_names(self): + image_processor = self.get_image_processor() + tokenizer = self.get_tokenizer() + + processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) + + input_str = "This is a test" + image_input = self.prepare_image_inputs() + + # both image and text + inputs = processor(text=input_str, images=image_input) + self.assertListEqual( + list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] + ) + + # only text + inputs = processor(text=input_str) + self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) + + # only image + inputs = processor(images=image_input) + self.assertListEqual(list(inputs.keys()), ["pixel_values"]) + + @require_torch + def test_full_processor(self): + url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg" + + processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224") + + # test with different input formats. 
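+        # Each entry of `bboxes` (defined below) pairs a phrase in the corresponding text with either no box
+        # (`None` or `[]`), a single `(ul_patch_idx, lr_patch_idx)` pair, or a list of such pairs; the processor
+        # converts these patch indices into the extra patch-index token ids visible in `expected_input_ids`.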
+ # fmt: off + texts = [ + # no phrase + " Two puppies sit in a field of grass.", + # 1 phrase + " Two puppies sit in a field of grass.", + # 2 phrases + " Two puppies sit in a field of grass .", + # 2 phrases: bboxes already specified for the 1st phrase + " Two puppies sit in a field of grass .", + ] + # fmt: on + + image = Image.open(requests.get(url, stream=True).raw) + # To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed + image_path = os.path.join(self.tmpdirname, "image.jpg") + image.save(image_path) + image = Image.open(image_path) + + # fmt: off + bboxes = [ + [None, []], + [[None], [[]], [(79, 1016)], [[(79, 1016)]], [[(79, 1016), (135, 1008)]]], + [[[(79, 1016), (135, 1008)], None], [[(79, 1016), (135, 1008)], []], [[(79, 1016), (135, 1008)], (480, 1023)], [[(79, 1016), (135, 1008)], [(480, 1023)]]], + [[None, [(480, 1023)]]], + ] + # fmt: on + + batch_image = [image] * 4 + batch_text = [texts[0], texts[1], texts[1], texts[2]] + batch_bboxes = [ + None, # no phrase + [[]], # 1 phrase: no bbox + [(79, 1016)], # 1 phrase: 1 bbox + [[(79, 1016), (135, 1008)], (480, 1023)], # 2 phrase: 2 bboxes + 1 bbox + ] + + # fmt: off + expected_input_ids = [ + [0, 64012, 1264, 17772, 1357, 12, 10, 770, 9, 4464, 4, 2], + [0, 64012, 64007, 1264, 17772, 64008, 1357, 12, 10, 770, 9, 4464, 4, 2], + [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], + [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], + [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 106, 4, 2], + [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 64009, 64493, 65036, 64010, 106, 4, 2], + ] + # fmt: on + + EXPECTED_PIXEL_VALUES_1 = np.array( + [ + [ + [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], + [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], + [-0.6243883967399597, -0.6243883967399597, -0.5951915383338928], + ], + [ + [-0.20629698038101196, -0.19128920137882233, -0.19128920137882233], + [-0.20629698038101196, -0.19128920137882233, -0.17628143727779388], + [-0.2213047444820404, -0.20629698038101196, -0.16127367317676544], + ], + [ + [-0.5843556523323059, -0.5701355338096619, -0.5701355338096619], + [-0.5843556523323059, -0.5701355338096619, -0.5559154152870178], + [-0.5843556523323059, -0.5559154152870178, -0.5416953563690186], + ], + ] + ) + EXPECTED_PIXEL_VALUES_2 = np.array( + [ + [ + [-0.4346088469028473, -0.47840413451194763, -0.7849710583686829], + [-0.5221993923187256, -0.5076009631156921, -0.755774199962616], + [-0.5221993923187256, -0.5076009631156921, -0.7411757707595825], + ], + [ + [-0.2813358008861542, -0.2963435649871826, -0.431413471698761], + [-0.26632803678512573, -0.2963435649871826, -0.4764367938041687], + [-0.2213047444820404, -0.2813358008861542, -0.49144455790519714], + ], + [ + [-0.5701355338096619, -0.641235888004303, -0.7549964189529419], + [-0.5843556523323059, -0.641235888004303, -0.7834365367889404], + [-0.5559154152870178, -0.641235888004303, -0.7834365367889404], + ], + ] + ) + + def check(texts, bboxes, expected_input_ids): + outputs = processor(images=None, text=texts, bboxes=bboxes, add_eos_token=True) + self.assertListEqual(outputs.input_ids, expected_input_ids) + + # no phrase + check(texts[0], bboxes[0][0], expected_input_ids[0]) + + # no 
phrase + check(texts[0], bboxes[0][1], expected_input_ids[0]) + + # 1 phrase: no bbox + check(texts[1], bboxes[1][0], expected_input_ids[1]) + + # 1 phrase: no bbox + check(texts[1], bboxes[1][1], expected_input_ids[1]) + + # 1 phrase: 1 bbox + check(texts[1], bboxes[1][2], expected_input_ids[2]) + + # 1 phrase: 1 bbox + check(texts[1], bboxes[1][3], expected_input_ids[2]) + + # 1 phrase: 2 bboxes + check(texts[1], bboxes[1][4], expected_input_ids[3]) + + # could not contain `[None]` + with pytest.raises(ValueError): + _ = processor.preprocess_examples(images=None, texts=texts[1], bboxes=[[None]]) + + # 2 phrase: 2 bboxes + no bbox + check(texts[2], bboxes[2][0], expected_input_ids[4]) + + # 2 phrase: 2 bboxes + no bbox + check(texts[2], bboxes[2][1], expected_input_ids[4]) + + # 2 phrase: 2 bboxes + 1 bbox + check(texts[2], bboxes[2][2], expected_input_ids[5]) + + # 2 phrase: 2 bboxes + 1 bbox + check(texts[2], bboxes[2][3], expected_input_ids[5]) + + # 2 phrase: no box (as already specified in the text) + 1 bbox + check(texts[3], bboxes[3][0], expected_input_ids[5]) + + # could not contain `[None]` + with pytest.raises(ValueError): + _ = processor.preprocess_examples(images=None, texts=texts[2], bboxes=[[(79, 1016), (135, 1008)], [None]]) + + # test batch + outputs = processor( + images=None, + text=batch_text, + bboxes=batch_bboxes, + add_eos_token=True, + ) + self.assertListEqual( + outputs.input_ids, + [expected_input_ids[0], expected_input_ids[1], expected_input_ids[2], expected_input_ids[5]], + ) + + # test batch with padding (without `return_tensors`) + outputs = processor( + images=None, + text=batch_text, + bboxes=batch_bboxes, + padding=True, + add_eos_token=True, + ) + # padding on the right + self.assertListEqual( + outputs.input_ids[0], + expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), + ) + self.assertListEqual( + outputs.attention_mask[0], + [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), + ) + # no padding for the longest sequence + self.assertListEqual(outputs.input_ids[-1], expected_input_ids[5]) + self.assertListEqual(outputs.attention_mask[-1], [1] * len(expected_input_ids[5])) + + # test batch with padding (with `return_tensors`) + outputs = processor( + images=None, + text=batch_text, + bboxes=batch_bboxes, + return_tensors="pt", + padding=True, + add_eos_token=True, + ) + # padding on the right + self.assertListEqual( + outputs.input_ids.numpy().tolist()[0], + expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), + ) + self.assertListEqual( + outputs.attention_mask.numpy().tolist()[0], + [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), + ) + # no padding for the longest sequence + self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], expected_input_ids[5]) + self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], [1] * len(expected_input_ids[5])) + + # test with image + num_image_tokens = 64 + + outputs = processor(images=image, text=texts[0], bboxes=None, add_eos_token=True) + self.assertTupleEqual(outputs.pixel_values[0].shape, (3, 224, 224)) + self.assertListEqual( + outputs.input_ids, + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], + ) + self.assertListEqual( + outputs.image_embeds_position_mask, + [0] * 2 + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[0]) - 1), + ) + 
np.testing.assert_allclose(outputs.pixel_values[0][:3, :3, :3], EXPECTED_PIXEL_VALUES_1, atol=1e-9) + np.testing.assert_allclose(outputs.pixel_values[0][:3, -3:, -3:], EXPECTED_PIXEL_VALUES_2, atol=1e-9) + + # test with image in batch (right padding) + outputs = processor( + images=batch_image, + text=batch_text, + bboxes=batch_bboxes, + return_tensors="pt", + padding=True, + add_eos_token=True, + ) + self.assertTupleEqual(outputs.pixel_values.shape, (4, 3, 224, 224)) + np.testing.assert_allclose( + outputs.pixel_values[:, :3, :3, :3].numpy(), [EXPECTED_PIXEL_VALUES_1] * len(batch_image), atol=1e-9 + ) + np.testing.assert_allclose( + outputs.pixel_values[:, :3, -3:, -3:].numpy(), [EXPECTED_PIXEL_VALUES_2] * len(batch_image), atol=1e-9 + ) + # padding on the right: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa + # fmt: off + EXPECTED_IDS_BATCH_RIGHT_PADDING = [ + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], + ] + EXPECTED_MASK_BATCH_RIGHT_PADDING = [ + [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), + [1] * (2 + num_image_tokens + len(expected_input_ids[5])), + ] + # fmt: on + self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH_RIGHT_PADDING[0]) + self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH_RIGHT_PADDING[0]) + self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH_RIGHT_PADDING[-1]) + self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH_RIGHT_PADDING[-1]) + self.assertListEqual( + outputs.image_embeds_position_mask.numpy().tolist(), + [[0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1)] * len(batch_image), + ) + + processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") + + # test with image in batch (left padding) + outputs = processor( + images=batch_image, + text=batch_text, + bboxes=batch_bboxes, + return_tensors="pt", + padding=True, + add_eos_token=True, + ) + # padding on the left: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa + # fmt: off + EXPECTED_IDS_BATCH = [ + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], + ] + EXPECTED_MASK_BATCH =[ + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]), + [1] * (2 + num_image_tokens + len(expected_input_ids[5])), + ] + EXPECTED_IMG_POS_MASK_BATCH = [ + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 0] + [1] * num_image_tokens + [0] + [0] * len(expected_input_ids[0][1:]), + [0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1), + ] + # fmt: on + + self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH[0]) + self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH[0]) + 
self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[0], EXPECTED_IMG_POS_MASK_BATCH[0]) + + # no padding for the longest sequence + self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH[-1]) + self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH[-1]) + self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[-1], EXPECTED_IMG_POS_MASK_BATCH[-1]) diff --git a/utils/check_repo.py b/utils/check_repo.py index 329e9ec076e9..181905dab9f7 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -73,6 +73,9 @@ "MaskFormerSwinPreTrainedModel", "BridgeTowerTextModel", "BridgeTowerVisionModel", + "Kosmos2TextModel", + "Kosmos2TextForCausalLM", + "Kosmos2VisionModel", ] # Update this list for models that are not tested with a comment explaining the reason it should not be. diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index 9ee13cc0836c..fe25c2e59a23 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -618,6 +618,7 @@ src/transformers/models/instructblip/processing_instructblip.py src/transformers/models/jukebox/configuration_jukebox.py src/transformers/models/jukebox/convert_jukebox.py src/transformers/models/jukebox/modeling_jukebox.py +src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/led/configuration_led.py src/transformers/models/led/modeling_led.py src/transformers/models/led/modeling_tf_led.py diff --git a/utils/slow_documentation_tests.txt b/utils/slow_documentation_tests.txt index 83e400ba7e95..98b3156e1d23 100644 --- a/utils/slow_documentation_tests.txt +++ b/utils/slow_documentation_tests.txt @@ -1,7 +1,9 @@ docs/source/en/generation_strategies.md docs/source/en/model_doc/ctrl.md +docs/source/en/model_doc/kosmos-2.md docs/source/en/model_doc/seamless_m4t.md docs/source/en/task_summary.md docs/source/en/tasks/prompting.md src/transformers/models/blip_2/modeling_blip_2.py src/transformers/models/ctrl/modeling_ctrl.py +src/transformers/models/kosmos2/modeling_kosmos2.py From 576994963f2940587a7753d4af991fec89ad0cb9 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 30 Oct 2023 15:27:15 +0100 Subject: [PATCH 008/268] Fix some tests using `"common_voice"` (#27147) * Use mozilla-foundation/common_voice_11_0 * Update expected values * Update expected values * For test_word_time_stamp_integration --------- Co-authored-by: ydshieh --- .../models/wav2vec2/test_modeling_wav2vec2.py | 26 +++++++++---------- .../test_processor_wav2vec2_with_lm.py | 8 +++--- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/tests/models/wav2vec2/test_modeling_wav2vec2.py b/tests/models/wav2vec2/test_modeling_wav2vec2.py index 16fb9ddab7b1..096246fe62b1 100644 --- a/tests/models/wav2vec2/test_modeling_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_wav2vec2.py @@ -97,7 +97,7 @@ def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): try: _ = in_queue.get(timeout=timeout) - ds = load_dataset("common_voice", "es", split="test", streaming=True) + ds = load_dataset("mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( @@ -119,7 +119,7 @@ def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): transcription = processor.batch_decode(logits.cpu().numpy(), pool).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) - 
unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") + unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") # force batch_decode to internally create a spawn pool, which should trigger a warning if different than fork multiprocessing.set_start_method("spawn", force=True) @@ -127,7 +127,7 @@ def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): transcription = processor.batch_decode(logits.cpu().numpy()).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) - unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") + unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") except Exception: error = f"{traceback.format_exc()}" @@ -1833,7 +1833,7 @@ def test_phoneme_recognition(self): @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm(self): - ds = load_dataset("common_voice", "es", split="test", streaming=True) + ds = load_dataset("mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( @@ -1852,12 +1852,12 @@ def test_wav2vec2_with_lm(self): transcription = processor.batch_decode(logits.cpu().numpy()).text - self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") + self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_pool(self): - ds = load_dataset("common_voice", "es", split="test", streaming=True) + ds = load_dataset("mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( @@ -1878,7 +1878,7 @@ def test_wav2vec2_with_lm_pool(self): with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text - self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") + self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") # user-managed pool + num_processes should trigger a warning with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( @@ -1889,7 +1889,7 @@ def test_wav2vec2_with_lm_pool(self): self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) - self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") + self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") @require_pyctcdecode @require_torchaudio @@ -1957,7 +1957,7 @@ def test_inference_mms_1b_all(self): LANG_MAP = {"it": "ita", "es": "spa", "fr": "fra", "en": "eng"} def run_model(lang): - ds = load_dataset("common_voice", lang, split="test", streaming=True) + ds = load_dataset("mozilla-foundation/common_voice_11_0", lang, split="test", streaming=True) sample = next(iter(ds)) wav2vec2_lang = LANG_MAP[lang] @@ -1982,10 +1982,10 @@ def run_model(lang): return transcription TRANSCRIPTIONS = { - "it": "mi hanno fatto un'offerta che non potevo proprio rifiutare", - "es": "bien y qué regalo vas a abrir primero", - "fr": "un vrai travail intéressant va enfin être mené sur ce sujet", - "en": "twas the time of day and olof spen slept during the summer", + "it": "il libro ha suscitato molte polemiche a causa dei suoi contenuti", + "es": "habitan aguas poco profundas y rocosas", + "fr": "ce dernier est volé tout au long de l'histoire romaine", 
+ "en": "joe keton disapproved of films and buster also had reservations about the media", } for lang in LANG_MAP.keys(): diff --git a/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py b/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py index bd1582ceb134..2c52a921653c 100644 --- a/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py +++ b/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py @@ -434,7 +434,7 @@ def test_offsets_integration_fast_batch(self): def test_word_time_stamp_integration(self): import torch - ds = load_dataset("common_voice", "en", split="train", streaming=True) + ds = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="train", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) ds_iter = iter(ds) sample = next(ds_iter) @@ -442,7 +442,6 @@ def test_word_time_stamp_integration(self): processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") - # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values with torch.no_grad(): @@ -461,6 +460,7 @@ def test_word_time_stamp_integration(self): ] EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" + EXPECTED_TEXT = "THE TRACK APPEARS ON THE COMPILATION ALBUM CRAFT FORKS" # output words self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT) @@ -471,8 +471,8 @@ def test_word_time_stamp_integration(self): end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time")) # fmt: off - expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599]) - expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94]) + expected_start_tensor = torch.tensor([0.6800, 0.8800, 1.1800, 1.8600, 1.9600, 2.1000, 3.0000, 3.5600, 3.9800]) + expected_end_tensor = torch.tensor([0.7800, 1.1000, 1.6600, 1.9200, 2.0400, 2.8000, 3.3000, 3.8800, 4.2800]) # fmt: on self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01)) From 6b466771b09437b05c69e8d5ac5a2ca5a97e175d Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Mon, 30 Oct 2023 15:43:08 +0100 Subject: [PATCH 009/268] [`tests` / `Quantization`] Fix bnb test (#27145) * fix bnb test * link to GH issue --- tests/quantization/bnb/test_mixed_int8.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/quantization/bnb/test_mixed_int8.py b/tests/quantization/bnb/test_mixed_int8.py index bbd1879fb152..4666fe3576c2 100644 --- a/tests/quantization/bnb/test_mixed_int8.py +++ b/tests/quantization/bnb/test_mixed_int8.py @@ -124,13 +124,13 @@ def tearDown(self): gc.collect() torch.cuda.empty_cache() - def test_get_keys_to_not_convert(self): + @unittest.skip("Un-skip once https://github.com/mosaicml/llm-foundry/issues/703 is resolved") + def test_get_keys_to_not_convert_trust_remote_code(self): r""" - Test the `get_keys_to_not_convert` function. + Test the `get_keys_to_not_convert` function with `trust_remote_code` models. 
""" from accelerate import init_empty_weights - from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" @@ -142,7 +142,17 @@ def test_get_keys_to_not_convert(self): config, trust_remote_code=True, code_revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7" ) self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"]) - # without trust_remote_code + + def test_get_keys_to_not_convert(self): + r""" + Test the `get_keys_to_not_convert` function. + """ + from accelerate import init_empty_weights + + from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM + from transformers.integrations.bitsandbytes import get_keys_to_not_convert + + model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7") with init_empty_weights(): model = MptForCausalLM(config) From cd19b193785fd9d224b8d2b509c63387bb48bc14 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Mon, 30 Oct 2023 22:56:41 +0800 Subject: [PATCH 010/268] make tests of pytorch_example device agnostic (#27081) --- examples/pytorch/test_accelerate_examples.py | 24 ++++++------ examples/pytorch/test_pytorch_examples.py | 41 ++++++++++---------- 2 files changed, 32 insertions(+), 33 deletions(-) diff --git a/examples/pytorch/test_accelerate_examples.py b/examples/pytorch/test_accelerate_examples.py index 4cfe45b02294..d5e20d820e82 100644 --- a/examples/pytorch/test_accelerate_examples.py +++ b/examples/pytorch/test_accelerate_examples.py @@ -24,11 +24,16 @@ import unittest from unittest import mock -import torch from accelerate.utils import write_basic_config -from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device -from transformers.utils import is_apex_available +from transformers.testing_utils import ( + TestCasePlus, + backend_device_count, + is_torch_fp16_available_on_device, + run_command, + slow, + torch_device, +) logging.basicConfig(level=logging.DEBUG) @@ -54,11 +59,6 @@ def get_results(output_dir): return results -def is_cuda_and_apex_available(): - is_using_cuda = torch.cuda.is_available() and torch_device == "cuda" - return is_using_cuda and is_apex_available() - - stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @@ -93,7 +93,7 @@ def test_run_glue_no_trainer(self): --with_tracking """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") run_command(self._launch_args + testargs) @@ -119,7 +119,7 @@ def test_run_clm_no_trainer(self): --with_tracking """.split() - if torch.cuda.device_count() > 1: + if backend_device_count(torch_device) > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return @@ -152,7 +152,7 @@ def test_run_mlm_no_trainer(self): @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) def test_run_ner_no_trainer(self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu - epochs = 7 if get_gpu_count() > 1 else 2 + epochs = 7 if backend_device_count(torch_device) > 1 else 2 tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" @@ -326,7 +326,7 @@ def test_run_image_classification_no_trainer(self): --checkpointing_steps 1 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") run_command(self._launch_args + testargs) diff --git a/examples/pytorch/test_pytorch_examples.py b/examples/pytorch/test_pytorch_examples.py index 269d7844f79f..7d27804a7330 100644 --- a/examples/pytorch/test_pytorch_examples.py +++ b/examples/pytorch/test_pytorch_examples.py @@ -20,11 +20,15 @@ import sys from unittest.mock import patch -import torch - from transformers import ViTMAEForPreTraining, Wav2Vec2ForPreTraining -from transformers.testing_utils import CaptureLogger, TestCasePlus, get_gpu_count, slow, torch_device -from transformers.utils import is_apex_available +from transformers.testing_utils import ( + CaptureLogger, + TestCasePlus, + backend_device_count, + is_torch_fp16_available_on_device, + slow, + torch_device, +) SRC_DIRS = [ @@ -86,11 +90,6 @@ def get_results(output_dir): return results -def is_cuda_and_apex_available(): - is_using_cuda = torch.cuda.is_available() and torch_device == "cuda" - return is_using_cuda and is_apex_available() - - stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @@ -116,7 +115,7 @@ def test_run_glue(self): --max_seq_length=128 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -141,7 +140,7 @@ def test_run_clm(self): --overwrite_output_dir """.split() - if torch.cuda.device_count() > 1: + if backend_device_count(torch_device) > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return @@ -203,7 +202,7 @@ def test_run_mlm(self): def test_run_ner(self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu - epochs = 7 if get_gpu_count() > 1 else 2 + epochs = 7 if backend_device_count(torch_device) > 1 else 2 tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" @@ -312,7 +311,7 @@ def test_run_swag(self): def test_generation(self): testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"] - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") model_type, model_name = ( @@ -401,7 +400,7 @@ def test_run_image_classification(self): --seed 42 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -431,7 +430,7 @@ def test_run_speech_recognition_ctc(self): --seed 42 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -462,7 +461,7 @@ def test_run_speech_recognition_ctc_adapter(self): --seed 42 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -493,7 +492,7 @@ def test_run_speech_recognition_seq2seq(self): --seed 42 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -525,7 +524,7 @@ def test_run_audio_classification(self): --seed 42 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -551,7 +550,7 @@ def test_run_wav2vec2_pretraining(self): --seed 42 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -579,7 +578,7 @@ def test_run_vit_mae_pretraining(self): --seed 42 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): @@ -604,7 +603,7 @@ def test_run_semantic_segmentation(self): --seed 32 """.split() - if is_cuda_and_apex_available(): + if is_torch_fp16_available_on_device(torch_device): testargs.append("--fp16") with patch.object(sys, "argv", testargs): From 3224c0c13fc9ef351c7a1a6e9f1401b3ce13859a Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 30 Oct 2023 16:07:27 +0100 Subject: [PATCH 011/268] Remove some Kosmos-2 `copied from` (#27149) * fix * fix * fix * fix --------- Co-authored-by: ydshieh --- src/transformers/models/kosmos2/modeling_kosmos2.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 600fda750e80..781813db4570 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -52,7 +52,6 @@ ] -# Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
@@ -67,7 +66,6 @@ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) -# Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): @@ -660,7 +658,7 @@ def forward( if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: - layer_outputs = self.gradient_checkpointing_func( + layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, @@ -1114,7 +1112,6 @@ def __init__(self, config: Kosmos2TextConfig): self.gradient_checkpointing = False - # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] @@ -1268,7 +1265,7 @@ def forward( past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: - layer_outputs = self.gradient_checkpointing_func( + layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, @@ -1428,11 +1425,6 @@ def _init_weights(self, module): if module.embed_tokens.padding_idx is not None: module.embed_tokens.weight.data[module.embed_tokens.padding_idx].zero_() - def _set_gradient_checkpointing(self, module, gradient_checkpointing_func=None): - if isinstance(module, (Kosmos2TextTransformer, Kosmos2VisionEncoder)): - module.gradient_checkpointing_func = gradient_checkpointing_func - module.gradient_checkpointing = gradient_checkpointing_func is not None - class Kosmos2VisionModel(Kosmos2PreTrainedModel): config_class = Kosmos2VisionConfig From 9093b19b13f642ed63e0fa49f4091fc0283a84e3 Mon Sep 17 00:00:00 2001 From: Yeyang <76979429+yyLeaves@users.noreply.github.com> Date: Mon, 30 Oct 2023 23:50:29 +0800 Subject: [PATCH 012/268] =?UTF-8?q?=F0=9F=8C=90=20[i18n-ZH]=20Translate=20?= =?UTF-8?q?serialization.md=20into=20Chinese=20(#27076)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs(zh): translate serialization.md * docs(zh): add space around links --- docs/source/zh/_toctree.yml | 2 + docs/source/zh/serialization.md | 181 ++++++++++++++++++++++++++++++++ 2 files changed, 183 insertions(+) create mode 100644 docs/source/zh/serialization.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index cb2f08d0f312..4dc8920a1900 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -25,4 +25,6 @@ title: 使用特定于模型的 API - local: custom_models title: 共享自定义模型 + - local: serialization + title: 导出为 ONNX title: 开发者指南 diff --git a/docs/source/zh/serialization.md b/docs/source/zh/serialization.md new file mode 100644 index 000000000000..584befebe2d7 --- /dev/null +++ b/docs/source/zh/serialization.md @@ -0,0 +1,181 @@ + + +# 导出为 ONNX + +在生产环境中部署 🤗 Transformers 模型通常需要或者能够受益于,将模型导出为可在专门的运行时和硬件上加载和执行的序列化格式。 + +🤗 Optimum 是 Transformers 的扩展,可以通过其 `exporters` 模块将模型从 PyTorch 或 TensorFlow 导出为 ONNX 及 TFLite 等序列化格式。🤗 Optimum 还提供了一套性能优化工具,可以在目标硬件上以最高效率训练和运行模型。 + +本指南演示了如何使用 🤗 Optimum 将 🤗 Transformers 模型导出为 ONNX。有关将模型导出为 TFLite 的指南,请参考 [导出为 TFLite 页面](tflite)。 + +## 导出为 ONNX + +[ONNX (Open Neural Network eXchange 
开放神经网络交换)](http://onnx.ai) 是一个开放的标准,它定义了一组通用的运算符和一种通用的文件格式,用于表示包括 PyTorch 和 TensorFlow 在内的各种框架中的深度学习模型。当一个模型被导出为 ONNX时,这些运算符被用于构建计算图(通常被称为*中间表示*),该图表示数据在神经网络中的流动。 + +通过公开具有标准化运算符和数据类型的图,ONNX使得模型能够轻松在不同深度学习框架间切换。例如,在 PyTorch 中训练的模型可以被导出为 ONNX,然后再导入到 TensorFlow(反之亦然)。 + +导出为 ONNX 后,模型可以: +- 通过 [图优化(graph optimization)](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/optimization) 和 [量化(quantization)](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/quantization) 等技术进行推理优化。 +- 通过 [`ORTModelForXXX` 类](https://huggingface.co/docs/optimum/onnxruntime/package_reference/modeling_ort) 使用 ONNX Runtime 运行,它同样遵循你熟悉的 Transformers 中的 `AutoModel` API。 +- 使用 [优化推理流水线(pipeline)](https://huggingface.co/docs/optimum/main/en/onnxruntime/usage_guides/pipelines) 运行,其 API 与 🤗 Transformers 中的 [`pipeline`] 函数相同。 + +🤗 Optimum 通过利用配置对象提供对 ONNX 导出的支持。多种模型架构已经有现成的配置对象,并且配置对象也被设计得易于扩展以适用于其他架构。 + +现有的配置列表请参考 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/exporters/onnx/overview)。 + +有两种方式可以将 🤗 Transformers 模型导出为 ONNX,这里我们展示这两种方法: + +- 使用 🤗 Optimum 的 CLI(命令行)导出。 +- 使用 🤗 Optimum 的 `optimum.onnxruntime` 模块导出。 + +### 使用 CLI 将 🤗 Transformers 模型导出为 ONNX + +要将 🤗 Transformers 模型导出为 ONNX,首先需要安装额外的依赖项: + +```bash +pip install optimum[exporters] +``` + +请参阅 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) 以查看所有可用参数,或者在命令行中查看帮助: + +```bash +optimum-cli export onnx --help +``` + +运行以下命令,以从 🤗 Hub 导出模型的检查点(checkpoint),以 `distilbert-base-uncased-distilled-squad` 为例: + +```bash +optimum-cli export onnx --model distilbert-base-uncased-distilled-squad distilbert_base_uncased_squad_onnx/ +``` + +你应该能在日志中看到导出进度以及生成的 `model.onnx` 文件的保存位置,如下所示: + +```bash +Validating ONNX model distilbert_base_uncased_squad_onnx/model.onnx... 
+ -[✓] ONNX model output names match reference model (start_logits, end_logits) + - Validating ONNX Model output "start_logits": + -[✓] (2, 16) matches (2, 16) + -[✓] all values close (atol: 0.0001) + - Validating ONNX Model output "end_logits": + -[✓] (2, 16) matches (2, 16) + -[✓] all values close (atol: 0.0001) +The ONNX export succeeded and the exported model was saved at: distilbert_base_uncased_squad_onnx +``` + +上面的示例说明了从 🤗 Hub 导出检查点的过程。导出本地模型时,首先需要确保将模型的权重和分词器文件保存在同一目录(`local_path`)中。在使用 CLI 时,将 `local_path` 传递给 `model` 参数,而不是 🤗 Hub 上的检查点名称,并提供 `--task` 参数。你可以在 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/exporters/task_manager)中查看支持的任务列表。如果未提供 `task` 参数,将默认导出不带特定任务头的模型架构。 + +```bash +optimum-cli export onnx --model local_path --task question-answering distilbert_base_uncased_squad_onnx/ +``` + +生成的 `model.onnx` 文件可以在支持 ONNX 标准的 [许多加速引擎(accelerators)](https://onnx.ai/supported-tools.html#deployModel) 之一上运行。例如,我们可以使用 [ONNX Runtime](https://onnxruntime.ai/) 加载和运行模型,如下所示: + +```python +>>> from transformers import AutoTokenizer +>>> from optimum.onnxruntime import ORTModelForQuestionAnswering + +>>> tokenizer = AutoTokenizer.from_pretrained("distilbert_base_uncased_squad_onnx") +>>> model = ORTModelForQuestionAnswering.from_pretrained("distilbert_base_uncased_squad_onnx") +>>> inputs = tokenizer("What am I using?", "Using DistilBERT with ONNX Runtime!", return_tensors="pt") +>>> outputs = model(**inputs) +``` + +从 Hub 导出 TensorFlow 检查点的过程也一样。例如,以下是从 [Keras 组织](https://huggingface.co/keras-io) 导出纯 TensorFlow 检查点的命令: + +```bash +optimum-cli export onnx --model keras-io/transformers-qa distilbert_base_cased_squad_onnx/ +``` + +### 使用 `optimum.onnxruntime` 将 🤗 Transformers 模型导出为 ONNX + +除了 CLI 之外,你还可以使用代码将 🤗 Transformers 模型导出为 ONNX,如下所示: + +```python +>>> from optimum.onnxruntime import ORTModelForSequenceClassification +>>> from transformers import AutoTokenizer + +>>> model_checkpoint = "distilbert_base_uncased_squad" +>>> save_directory = "onnx/" + +>>> # 从 transformers 加载模型并将其导出为 ONNX +>>> ort_model = ORTModelForSequenceClassification.from_pretrained(model_checkpoint, export=True) +>>> tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) + +>>> # 保存 onnx 模型以及分词器 +>>> ort_model.save_pretrained(save_directory) +>>> tokenizer.save_pretrained(save_directory) +``` + +### 导出尚未支持的架构的模型 + +如果你想要为当前无法导出的模型添加支持,请先检查 [`optimum.exporters.onnx`](https://huggingface.co/docs/optimum/exporters/onnx/overview) 是否支持该模型,如果不支持,你可以 [直接为 🤗 Optimum 贡献代码](https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/contribute)。 + +### 使用 `transformers.onnx` 导出模型 + + + +`tranformers.onnx` 不再进行维护,请如上所述,使用 🤗 Optimum 导出模型。这部分内容将在未来版本中删除。 + + + +要使用 `tranformers.onnx` 将 🤗 Transformers 模型导出为 ONNX,请安装额外的依赖项: + +```bash +pip install transformers[onnx] +``` + +将 `transformers.onnx` 包作为 Python 模块使用,以使用现成的配置导出检查点: + +```bash +python -m transformers.onnx --model=distilbert-base-uncased onnx/ +``` + +以上代码将导出由 `--model` 参数定义的检查点的 ONNX 图。传入任何 🤗 Hub 上或者存储与本地的检查点。生成的 `model.onnx` 文件可以在支持 ONNX 标准的众多加速引擎上运行。例如,使用 ONNX Runtime 加载并运行模型,如下所示: + +```python +>>> from transformers import AutoTokenizer +>>> from onnxruntime import InferenceSession + +>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") +>>> session = InferenceSession("onnx/model.onnx") +>>> # ONNX Runtime expects NumPy arrays as input +>>> inputs = tokenizer("Using DistilBERT with ONNX Runtime!", return_tensors="np") +>>> outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs)) +``` + +可以通过查看每个模型的 ONNX 
配置来获取所需的输出名(例如 `["last_hidden_state"]`)。例如,对于 DistilBERT,可以用以下代码获取输出名称: + +```python +>>> from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig + +>>> config = DistilBertConfig() +>>> onnx_config = DistilBertOnnxConfig(config) +>>> print(list(onnx_config.outputs.keys())) +["last_hidden_state"] +``` + +从 Hub 导出 TensorFlow 检查点的过程也一样。导出纯 TensorFlow 检查点的示例代码如下: + +```bash +python -m transformers.onnx --model=keras-io/transformers-qa onnx/ +``` + +要导出本地存储的模型,请将模型的权重和分词器文件保存在同一目录中(例如 `local-pt-checkpoint`),然后通过将 `transformers.onnx` 包的 `--model` 参数指向该目录,将其导出为 ONNX: + +```bash +python -m transformers.onnx --model=local-pt-checkpoint onnx/ +``` \ No newline at end of file From 84724efd101af52ed3d6af878e41ff8fd651a9cc Mon Sep 17 00:00:00 2001 From: Rockerz <64583161+rajveer43@users.noreply.github.com> Date: Mon, 30 Oct 2023 22:09:14 +0530 Subject: [PATCH 013/268] =?UTF-8?q?Translating=20`en/main=5Fclasses`=20fol?= =?UTF-8?q?der=20docs=20to=20Japanese=20=F0=9F=87=AF=F0=9F=87=B5=20(#26894?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add * add * add * Add deepspeed.md * Add * add * Update docs/source/ja/main_classes/callback.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/ja/main_classes/output.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/ja/main_classes/pipelines.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/ja/main_classes/processors.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/ja/main_classes/processors.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/ja/main_classes/text_generation.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/ja/main_classes/processors.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update logging.md * Update toctree.yml * Update docs/source/ja/main_classes/deepspeed.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Add suggesitons * m * Update docs/source/ja/main_classes/trainer.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update toctree.yml * Update Quantization.md * Update docs/source/ja/_toctree.yml Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update toctree.yml * Update docs/source/en/main_classes/deepspeed.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/main_classes/deepspeed.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/en/main_classes/output.md | 1 + docs/source/en/main_classes/quantization.md | 12 +- docs/source/en/main_classes/trainer.md | 2 + docs/source/ja/_toctree.yml | 155 +- docs/source/ja/main_classes/agent.md | 105 + docs/source/ja/main_classes/callback.md | 131 + docs/source/ja/main_classes/configuration.md | 31 + docs/source/ja/main_classes/data_collator.md | 67 + docs/source/ja/main_classes/deepspeed.md | 2255 +++++++++++++++++ .../ja/main_classes/feature_extractor.md | 41 + .../source/ja/main_classes/image_processor.md | 33 + .../source/ja/main_classes/keras_callbacks.md | 28 + docs/source/ja/main_classes/logging.md | 121 + docs/source/ja/main_classes/model.md | 160 ++ 
docs/source/ja/main_classes/onnx.md | 55 + .../ja/main_classes/optimizer_schedules.md | 77 + docs/source/ja/main_classes/output.md | 321 +++ docs/source/ja/main_classes/pipelines.md | 494 ++++ docs/source/ja/main_classes/processors.md | 160 ++ docs/source/ja/main_classes/quantization.md | 447 ++++ .../source/ja/main_classes/text_generation.md | 63 + docs/source/ja/main_classes/tokenizer.md | 80 + docs/source/ja/main_classes/trainer.md | 728 ++++++ 23 files changed, 5544 insertions(+), 23 deletions(-) create mode 100644 docs/source/ja/main_classes/agent.md create mode 100644 docs/source/ja/main_classes/callback.md create mode 100644 docs/source/ja/main_classes/configuration.md create mode 100644 docs/source/ja/main_classes/data_collator.md create mode 100644 docs/source/ja/main_classes/deepspeed.md create mode 100644 docs/source/ja/main_classes/feature_extractor.md create mode 100644 docs/source/ja/main_classes/image_processor.md create mode 100644 docs/source/ja/main_classes/keras_callbacks.md create mode 100644 docs/source/ja/main_classes/logging.md create mode 100644 docs/source/ja/main_classes/model.md create mode 100644 docs/source/ja/main_classes/onnx.md create mode 100644 docs/source/ja/main_classes/optimizer_schedules.md create mode 100644 docs/source/ja/main_classes/output.md create mode 100644 docs/source/ja/main_classes/pipelines.md create mode 100644 docs/source/ja/main_classes/processors.md create mode 100644 docs/source/ja/main_classes/quantization.md create mode 100644 docs/source/ja/main_classes/text_generation.md create mode 100644 docs/source/ja/main_classes/tokenizer.md create mode 100644 docs/source/ja/main_classes/trainer.md diff --git a/docs/source/en/main_classes/output.md b/docs/source/en/main_classes/output.md index 578b9e6542d1..64101fd82445 100644 --- a/docs/source/en/main_classes/output.md +++ b/docs/source/en/main_classes/output.md @@ -44,6 +44,7 @@ an optional `attentions` attribute. Here we have the `loss` since we passed alon When passing `output_hidden_states=True` you may expect the `outputs.hidden_states[-1]` to match `outputs.last_hidden_states` exactly. However, this is not always the case. Some models apply normalization or subsequent process to the last hidden state when it's returned. + diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md index 36ef2eefa896..5a822f37135f 100644 --- a/docs/source/en/main_classes/quantization.md +++ b/docs/source/en/main_classes/quantization.md @@ -48,6 +48,7 @@ Note that GPTQ integration supports for now only text models and you may encount GPTQ is a quantization method that requires weights calibration before using the quantized models. If you want to quantize transformers model from scratch, it might take some time before producing the quantized model (~5 min on a Google colab for `facebook/opt-350m` model). Hence, there are two different scenarios where you want to use GPTQ-quantized models. The first use case would be to load models that has been already quantized by other users that are available on the Hub, the second use case would be to quantize your model from scratch and save it or push it on the Hub so that other users can also use it. + #### GPTQ Configuration In order to load and quantize a model, you need to create a [`GPTQConfig`]. You need to pass the number of `bits`, a `dataset` in order to calibrate the quantization and the `tokenizer` of the model in order prepare the dataset. 
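Putting that paragraph together, a minimal sketch of building such a configuration could look like the following (this is only an illustration of the `GPTQConfig(bits, dataset, tokenizer)` pattern shown in this section; the `facebook/opt-125m` checkpoint is an assumed example, not one prescribed here):

```python
from transformers import AutoTokenizer, GPTQConfig

# The tokenizer of the model you intend to quantize is needed so the
# calibration dataset can be prepared with the matching vocabulary.
model_id = "facebook/opt-125m"  # illustrative checkpoint, swap in your own model
tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit quantization calibrated on the "c4" dataset, as described above.
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
```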
@@ -59,6 +60,7 @@ gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer) ``` Note that you can pass your own dataset as a list of string. However, it is highly recommended to use the dataset from the GPTQ paper. + ```python dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."] quantization = GPTQConfig(bits=4, dataset = dataset, tokenizer=tokenizer) @@ -71,14 +73,17 @@ You can quantize a model by using `from_pretrained` and setting the `quantizatio ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=gptq_config) + ``` Note that you will need a GPU to quantize a model. We will put the model in the cpu and move the modules back and forth to the gpu in order to quantize them. If you want to maximize your gpus usage while using cpu offload, you can set `device_map = "auto"`. + ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config) ``` + Note that disk offload is not supported. Furthermore, if you are out of memory because of the dataset, you may have to pass `max_memory` in `from_pretained`. Checkout this [guide](https://huggingface.co/docs/accelerate/usage_guides/big_modeling#designing-a-device-map) to learn more about `device_map` and `max_memory`. @@ -95,12 +100,14 @@ tokenizer.push_to_hub("opt-125m-gptq") ``` If you want to save your quantized model on your local machine, you can also do it with `save_pretrained`: + ```python quantized_model.save_pretrained("opt-125m-gptq") tokenizer.save_pretrained("opt-125m-gptq") ``` -Note that if you have quantized your model with a `device_map`, make sure to move the entire model to one of your gpus or the `cpu` before saving it. +Note that if you have quantized your model with a `device_map`, make sure to move the entire model to one of your gpus or the `cpu` before saving it. + ```python quantized_model.to("cpu") quantized_model.save_pretrained("opt-125m-gptq") @@ -117,6 +124,7 @@ model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq") ``` If you want to load a model faster and without allocating more memory than needed, the `device_map` argument also works with quantized model. Make sure that you have `accelerate` library installed. + ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto") @@ -336,6 +344,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("{your_username}/bloom-560m-8bit", device_map="auto") ``` + Note that in this case, you don't need to specify the arguments `load_in_8bit=True`, but you need to make sure that `bitsandbytes` and `accelerate` are installed. Note also that `device_map` is optional but setting `device_map = 'auto'` is prefered for inference as it will dispatch efficiently the model on the available ressources. @@ -356,6 +365,7 @@ quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) ``` Let's say you want to load `bigscience/bloom-1b7` model, and you have just enough GPU RAM to fit the entire model except the `lm_head`. 
Therefore write a custom device_map as follows: + ```python device_map = { "transformer.word_embeddings": 0, diff --git a/docs/source/en/main_classes/trainer.md b/docs/source/en/main_classes/trainer.md index 46341804ce75..462cea55dc46 100644 --- a/docs/source/en/main_classes/trainer.md +++ b/docs/source/en/main_classes/trainer.md @@ -210,6 +210,7 @@ python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ... ``` if you have either [`accelerate`](https://github.com/huggingface/accelerate) or [`deepspeed`](https://github.com/microsoft/DeepSpeed) installed you can also accomplish the same by using one of: + ```bash accelerate launch --num_processes 2 trainer-program.py ... ``` @@ -246,6 +247,7 @@ CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py . Here your physical GPUs 0 and 2 are mapped to `cuda:1` and `cuda:0` correspondingly. The above examples were all for `DistributedDataParallel` use pattern, but the same method works for [`DataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html) as well: + ```bash CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ... ``` diff --git a/docs/source/ja/_toctree.yml b/docs/source/ja/_toctree.yml index 2947a20c6469..69169d6fffc0 100644 --- a/docs/source/ja/_toctree.yml +++ b/docs/source/ja/_toctree.yml @@ -29,9 +29,77 @@ title: LLM を使用した生成 title: Tutorials - sections: - - local: generation_strategies - title: 生成戦略をカスタマイズする - title: Generation + - isExpanded: false + # sections: + # - local: tasks/sequence_classification + # title: Text classification + # - local: tasks/token_classification + # title: Token classification + # - local: tasks/question_answering + # title: Question answering + # - local: tasks/language_modeling + # title: Causal language modeling + # - local: tasks/masked_language_modeling + # title: Masked language modeling + # - local: tasks/translation + # title: Translation + # - local: tasks/summarization + # title: Summarization + # - local: tasks/multiple_choice + # title: Multiple choice + # title: Natural Language Processing + # - isExpanded: false + # sections: + # - local: tasks/audio_classification + # title: Audio classification + # - local: tasks/asr + # title: Automatic speech recognition + # title: Audio + # - isExpanded: false + # sections: + # - local: tasks/image_classification + # title: Image classification + # - local: tasks/semantic_segmentation + # title: Semantic segmentation + # - local: tasks/video_classification + # title: Video classification + # - local: tasks/object_detection + # title: Object detection + # - local: tasks/zero_shot_object_detection + # title: Zero-shot object detection + # - local: tasks/zero_shot_image_classification + # title: Zero-shot image classification + # - local: tasks/monocular_depth_estimation + # title: Depth estimation + # - local: tasks/image_to_image + # title: Image-to-Image + # - local: tasks/knowledge_distillation_for_image_classification + # title: Knowledge Distillation for Computer Vision + # title: Computer Vision + # - isExpanded: false + # sections: + # - local: tasks/image_captioning + # title: Image captioning + # - local: tasks/document_question_answering + # title: Document Question Answering + # - local: tasks/visual_question_answering + # title: Visual Question Answering + # - local: tasks/text-to-speech + # title: Text to speech + # title: Multimodal + - isExpanded: false + sections: + - local: generation_strategies + title: 生成戦略をカスタマイズする + title: Generation + # - isExpanded: false + # 
sections: + # - local: tasks/idefics + # title: Image tasks with IDEFICS + # - local: tasks/prompting + # title: LLM prompting guide + # title: Prompting + title: Task Guides - sections: - local: fast_tokenizers title: 🤗 トークナイザーの高速トークナイザーを使用する @@ -135,23 +203,66 @@ title: モデルトレーニングの解剖学 title: コンセプチュアルガイド - sections: - - local: internal/modeling_utils - title: カスタムレイヤーとユーティリティ - - local: internal/pipelines_utils - title: パイプライン用のユーティリティ - - local: internal/tokenization_utils - title: ト=ークナイザー用のユーティリティ - - local: internal/trainer_utils - title: トレーナー用ユーティリティ - - local: internal/generation_utils - title: 発電用ユーティリティ - - local: internal/image_processing_utils - title: 画像プロセッサ用ユーティリティ - - local: internal/audio_utils - title: オーディオ処理用のユーティリティ - - local: internal/file_utils - title: 一般公共事業 - - local: internal/time_series_utils - title: 時系列用のユーティリティ - title: 内部ヘルパー + - sections: + - local: main_classes/agent + title: エージェントとツール + # - local: model_doc/auto + # title: Auto Classes + - local: main_classes/callback + title: コールバック + - local: main_classes/configuration + title: 構成 + - local: main_classes/data_collator + title: データ照合者 + - local: main_classes/keras_callbacks + title: Keras コールバック + - local: main_classes/logging + title: ロギング + - local: main_classes/model + title: モデル + - local: main_classes/text_generation + title: テキストの生成 + - local: main_classes/onnx + title: ONNX + - local: main_classes/optimizer_schedules + title: 最適化 + - local: main_classes/output + title: モデルの出力 + - local: main_classes/pipelines + title: パイプライン + - local: main_classes/processors + title: プロセッサー + - local: main_classes/quantization + title: 量子化 + - local: main_classes/tokenizer + title: トークナイザー + - local: main_classes/trainer + title: トレーナー + - local: main_classes/deepspeed + title: ディープスピードの統合 + - local: main_classes/feature_extractor + title: 特徴抽出器 + - local: main_classes/image_processor + title: 画像処理プロセッサ + title: 主要なクラス + - sections: + - local: internal/modeling_utils + title: カスタムレイヤーとユーティリティ + - local: internal/pipelines_utils + title: パイプライン用のユーティリティ + - local: internal/tokenization_utils + title: ト=ークナイザー用のユーティリティ + - local: internal/trainer_utils + title: トレーナー用ユーティリティ + - local: internal/generation_utils + title: 発電用ユーティリティ + - local: internal/image_processing_utils + title: 画像プロセッサ用ユーティリティ + - local: internal/audio_utils + title: オーディオ処理用のユーティリティ + - local: internal/file_utils + title: 一般公共事業 + - local: internal/time_series_utils + title: 時系列用のユーティリティ + title: 内部ヘルパー title: API diff --git a/docs/source/ja/main_classes/agent.md b/docs/source/ja/main_classes/agent.md new file mode 100644 index 000000000000..290f3b5b8c72 --- /dev/null +++ b/docs/source/ja/main_classes/agent.md @@ -0,0 +1,105 @@ + + +# エージェントとツール + + + +Transformers Agents は実験的な API であり、いつでも変更される可能性があります。エージェントから返される結果 +API または基礎となるモデルは変更される傾向があるため、変更される可能性があります。 + + + +エージェントとツールの詳細については、[入門ガイド](../transformers_agents) を必ずお読みください。このページ +基礎となるクラスの API ドキュメントが含まれています。 + +## エージェント + +私たちは 3 種類のエージェントを提供します。[`HfAgent`] はオープンソース モデルの推論エンドポイントを使用し、[`LocalAgent`] は選択したモデルをローカルで使用し、[`OpenAiAgent`] は OpenAI クローズド モデルを使用します。 + +### HfAgent + +[[autodoc]] HfAgent + +### LocalAgent + +[[autodoc]] LocalAgent + +### OpenAiAgent + +[[autodoc]] OpenAiAgent + +### AzureOpenAiAgent + +[[autodoc]] AzureOpenAiAgent + +### Agent + +[[autodoc]] Agent + - chat + - run + - prepare_for_new_chat + +## Tools + +### load_tool + +[[autodoc]] load_tool + +### Tool + +[[autodoc]] Tool + +### PipelineTool + +[[autodoc]] PipelineTool + +### RemoteTool + +[[autodoc]] 
RemoteTool + +### launch_gradio_demo + +[[autodoc]] launch_gradio_demo + +## エージェントの種類 + +エージェントはツール間であらゆる種類のオブジェクトを処理できます。ツールは完全にマルチモーダルであるため、受け取りと返品が可能です +テキスト、画像、オーディオ、ビデオなどのタイプ。ツール間の互換性を高めるためだけでなく、 +これらの戻り値を ipython (jupyter、colab、ipython ノートブックなど) で正しくレンダリングするには、ラッパー クラスを実装します。 +このタイプの周り。 + +ラップされたオブジェクトは最初と同じように動作し続けるはずです。テキストオブジェクトは依然として文字列または画像として動作する必要があります +オブジェクトは依然として `PIL.Image` として動作するはずです。 + +これらのタイプには、次の 3 つの特定の目的があります。 + +- 型に対して `to_raw` を呼び出すと、基になるオブジェクトが返されるはずです +- 型に対して `to_string` を呼び出すと、オブジェクトを文字列として返す必要があります。`AgentText` の場合は文字列になる可能性があります。 + ただし、他のインスタンスのオブジェクトのシリアル化されたバージョンのパスになります。 +- ipython カーネルで表示すると、オブジェクトが正しく表示されるはずです + +### AgentText + +[[autodoc]] transformers.tools.agent_types.AgentText + +### AgentImage + +[[autodoc]] transformers.tools.agent_types.AgentImage + +### AgentAudio + +[[autodoc]] transformers.tools.agent_types.AgentAudio diff --git a/docs/source/ja/main_classes/callback.md b/docs/source/ja/main_classes/callback.md new file mode 100644 index 000000000000..75938bb6a45b --- /dev/null +++ b/docs/source/ja/main_classes/callback.md @@ -0,0 +1,131 @@ + + + +# コールバック数 + +コールバックは、PyTorch のトレーニング ループの動作をカスタマイズできるオブジェクトです。 +トレーニング ループを検査できる [`Trainer`] (この機能は TensorFlow にはまだ実装されていません) +状態を確認し (進捗レポート、TensorBoard または他の ML プラットフォームへのログ記録など)、決定を下します (初期段階など)。 +停止中)。 + +コールバックは、返される [`TrainerControl`] オブジェクトを除けば、「読み取り専用」のコード部分です。 +トレーニング ループ内では何も変更できません。トレーニング ループの変更が必要なカスタマイズの場合は、次のことを行う必要があります。 +[`Trainer`] をサブクラス化し、必要なメソッドをオーバーライドします (例については、[trainer](trainer) を参照してください)。 + +デフォルトでは、`TrainingArguments.report_to` は `"all"` に設定されているため、[`Trainer`] は次のコールバックを使用します。 + +- [`DefaultFlowCallback`] は、ログ記録、保存、評価のデフォルトの動作を処理します。 +- [`PrinterCallback`] または [`ProgressCallback`] で進行状況を表示し、 + ログ (最初のログは、[`TrainingArguments`] を通じて tqdm を非アクティブ化する場合に使用され、そうでない場合に使用されます) + 2番目です)。 +- [`~integrations.TensorBoardCallback`] (PyTorch >= 1.4 を介して) tensorboard にアクセスできる場合 + またはテンソルボードX)。 +- [`~integrations.WandbCallback`] [wandb](https://www.wandb.com/) がインストールされている場合。 +- [`~integrations.CometCallback`] [comet_ml](https://www.comet.ml/site/) がインストールされている場合。 +- [mlflow](https://www.mlflow.org/) がインストールされている場合は [`~integrations.MLflowCallback`]。 +- [`~integrations.NeptuneCallback`] [neptune](https://neptune.ai/) がインストールされている場合。 +- [`~integrations.AzureMLCallback`] [azureml-sdk](https://pypi.org/project/azureml-sdk/) の場合 + インストールされています。 +- [`~integrations.CodeCarbonCallback`] [codecarbon](https://pypi.org/project/codecarbon/) の場合 + インストールされています。 +- [`~integrations.ClearMLCallback`] [clearml](https://github.com/allegroai/clearml) がインストールされている場合。 +- [`~integrations.DagsHubCallback`] [dagshub](https://dagshub.com/) がインストールされている場合。 +- [`~integrations.FlyteCallback`] [flyte](https://flyte.org/) がインストールされている場合。 + +パッケージがインストールされているが、付随する統合を使用したくない場合は、`TrainingArguments.report_to` を、使用したい統合のみのリストに変更できます (例: `["azure_ml", "wandb"]`) 。 + +コールバックを実装するメインクラスは [`TrainerCallback`] です。それは、 +[`TrainingArguments`] は [`Trainer`] をインスタンス化するために使用され、それにアクセスできます。 +[`TrainerState`] を介してトレーナーの内部状態を取得し、トレーニング ループ上でいくつかのアクションを実行できます。 +[`TrainerControl`]。 + +## 利用可能なコールバック + +ライブラリで利用可能な [`TrainerCallback`] のリストは次のとおりです。 + +[[autodoc]] integrations.CometCallback + - setup + +[[autodoc]] DefaultFlowCallback + +[[autodoc]] PrinterCallback + +[[autodoc]] ProgressCallback + +[[autodoc]] EarlyStoppingCallback + +[[autodoc]] integrations.TensorBoardCallback + +[[autodoc]] integrations.WandbCallback + - setup + +[[autodoc]] integrations.MLflowCallback + - setup + +[[autodoc]] integrations.AzureMLCallback + 
+[[autodoc]] integrations.CodeCarbonCallback + +[[autodoc]] integrations.NeptuneCallback + +[[autodoc]] integrations.ClearMLCallback + +[[autodoc]] integrations.DagsHubCallback + +[[autodoc]] integrations.FlyteCallback + +## TrainerCallback + +[[autodoc]] TrainerCallback + +以下は、カスタム コールバックを PyTorch [`Trainer`] に登録する方法の例です。 + +```python +class MyCallback(TrainerCallback): + "A callback that prints a message at the beginning of training" + + def on_train_begin(self, args, state, control, **kwargs): + print("Starting training") + + +trainer = Trainer( + model, + args, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + callbacks=[MyCallback], # We can either pass the callback class this way or an instance of it (MyCallback()) +) +``` + +コールバックを登録する別の方法は、次のように `trainer.add_callback()` を呼び出すことです。 + +```python +trainer = Trainer(...) +trainer.add_callback(MyCallback) +# Alternatively, we can pass an instance of the callback class +trainer.add_callback(MyCallback()) +``` + +## TrainerState + +[[autodoc]] TrainerState + +## TrainerControl + +[[autodoc]] TrainerControl + + diff --git a/docs/source/ja/main_classes/configuration.md b/docs/source/ja/main_classes/configuration.md new file mode 100644 index 000000000000..7fab5269e204 --- /dev/null +++ b/docs/source/ja/main_classes/configuration.md @@ -0,0 +1,31 @@ + + +# 構成 + +基本クラス [`PretrainedConfig`] は、設定をロード/保存するための一般的なメソッドを実装します。 +ローカル ファイルまたはディレクトリから、またはライブラリ (ダウンロードされた) によって提供される事前トレーニング済みモデル構成から +HuggingFace の AWS S3 リポジトリから)。 + +各派生構成クラスはモデル固有の属性を実装します。すべての構成クラスに存在する共通の属性は次のとおりです。 +`hidden_​​size`、`num_attention_heads`、および `num_hidden_​​layers`。テキスト モデルはさらに以下を実装します。 +`vocab_size`。 + +## PretrainedConfig + +[[autodoc]] PretrainedConfig + - push_to_hub + - all diff --git a/docs/source/ja/main_classes/data_collator.md b/docs/source/ja/main_classes/data_collator.md new file mode 100644 index 000000000000..c37f1aeef4d1 --- /dev/null +++ b/docs/source/ja/main_classes/data_collator.md @@ -0,0 +1,67 @@ + + +# データ照合者 + +データ照合器は、データセット要素のリストを入力として使用してバッチを形成するオブジェクトです。これらの要素は、 +`train_dataset` または `eval_dataset` の要素と同じ型。 + +バッチを構築できるようにするために、データ照合者は何らかの処理 (パディングなど) を適用する場合があります。そのうちのいくつかは( +[`DataCollat​​orForLanguageModeling`]) ランダムなデータ拡張 (ランダム マスキングなど) も適用します +形成されたバッチ上で。 + +使用例は、[サンプル スクリプト](../examples) または [サンプル ノートブック](../notebooks) にあります。 + +## Default data collator + +[[autodoc]] data.data_collator.default_data_collator + +## DefaultDataCollator + +[[autodoc]] data.data_collator.DefaultDataCollator + +## DataCollatorWithPadding + +[[autodoc]] data.data_collator.DataCollatorWithPadding + +## DataCollatorForTokenClassification + +[[autodoc]] data.data_collator.DataCollatorForTokenClassification + +## DataCollatorForSeq2Seq + +[[autodoc]] data.data_collator.DataCollatorForSeq2Seq + +## DataCollatorForLanguageModeling + +[[autodoc]] data.data_collator.DataCollatorForLanguageModeling + - numpy_mask_tokens + - tf_mask_tokens + - torch_mask_tokens + +## DataCollatorForWholeWordMask + +[[autodoc]] data.data_collator.DataCollatorForWholeWordMask + - numpy_mask_tokens + - tf_mask_tokens + - torch_mask_tokens + +## DataCollatorForPermutationLanguageModeling + +[[autodoc]] data.data_collator.DataCollatorForPermutationLanguageModeling + - numpy_mask_tokens + - tf_mask_tokens + - torch_mask_tokens diff --git a/docs/source/ja/main_classes/deepspeed.md b/docs/source/ja/main_classes/deepspeed.md new file mode 100644 index 000000000000..3e65dd21bcf1 --- /dev/null +++ b/docs/source/ja/main_classes/deepspeed.md @@ -0,0 +1,2255 @@ + + +# DeepSpeed 
Integration + +[DeepSpeed](https://github.com/microsoft/DeepSpeed) は、[ZeRO 論文](https://arxiv.org/abs/1910.02054) で説明されているすべてを実装します。現在、次のものを完全にサポートしています。 + +1. オプティマイザーの状態分割 (ZeRO ステージ 1) +2. 勾配分割 (ZeRO ステージ 2) +3. パラメーターの分割 (ZeRO ステージ 3) +4. カスタム混合精度トレーニング処理 +5. 一連の高速 CUDA 拡張ベースのオプティマイザー +6. CPU および NVMe への ZeRO オフロード + +ZeRO-Offload には独自の専用ペーパーがあります: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)。 NVMe サポートについては、論文 [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)。 + +DeepSpeed ZeRO-2 は、その機能が推論には役に立たないため、主にトレーニングのみに使用されます。 + +DeepSpeed ZeRO-3 は、巨大なモデルを複数の GPU にロードできるため、推論にも使用できます。 +単一の GPU では不可能です。 + +🤗 Transformers は、2 つのオプションを介して [DeepSpeed](https://github.com/microsoft/DeepSpeed) を統合します。 + +1. [`Trainer`] によるコア DeepSpeed 機能の統合。何でもやってくれるタイプです + 統合の場合 - カスタム構成ファイルを指定するか、テンプレートを使用するだけで、他に何もする必要はありません。たいていの + このドキュメントではこの機能に焦点を当てています。 +2. [`Trainer`] を使用せず、DeepSpeed を統合した独自のトレーナーを使用したい場合 + `from_pretrained` や `from_config` などのコア機能には、重要な機能の統合が含まれています。 + ZeRO ステージ 3 以降の `zero.Init`などの DeepSpeed の部分。この機能を活用するには、次のドキュメントをお読みください。 + [非トレーナー DeepSpeed 統合](#nontrainer-deepspeed-integration)。 + +統合されているもの: + +トレーニング: + +1. DeepSpeed ZeRO トレーニングは、ZeRO-Infinity (CPU および NVME オフロード) を使用して完全な ZeRO ステージ 1、2、および 3 をサポートします。 + +推論: + +1. DeepSpeed ZeRO Inference は、ZeRO-Infinity による ZeRO ステージ 3 をサポートします。トレーニングと同じ ZeRO プロトコルを使用しますが、 + オプティマイザと lr スケジューラは使用せず、ステージ 3 のみが関連します。詳細については、以下を参照してください。 + [ゼロ推論](#zero-inference)。 + +DeepSpeed Inference もあります。これは、Tensor Parallelism の代わりに Tensor Parallelism を使用するまったく異なるテクノロジーです。 +ZeRO (近日公開)。 + + + + +## Trainer Deepspeed Integration + + + + +### Installation + +pypi 経由でライブラリをインストールします。 +```bash +pip install deepspeed +``` + +または`tansformers`, `extras`経由: + +```bash +pip install transformers[deepspeed] +``` + +または、[DeepSpeed の GitHub ページ](https://github.com/microsoft/deepspeed#installation) で詳細を確認してください。 +[高度なインストール](https://www.deepspeed.ai/tutorials/advanced-install/)。 + +それでもビルドに苦労する場合は、まず [CUDA 拡張機能のインストール ノート](trainer#cuda-extension-installation-notes) を必ず読んでください。 + +拡張機能を事前ビルドせず、実行時に拡張機能がビルドされることに依存しており、上記の解決策をすべて試した場合 +それが役に立たなかった場合、次に試すべきことは、モジュールをインストールする前にモジュールを事前にビルドすることです。 + +DeepSpeed のローカル ビルドを作成するには: + +```bash +git clone https://github.com/microsoft/DeepSpeed/ +cd DeepSpeed +rm -rf build +TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install . 
\ +--global-option="build_ext" --global-option="-j8" --no-cache -v \ +--disable-pip-version-check 2>&1 | tee build.log +``` + +NVMe オフロードを使用する場合は、上記の手順に`DS_BUILD_AIO=1`を含める必要があります (また、 +*libaio-dev* システム全体にインストールします)。 + +`TORCH_CUDA_ARCH_LIST` を編集して、使用する GPU カードのアーキテクチャのコードを挿入します。すべてを仮定すると +あなたのカードは同じで、次の方法でアーチを取得できます。 + +```bash +CUDA_VISIBLE_DEVICES=0 python -c "import torch; print(torch.cuda.get_device_capability())" +``` + +したがって、`8, 6`を取得した場合は、`TORCH_CUDA_ARCH_LIST="8.6"`を使用します。複数の異なるカードをお持ちの場合は、すべてをリストすることができます +それらのうち、`TORCH_CUDA_ARCH_LIST="6.1;8.6"`が好きです + +複数のマシンで同じセットアップを使用する必要がある場合は、バイナリ ホイールを作成します。 + +```bash +git clone https://github.com/microsoft/DeepSpeed/ +cd DeepSpeed +rm -rf build +TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 \ +python setup.py build_ext -j8 bdist_wheel +``` + +`dist/deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl`のようなものが生成されるので、これをインストールできます +`pip install deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl`としてローカルまたは他のマシンにインストールします。 + +繰り返しますが、`TORCH_CUDA_ARCH_LIST`をターゲット アーキテクチャに合わせて調整することを忘れないでください。 + +NVIDIA GPU の完全なリストと、それに対応する **コンピューティング機能** (この記事の Arch と同じ) を見つけることができます。 +コンテキスト) [ここ](https://developer.nvidia.com/cuda-gpus)。 + +以下を使用して、pytorch が構築されたアーチを確認できます。 + +```bash +python -c "import torch; print(torch.cuda.get_arch_list())" +``` + +ここでは、インストールされている GPU の 1 つのアーチを見つける方法を説明します。たとえば、GPU 0 の場合: + +```bash +CUDA_VISIBLE_DEVICES=0 python -c "import torch; \ +print(torch.cuda.get_device_properties(torch.device('cuda')))" +``` + +出力が次の場合: + +```bash +_CudaDeviceProperties(name='GeForce RTX 3090', major=8, minor=6, total_memory=24268MB, multi_processor_count=82) +``` + +そうすれば、このカードのアーチが`8.6`であることがわかります。 + +`TORCH_CUDA_ARCH_LIST` を完全に省略することもできます。そうすれば、ビルド プログラムが自動的にクエリを実行します。 +ビルドが行われる GPU のアーキテクチャ。これは、ターゲット マシンの GPU と一致する場合もあれば、一致しない場合もあります。 +目的のアーチを明示的に指定することをお勧めします。 + +提案されたことをすべて試してもまだビルドの問題が発生する場合は、GitHub の問題に進んでください。 +[ディープスピード](https://github.com/microsoft/DeepSpeed/issues)、 + + + +### Deployment with multiple GPUs + +DeepSpeed 統合をデプロイするには、[`Trainer`] コマンド ライン引数を調整して新しい引数 `--deepspeed ds_config.json` を含めます。ここで、`ds_config.json` は DeepSpeed 構成ファイルです。 + [こちら](https://www.deepspeed.ai/docs/config-json/)に記載されています。ファイル名はあなた次第です。 + DeepSpeed の`add_config_arguments`ユーティリティを使用して、必要なコマンド ライン引数をコードに追加することをお勧めします。 + 詳細については、[DeepSpeed の引数解析](https://deepspeed.readthedocs.io/en/latest/initialize.html#argument-parsing) ドキュメントを参照してください。 + +ここで選択したランチャーを使用できます。 pytorch ランチャーを引き続き使用できます。 + +```bash +torch.distributed.run --nproc_per_node=2 your_program.py --deepspeed ds_config.json +``` + +または、`deepspeed`によって提供されるランチャーを使用します。 + +```bash +deepspeed --num_gpus=2 your_program.py --deepspeed ds_config.json +``` + +ご覧のとおり、引数は同じではありませんが、ほとんどのニーズではどちらでも機能します。の +さまざまなノードと GPU を構成する方法の詳細については、[こちら](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) を参照してください。 + +`deepspeed`ランチャーを使用し、利用可能なすべての GPU を使用したい場合は、`--num_gpus`フラグを省略するだけです。 + +以下は、利用可能なすべての GPU をデプロイする DeepSpeed で`run_translation.py`を実行する例です。 + +```bash +deepspeed examples/pytorch/translation/run_translation.py \ +--deepspeed tests/deepspeed/ds_config_zero3.json \ +--model_name_or_path t5-small --per_device_train_batch_size 1 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ +--do_train --max_train_samples 500 --num_train_epochs 1 \ +--dataset_name wmt16 --dataset_config "ro-en" \ +--source_lang en --target_lang ro +``` + +DeepSpeed のドキュメントには、`--deepspeed --deepspeed_config ds_config.json`が表示される可能性が高いことに注意してください。 +DeepSpeed 関連の引数が 2 
つありますが、簡単にするためであり、処理すべき引数がすでに非常に多いためです。 +この 2 つを 1 つの引数に結合しました。 + +実際の使用例については、この [投稿](https://github.com/huggingface/transformers/issues/8771#issuecomment-759248400) を参照してください。 + + + + +### Deployment with one GPU + +1 つの GPU で DeepSpeed をデプロイするには、[`Trainer`] コマンド ライン引数を次のように調整します。 + +```bash +deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ +--deepspeed tests/deepspeed/ds_config_zero2.json \ +--model_name_or_path t5-small --per_device_train_batch_size 1 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ +--do_train --max_train_samples 500 --num_train_epochs 1 \ +--dataset_name wmt16 --dataset_config "ro-en" \ +--source_lang en --target_lang ro +``` + +これは複数の GPU の場合とほぼ同じですが、ここでは、DeepSpeed に 1 つの GPU だけを使用するように明示的に指示します。 +`--num_gpus=1`。デフォルトでは、DeepSpeed は指定されたノード上で認識できるすべての GPU をデプロイします。起動する GPU が 1 つだけの場合 +の場合、この引数は必要ありません。次の [ドキュメント](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) では、ランチャー オプションについて説明しています。 + +1 つの GPU だけで DeepSpeed を使用したいのはなぜですか? + +1. 一部の計算とメモリをホストの CPU と RAM に委任できる ZeRO オフロード機能を備えているため、 + モデルのニーズに合わせてより多くの GPU リソースを残しておきます。より大きなバッチ サイズ、または非常に大きなモデルのフィッティングを可能にする + 普通は合わないでしょう。 +2. スマートな GPU メモリ管理システムを提供し、メモリの断片化を最小限に抑えます。 + より大きなモデルとデータ バッチ。 + +次に構成について詳しく説明しますが、単一の GPU で大幅な改善を実現するための鍵は次のとおりです。 +DeepSpeed を使用するには、構成ファイルに少なくとも次の構成が必要です。 + +```json +{ + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "overlap_comm": true, + "contiguous_gradients": true + } +} +``` + +これにより、オプティマイザーのオフロードやその他の重要な機能が有効になります。バッファ サイズを試してみるとよいでしょう。 +詳細については、以下のディスカッションを参照してください。 + +このタイプのデプロイメントの実際的な使用例については、この [投稿](https://github.com/huggingface/transformers/issues/8771#issuecomment-759176685) を参照してください。 + +このドキュメントで詳しく説明されているように、CPU および NVMe オフロードを備えた ZeRO-3 を試すこともできます。 + +ノート: + +- GPU 0 とは異なる特定の GPU で実行する必要がある場合、`CUDA_VISIBLE_DEVICES` を使用して制限することはできません。 + 利用可能な GPU の表示範囲。代わりに、次の構文を使用する必要があります。 + + ```bash + deepspeed --include localhost:1 examples/pytorch/translation/run_translation.py ... 
+ ``` + + この例では、DeepSpeed に GPU 1 (2 番目の GPU) を使用するように指示します。 + + + +### 複数のノードを使用したデプロイメント + +このセクションの情報は DeepSpeed 統合に固有のものではなく、あらゆるマルチノード プログラムに適用できます。ただし、DeepSpeed は、SLURM 環境でない限り、他のランチャーよりも使いやすい`deepspeed`ランチャーを提供します。 + +このセクションでは、それぞれ 8 GPU を備えた 2 つのノードがあると仮定します。また、最初のノードには `ssh hostname1` を使用して、2 番目のノードには `ssh hostname2` を使用して接続できます。両方ともパスワードなしでローカルの ssh 経由で相互に接続できる必要があります。もちろん、これらのホスト (ノード) 名を、作業している実際のホスト名に変更する必要があります。 + +#### The torch.distributed.run launcher + + +たとえば、`torch.distributed.run` を使用するには、次のようにします。 + +```bash +python -m torch.distributed.run --nproc_per_node=8 --nnode=2 --node_rank=0 --master_addr=hostname1 \ +--master_port=9901 your_program.py --deepspeed ds_config.json +``` + +各ノードに SSH で接続し、それぞれのノードで同じコマンドを実行する必要があります。急ぐ必要はありません。ランチャーは両方のノードが同期するまで待機します。 + +詳細については、[torchrun](https://pytorch.org/docs/stable/elastic/run.html) を参照してください。ちなみに、これは pytorch の数バージョン前の`torch.distributed.launch`を置き換えたランチャーでもあります。 + +#### ディープスピード ランチャー + +代わりに`deepspeed`ランチャーを使用するには、まず`hostfile`ファイルを作成する必要があります。 + +``` +hostname1 slots=8 +hostname2 slots=8 +``` + +そして、次のように起動できます。 + +```bash +deepspeed --num_gpus 8 --num_nodes 2 --hostfile hostfile --master_addr hostname1 --master_port=9901 \ +your_program.py --deepspeed ds_config.json +``` + +`torch.distributed.run`ランチャーとは異なり、`deepspeed`は両方のノードでこのコマンドを自動的に起動します。 + +詳細については、[リソース構成 (マルチノード)](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) を参照してください。 + +#### Launching in a SLURM environment + +SLURM 環境では、次のアプローチを使用できます。以下は、特定の SLURM 環境に適合させるために必要な slurm スクリプト `launch.slurm` です。 + +```bash +#SBATCH --job-name=test-nodes # name +#SBATCH --nodes=2 # nodes +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=10 # number of cores per tasks +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name + +export GPUS_PER_NODE=8 +export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +export MASTER_PORT=9901 + +srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ + --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ +your_program.py --deepspeed ds_config.json' +``` + +あとは実行をスケジュールするだけです。 +```bash +sbatch launch.slurm +``` + +#### Use of Non-shared filesystem + +デフォルトでは、DeepSpeed はマルチノード環境が共有ストレージを使用することを想定しています。これが当てはまらず、各ノードがローカル ファイルシステムしか参照できない場合は、設定ファイルを調整して [`checkpoint`_section](https://www.deepspeed.ai/docs/config-json/#) を含める必要があります。チェックポイント オプション) を次の設定で指定します。 + + +```json +{ + "checkpoint": { + "use_node_local_storage": true + } +} +``` + +あるいは、[`Trainer`] の `--save_on_each_node` 引数を使用することもでき、上記の設定は自動的に追加されます。 + + + +### Deployment in Notebooks + +ノートブックのセルをスクリプトとして実行する場合の問題は、依存する通常の`deepspeed`ランチャーがないことです。 +特定の設定では、それをエミュレートする必要があります。 + +GPU を 1 つだけ使用している場合、DeepSpeed を使用するためにノートブック内のトレーニング コードを調整する必要がある方法は次のとおりです。 + +```python +# DeepSpeed requires a distributed environment even when only one process is used. +# This emulates a launcher in the notebook +import os + +os.environ["MASTER_ADDR"] = "localhost" +os.environ["MASTER_PORT"] = "9994" # modify if RuntimeError: Address already in use +os.environ["RANK"] = "0" +os.environ["LOCAL_RANK"] = "0" +os.environ["WORLD_SIZE"] = "1" + +# Now proceed as normal, plus pass the deepspeed config file +training_args = TrainingArguments(..., deepspeed="ds_config_zero3.json") +trainer = Trainer(...) 
+trainer.train() +``` + +注: `...` は、関数に渡す通常の引数を表します。 + +複数の GPU を使用する場合、DeepSpeed が動作するにはマルチプロセス環境を使用する必要があります。つまり、あなたは持っています +その目的でランチャーを使用することはできませんが、これは、提示された分散環境をエミュレートすることによっては実現できません。 +このセクションの冒頭で。 + +現在のディレクトリのノートブックにその場で構成ファイルを作成したい場合は、専用の +セルの内容: + +```python no-style +%%bash +cat <<'EOT' > ds_config_zero3.json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +EOT +``` + +トレーニング スクリプトがノートブックのセルではなく通常のファイルにある場合は、次のようにして`deepspeed`を通常どおり起動できます。 +細胞からのシェル。たとえば、`run_translation.py` を使用するには、次のように起動します。 + +```python no-style +!git clone https://github.com/huggingface/transformers +!cd transformers; deepspeed examples/pytorch/translation/run_translation.py ... +``` + +または、`%%bash` マジックを使用すると、シェル プログラムを実行するための複数行のコードを記述することができます。 + +```python no-style +%%bash + +git clone https://github.com/huggingface/transformers +cd transformers +deepspeed examples/pytorch/translation/run_translation.py ... +``` + +そのような場合、このセクションの最初に示したコードは必要ありません。 + +注: `%%bash` マジックは優れていますが、現時点では出力をバッファリングするため、プロセスが終了するまでログは表示されません。 +完了します。 + + + +### Configuration + +設定ファイルで使用できる DeepSpeed 設定オプションの完全なガイドについては、次を参照してください。 +[次のドキュメント](https://www.deepspeed.ai/docs/config-json/) にアクセスしてください。 + +さまざまな実際のニーズに対応する数十の DeepSpeed 構成例を [DeepSpeedExamples] (https://github.com/microsoft/DeepSpeedExamples)で見つけることができます。 +リポジトリ: + +```bash +git clone https://github.com/microsoft/DeepSpeedExamples +cd DeepSpeedExamples +find . -name '*json' +``` + +上記のコードを続けて、Lamb オプティマイザーを構成しようとしているとします。したがって、次の中から検索できます +`.json` ファイルの例: + +```bash +grep -i Lamb $(find . 
-name '*json') +``` + +さらにいくつかの例が [メイン リポジトリ](https://github.com/microsoft/DeepSpeed) にもあります。 + +DeepSpeed を使用する場合は、常に DeepSpeed 構成ファイルを指定する必要がありますが、一部の構成パラメータには +コマンドライン経由で設定します。微妙な違いについては、このガイドの残りの部分で説明します。 + +DeepSpeed 構成ファイルがどのようなものかを理解するために、ZeRO ステージ 2 機能を有効にする構成ファイルを次に示します。 +オプティマイザー状態の CPU オフロードを含み、`AdamW`オプティマイザーと`WarmupLR`スケジューラーを使用し、混合を有効にします。 +`--fp16` が渡された場合の精度トレーニング: + + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "contiguous_gradients": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", +} +``` + +プログラムを実行すると、DeepSpeed は [`Trainer`] から受け取った設定をログに記録します。 +コンソールに渡されるため、最終的にどのような設定が渡されたのかを正確に確認できます。 + + + +### Passing Configuration + +このドキュメントで説明したように、通常、DeepSpeed 設定は json ファイルへのパスとして渡されますが、 +トレーニングの設定にコマンド ライン インターフェイスを使用せず、代わりにインスタンスを作成します。 +[`Trainer`] via [`TrainingArguments`] その後、`deepspeed` 引数については次のことができます +ネストされた `dict` を渡します。これにより、その場で構成を作成でき、それを書き込む必要がありません。 +[`TrainingArguments`] に渡す前にファイル システムを変更します。 + +要約すると、次のことができます。 + +```python +TrainingArguments(..., deepspeed="/path/to/ds_config.json") +``` + +または: + +```python +ds_config_dict = dict(scheduler=scheduler_params, optimizer=optimizer_params) +TrainingArguments(..., deepspeed=ds_config_dict) +``` + + + + +### Shared Configuration + + + +このセクションは必読です + + + +[`Trainer`] と DeepSpeed の両方が正しく機能するには、いくつかの設定値が必要です。 +したがって、検出が困難なエラーにつながる可能性のある定義の競合を防ぐために、それらを構成することにしました。 +[`Trainer`] コマンドライン引数経由。 + +さらに、一部の構成値はモデルの構成に基づいて自動的に導出されます。 +複数の値を手動で調整することを忘れないでください。[`Trainer`] に大部分を任せるのが最善です +の設定を行います。 + +したがって、このガイドの残りの部分では、特別な設定値 `auto` が表示されます。これを設定すると、 +正しい値または最も効率的な値に自動的に置き換えられます。これを無視することを自由に選択してください +推奨事項を参照し、値を明示的に設定します。この場合、次の点に十分注意してください。 +[`Trainer`] 引数と DeepSpeed 設定は一致します。たとえば、同じものを使用していますか +学習率、バッチサイズ、または勾配累積設定?これらが一致しない場合、トレーニングは非常に失敗する可能性があります +方法を検出するのが難しい。あなたは警告を受けました。 + +DeepSpeed のみに固有の値や、それに合わせて手動で設定する必要がある値が他にも複数あります。 +あなたの要望。 + +独自のプログラムで、DeepSpeed 構成をマスターとして変更したい場合は、次のアプローチを使用することもできます。 +それに基づいて [`TrainingArguments`] を設定します。手順は次のとおりです。 + +1. マスター構成として使用する DeepSpeed 構成を作成またはロードします +2. 
これらの値に基づいて [`TrainingArguments`] オブジェクトを作成します + +`scheduler.params.total_num_steps`などの一部の値は次のように計算されることに注意してください。 +`train` 中に [`Trainer`] を実行しますが、もちろん自分で計算することもできます。 + + + +### ZeRO + +[Zero Redundancy Optimizer (ZeRO)](https://www.deepspeed.ai/tutorials/zero/) は、DeepSpeed の主力製品です。それ +3 つの異なるレベル (段階) の最適化をサポートします。最初のものは、スケーラビリティの観点からはあまり興味深いものではありません。 +したがって、このドキュメントではステージ 2 と 3 に焦点を当てます。ステージ 3 は、最新の ZeRO-Infinity の追加によってさらに改善されています。 +詳細については、DeepSpeed のドキュメントを参照してください。 + +構成ファイルの `zero_optimization` セクションは最も重要な部分です ([docs](https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training))。ここで定義します +どの ZeRO ステージを有効にするか、そしてそれらをどのように構成するか。各パラメータの説明は、 +DeepSpeed のドキュメント。 + +このセクションは、DeepSpeed 設定を介してのみ設定する必要があります - [`Trainer`] が提供します +同等のコマンドライン引数はありません。 + +注: 現在、DeepSpeed はパラメーター名を検証しないため、スペルを間違えると、デフォルト設定が使用されます。 +スペルが間違っているパラメータ。 DeepSpeed エンジンの起動ログ メッセージを見て、その値を確認できます。 +使用するつもりです。 + + + +#### ZeRO-2 Config + +以下は、ZeRO ステージ 2 の構成例です。 + +```json +{ + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients": true + } +} +``` + +**性能調整:** + +- `offload_optimizer` を有効にすると、GPU RAM の使用量が削減されます (`"stage": 2` が必要です) +- `"overlap_comm": true` は、GPU RAM 使用量の増加とトレードオフして、遅延をすべて削減します。 `overlap_comm`は 4.5x を使用します + `allgather_bucket_size`と`reduce_bucket_size`の値。したがって、5e8 に設定されている場合、9GB が必要になります。 + フットプリント (`5e8 x 2Bytes x 2 x 4.5`)。したがって、8GB 以下の RAM を搭載した GPU を使用している場合、 + OOM エラーが発生した場合は、これらのパラメータを`2e8`程度に減らす必要があり、それには 3.6GB が必要になります。やりたくなるでしょう + OOM に達し始めている場合は、より大容量の GPU でも同様です。 +- これらのバッファを減らすと、より多くの GPU RAM を利用するために通信速度を犠牲にすることになります。バッファサイズが小さいほど、 + 通信が遅くなり、他のタスクで使用できる GPU RAM が増えます。したがって、バッチサイズが大きい場合は、 + 重要なのは、トレーニング時間を少し遅らせることは良いトレードになる可能性があります。 + +さらに、`deepspeed==0.4.4`には、次のコマンドで有効にできる新しいオプション`round_robin_gradients`が追加されました。 + +```json +{ + "zero_optimization": { + "round_robin_gradients": true + } +} +``` + +これは、きめ細かい勾配パーティショニングによってランク間の CPU メモリへの勾配コピーを並列化する、CPU オフロードのステージ 2 最適化です。パフォーマンスの利点は、勾配累積ステップ (オプティマイザー ステップ間のコピーの増加) または GPU 数 (並列処理の増加) に応じて増加します。 + + + +#### ZeRO-3 Config + +以下は、ZeRO ステージ 3 の構成例です。 + +```json +{ + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + } +} +``` + +モデルまたはアクティベーションが GPU メモリに適合せず、CPU が未使用であるために OOM が発生している場合 +`"device": "cpu"` を使用してオプティマイザの状態とパラメータを CPU メモリにメモリオフロードすると、この制限が解決される可能性があります。 +CPU メモリにオフロードしたくない場合は、`device`エントリに`cpu`の代わりに`none`を使用します。オフロード先 +NVMe については後ほど説明します。 + +固定メモリは、`pin_memory`を`true`に設定すると有効になります。この機能により、次のようなコストをかけてスループットを向上させることができます。 +他のプロセスが使用できるメモリが少なくなります。ピン留めされたメモリは、それを要求した特定のプロセスのために確保されます。 +通常、通常の CPU メモリよりもはるかに高速にアクセスされます。 + +**性能調整:** + +- `stage3_max_live_parameters`: `1e9` +- `stage3_max_reuse_distance`: `1e9` + +OOM に達した場合は、「stage3_max_live_parameters」と「stage3_max_reuse_ distance」を減らします。影響は最小限に抑えられるはずです +アクティブ化チェックポイントを実行しない限り、パフォーマンスに影響します。 `1e9`は約 2GB を消費します。記憶を共有しているのは、 +`stage3_max_live_parameters` と `stage3_max_reuse_distance` なので、加算されるものではなく、合計で 2GB 
になります。 + +`stage3_max_live_parameters` は、特定の時点で GPU 上に保持する完全なパラメータの数の上限です。 +時間。 「再利用距離」は、パラメータが将来いつ再び使用されるかを判断するために使用する指標です。 +`stage3_max_reuse_ distance`を使用して、パラメータを破棄するか保持するかを決定します。パラメータが +近い将来に再び使用される予定 (`stage3_max_reuse_distance`未満) なので、通信を減らすために保持します。 +オーバーヘッド。これは、アクティベーション チェックポイントを有効にしている場合に非常に役立ちます。フォワード再計算が行われ、 +backward は単一レイヤー粒度を渡し、後方再計算までパラメータを前方再計算に保持したいと考えています。 + +次の構成値は、モデルの非表示サイズによって異なります。 + +- `reduce_bucket_size`: `hidden_size*hidden_size` +- `stage3_prefetch_bucket_size`: `0.9 * hidden_size * hidden_size` +- `stage3_param_persistence_threshold`: `10 * hidden_size` + +したがって、これらの値を `auto` に設定すると、[`Trainer`] が推奨される値を自動的に割り当てます。 +価値観。ただし、もちろん、これらを明示的に設定することもできます。 + +`stage3_gather_16bit_weights_on_model_save` は、モデルの保存時にモデル fp16 の重み統合を有効にします。大きい +モデルと複数の GPU の場合、これはメモリと速度の両方の点で高価な操作です。現在必須となっているのは、 +トレーニングを再開する予定です。この制限を取り除き、より便利にする今後のアップデートに注目してください。 +フレキシブル。 + +ZeRO-2 構成から移行している場合は、`allgather_partitions`、`allgather_bucket_size`、および +`reduce_scatter`設定パラメータは ZeRO-3 では使用されません。これらを設定ファイルに保存しておくと、 +無視される。 + +- `sub_group_size`: `1e9` + + +`sub_group_size` は、オプティマイザーのステップ中にパラメーターが更新される粒度を制御します。パラメータは次のとおりです。 +`sub_group_size` のバケットにグループ化され、各バケットは一度に 1 つずつ更新されます。 NVMeオフロードで使用する場合 +したがって、ZeRO-Infinity の `sub_group_size`は、モデルの状態が CPU に出入りする粒度を制御します。 +オプティマイザステップ中に NVMe からメモリを取得します。これにより、非常に大規模なモデルの CPU メモリ不足が防止されます。 + +NVMe オフロードを使用しない場合は、`sub_group_size`をデフォルト値の *1e9* のままにすることができます。変更することもできます +次の場合のデフォルト値: + +1. オプティマイザー ステップ中に OOM が発生する: `sub_group_size` を減らして、一時バッファーのメモリ使用量を削減します。 +2. オプティマイザー ステップに時間がかかります。`sub_group_size`を増やして、帯域幅の使用率を向上させます。 + データバッファの増加。 + +#### ZeRO-0 Config + +ステージ 0 と 1 はめったに使用されないため、最後にリストしていることに注意してください。 + +ステージ 0 では、すべてのタイプのシャーディングを無効にし、DDP として DeepSpeed のみを使用します。次のコマンドでオンにできます。 + +```json +{ + "zero_optimization": { + "stage": 0 + } +} +``` + +これにより、他に何も変更する必要がなく、基本的に ZeRO が無効になります。 + +#### ZeRO-1 Config + +ステージ 1 は、ステージ 2 からグラデーション シャーディングを除いたものです。オプティマイザーの状態をシャード化するだけで、処理を少し高速化するためにいつでも試すことができます。 + +```json +{ + "zero_optimization": { + "stage": 1 + } +} +``` + + + +### NVMe Support + +ZeRO-Infinity は、GPU と CPU メモリを NVMe メモリで拡張することで、非常に大規模なモデルのトレーニングを可能にします。おかげで +スマート パーティショニングおよびタイリング アルゴリズムでは、各 GPU が非常に少量のデータを送受信する必要があります。 +オフロードにより、最新の NVMe がトレーニングに利用できる合計メモリ プールをさらに大きくするのに適していることが判明しました。 +プロセス。 ZeRO-Infinity には、ZeRO-3 が有効になっている必要があります。 + +次の設定例では、NVMe がオプティマイザの状態とパラメータの両方をオフロードできるようにします。 + +```json +{ + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "nvme", + "nvme_path": "/local_nvme", + "pin_memory": true, + "buffer_count": 4, + "fast_init": false + }, + "offload_param": { + "device": "nvme", + "nvme_path": "/local_nvme", + "pin_memory": true, + "buffer_count": 5, + "buffer_size": 1e8, + "max_in_cpu": 1e9 + }, + "aio": { + "block_size": 262144, + "queue_depth": 32, + "thread_count": 1, + "single_submit": false, + "overlap_events": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, +} +``` + +オプティマイザの状態とパラメータの両方を NVMe にオフロードするか、どちらか 1 つだけをオフロードするか、まったくオフロードしないかを選択できます。たとえば、次の場合 +利用可能な CPU メモリが大量にある場合は、高速になるため、必ず CPU メモリのみにオフロードしてください (ヒント: +*"device": "CPU"*)。 + +[オプティマイザーの状態](https://www.deepspeed.ai/docs/config-json/#optimizer-offloading) と [パラメーター](https://www.deepspeed.ai/docs/config-json/#parameter-offloading)。 + 
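+
+なお、このドキュメントの前半 (Passing Configuration) で説明したように、このような構成は json ファイルとしてではなく、ネストされた `dict` として [`TrainingArguments`] に直接渡すこともできます。以下は、オプティマイザー状態のみを NVMe にオフロードし、パラメーターは CPU に残す場合の最小限のスケッチです。`/local_nvme` や `output_dir` などの値は説明用の仮定なので、実際の環境に合わせて調整してください。
+
+```python
+from transformers import TrainingArguments
+
+# Minimal sketch (illustrative values only): offload optimizer states to NVMe,
+# keep parameters offloaded to CPU. Point "nvme_path" at a real NVMe mount.
+ds_config = {
+    "zero_optimization": {
+        "stage": 3,
+        "offload_optimizer": {"device": "nvme", "nvme_path": "/local_nvme", "pin_memory": True},
+        "offload_param": {"device": "cpu", "pin_memory": True},
+    },
+    "gradient_accumulation_steps": "auto",
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+}
+
+training_args = TrainingArguments(output_dir="output_dir", deepspeed=ds_config)
+```
+
+`dict` として渡した場合でも、`nvme_path` などに関する以下の注意点は json ファイルを使う場合と同じです。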
+`nvme_path`が実際に NVMe であることを確認してください。NVMe は通常のハードドライブまたは SSD で動作しますが、 +はるかに遅くなります。高速スケーラブルなトレーニングは、最新の NVMe 転送速度を念頭に置いて設計されました (この時点では +書き込みでは、読み取り最大 3.5 GB/秒、書き込み最大 3 GB/秒のピーク速度が得られます)。 + +最適な`aio`構成ブロックを見つけるには、ターゲット設定でベンチマークを実行する必要があります。 +[ここで説明](https://github.com/microsoft/DeepSpeed/issues/998)。 + + + + +#### ZeRO-2 vs ZeRO-3 Performance + +ZeRO-3 は、他のすべてが同じように構成されている場合、ZeRO-2 よりも遅くなる可能性があります。前者は収集する必要があるためです。 +ZeRO-2 の機能に加えてモデルの重み付けを行います。 ZeRO-2 がニーズを満たし、数個の GPU を超えて拡張する必要がない場合 +そうすれば、それに固執することを選択することもできます。 ZeRO-3 により、はるかに高いスケーラビリティ容量が可能になることを理解することが重要です +スピードを犠牲にして。 + +ZeRO-3 の構成を調整して、ZeRO-2 に近づけることができます。 + +- `stage3_param_persistence_threshold` を非常に大きな数値に設定します。たとえば、`6 * hidden_​​size * hidden_​​size` のように、最大​​パラメータよりも大きくなります。これにより、パラメータが GPU に保持されます。 +- ZeRO-2 にはそのオプションがないため、`offload_params` をオフにします。 + +変更しなくても、`offload_params`をオフにするだけでパフォーマンスが大幅に向上する可能性があります。 +`stage3_param_persistence_threshold`。もちろん、これらの変更はトレーニングできるモデルのサイズに影響します。それで +これらは、ニーズに応じて、スケーラビリティと引き換えに速度を向上させるのに役立ちます。 + + + +#### ZeRO-2 Example + +以下は、完全な ZeRO-2 自動構成ファイル `ds_config_zero2.json` です。 + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "contiguous_gradients": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +``` + +以下は、手動で設定された完全な ZeRO-2 のすべてが有効な構成ファイルです。ここでは主に、典型的なものを確認するためのものです。 +値は次のようになりますが、複数の`auto`設定が含まれる値を使用することを強くお勧めします。 + +```json +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": 3e-5, + "betas": [0.8, 0.999], + "eps": 1e-8, + "weight_decay": 3e-7 + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 3e-5, + "warmup_num_steps": 500 + } + }, + + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "contiguous_gradients": true + }, + + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +``` + + + +#### ZeRO-3 Example + +以下は、完全な ZeRO-3 自動構成ファイル`ds_config_zero3.json`です。 + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 3, + 
"offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +``` + +以下は、手動で設定された完全な ZeRO-3 のすべてが有効な構成ファイルです。ここでは主に、典型的なものを確認するためのものです。 +値は次のようになりますが、複数の`auto`設定が含まれる値を使用することを強くお勧めします。 + +```json +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": 3e-5, + "betas": [0.8, 0.999], + "eps": 1e-8, + "weight_decay": 3e-7 + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 3e-5, + "warmup_num_steps": 500 + } + }, + + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": 1e6, + "stage3_prefetch_bucket_size": 0.94e6, + "stage3_param_persistence_threshold": 1e4, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +``` + +#### How to Choose Which ZeRO Stage and Offloads To Use For Best Performance + +これで、さまざまな段階があることがわかりました。どちらを使用するかをどのように決定すればよいでしょうか?このセクションでは、この質問に答えていきます。 + +一般に、次のことが当てはまります。 + +- 速度の点(左の方が右より速い) + +ステージ 0 (DDP) > ステージ 1 > ステージ 2 > ステージ 2 + オフロード > ステージ 3 > ステージ 3 + オフロード + +- GPU メモリの使用状況 (右は左よりも GPU メモリ効率が高い) + +ステージ 0 (DDP) < ステージ 1 < ステージ 2 < ステージ 2 + オフロード < ステージ 3 < ステージ 3 + オフロード + +したがって、最小限の数の GPU に収まりながら最速の実行を実現したい場合は、次のプロセスに従うことができます。最も速いアプローチから開始し、GPU OOM に陥った場合は、次に遅いアプローチに進みますが、これにより使用される GPU メモリが少なくなります。などなど。 + +まず、バッチ サイズを 1 に設定します (必要な有効バッチ サイズに対して、いつでも勾配累積を使用できます)。 + +1. `--gradient_checkpointing 1` (HF Trainer) または直接 `model.gradient_checkpointing_enable()` を有効にします - OOM の場合 +2. 最初に ZeRO ステージ 2 を試してください。 OOMの場合 +3. ZeRO ステージ 2 + `offload_optimizer` を試します - OOM の場合 +4. ZeRO ステージ 3 に切り替える - OOM の場合 +5. `cpu` に対して `offload_param` を有効にします - OOM の場合 +6. OOM の場合は、`cpu`に対して`offload_optimizer`を有効にします。 + +7. それでもバッチ サイズ 1 に適合しない場合は、まずさまざまなデフォルト値を確認し、可能であれば値を下げます。たとえば、`generate`を使用し、広い検索ビームを使用しない場合は、大量のメモリを消費するため、検索ビームを狭くします。 + +8. fp32 では必ず混合半精度を使用します。つまり、Ampere 以上の GPU では bf16、古い GPU アーキテクチャでは fp16 を使用します。 + +9. それでも OOM を行う場合は、ハードウェアを追加するか、ZeRO-Infinity を有効にすることができます。つまり、オフロード `offload_param` と `offload_optimizer` を `nvme` に切り替えます。非常に高速な nvme であることを確認する必要があります。逸話として、ZeRO-Infinity を使用して小さな GPU で BLOOM-176B を推論することができましたが、非常に遅かったです。でも、うまくいきました! 
+ +もちろん、最も GPU メモリ効率の高い構成から始めて、後から逆に進むことで、これらの手順を逆に実行することもできます。あるいは二等分してみてください。 + +OOM を引き起こさないバッチ サイズ 1 を取得したら、実効スループットを測定します。 + +次に、バッチ サイズをできるだけ大きくしてみます。バッチ サイズが大きいほど、乗算する行列が巨大な場合に GPU のパフォーマンスが最高になるため、GPU の効率が向上します。 + +ここで、パフォーマンス最適化ゲームが始まります。一部のオフロード機能をオフにするか、ZeRO 段階でステップダウンしてバッチ サイズを増減して、実効スループットを再度測定することができます。満足するまで洗い流し、繰り返します。 + +永遠にこれに費やす必要はありませんが、3 か月のトレーニングを開始しようとしている場合は、スループットに関して最も効果的な設定を見つけるために数日かけてください。そのため、トレーニングのコストが最小限になり、トレーニングをより早く完了できます。現在の目まぐるしく変化する ML の世界では、何かをトレーニングするのにさらに 1 か月かかる場合、絶好の機会を逃す可能性があります。もちろん、これは私が意見を共有しているだけであり、決してあなたを急かそうとしているわけではありません。 BLOOM-176B のトレーニングを開始する前に、このプロセスに 2 日間費やし、スループットを 90 TFLOP から 150 TFLOP に向上させることができました。この取り組みにより、トレーニング時間を 1 か月以上節約できました。 + +これらのメモは主にトレーニング モード用に書かれたものですが、ほとんどの場合は推論にも適用されるはずです。たとえば、勾配チェックポイントはトレーニング中にのみ役立つため、推論中は何も行われません。さらに、マルチ GPU 推論を実行していて、[DeepSpeed-Inference](https://www.deepspeed.ai/tutorials/inference-tutorial/)、[Accelerate](https://ハグフェイス.co/blog/bloom-inference-pytorch-scripts) は優れたパフォーマンスを提供するはずです。 + + +その他のパフォーマンス関連の簡単なメモ: +- 何かを最初からトレーニングしている場合は、常に 16 で割り切れる形状のテンソル (隠れたサイズなど) を使用するようにしてください。バッチ サイズについては、少なくとも 2 で割り切れるようにしてください。 GPU からさらに高いパフォーマンスを引き出したい場合は、ハードウェア固有の [波とタイルの量子化](https://developer.nvidia.com/blog/optimizing-gpu-performance-tensor-cores/) の可分性があります。 + +### Activation Checkpointing or Gradient Checkpointing + +アクティベーション チェックポイントと勾配チェックポイントは、同じ方法論を指す 2 つの異なる用語です。とてもややこしいですが、こんな感じです。 + +勾配チェックポイントを使用すると、速度を GPU メモリと引き換えにできます。これにより、GPU OOM を克服したり、バッチ サイズを増やすことができ、多くの場合、パフォーマンスの向上につながります。 + +HF Transformers モデルは、DeepSpeed のアクティベーション チェックポイントについて何も知らないため、DeepSpeed 構成ファイルでその機能を有効にしようとしても、何も起こりません。 + +したがって、この非常に有益な機能を活用するには 2 つの方法があります。 + +1. HF Transformers モデルを使用したい場合は、`model.gradient_checkpointing_enable()` を実行するか、HF トレーナーで `--gradient_checkpointing` を使用します。これにより、これが自動的に有効になります。そこで使われるのが `torch.utils.checkpoint` です。 +2. 
独自のモデルを作成し、DeepSpeed のアクティベーション チェックポイントを使用したい場合は、[そこで規定されている API](https://deepspeed.readthedocs.io/en/latest/activation-checkpointing.html) を使用できます。 HF Transformers モデリング コードを使用して、`torch.utils.checkpoint` を DeepSpeed の API に置き換えることもできます。後者は、順方向アクティベーションを再計算する代わりに CPU メモリにオフロードできるため、より柔軟です。 + +### Optimizer and Scheduler + +`offload_optimizer`を有効にしない限り、DeepSpeed スケジューラーと HuggingFace スケジューラーを組み合わせて使用​​できます。 +オプティマイザー (HuggingFace スケジューラーと DeepSpeed オプティマイザーの組み合わせを除く): + +| Combos | HF Scheduler | DS Scheduler | +|:-------------|:-------------|:-------------| +| HF Optimizer | Yes | Yes | +| DS Optimizer | No | Yes | + +`offload_optimizer`が有効な場合、CPU と +GPU 実装 (LAMB を除く)。 + + + + +#### Optimizer + +DeepSpeed の主なオプティマイザーは、Adam、AdamW、OneBitAdam、Lamb です。これらは ZeRO で徹底的にテストされており、 +したがって、使用することをお勧めします。ただし、他のオプティマイザを「torch」からインポートすることはできます。完全なドキュメントは [こちら](https://www.deepspeed.ai/docs/config-json/#optimizer-parameters) にあります。 + +設定ファイルで `optimizer` エントリを設定しない場合、[`Trainer`] は +自動的に`AdamW`に設定され、指定された値または次のコマンドラインのデフォルトが使用されます。 +引数: `--learning_rate`、`--adam_beta1`、`--adam_beta2`、`--adam_epsilon`、および `--weight_decay`。 + +以下は、`AdamW`の自動構成された`optimizer`エントリの例です。 + +```json +{ + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + } +} +``` + +コマンドライン引数によって構成ファイル内の値が設定されることに注意してください。これは 1 つあるためです +値の決定的なソースを提供し、たとえば学習率が次のように設定されている場合に、見つけにくいエラーを回避します。 +さまざまな場所でさまざまな価値観。コマンドラインのルール。オーバーライドされる値は次のとおりです。 + +- `lr` と `--learning_rate` の値 +- `betas` と `--adam_beta1 --adam_beta2` の値 +- `eps` と `--adam_epsilon` の値 +- `weight_decay` と `--weight_decay` の値 + +したがって、コマンドラインで共有ハイパーパラメータを調整することを忘れないでください。 + +値を明示的に設定することもできます。 + +```json +{ + "optimizer": { + "type": "AdamW", + "params": { + "lr": 0.001, + "betas": [0.8, 0.999], + "eps": 1e-8, + "weight_decay": 3e-7 + } + } +} +``` + +ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 +構成。 + +上記にリストされていない別のオプティマイザーを使用する場合は、トップレベルの構成に追加する必要があります。 + +```json +{ + "zero_allow_untested_optimizer": true +} +``` + +`AdamW`と同様に、公式にサポートされている他のオプティマイザーを構成できます。これらは異なる設定値を持つ可能性があることに注意してください。例えばAdam の場合は、`weight_decay`を`0.01`付近にする必要があります。 + +さらに、オフロードは、Deepspeed の CPU Adam オプティマイザーと併用すると最も効果的に機能します。 `deepspeed==0.8.3` なので、オフロードで別のオプティマイザーを使用したい場合は、以下も追加する必要があります。 + +```json +{ + "zero_force_ds_cpu_optimizer": false +} +``` + +最上位の構成に移行します。 + + + + +#### Scheduler + + +DeepSpeed は、`LRRangeTest`、`OneCycle`、`WarmupLR`、および`WarmupDecayLR`学習率スケジューラーをサポートしています。完全な +ドキュメントは[ここ](https://www.deepspeed.ai/docs/config-json/#scheduler-parameters)です。 + +ここでは、🤗 Transformers と DeepSpeed の間でスケジューラーが重複する場所を示します。 + +- `--lr_scheduler_type constant_with_warmup` 経由の `WarmupLR` +- `--lr_scheduler_type Linear` を介した `WarmupDecayLR`。これは `--lr_scheduler_type` のデフォルト値でもあります。 + したがって、スケジューラを設定しない場合、これがデフォルトで設定されるスケジューラになります。 + +設定ファイルで `scheduler` エントリを設定しない場合、[`Trainer`] は +`--lr_scheduler_type`、`--learning_rate`、および `--warmup_steps` または `--warmup_ratio` の値を設定します。 +🤗 それのトランスフォーマーバージョン。 + +以下は、`WarmupLR`の自動構成された`scheduler`エントリの例です。 + +```json +{ + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + } +} +``` + +*"auto"* が使用されているため、[`Trainer`] 引数は設定に正しい値を設定します。 +ファイル。これは、値の決定的なソースが 1 つあることと、たとえば次のような場合に見つけにくいエラーを避けるためです。 +学習率は、場所ごとに異なる値に設定されます。コマンドラインのルール。設定される値は次のとおりです。 + +- `warmup_min_lr` の値は `0` です。 +- `warmup_max_lr` と `--learning_rate` の値。 +- `warmup_num_steps` と `--warmup_steps` の値 (指定されている場合)。それ以外の場合は `--warmup_ratio` を使用します + 
トレーニング ステップの数を乗算し、切り上げます。 +- `total_num_steps` には `--max_steps` の値を指定するか、指定されていない場合は実行時に自動的に導出されます。 + 環境、データセットのサイズ、およびその他のコマンド ライン引数 ( + `WarmupDecayLR`)。 + +もちろん、構成値の一部またはすべてを引き継いで、自分で設定することもできます。 + +```json +{ + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 0.001, + "warmup_num_steps": 1000 + } + } +} +``` + +ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 +構成。 + +たとえば、`WarmupDecayLR`の場合は、次のエントリを使用できます。 + +```json +{ + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "last_batch_iteration": -1, + "total_num_steps": "auto", + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + } +} +``` + +`total_num_steps`、`warmup_max_lr`、`warmup_num_steps`、および `total_num_steps` はロード時に設定されます。 + + + +### fp32 Precision + +Deepspeed は、完全な fp32 と fp16 の混合精度をサポートします。 + +fp16 混合精度を使用すると、必要なメモリが大幅に削減され、速度が向上するため、 +使用しているモデルがこのトレーニング モードで適切に動作しない場合は、使用しない方がよいでしょう。通常これ +モデルが fp16 混合精度で事前トレーニングされていない場合に発生します (たとえば、これは bf16 で事前トレーニングされた場合によく発生します) +モデル)。このようなモデルでは、オーバーフローまたはアンダーフローが発生し、`NaN`損失が発生する可能性があります。これがあなたの場合は、使用したいと思うでしょう +完全な fp32 モード。デフォルトの fp16 混合精度モードを次のように明示的に無効にします。 + +```json +{ + "fp16": { + "enabled": false, + } +} +``` + +Ampere アーキテクチャ ベースの GPU を使用している場合、pytorch バージョン 1.7 以降は自動的に を使用するように切り替わります。 +一部の操作でははるかに効率的な tf32 形式を使用しますが、結果は依然として fp32 になります。詳細と +ベンチマークについては、[Ampere デバイス上の TensorFloat-32(TF32)](https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices) を参照してください。文書には以下が含まれます +何らかの理由でこの自動変換を使用したくない場合は、この自動変換を無効にする方法について説明します。 + +🤗 トレーナーでは、`--tf32` を使用して有効にするか、`--tf32 0` または `--no_tf32` を使用して無効にすることができます。デフォルトでは、PyTorch のデフォルトが使用されます。 + + + +### Automatic Mixed Precision + +pytorch のような AMP の方法または apex のような方法で自動混合精度を使用できます。 + +### fp16 + +fp16 (float16) を設定して pytorch AMP のようなモードを設定するには: + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + } +} +``` + +[`Trainer`] は、の値に基づいてそれを自動的に有効または無効にします。 +`args.fp16_backend`。残りの設定値はあなた次第です。 + +このモードは、`--fp16 --fp16_backend amp`または`--fp16_full_eval`コマンドライン引数が渡されると有効になります。 + +このモードを明示的に有効/無効にすることもできます。 + +```json +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + } +} +``` + +ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 +構成。 + +これが[ドキュメント](https://www.deepspeed.ai/docs/config-json/#fp16-training-options)です。 + +### BF16 + +fp16 の代わりに bf16 (bfloat16) が必要な場合は、次の構成セクションが使用されます。 + +```json +{ + "bf16": { + "enabled": "auto" + } +} +``` + +bf16 は fp32 と同じダイナミック レンジを備えているため、損失スケーリングは必要ありません。 + +このモードは、`--bf16` または `--bf16_full_eval` コマンドライン引数が渡されると有効になります。 + +このモードを明示的に有効/無効にすることもできます。 + +```json +{ + "bf16": { + "enabled": true + } +} +``` + + + +`deepspeed==0.6.0`の時点では、bf16 サポートは新しく実験的なものです。 + +bf16 が有効な状態で [勾配累積](#gradient-accumulation) を使用する場合は、bf16 で勾配が累積されることに注意する必要があります。この形式の精度が低いため、これは希望どおりではない可能性があります。損失のある蓄積につながります。 + +この問題を修正し、より高精度の `dtype` (fp16 または fp32) を使用するオプションを提供するための作業が行われています。 + + + + +### NCCL Collectives + +訓練体制の`dtype`があり、さまざまな削減や収集/分散操作などのコミュニケーション集合体に使用される別の`dtype`があります。 + +すべての収集/分散操作は、データが含まれているのと同じ `dtype` で実行されるため、bf16 トレーニング体制を使用している場合、データは bf16 で収集されます。収集は損失のない操作です。 + +さまざまなリデュース操作は非常に損失が大きい可能性があります。たとえば、複数の GPU 間で勾配が平均化される場合、通信が fp16 または bf16 で行われる場合、結果は損失が多くなる可能性があります。複数の数値を低精度でアドバタイズすると結果は正確ではないためです。 。 bf16 では fp16 よりも精度が低いため、さらにそうです。通常は非常に小さい grad を平均する際の損失が最小限に抑えられるため、fp16 
で十分であることがよくあります。したがって、デフォルトでは、半精度トレーニングでは fp16 がリダクション演算のデフォルトとして使用されます。ただし、この機能を完全に制御でき、必要に応じて小さなオーバーヘッドを追加して、リダクションが累積 dtype として fp32 を使用し、結果の準備ができた場合にのみ半精度 `dtype` にダウンキャストするようにすることもできます。でトレーニング中です。 + +デフォルトをオーバーライドするには、新しい構成エントリを追加するだけです。 + +```json +{ + "communication_data_type": "fp32" +} +``` + +この記事の執筆時点での有効な値は、"fp16"、"bfp16"、"fp32"です。 + +注: ステージ ゼロ 3 には、bf16 通信タイプに関するバグがあり、`deepspeed==0.8.1`で修正されました。 + +### apex + +apex AMP のようなモード セットを設定するには: + +```json +"amp": { + "enabled": "auto", + "opt_level": "auto" +} +``` + +[`Trainer`] は `args.fp16_backend` の値に基づいて自動的に設定します。 +`args.fp16_opt_level`。 + +このモードは、`--fp16 --fp16_backend apex --fp16_opt_level 01`コマンド ライン引数が渡されると有効になります。 + +このモードを明示的に構成することもできます。 + +```json +{ + "amp": { + "enabled": true, + "opt_level": "O1" + } +} +``` + +ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 +構成。 + +これは[ドキュメント](https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options)です。 + + + +### Batch Size + +バッチサイズを設定するには、次を使用します。 + + +```json +{ + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto" +} +``` + +[`Trainer`] は自動的に `train_micro_batch_size_per_gpu` を次の値に設定します。 +`args.per_device_train_batch_size`と`train_batch_size`を`args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps`に変更します。 + +値を明示的に設定することもできます。 + +```json +{ + "train_batch_size": 12, + "train_micro_batch_size_per_gpu": 4 +} +``` + +ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 +構成。 + + + +### Gradient Accumulation + +勾配累積セットを構成するには: + +```json +{ + "gradient_accumulation_steps": "auto" +} +``` + +[`Trainer`] は自動的にそれを `args.gradient_accumulation_steps` の値に設定します。 + +値を明示的に設定することもできます。 + +```json +{ + "gradient_accumulation_steps": 3 +} +``` + +ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 +構成。 + + + +### Gradient Clipping + +グラデーション グラデーション クリッピング セットを構成するには: + +```json +{ + "gradient_clipping": "auto" +} +``` + +[`Trainer`] は自動的にそれを `args.max_grad_norm` の値に設定します。 + +値を明示的に設定することもできます。 + +```json +{ + "gradient_clipping": 1.0 +} +``` + +ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 +構成。 + + + +### Getting The Model Weights Out + +トレーニングを継続し、DeepSpeed の使用を再開する限り、何も心配する必要はありません。 DeepSpeed ストア +fp32 のカスタム チェックポイント オプティマイザー ファイル内のマスターの重み。これは `global_step*/*optim_states.pt` (これは glob +パターン)、通常のチェックポイントの下に保存されます。 + +**FP16 ウェイト:** + +モデルを ZeRO-2 で保存すると、モデルの重みを含む通常の `pytorch_model.bin` ファイルが作成されますが、 +これらは重みの fp16 バージョンにすぎません。 + +ZeRO-3 では、モデルの重みが複数の GPU に分割されるため、状況はさらに複雑になります。 +したがって、fp16 を保存するための `Trainer` を取得するには、`"stage3_gather_16bit_weights_on_model_save": true` が必要です。 +重みのバージョン。この設定が`False`の場合、`pytorch_model.bin`は作成されません。これは、デフォルトで DeepSpeed の `state_dict` に実際の重みではなくプレースホルダーが含まれるためです。この `state_dict` を保存した場合、ロードし直すことはできません。 + +```json +{ + "zero_optimization": { + "stage3_gather_16bit_weights_on_model_save": true + } +} +``` + +**FP32 重量:** + +fp16 ウェイトはトレーニングを再開するのに適していますが、モデルの微調整が完了し、それを +[モデル ハブ](https://huggingface.co/models) にアクセスするか、fp32 を入手したいと思われる他の人に渡します。 +重み。これは大量のメモリを必要とするプロセスであるため、トレーニング中に行うべきではないのが理想的です。 +したがって、トレーニングの完了後にオフラインで実行するのが最適です。ただし、必要に応じて、空き CPU が十分にある場合は、 +同じトレーニング スクリプトで実行できることを思い出してください。次のセクションでは、両方のアプローチについて説明します。 + + +**ライブ FP32 ウェイト リカバリ:** + +モデルが大きく、トレーニングの終了時に空き CPU メモリがほとんど残っていない場合、このアプローチは機能しない可能性があります。 + +少なくとも 1 つのチェックポイントを保存していて、最新のチェックポイントを使用したい場合は、次の手順を実行できます。 + +```python +from transformers.trainer_utils import get_last_checkpoint +from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + +checkpoint_dir = 
get_last_checkpoint(trainer.args.output_dir) +fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) +``` + +`--load_best_model_at_end` class:*~transformers.TrainingArguments* 引数を使用している場合 (最適なモデルを追跡するため) +チェックポイント)、最初に最終モデルを明示的に保存してから、上記と同じことを行うことでトレーニングを終了できます。 + +```python +from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + +checkpoint_dir = os.path.join(trainer.args.output_dir, "checkpoint-final") +trainer.deepspeed.save_checkpoint(checkpoint_dir) +fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) +``` + + + +`load_state_dict_from_zero_checkpoint` が実行されると、`model` はもはや使用できなくなることに注意してください。 +同じアプリケーションの DeepSpeed コンテキスト。つまり、deepspeed エンジンを再初期化する必要があります。 +`model.load_state_dict(state_dict)` はそこからすべての DeepSpeed マジックを削除します。したがって、これは最後にのみ実行してください +トレーニングの様子。 + + + + +もちろん、class:*~transformers.Trainer* を使用する必要はなく、上記の例を独自のものに調整することができます。 +トレーナー。 + +何らかの理由でさらに改良したい場合は、重みの fp32 `state_dict` を抽出して適用することもできます。 +次の例に示すように、これらは自分で作成します。 + +```python +from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + +state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu +model = model.cpu() +model.load_state_dict(state_dict) +``` + +**オフライン FP32 ウェイト リカバリ:** + +DeepSpeed は特別な変換スクリプト`zero_to_fp32.py`を作成し、チェックポイントの最上位に配置します。 +フォルダ。このスクリプトを使用すると、いつでも重みを抽出できます。スクリプトはスタンドアロンなので、もう必要ありません。 +抽出を行うための設定ファイルまたは `Trainer` が必要です。 + +チェックポイント フォルダーが次のようになっているとします。 + +```bash +$ ls -l output_dir/checkpoint-1/ +-rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json +drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ +-rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest +-rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt +-rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin +-rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt +-rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json +-rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model +-rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json +-rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json +-rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin +-rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py* +``` + +この例では、DeepSpeed チェックポイント サブフォルダー *global_step1* が 1 つだけあります。したがって、FP32を再構築するには +重みを実行するだけです: + +```bash +python zero_to_fp32.py . 
pytorch_model.bin +``` + +これだよ。 `pytorch_model.bin`には、複数の GPU から統合された完全な fp32 モデルの重みが含まれるようになります。 + +スクリプトは、ZeRO-2 または ZeRO-3 チェックポイントを自動的に処理できるようになります。 + +`python zero_to_fp32.py -h` を実行すると、使用方法の詳細が表示されます。 + +スクリプトは、ファイル`latest`の内容を使用して deepspeed サブフォルダーを自動検出します。 +例には`global_step1`が含まれます。 + +注: 現在、スクリプトには最終的な fp32 モデルの重みの 2 倍の一般 RAM が必要です。 + +### ZeRO-3 と Infinity Nuances + +ZeRO-3 は、パラメータ シャーディング機能の点で ZeRO-2 とは大きく異なります。 + +ZeRO-Infinity は ZeRO-3 をさらに拡張し、NVMe メモリやその他の複数の速度とスケーラビリティの向上をサポートします。 + +モデルに特別な変更を加える必要がなくても正常に動作するようにあらゆる努力が払われてきましたが、特定の点では +状況によっては、次の情報が必要になる場合があります。 + +#### Constructing Massive Models + + +DeepSpeed/ZeRO-3 は、既存の RAM に収まらない可能性のある数兆のパラメータを持つモデルを処理できます。そのような場合、 +また、初期化をより高速に実行したい場合は、*deepspeed.zero.Init()* を使用してモデルを初期化します。 +コンテキスト マネージャー (関数デコレーターでもあります)。次のようになります。 + +```python +from transformers import T5ForConditionalGeneration, T5Config +import deepspeed + +with deepspeed.zero.Init(): + config = T5Config.from_pretrained("t5-small") + model = T5ForConditionalGeneration(config) +``` + +ご覧のとおり、これによりランダムに初期化されたモデルが得られます。 + +事前トレーニングされたモデルを使用したい場合、`model_class.from_pretrained` は次の条件を満たす限りこの機能を有効にします。 +`is_deepspeed_zero3_enabled()` は `True` を返します。これは現在、 +[`TrainingArguments`] オブジェクト (渡された DeepSpeed 構成ファイルに ZeRO-3 構成が含まれている場合) +セクション。したがって、呼び出しの前に** [`TrainingArguments`] オブジェクトを作成する必要があります。 +`from_pretrained`。考えられるシーケンスの例を次に示します。 + +```python +from transformers import AutoModel, Trainer, TrainingArguments + +training_args = TrainingArguments(..., deepspeed=ds_config) +model = AutoModel.from_pretrained("t5-small") +trainer = Trainer(model=model, args=training_args, ...) +``` + +公式のサンプル スクリプトを使用していて、コマンド ライン引数に `--deepspeed ds_config.json` が含まれている場合 +ZeRO-3 設定を有効にすると、これがサンプル スクリプトの記述方法であるため、すべてがすでに完了しています。 + +注: モデルの fp16 重みが単一の GPU のメモリに収まらない場合は、この機能を使用する必要があります。 + +この方法とその他の関連機能の詳細については、[大規模モデルの構築](https://deepspeed.readthedocs.io/en/latest/zero3.html#constructing-massive-models) を参照してください。 + +また、fp16 で事前訓練されたモデルをロードするときは、`from_pretrained` に使用するように指示する必要があります。 +`torch_dtype=torch.float16`。詳細については、[from_pretrained-torch-dtype](#from_pretrained-torch-dtype) を参照してください。 + +#### Gathering Parameters + +複数の GPU 上の ZeRO-3 では、現在の GPU のパラメータでない限り、単一の GPU がすべてのパラメータを持つことはありません。 +実行層。したがって、すべてのレイヤーのすべてのパラメーターに一度にアクセスする必要がある場合は、それを行うための特定の方法があります。 +ほとんどの場合は必要ありませんが、必要な場合は、[パラメータの収集](https://deepspeed.readthedocs.io/en/latest/zero3.html#manual-parameter-coordination) を参照してください。 + +ただし、いくつかの場所で内部的に使用しています。その例の 1 つは、事前トレーニングされたモデルの重みをロードするときです。 +`from_pretrained`。一度に 1 つのレイヤーをロードし、参加しているすべての GPU に即座に分割します。 +大規模なモデルでは、メモリの関係で、1 つの GPU にロードしてから複数の GPU に分散することはできません。 +制限。 + +また、ZeRO-3 では、独自のコードを作成し、次のようなモデル パラメーターの重みが発生するとします。 + +```python +tensor([1.0], device="cuda:0", dtype=torch.float16, requires_grad=True) +``` + +`tensor([1.])` にストレスを感じた場合、またはパラメータのサイズが `1` であるというエラーが発生した場合 +より大きな多次元形状。これは、パラメーターが分割されており、表示されるのは ZeRO-3 プレースホルダーであることを意味します。 + + + + +### ZeRO Inference + +ZeRO Inference は、ZeRO-3 Training と同じ構成を使用します。オプティマイザーとスケジューラーのセクションは必要ありません。で +実際、同じものをトレーニングと共有したい場合は、これらを設定ファイルに残すことができます。彼らはただそうなるだろう +無視されました。 + +それ以外の場合は、通常の [`TrainingArguments`] 引数を渡すだけです。例えば: + +```bash +deepspeed --num_gpus=2 your_program.py --do_eval --deepspeed ds_config.json +``` + +唯一重要なことは、ZeRO-2 には何の利点もないため、ZeRO-3 構成を使用する必要があるということです。 +ZeRO-3 のみがパラメーターのシャーディングを実行するのに対し、ZeRO-1 は勾配とオプティマイザーの状態をシャーディングするため、推論に役立ちます。 + +以下は、利用可能なすべての GPU をデプロイする DeepSpeed で`run_translation.py`を実行する例です。 + + +```bash +deepspeed examples/pytorch/translation/run_translation.py \ +--deepspeed 
tests/deepspeed/ds_config_zero3.json \ +--model_name_or_path t5-small --output_dir output_dir \ +--do_eval --max_eval_samples 50 --warmup_steps 50 \ +--max_source_length 128 --val_max_target_length 128 \ +--overwrite_output_dir --per_device_eval_batch_size 4 \ +--predict_with_generate --dataset_config "ro-en" --fp16 \ +--source_lang en --target_lang ro --dataset_name wmt16 \ +--source_prefix "translate English to Romanian: " +``` + +推論のために、オプティマイザーの状態と勾配によって使用される追加の大きなメモリは必要ないため、 +はるかに大きなバッチやシーケンス長を同じハードウェアに適合できる必要があります。 + +さらに、DeepSpeed は現在、Deepspeed-Inference と呼ばれる関連製品を開発していますが、これとは何の関係もありません。 +ZeRO テクノロジーに準拠していますが、代わりにテンソル並列処理を使用して、単一の GPU に収まらないモデルをスケーリングします。これは +現在開発中です。製品が完成したら統合を提供する予定です。 + + +### Memory Requirements + +Deepspeed ZeRO はメモリを CPU (および NVMe) にオフロードできるため、フレームワークは、使用されている GPU の数に応じて必要な CPU および GPU メモリの量を知ることができるユーティリティを提供します。 + +単一の GPU で `bigscience/T0_3B`を微調整するために必要なメモリの量を見積もってみましょう。 + +```bash +$ python -c 'from transformers import AutoModel; \ +from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ +model = AutoModel.from_pretrained("bigscience/T0_3B"); \ +estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)' +[...] +Estimated memory needed for params, optim states and gradients for a: +HW: Setup with 1 node, 1 GPU per node. +SW: Model with 2783M total params, 65M largest layer params. + per CPU | per GPU | Options + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 + 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 + 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 + 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 + 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0 +``` + +したがって、単一の 80 GB GPU で CPU オフロードなしで搭載することも、小さな 8 GB GPU でも最大 60 GB の CPU メモリが必要になることも可能です。 (これはパラメータ、オプティマイザの状態、および勾配のためのメモリであることに注意してください。cuda カーネル、アクティベーション、および一時メモリにはもう少し多くのメモリが必要です。) + +次に、コストと速度のトレードオフになります。より小さい GPU を購入またはレンタルした方が安くなります (Deepspeed ZeRO では複数の GPU を使用できるため、GPU の数を減らすこともできます)。しかし、その場合は遅くなります。そのため、何かを実行する速度を気にしなくても、速度の低下は GPU の使用時間に直接影響し、コストが増大するため、どれが最も効果的かを実験して比較してください。 + +十分な GPU メモリがある場合は、すべてが高速になるため、CPU/NVMe オフロードを必ず無効にしてください。 + +たとえば、2 つの GPU に対して同じことを繰り返してみましょう。 + +```bash +$ python -c 'from transformers import AutoModel; \ +from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ +model = AutoModel.from_pretrained("bigscience/T0_3B"); \ +estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)' +[...] +Estimated memory needed for params, optim states and gradients for a: +HW: Setup with 1 node, 2 GPUs per node. +SW: Model with 2783M total params, 65M largest layer params. 
+ per CPU | per GPU | Options + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 + 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=1 + 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=0 + 0.74GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=1 + 31.11GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=0 + +``` + +したがって、ここでは、CPU にオフロードせずに 2x 32GB 以上の GPU が必要になります。 + +詳細については、[メモリ推定ツール](https://deepspeed.readthedocs.io/en/latest/memory.html) を参照してください。 + + +### Filing Issues + + +ここでは、問題の真相をすぐに解明し、作業のブロックを解除できるよう、問題を報告する方法を説明します。 + +レポートには必ず次の内容を含めてください。 + +1. レポート内の完全な Deepspeed 構成ファイル + +2. [`Trainer`] を使用している場合はコマンドライン引数、または + トレーナーのセットアップを自分でスクリプト作成している場合は、[`TrainingArguments`] 引数。しないでください + [`TrainingArguments`] には無関係なエントリが多数含まれているため、ダンプします。 + +3. 次の出力: + + ```bash + python -c 'import torch; print(f"torch: {torch.__version__}")' + python -c 'import transformers; print(f"transformers: {transformers.__version__}")' + python -c 'import deepspeed; print(f"deepspeed: {deepspeed.__version__}")' + ``` + +4. 可能であれば、問題を再現できる Google Colab ノートブックへのリンクを含めてください。これを使えます + [ノートブック](https://github.com/stas00/porting/blob/master/transformers/deepspeed/DeepSpeed_on_colab_CLI.ipynb) として + 出発点。 + +5. 不可能でない限り、カスタムデータセットではなく、常に使用できる標準データセットを使用してください。 + +6. 可能であれば、既存の [サンプル](https://github.com/huggingface/transformers/tree/main/examples/pytorch) のいずれかを使用して問題を再現してみてください。 + +- Deepspeed が問題の原因ではないことがよくあります。 + + 提出された問題の一部は、Deepspeed とは無関係であることが判明しました。それは、Deepspeed がセットアップから削除された後です。 + 問題はまだ残っていた。 + + したがって、完全に明白でない場合は、DeepSpeed 関連の問題です。 + 例外が発生し、DeepSpeed モジュールが関係していることがわかります。まず、DeepSpeed を含まないセットアップを再テストしてください。 + 問題が解決しない場合にのみ、Deepspeed について言及し、必要な詳細をすべて提供してください。 + +- 問題が統合部分ではなく DeepSpeed コアにあることが明らかな場合は、問題を提出してください。 + [Deepspeed](https://github.com/microsoft/DeepSpeed/) を直接使用します。よくわからない場合でも、ご安心ください。 + どちらの問題トラッカーでも問題ありません。投稿されたらそれを判断し、次の場合は別の問題トラッカーにリダイレクトします。 + そうである必要がある。 + + +### Troubleshooting + +#### the `deepspeed` process gets killed at startup without a traceback + +`deepspeed`プロセスが起動時にトレースバックなしで強制終了された場合、それは通常、プログラムが試行したことを意味します。 +システムが持っているよりも多くの CPU メモリを割り当てるか、プロセスが割り当てを許可されているため、OS カーネルがそれを強制終了します。 +プロセス。これは、設定ファイルに `offload_optimizer` または `offload_param` が含まれている可能性が高いためです。 +どちらも`cpu`にオフロードするように設定されています。 NVMe を使用している場合は、次の環境で実行している場合は NVMe へのオフロードを試してください。 +ゼロ-3。 [特定のモデルに必要なメモリ量を見積もる]方法は次のとおりです(https://deepspeed.readthedocs.io/en/latest/memory.html)。 + +#### training and/or eval/predict loss is `NaN` + +これは、bf16 混合精度モードで事前トレーニングされたモデルを取得し、それを fp16 (混合精度の有無にかかわらず) で使用しようとした場合によく発生します。 TPU でトレーニングされたほとんどのモデル、および多くの場合、Google によってリリースされたモデルは、このカテゴリに分類されます (たとえば、ほぼすべての t5 ベースのモデル)。ここでの解決策は、ハードウェアがサポートしている場合 (TPU、Ampere GPU 以降)、fp32 または bf16 を使用することです。 + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + } +} +``` + +ログには、Deepspeed が次のように`OVERFLOW!`を報告していることがわかります。 + +``` +0%| | 0/189 [00:00=4.28` 以降、`synced_gpus` が明示的に指定されていない場合、これらの条件が検出されると自動的に `True` に設定されます。ただし、必要に応じて `synced_gpus` の値をオーバーライドすることもできます。 + +## Deepspeed 統合のテスト + +DeepSpeed 統合を含む PR を送信する場合は、CircleCI PR CI セットアップには GPU がないことに注意してください。そのため、GPU を必要とするテストは別の CI で毎晩のみ実行されます。したがって、PR で緑色の CI レポートが表示されても、DeepSpeed テストが合格したことを意味するわけではありません。 + +DeepSpeed テストを実行するには、少なくとも以下を実行してください。 + +``` +RUN_SLOW=1 pytest tests/deepspeed/test_deepspeed.py 
+``` + +モデリングまたは pytorch サンプル コードのいずれかを変更した場合は、Model Zoo テストも実行します。以下はすべての DeepSpeed テストを実行します。 + +``` +RUN_SLOW=1 pytest tests/deepspeed +``` + + +## Main DeepSpeed Resources + +- [プロジェクトの github](https://github.com/microsoft/deepspeed) +- [使用方法ドキュメント](https://www.deepspeed.ai/getting-started/) +- [API ドキュメント](https://deepspeed.readthedocs.io/en/latest/index.html) +- [ブログ投稿](https://www.microsoft.com/en-us/research/search/?q=deepspeed) + +論文: + +- [ZeRO: 兆パラメータ モデルのトレーニングに向けたメモリの最適化](https://arxiv.org/abs/1910.02054) +- [ZeRO-Offload: 10 億規模のモデル トレーニングの民主化](https://arxiv.org/abs/2101.06840) +- [ZeRO-Infinity: 極限スケールの深層学習のための GPU メモリの壁を打ち破る](https://arxiv.org/abs/2104.07857) + +最後に、HuggingFace [`Trainer`] は DeepSpeed のみを統合していることを覚えておいてください。 +DeepSpeed の使用に関して問題や質問がある場合は、[DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues) に問題を提出してください。 diff --git a/docs/source/ja/main_classes/feature_extractor.md b/docs/source/ja/main_classes/feature_extractor.md new file mode 100644 index 000000000000..a2bd8c59a84f --- /dev/null +++ b/docs/source/ja/main_classes/feature_extractor.md @@ -0,0 +1,41 @@ + + +# Feature Extractor + + +フィーチャーエクストラクタは、オーディオまたはビジョンモデルのための入力フィーチャーの準備を担当しています。これには、シーケンスからのフィーチャー抽出(例:オーディオファイルの前処理からLog-Melスペクトログラムフィーチャーへの変換)、画像からのフィーチャー抽出(例:画像ファイルのクロッピング)、またパディング、正規化、そしてNumpy、PyTorch、TensorFlowテンソルへの変換も含まれます。 + + +## FeatureExtractionMixin + +[[autodoc]] feature_extraction_utils.FeatureExtractionMixin + - from_pretrained + - save_pretrained + +## SequenceFeatureExtractor + +[[autodoc]] SequenceFeatureExtractor + - pad + +## BatchFeature + +[[autodoc]] BatchFeature + +## ImageFeatureExtractionMixin + +[[autodoc]] image_utils.ImageFeatureExtractionMixin diff --git a/docs/source/ja/main_classes/image_processor.md b/docs/source/ja/main_classes/image_processor.md new file mode 100644 index 000000000000..bfd33b83c2e5 --- /dev/null +++ b/docs/source/ja/main_classes/image_processor.md @@ -0,0 +1,33 @@ + + +# Image Processor + +画像プロセッサは、ビジョン モデルの入力特徴の準備とその出力の後処理を担当します。これには、サイズ変更、正規化、PyTorch、TensorFlow、Flax、Numpy テンソルへの変換などの変換が含まれます。ロジットをセグメンテーション マスクに変換するなど、モデル固有の後処理も含まれる場合があります。 + +## ImageProcessingMixin + +[[autodoc]] image_processing_utils.ImageProcessingMixin + - from_pretrained + - save_pretrained + +## BatchFeature + +[[autodoc]] BatchFeature + +## BaseImageProcessor + +[[autodoc]] image_processing_utils.BaseImageProcessor diff --git a/docs/source/ja/main_classes/keras_callbacks.md b/docs/source/ja/main_classes/keras_callbacks.md new file mode 100644 index 000000000000..ff28107a4345 --- /dev/null +++ b/docs/source/ja/main_classes/keras_callbacks.md @@ -0,0 +1,28 @@ + + +# Keras callbacks + +Keras を使用して Transformers モデルをトレーニングする場合、一般的な処理を自動化するために使用できるライブラリ固有のコールバックがいくつかあります。 +タスク: + +## KerasMetricCallback + +[[autodoc]] KerasMetricCallback + +## PushToHubCallback + +[[autodoc]] PushToHubCallback diff --git a/docs/source/ja/main_classes/logging.md b/docs/source/ja/main_classes/logging.md new file mode 100644 index 000000000000..4b4f4a2a3e09 --- /dev/null +++ b/docs/source/ja/main_classes/logging.md @@ -0,0 +1,121 @@ + + +# Logging + +🤗 Transformersには、ライブラリの詳細度を簡単に設定できる中央集中型のロギングシステムがあります。 + +現在、ライブラリのデフォルトの詳細度は「WARNING」です。 + +詳細度を変更するには、直接設定メソッドの1つを使用するだけです。例えば、詳細度をINFOレベルに変更する方法は以下の通りです。 + + +```python +import transformers + +transformers.logging.set_verbosity_info() +``` + + +環境変数 `TRANSFORMERS_VERBOSITY` を使用して、デフォルトの冗長性をオーバーライドすることもできます。設定できます +`debug`、`info`、`warning`、`error`、`critical` のいずれかに変更します。例えば: + +```bash +TRANSFORMERS_VERBOSITY=error ./myprogram.py +``` + + 
+さらに、一部の「警告」は環境変数を設定することで無効にできます。 +`TRANSFORMERS_NO_ADVISORY_WARNINGS` を *1* などの true 値に設定します。これにより、次を使用してログに記録される警告が無効になります。 +[`logger.warning_advice`]。例えば: + +```bash +TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py +``` + + +以下は、独自のモジュールまたはスクリプトでライブラリと同じロガーを使用する方法の例です。 + +```python +from transformers.utils import logging + +logging.set_verbosity_info() +logger = logging.get_logger("transformers") +logger.info("INFO") +logger.warning("WARN") +``` + +このロギング モジュールのすべてのメソッドは以下に文書化されています。主なメソッドは次のとおりです。 +[`logging.get_verbosity`] ロガーの現在の冗長レベルを取得します。 +[`logging.set_verbosity`] を使用して、冗長性を選択したレベルに設定します。順番に(少ないものから) +冗長から最も冗長まで)、それらのレベル (括弧内は対応する int 値) は次のとおりです。 + +- `transformers.logging.CRITICAL` または `transformers.logging.FATAL` (int 値、50): 最も多いもののみをレポートします。 + 重大なエラー。 +- `transformers.logging.ERROR` (int 値、40): エラーのみを報告します。 +- `transformers.logging.WARNING` または `transformers.logging.WARN` (int 値、30): エラーと + 警告。これはライブラリで使用されるデフォルトのレベルです。 +- `transformers.logging.INFO` (int 値、20): エラー、警告、および基本情報をレポートします。 +- `transformers.logging.DEBUG` (int 値、10): すべての情報をレポートします。 + +デフォルトでは、モデルのダウンロード中に「tqdm」進行状況バーが表示されます。 [`logging.disable_progress_bar`] および [`logging.enable_progress_bar`] を使用して、この動作を抑制または抑制解除できます。 + +## `logging` vs `warnings` + +Python には、よく組み合わせて使用​​される 2 つのロギング システムがあります。上で説明した `logging` と `warnings` です。 +これにより、特定のバケット内の警告をさらに分類できます (例: 機能またはパスの`FutureWarning`) +これはすでに非推奨になっており、`DeprecationWarning`は今後の非推奨を示します。 + +両方とも`transformers`ライブラリで使用します。 `logging`の`captureWarning`メソッドを活用して適応させて、 +これらの警告メッセージは、上記の冗長設定ツールによって管理されます。 + +それはライブラリの開発者にとって何を意味しますか?次のヒューリスティックを尊重する必要があります。 +- `warnings`は、ライブラリおよび`transformers`に依存するライブラリの開発者に優先されるべきです。 +- `logging`は、日常のプロジェクトでライブラリを使用するライブラリのエンドユーザーに使用する必要があります。 + +以下の`captureWarnings`メソッドのリファレンスを参照してください。 + +[[autodoc]] logging.captureWarnings + +## Base setters + +[[autodoc]] logging.set_verbosity_error + +[[autodoc]] logging.set_verbosity_warning + +[[autodoc]] logging.set_verbosity_info + +[[autodoc]] logging.set_verbosity_debug + +## Other functions + +[[autodoc]] logging.get_verbosity + +[[autodoc]] logging.set_verbosity + +[[autodoc]] logging.get_logger + +[[autodoc]] logging.enable_default_handler + +[[autodoc]] logging.disable_default_handler + +[[autodoc]] logging.enable_explicit_format + +[[autodoc]] logging.reset_format + +[[autodoc]] logging.enable_progress_bar + +[[autodoc]] logging.disable_progress_bar diff --git a/docs/source/ja/main_classes/model.md b/docs/source/ja/main_classes/model.md new file mode 100644 index 000000000000..916040c4a3b2 --- /dev/null +++ b/docs/source/ja/main_classes/model.md @@ -0,0 +1,160 @@ + + +# Models + +ベースクラスである [`PreTrainedModel`]、[`TFPreTrainedModel`]、[`FlaxPreTrainedModel`] は、モデルの読み込みと保存に関する共通のメソッドを実装しており、これはローカルのファイルやディレクトリから、またはライブラリが提供する事前学習モデル構成(HuggingFaceのAWS S3リポジトリからダウンロード)からモデルを読み込むために使用できます。 + +[`PreTrainedModel`] と [`TFPreTrainedModel`] は、次の共通のメソッドも実装しています: + +- 語彙に新しいトークンが追加された場合に、入力トークン埋め込みのリサイズを行う +- モデルのアテンションヘッドを刈り込む + +各モデルに共通するその他のメソッドは、[`~modeling_utils.ModuleUtilsMixin`](PyTorchモデル用)および[`~modeling_tf_utils.TFModuleUtilsMixin`](TensorFlowモデル用)で定義されており、テキスト生成の場合、[`~generation.GenerationMixin`](PyTorchモデル用)、[`~generation.TFGenerationMixin`](TensorFlowモデル用)、および[`~generation.FlaxGenerationMixin`](Flax/JAXモデル用)もあります。 + + +## PreTrainedModel + +[[autodoc]] PreTrainedModel + - push_to_hub + - all + + + + +### 大規模モデルの読み込み + +Transformers 4.20.0では、[`~PreTrainedModel.from_pretrained`] メソッドが再設計され、[Accelerate](https://huggingface.co/docs/accelerate/big_modeling) を使用して大規模モデルを扱うことが可能になりました。これには 
Accelerate >= 0.9.0 と PyTorch >= 1.9.0 が必要です。以前の方法でフルモデルを作成し、その後事前学習の重みを読み込む代わりに(これにはメモリ内のモデルサイズが2倍必要で、ランダムに初期化されたモデル用と重み用の2つが必要でした)、モデルを空の外殻として作成し、事前学習の重みが読み込まれるときにパラメーターを実体化するオプションが追加されました。 + +このオプションは `low_cpu_mem_usage=True` で有効にできます。モデルはまず空の重みを持つメタデバイス上に作成され、その後状態辞書が内部に読み込まれます(シャードされたチェックポイントの場合、シャードごとに読み込まれます)。この方法で使用される最大RAMは、モデルの完全なサイズだけです。 + + +```py +from transformers import AutoModelForSeq2SeqLM + +t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", low_cpu_mem_usage=True) +``` + +さらに、モデルが完全にRAMに収まらない場合(現時点では推論のみ有効)、異なるデバイスにモデルを直接配置できます。`device_map="auto"` を使用すると、Accelerateは各レイヤーをどのデバイスに配置するかを決定し、最速のデバイス(GPU)を最大限に活用し、残りの部分をCPU、あるいはGPU RAMが不足している場合はハードドライブにオフロードします。モデルが複数のデバイスに分割されていても、通常どおり実行されます。 + +`device_map` を渡す際、`low_cpu_mem_usage` は自動的に `True` に設定されるため、それを指定する必要はありません。 + + +```py +from transformers import AutoModelForSeq2SeqLM + +t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") +``` + +モデルがデバイス間でどのように分割されたかは、その `hf_device_map` 属性を見ることで確認できます: + +```py +t0pp.hf_device_map +``` + +```python out +{'shared': 0, + 'decoder.embed_tokens': 0, + 'encoder': 0, + 'decoder.block.0': 0, + 'decoder.block.1': 1, + 'decoder.block.2': 1, + 'decoder.block.3': 1, + 'decoder.block.4': 1, + 'decoder.block.5': 1, + 'decoder.block.6': 1, + 'decoder.block.7': 1, + 'decoder.block.8': 1, + 'decoder.block.9': 1, + 'decoder.block.10': 1, + 'decoder.block.11': 1, + 'decoder.block.12': 1, + 'decoder.block.13': 1, + 'decoder.block.14': 1, + 'decoder.block.15': 1, + 'decoder.block.16': 1, + 'decoder.block.17': 1, + 'decoder.block.18': 1, + 'decoder.block.19': 1, + 'decoder.block.20': 1, + 'decoder.block.21': 1, + 'decoder.block.22': 'cpu', + 'decoder.block.23': 'cpu', + 'decoder.final_layer_norm': 'cpu', + 'decoder.dropout': 'cpu', + 'lm_head': 'cpu'} +``` + +同じフォーマットに従って、独自のデバイスマップを作成することもできます(レイヤー名からデバイスへの辞書です)。モデルのすべてのパラメータを指定されたデバイスにマップする必要がありますが、1つのレイヤーが完全に同じデバイスにある場合、そのレイヤーのサブモジュールのすべてがどこに行くかの詳細を示す必要はありません。例えば、次のデバイスマップはT0ppに適しています(GPUメモリがある場合): + +```python +device_map = {"shared": 0, "encoder": 0, "decoder": 1, "lm_head": 1} +``` + +モデルのメモリへの影響を最小限に抑えるもう 1 つの方法は、低精度の dtype (`torch.float16` など) でモデルをインスタンス化するか、以下で説明する直接量子化手法を使用することです。 + +### Model Instantiation dtype + +Pytorch では、モデルは通常 `torch.float32` 形式でインスタンス化されます。これは、しようとすると問題になる可能性があります +重みが fp16 にあるモデルをロードすると、2 倍のメモリが必要になるためです。この制限を克服するには、次のことができます。 +`torch_dtype` 引数を使用して、目的の `dtype` を明示的に渡します。 + +```python +model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype=torch.float16) +``` +または、モデルを常に最適なメモリ パターンでロードしたい場合は、特別な値 `"auto"` を使用できます。 +そして、`dtype` はモデルの重みから自動的に導出されます。 + +```python +model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype="auto") +``` + +スクラッチからインスタンス化されたモデルには、どの `dtype` を使用するかを指示することもできます。 + +```python +config = T5Config.from_pretrained("t5") +model = AutoModel.from_config(config) +``` + +Pytorch の設計により、この機能は浮動小数点 dtype でのみ使用できます。 + +## ModuleUtilsMixin + +[[autodoc]] modeling_utils.ModuleUtilsMixin + +## TFPreTrainedModel + +[[autodoc]] TFPreTrainedModel + - push_to_hub + - all + +## TFModelUtilsMixin + +[[autodoc]] modeling_tf_utils.TFModelUtilsMixin + +## FlaxPreTrainedModel + +[[autodoc]] FlaxPreTrainedModel + - push_to_hub + - all + +## Pushing to the Hub + +[[autodoc]] utils.PushToHubMixin + +## Sharded checkpoints + +[[autodoc]] modeling_utils.load_sharded_checkpoint diff --git a/docs/source/ja/main_classes/onnx.md b/docs/source/ja/main_classes/onnx.md new file mode 100644 index 000000000000..f12427760976 --- /dev/null +++ 
b/docs/source/ja/main_classes/onnx.md @@ -0,0 +1,55 @@ + + +# Exporting 🤗 Transformers models to ONNX + +🤗 Transformers は `transformers.onnx` パッケージを提供します。 +設定オブジェクトを利用することで、モデルのチェックポイントをONNXグラフに変換することができます。 + +詳細は[ガイド](../serialization) を参照してください。 +を参照してください。 + +## ONNX Configurations + +以下の3つの抽象クラスを提供しています。 +エクスポートしたいモデルアーキテクチャのタイプに応じて、継承すべき3つの抽象クラスを提供します: + +* エンコーダーベースのモデルは [`~onnx.config.OnnxConfig`] を継承します。 +* デコーダーベースのモデルは [`~onnx.config.OnnxConfigWithPast`] を継承します。 +* エンコーダー・デコーダーモデルは [`~onnx.config.OnnxSeq2SeqConfigWithPast`] を継承しています。 + + +### OnnxConfig + +[[autodoc]] onnx.config.OnnxConfig + +### OnnxConfigWithPast + +[[autodoc]] onnx.config.OnnxConfigWithPast + +### OnnxSeq2SeqConfigWithPast + +[[autodoc]] onnx.config.OnnxSeq2SeqConfigWithPast + +## ONNX Features + +各 ONNX 構成は、次のことを可能にする一連の _機能_ に関連付けられています。 +さまざまなタイプのトポロジまたはタスクのモデルをエクスポートします。 + +### FeaturesManager + +[[autodoc]] onnx.features.FeaturesManager + diff --git a/docs/source/ja/main_classes/optimizer_schedules.md b/docs/source/ja/main_classes/optimizer_schedules.md new file mode 100644 index 000000000000..fc7a13b9df53 --- /dev/null +++ b/docs/source/ja/main_classes/optimizer_schedules.md @@ -0,0 +1,77 @@ + + +# Optimization + +`.optimization` モジュールは以下を提供します。 + +- モデルの微調整に使用できる重み減衰が修正されたオプティマイザー、および +- `_LRSchedule` から継承するスケジュール オブジェクトの形式のいくつかのスケジュール: +- 複数のバッチの勾配を累積するための勾配累積クラス + +## AdamW (PyTorch) + +[[autodoc]] AdamW + +## AdaFactor (PyTorch) + +[[autodoc]] Adafactor + +## AdamWeightDecay (TensorFlow) + +[[autodoc]] AdamWeightDecay + +[[autodoc]] create_optimizer + +## Schedules + +### Learning Rate Schedules (Pytorch) + +[[autodoc]] SchedulerType + +[[autodoc]] get_scheduler + +[[autodoc]] get_constant_schedule + +[[autodoc]] get_constant_schedule_with_warmup + + + +[[autodoc]] get_cosine_schedule_with_warmup + + + +[[autodoc]] get_cosine_with_hard_restarts_schedule_with_warmup + + + +[[autodoc]] get_linear_schedule_with_warmup + + + +[[autodoc]] get_polynomial_decay_schedule_with_warmup + +[[autodoc]] get_inverse_sqrt_schedule + +### Warmup (TensorFlow) + +[[autodoc]] WarmUp + +## Gradient Strategies + +### GradientAccumulator (TensorFlow) + +[[autodoc]] GradientAccumulator diff --git a/docs/source/ja/main_classes/output.md b/docs/source/ja/main_classes/output.md new file mode 100644 index 000000000000..7f906544a8f8 --- /dev/null +++ b/docs/source/ja/main_classes/output.md @@ -0,0 +1,321 @@ + + +# Model outputs + +すべてのモデルには、[`~utils.ModelOutput`] のサブクラスのインスタンスである出力があります。それらは +モデルによって返されるすべての情報を含むデータ構造ですが、タプルまたは +辞書。 + +これがどのようになるかを例で見てみましょう。 + +```python +from transformers import BertTokenizer, BertForSequenceClassification +import torch + +tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") +model = BertForSequenceClassification.from_pretrained("bert-base-uncased") + +inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") +labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 +outputs = model(**inputs, labels=labels) +``` + +`outputs`オブジェクトは[`~modeling_outputs.SequenceClassifierOutput`]である。 +これは、オプションで `loss`、`logits`、オプションで `hidden_states`、オプションで `attentions` 属性を持つことを意味します。 +オプションの `attentions` 属性を持つことを意味する。ここでは、`labels`を渡したので`loss`があるが、`hidden_states`と`attentions`はない。 +`output_hidden_states=True`や`output_attentions=True`を渡していないので、`hidden_states`と`attentions`はない。 +`output_attentions=True`を渡さなかったからだ。 + + + +`output_hidden_states=True`を渡すと、`outputs.hidden_states[-1]`が `outputs.last_hidden_states` と正確に一致することを期待するかもしれない。 +しかし、必ずしもそうなるとは限りません。モデルによっては、最後に隠された状態が返されたときに、正規化やその後の処理を適用するものもあります。 
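+
+これを確認するための最小限のスケッチを以下に示します。`bert-base-uncased` を例に使う点は説明用の選択であり、BERT の基本モデルではたまたま両者が一致しますが、一般には一致を前提にしないでください。
+
+```python
+from transformers import AutoModel, AutoTokenizer
+import torch
+
+tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+model = AutoModel.from_pretrained("bert-base-uncased")
+
+inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+outputs = model(**inputs, output_hidden_states=True)
+
+# hidden_states[-1] is the last layer's output; for this particular model it
+# happens to equal last_hidden_state, but other models may post-process it.
+print(torch.allclose(outputs.hidden_states[-1], outputs.last_hidden_state))
+```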
+ + + + +通常と同じように各属性にアクセスできます。その属性がモデルから返されなかった場合は、 +は `None`を取得します。ここで、たとえば`outputs.loss`はモデルによって計算された損失であり、`outputs.attentions`は +`None`。 + +`outputs`オブジェクトをタプルとして考える場合、`None`値を持たない属性のみが考慮されます。 +たとえば、ここには 2 つの要素、`loss`、次に`logits`があります。 + +```python +outputs[:2] +``` + +たとえば、タプル `(outputs.loss, Outputs.logits)` を返します。 + +`outputs`オブジェクトを辞書として考慮する場合、「None」を持たない属性のみが考慮されます。 +価値観。たとえば、ここには`loss` と `logits`という 2 つのキーがあります。 + +ここでは、複数のモデル タイプで使用される汎用モデルの出力を文書化します。具体的な出力タイプは次のとおりです。 +対応するモデルのページに記載されています。 + +## ModelOutput + +[[autodoc]] utils.ModelOutput + - to_tuple + +## BaseModelOutput + +[[autodoc]] modeling_outputs.BaseModelOutput + +## BaseModelOutputWithPooling + +[[autodoc]] modeling_outputs.BaseModelOutputWithPooling + +## BaseModelOutputWithCrossAttentions + +[[autodoc]] modeling_outputs.BaseModelOutputWithCrossAttentions + +## BaseModelOutputWithPoolingAndCrossAttentions + +[[autodoc]] modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions + +## BaseModelOutputWithPast + +[[autodoc]] modeling_outputs.BaseModelOutputWithPast + +## BaseModelOutputWithPastAndCrossAttentions + +[[autodoc]] modeling_outputs.BaseModelOutputWithPastAndCrossAttentions + +## Seq2SeqModelOutput + +[[autodoc]] modeling_outputs.Seq2SeqModelOutput + +## CausalLMOutput + +[[autodoc]] modeling_outputs.CausalLMOutput + +## CausalLMOutputWithCrossAttentions + +[[autodoc]] modeling_outputs.CausalLMOutputWithCrossAttentions + +## CausalLMOutputWithPast + +[[autodoc]] modeling_outputs.CausalLMOutputWithPast + +## MaskedLMOutput + +[[autodoc]] modeling_outputs.MaskedLMOutput + +## Seq2SeqLMOutput + +[[autodoc]] modeling_outputs.Seq2SeqLMOutput + +## NextSentencePredictorOutput + +[[autodoc]] modeling_outputs.NextSentencePredictorOutput + +## SequenceClassifierOutput + +[[autodoc]] modeling_outputs.SequenceClassifierOutput + +## Seq2SeqSequenceClassifierOutput + +[[autodoc]] modeling_outputs.Seq2SeqSequenceClassifierOutput + +## MultipleChoiceModelOutput + +[[autodoc]] modeling_outputs.MultipleChoiceModelOutput + +## TokenClassifierOutput + +[[autodoc]] modeling_outputs.TokenClassifierOutput + +## QuestionAnsweringModelOutput + +[[autodoc]] modeling_outputs.QuestionAnsweringModelOutput + +## Seq2SeqQuestionAnsweringModelOutput + +[[autodoc]] modeling_outputs.Seq2SeqQuestionAnsweringModelOutput + +## Seq2SeqSpectrogramOutput + +[[autodoc]] modeling_outputs.Seq2SeqSpectrogramOutput + +## SemanticSegmenterOutput + +[[autodoc]] modeling_outputs.SemanticSegmenterOutput + +## ImageClassifierOutput + +[[autodoc]] modeling_outputs.ImageClassifierOutput + +## ImageClassifierOutputWithNoAttention + +[[autodoc]] modeling_outputs.ImageClassifierOutputWithNoAttention + +## DepthEstimatorOutput + +[[autodoc]] modeling_outputs.DepthEstimatorOutput + +## Wav2Vec2BaseModelOutput + +[[autodoc]] modeling_outputs.Wav2Vec2BaseModelOutput + +## XVectorOutput + +[[autodoc]] modeling_outputs.XVectorOutput + +## Seq2SeqTSModelOutput + +[[autodoc]] modeling_outputs.Seq2SeqTSModelOutput + +## Seq2SeqTSPredictionOutput + +[[autodoc]] modeling_outputs.Seq2SeqTSPredictionOutput + +## SampleTSPredictionOutput + +[[autodoc]] modeling_outputs.SampleTSPredictionOutput + +## TFBaseModelOutput + +[[autodoc]] modeling_tf_outputs.TFBaseModelOutput + +## TFBaseModelOutputWithPooling + +[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPooling + +## TFBaseModelOutputWithPoolingAndCrossAttentions + +[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions + +## TFBaseModelOutputWithPast + +[[autodoc]] 
modeling_tf_outputs.TFBaseModelOutputWithPast + +## TFBaseModelOutputWithPastAndCrossAttentions + +[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions + +## TFSeq2SeqModelOutput + +[[autodoc]] modeling_tf_outputs.TFSeq2SeqModelOutput + +## TFCausalLMOutput + +[[autodoc]] modeling_tf_outputs.TFCausalLMOutput + +## TFCausalLMOutputWithCrossAttentions + +[[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions + +## TFCausalLMOutputWithPast + +[[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithPast + +## TFMaskedLMOutput + +[[autodoc]] modeling_tf_outputs.TFMaskedLMOutput + +## TFSeq2SeqLMOutput + +[[autodoc]] modeling_tf_outputs.TFSeq2SeqLMOutput + +## TFNextSentencePredictorOutput + +[[autodoc]] modeling_tf_outputs.TFNextSentencePredictorOutput + +## TFSequenceClassifierOutput + +[[autodoc]] modeling_tf_outputs.TFSequenceClassifierOutput + +## TFSeq2SeqSequenceClassifierOutput + +[[autodoc]] modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput + +## TFMultipleChoiceModelOutput + +[[autodoc]] modeling_tf_outputs.TFMultipleChoiceModelOutput + +## TFTokenClassifierOutput + +[[autodoc]] modeling_tf_outputs.TFTokenClassifierOutput + +## TFQuestionAnsweringModelOutput + +[[autodoc]] modeling_tf_outputs.TFQuestionAnsweringModelOutput + +## TFSeq2SeqQuestionAnsweringModelOutput + +[[autodoc]] modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput + +## FlaxBaseModelOutput + +[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutput + +## FlaxBaseModelOutputWithPast + +[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPast + +## FlaxBaseModelOutputWithPooling + +[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPooling + +## FlaxBaseModelOutputWithPastAndCrossAttentions + +[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions + +## FlaxSeq2SeqModelOutput + +[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqModelOutput + +## FlaxCausalLMOutputWithCrossAttentions + +[[autodoc]] modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions + +## FlaxMaskedLMOutput + +[[autodoc]] modeling_flax_outputs.FlaxMaskedLMOutput + +## FlaxSeq2SeqLMOutput + +[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqLMOutput + +## FlaxNextSentencePredictorOutput + +[[autodoc]] modeling_flax_outputs.FlaxNextSentencePredictorOutput + +## FlaxSequenceClassifierOutput + +[[autodoc]] modeling_flax_outputs.FlaxSequenceClassifierOutput + +## FlaxSeq2SeqSequenceClassifierOutput + +[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput + +## FlaxMultipleChoiceModelOutput + +[[autodoc]] modeling_flax_outputs.FlaxMultipleChoiceModelOutput + +## FlaxTokenClassifierOutput + +[[autodoc]] modeling_flax_outputs.FlaxTokenClassifierOutput + +## FlaxQuestionAnsweringModelOutput + +[[autodoc]] modeling_flax_outputs.FlaxQuestionAnsweringModelOutput + +## FlaxSeq2SeqQuestionAnsweringModelOutput + +[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput diff --git a/docs/source/ja/main_classes/pipelines.md b/docs/source/ja/main_classes/pipelines.md new file mode 100644 index 000000000000..321659de95ba --- /dev/null +++ b/docs/source/ja/main_classes/pipelines.md @@ -0,0 +1,494 @@ + + +# Pipelines + +パイプラインは、推論にモデルを使うための簡単で優れた方法である。パイプラインは、複雑なコードのほとんどを抽象化したオブジェクトです。 +パイプラインは、ライブラリから複雑なコードのほとんどを抽象化したオブジェクトで、名前付き固有表現認識、マスク言語モデリング、感情分析、特徴抽出、質問応答などのタスクに特化したシンプルなAPIを提供します。 +Recognition、Masked Language Modeling、Sentiment Analysis、Feature Extraction、Question Answeringなどのタスクに特化したシンプルなAPIを提供します。以下を参照のこと。 +[タスク概要](../task_summary)を参照してください。 + + 
+パイプラインの抽象化には2つのカテゴリーがある: + +- [`pipeline`] は、他のすべてのパイプラインをカプセル化する最も強力なオブジェクトです。 +- タスク固有のパイプラインは、[オーディオ](#audio)、[コンピューター ビジョン](#computer-vision)、[自然言語処理](#natural-language-processing)、および [マルチモーダル](#multimodal) タスクで使用できます。 + +## The pipeline abstraction + +*パイプライン* 抽象化は、他のすべての利用可能なパイプラインのラッパーです。他のものと同様にインスタンス化されます +パイプラインですが、さらなる生活の質を提供できます。 + +1 つの項目に対する単純な呼び出し: + +```python +>>> pipe = pipeline("text-classification") +>>> pipe("This restaurant is awesome") +[{'label': 'POSITIVE', 'score': 0.9998743534088135}] +``` + +[ハブ](https://huggingface.co) の特定のモデルを使用したい場合は、モデルがオンになっている場合はタスクを無視できます。 +ハブはすでにそれを定義しています。 + +```python +>>> pipe = pipeline(model="roberta-large-mnli") +>>> pipe("This restaurant is awesome") +[{'label': 'NEUTRAL', 'score': 0.7313136458396912}] +``` + +多くの項目に対してパイプラインを呼び出すには、*list* を使用してパイプラインを呼び出すことができます。 + +```python +>>> pipe = pipeline("text-classification") +>>> pipe(["This restaurant is awesome", "This restaurant is awful"]) +[{'label': 'POSITIVE', 'score': 0.9998743534088135}, + {'label': 'NEGATIVE', 'score': 0.9996669292449951}] +``` + +完全なデータセットを反復するには、`Dataset`を直接使用することをお勧めします。これは、割り当てる必要がないことを意味します +データセット全体を一度に処理することも、自分でバッチ処理を行う必要もありません。これはカスタムループと同じくらい速く動作するはずです。 +GPU。それが問題でない場合は、ためらわずに問題を作成してください。 + +```python +import datasets +from transformers import pipeline +from transformers.pipelines.pt_utils import KeyDataset +from tqdm.auto import tqdm + +pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0) +dataset = datasets.load_dataset("superb", name="asr", split="test") + +# KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item +# as we're not interested in the *target* part of the dataset. For sentence pair use KeyPairDataset +for out in tqdm(pipe(KeyDataset(dataset, "file"))): + print(out) + # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} + # {"text": ....} + # .... +``` + +使いやすくするために、ジェネレーターを使用することもできます。 + +```python +from transformers import pipeline + +pipe = pipeline("text-classification") + + +def data(): + while True: + # This could come from a dataset, a database, a queue or HTTP request + # in a server + # Caveat: because this is iterative, you cannot use `num_workers > 1` variable + # to use multiple threads to preprocess data. You can still have 1 thread that + # does the preprocessing while the main runs the big inference + yield "This is a test" + + +for out in pipe(data()): + print(out) + # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} + # {"text": ....} + # .... 
+``` + +[[autodoc]] pipeline + + +## Pipeline batching + + +すべてのパイプラインでバッチ処理を使用できます。これはうまくいきます +パイプラインがストリーミング機能を使用するときは常に (つまり、リスト、`dataset`、または `generator`を渡すとき)。 + +```python +from transformers import pipeline +from transformers.pipelines.pt_utils import KeyDataset +import datasets + +dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") +pipe = pipeline("text-classification", device=0) +for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): + print(out) + # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] + # Exactly the same output as before, but the content are passed + # as batches to the model +``` + + + + +ただし、これによってパフォーマンスが自動的に向上するわけではありません。状況に応じて、10 倍の高速化または 5 倍の低速化のいずれかになります。 +ハードウェア、データ、使用されている実際のモデルについて。 + +主に高速化である例: + + + + +```python +from transformers import pipeline +from torch.utils.data import Dataset +from tqdm.auto import tqdm + +pipe = pipeline("text-classification", device=0) + + +class MyDataset(Dataset): + def __len__(self): + return 5000 + + def __getitem__(self, i): + return "This is a test" + + +dataset = MyDataset() + +for batch_size in [1, 8, 64, 256]: + print("-" * 30) + print(f"Streaming batch_size={batch_size}") + for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): + pass +``` + +``` +# On GTX 970 +------------------------------ +Streaming no batching +100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26<00:00, 187.52it/s] +------------------------------ +Streaming batch_size=8 +100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04<00:00, 1205.95it/s] +------------------------------ +Streaming batch_size=64 +100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02<00:00, 2478.24it/s] +------------------------------ +Streaming batch_size=256 +100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01<00:00, 2554.43it/s] +(diminishing returns, saturated the GPU) +``` + +最も速度が低下する例: + + +```python +class MyDataset(Dataset): + def __len__(self): + return 5000 + + def __getitem__(self, i): + if i % 64 == 0: + n = 100 + else: + n = 1 + return "This is a test" * n +``` + +これは、他の文に比べて非常に長い文が時折あります。その場合、**全体**のバッチは 400 である必要があります。 +トークンが長いため、バッチ全体が [64, 4] ではなく [64, 400] になり、速度が大幅に低下します。さらに悪いことに、 +バッチが大きくなると、プログラムは単純にクラッシュします。 + +``` +------------------------------ +Streaming no batching +100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:05<00:00, 183.69it/s] +------------------------------ +Streaming batch_size=8 +100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:03<00:00, 265.74it/s] +------------------------------ +Streaming batch_size=64 +100%|██████████████████████████████████████████████████████████████████████| 1000/1000 [00:26<00:00, 37.80it/s] +------------------------------ +Streaming batch_size=256 + 0%| | 0/1000 [00:00 + for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): +.... + q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) +RuntimeError: CUDA out of memory. 
Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch) +``` + +この問題に対する適切な (一般的な) 解決策はなく、使用できる距離はユースケースによって異なる場合があります。のルール +親指: + +ユーザーにとっての経験則は次のとおりです。 + +- **ハードウェアを使用して、負荷に対するパフォーマンスを測定します。測って、測って、測り続ける。実数というのは、 + 進むべき唯一の方法。** +- レイテンシに制約がある場合 (実際の製品が推論を実行している場合)、バッチ処理を行わないでください。 +- CPU を使用している場合は、バッチ処理を行わないでください。 +- GPU でスループットを使用している場合 (大量の静的データでモデルを実行したい場合)、次のようにします。 + + - sequence_length (「自然な」データ) のサイズについてまったくわからない場合は、デフォルトではバッチ処理や測定を行わず、 + 暫定的に追加してみます。失敗した場合に回復するために OOM チェックを追加します (失敗した場合は、ある時点で回復します)。 + sequence_length を制御します。) + - sequence_length が非常に規則的である場合、バッチ処理は非常に興味深いものとなる可能性が高く、測定してプッシュしてください。 + OOM が発生するまで続けます。 + - GPU が大きいほど、バッチ処理がより興味深いものになる可能性が高くなります。 +- バッチ処理を有効にしたらすぐに、OOM を適切に処理できることを確認してください。 + + +## Pipeline chunk batching + +`zero-shot-classification` と `question-answering` は、単一の入力で結果が得られる可能性があるという意味で、少し特殊です。 +モデルの複数の前方パス。通常の状況では、これにより `batch_size` 引数に関する問題が発生します。 + +この問題を回避するために、これらのパイプラインはどちらも少し特殊になっており、代わりに `ChunkPipeline` になっています。 +通常の `Pipeline`。要するに: + +```python +preprocessed = pipe.preprocess(inputs) +model_outputs = pipe.forward(preprocessed) +outputs = pipe.postprocess(model_outputs) +``` + +今は次のようになります: + +```python +all_model_outputs = [] +for preprocessed in pipe.preprocess(inputs): + model_outputs = pipe.forward(preprocessed) + all_model_outputs.append(model_outputs) +outputs = pipe.postprocess(all_model_outputs) +``` + +パイプラインは以下で使用されるため、これはコードに対して非常に透過的である必要があります。 +同じ方法。 + +パイプラインはバッチを自動的に処理できるため、これは簡略化されたビューです。気にする必要はないという意味です +入力が実際にトリガーする前方パスの数については、`batch_size` を最適化できます。 +入力とは独立して。前のセクションの注意事項が引き続き適用されます。 + +## Pipeline custom code + +特定のパイプラインをオーバーライドする場合。 + +目の前のタスクに関する問題を作成することを躊躇しないでください。パイプラインの目標は、使いやすく、ほとんどのユーザーをサポートすることです。 +したがって、`transformers`があなたのユースケースをサポートする可能性があります。 + + +単純に試してみたい場合は、次のことができます。 + +- 選択したパイプラインをサブクラス化します + +```python +class MyPipeline(TextClassificationPipeline): + def postprocess(): + # Your code goes here + scores = scores * 100 + # And here + + +my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) 
+# or if you use *pipeline* function, then: +my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) +``` + +これにより、必要なカスタム コードをすべて実行できるようになります。 + +## Implementing a pipeline + +[Implementing a new pipeline](../add_new_pipeline) + +## Audio + +オーディオ タスクに使用できるパイプラインには次のものがあります。 + +### AudioClassificationPipeline + +[[autodoc]] AudioClassificationPipeline + - __call__ + - all + +### AutomaticSpeechRecognitionPipeline + +[[autodoc]] AutomaticSpeechRecognitionPipeline + - __call__ + - all + +### TextToAudioPipeline + +[[autodoc]] TextToAudioPipeline + - __call__ + - all + + +### ZeroShotAudioClassificationPipeline + +[[autodoc]] ZeroShotAudioClassificationPipeline + - __call__ + - all + +## Computer vision + +コンピューター ビジョン タスクに使用できるパイプラインには次のものがあります。 + +### DepthEstimationPipeline +[[autodoc]] DepthEstimationPipeline + - __call__ + - all + +### ImageClassificationPipeline + +[[autodoc]] ImageClassificationPipeline + - __call__ + - all + +### ImageSegmentationPipeline + +[[autodoc]] ImageSegmentationPipeline + - __call__ + - all + +### ImageToImagePipeline + +[[autodoc]] ImageToImagePipeline + - __call__ + - all + +### ObjectDetectionPipeline + +[[autodoc]] ObjectDetectionPipeline + - __call__ + - all + +### VideoClassificationPipeline + +[[autodoc]] VideoClassificationPipeline + - __call__ + - all + +### ZeroShotImageClassificationPipeline + +[[autodoc]] ZeroShotImageClassificationPipeline + - __call__ + - all + +### ZeroShotObjectDetectionPipeline + +[[autodoc]] ZeroShotObjectDetectionPipeline + - __call__ + - all + +## Natural Language Processing + +自然言語処理タスクに使用できるパイプラインには次のものがあります。 + +### ConversationalPipeline + +[[autodoc]] Conversation + +[[autodoc]] ConversationalPipeline + - __call__ + - all + +### FillMaskPipeline + +[[autodoc]] FillMaskPipeline + - __call__ + - all + +### NerPipeline + +[[autodoc]] NerPipeline + +詳細については、[`TokenClassificationPipeline`] を参照してください。 + +### QuestionAnsweringPipeline + +[[autodoc]] QuestionAnsweringPipeline + - __call__ + - all + +### SummarizationPipeline + +[[autodoc]] SummarizationPipeline + - __call__ + - all + +### TableQuestionAnsweringPipeline + +[[autodoc]] TableQuestionAnsweringPipeline + - __call__ + +### TextClassificationPipeline + +[[autodoc]] TextClassificationPipeline + - __call__ + - all + +### TextGenerationPipeline + +[[autodoc]] TextGenerationPipeline + - __call__ + - all + +### Text2TextGenerationPipeline + +[[autodoc]] Text2TextGenerationPipeline + - __call__ + - all + +### TokenClassificationPipeline + +[[autodoc]] TokenClassificationPipeline + - __call__ + - all + +### TranslationPipeline + +[[autodoc]] TranslationPipeline + - __call__ + - all + +### ZeroShotClassificationPipeline + +[[autodoc]] ZeroShotClassificationPipeline + - __call__ + - all + +## Multimodal + +マルチモーダル タスクに使用できるパイプラインには次のものがあります。 + +### DocumentQuestionAnsweringPipeline + +[[autodoc]] DocumentQuestionAnsweringPipeline + - __call__ + - all + +### FeatureExtractionPipeline + +[[autodoc]] FeatureExtractionPipeline + - __call__ + - all + +### ImageToTextPipeline + +[[autodoc]] ImageToTextPipeline + - __call__ + - all + +### VisualQuestionAnsweringPipeline + +[[autodoc]] VisualQuestionAnsweringPipeline + - __call__ + - all + +## Parent class: `Pipeline` + +[[autodoc]] Pipeline diff --git a/docs/source/ja/main_classes/processors.md b/docs/source/ja/main_classes/processors.md new file mode 100644 index 000000000000..bd459758aa17 --- /dev/null +++ b/docs/source/ja/main_classes/processors.md @@ -0,0 +1,160 @@ + + +# Processors + +Transformers ライブラリでは、プロセッサは 2 
つの異なる意味を持ちます。 +- [Wav2Vec2](../model_doc/wav2vec2) などのマルチモーダル モデルの入力を前処理するオブジェクト (音声とテキスト) + または [CLIP](../model_doc/clip) (テキストとビジョン) +- 古いバージョンのライブラリで GLUE または SQUAD のデータを前処理するために使用されていたオブジェクトは非推奨になりました。 + +## Multi-modal processors + +マルチモーダル モデルでは、オブジェクトが複数のモダリティ (テキスト、 +視覚と音声)。これは、2 つ以上の処理オブジェクトをグループ化するプロセッサーと呼ばれるオブジェクトによって処理されます。 +トークナイザー (テキスト モダリティ用)、画像プロセッサー (視覚用)、特徴抽出器 (オーディオ用) など。 + +これらのプロセッサは、保存およびロード機能を実装する次の基本クラスを継承します。 + +[[autodoc]] ProcessorMixin + +## Deprecated processors + +すべてのプロセッサは、同じアーキテクチャに従っています。 +[`~data.processors.utils.DataProcessor`]。プロセッサは次のリストを返します。 +[`~data.processors.utils.InputExample`]。これら +[`~data.processors.utils.InputExample`] は次のように変換できます。 +[`~data.processors.utils.Input features`] をモデルにフィードします。 + +[[autodoc]] data.processors.utils.DataProcessor + +[[autodoc]] data.processors.utils.InputExample + +[[autodoc]] data.processors.utils.InputFeatures + +## GLUE + +[一般言語理解評価 (GLUE)](https://gluebenchmark.com/) は、 +既存の NLU タスクの多様なセットにわたるモデルのパフォーマンス。紙と同時発売された [GLUE: A +自然言語理解のためのマルチタスクベンチマークおよび分析プラットフォーム](https://openreview.net/pdf?id=rJ4km2R5t7) + +このライブラリは、MRPC、MNLI、MNLI (不一致)、CoLA、SST2、STSB、 +QQP、QNLI、RTE、WNLI。 + +それらのプロセッサは次のとおりです。 + +- [`~data.processors.utils.MrpcProcessor`] +- [`~data.processors.utils.MnliProcessor`] +- [`~data.processors.utils.MnliMismatchedProcessor`] +- [`~data.processors.utils.Sst2Processor`] +- [`~data.processors.utils.StsbProcessor`] +- [`~data.processors.utils.QqpProcessor`] +- [`~data.processors.utils.QnliProcessor`] +- [`~data.processors.utils.RteProcessor`] +- [`~data.processors.utils.WnliProcessor`] + + +さらに、次のメソッドを使用して、データ ファイルから値をロードし、それらをリストに変換することができます。 +[`~data.processors.utils.InputExample`]。 + +[[autodoc]] data.processors.glue.glue_convert_examples_to_features + +## XNLI + +[クロスリンガル NLI コーパス (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) は、 +言語を超えたテキスト表現の品質。 XNLI は、[*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/) に基づくクラウドソースのデータセットです。テキストのペアには、15 個のテキスト含意アノテーションがラベル付けされています。 +さまざまな言語 (英語などの高リソース言語とスワヒリ語などの低リソース言語の両方を含む)。 + +論文 [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053) と同時にリリースされました。 + +このライブラリは、XNLI データをロードするプロセッサをホストします。 + +- [`~data.processors.utils.XnliProcessor`] + +テストセットにはゴールドラベルが付いているため、評価はテストセットで行われますのでご了承ください。 + +これらのプロセッサを使用する例は、[run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/text-classification/run_xnli.py) スクリプトに示されています。 + +## SQuAD + +[The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) は、次のベンチマークです。 +質問応答に関するモデルのパフォーマンスを評価します。 v1.1 と v2.0 の 2 つのバージョンが利用可能です。最初のバージョン +(v1.1) は、論文 [SQuAD: 100,000+ question for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) とともにリリースされました。 2 番目のバージョン (v2.0) は、論文 [Know What You Don't と同時にリリースされました。 +知っておくべき: SQuAD の答えられない質問](https://arxiv.org/abs/1806.03822)。 + +このライブラリは、次の 2 つのバージョンのそれぞれのプロセッサをホストします。 + +### Processors + +それらのプロセッサは次のとおりです。 + +- [`~data.processors.utils.SquadV1Processor`] +- [`~data.processors.utils.SquadV2Processor`] + +どちらも抽象クラス [`~data.processors.utils.SquadProcessor`] を継承しています。 + +[[autodoc]] data.processors.squad.SquadProcessor + - all + +さらに、次のメソッドを使用して、SQuAD の例を次の形式に変換できます。 +モデルの入力として使用できる [`~data.processors.utils.SquadFeatures`]。 + +[[autodoc]] data.processors.squad.squad_convert_examples_to_features + +これらのプロセッサと前述の方法は、データを含むファイルだけでなく、 +*tensorflow_datasets* パッケージ。以下に例を示します。 + +### Example usage + +以下にプロセッサを使用した例と、データ ファイルを使用した変換方法を示します。 + +```python +# Loading a V2 processor +processor = 
SquadV2Processor() +examples = processor.get_dev_examples(squad_v2_data_dir) + +# Loading a V1 processor +processor = SquadV1Processor() +examples = processor.get_dev_examples(squad_v1_data_dir) + +features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=args.doc_stride, + max_query_length=max_query_length, + is_training=not evaluate, +) +``` + +*tensorflow_datasets* の使用は、データ ファイルを使用するのと同じくらい簡単です。 + +```python +# tensorflow_datasets only handle Squad V1. +tfds_examples = tfds.load("squad") +examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) + +features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=args.doc_stride, + max_query_length=max_query_length, + is_training=not evaluate, +) +``` + +これらのプロセッサを使用する別の例は、[run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) スクリプトに示されています。 diff --git a/docs/source/ja/main_classes/quantization.md b/docs/source/ja/main_classes/quantization.md new file mode 100644 index 000000000000..3af3130a849f --- /dev/null +++ b/docs/source/ja/main_classes/quantization.md @@ -0,0 +1,447 @@ + + +# Quantize 🤗 Transformers models + +## `AutoGPTQ` Integration + + +🤗 Transformers には、言語モデルで GPTQ 量子化を実行するための `optimum` API が統合されています。パフォーマンスを大幅に低下させることなく、推論速度を高速化することなく、モデルを 8、4、3、さらには 2 ビットでロードおよび量子化できます。これは、ほとんどの GPU ハードウェアでサポートされています。 + +量子化モデルの詳細については、以下を確認してください。 +- [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) 論文 +- GPTQ 量子化に関する `optimum` [ガイド](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) +- バックエンドとして使用される [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) ライブラリ + +### Requirements + +以下のコードを実行するには、以下の要件がインストールされている必要があります: + +- 最新の `AutoGPTQ` ライブラリをインストールする。 +`pip install auto-gptq` をインストールする。 + +- 最新の `optimum` をソースからインストールする。 +`git+https://github.com/huggingface/optimum.git` をインストールする。 + +- 最新の `transformers` をソースからインストールする。 +最新の `transformers` をソースからインストールする `pip install git+https://github.com/huggingface/transformers.git` + +- 最新の `accelerate` ライブラリをインストールする。 +`pip install --upgrade accelerate` を実行する。 + +GPTQ統合は今のところテキストモデルのみをサポートしているので、視覚、音声、マルチモーダルモデルでは予期せぬ挙動に遭遇するかもしれないことに注意してください。 + +### Load and quantize a model + +GPTQ は、量子化モデルを使用する前に重みのキャリブレーションを必要とする量子化方法です。トランスフォーマー モデルを最初から量子化する場合は、量子化モデルを作成するまでに時間がかかることがあります (`facebook/opt-350m`モデルの Google colab では約 5 分)。 + +したがって、GPTQ 量子化モデルを使用するシナリオは 2 つあります。最初の使用例は、ハブで利用可能な他のユーザーによってすでに量子化されたモデルをロードすることです。2 番目の使用例は、モデルを最初から量子化し、保存するかハブにプッシュして、他のユーザーが使用できるようにすることです。それも使ってください。 + +#### GPTQ Configuration + +モデルをロードして量子化するには、[`GPTQConfig`] を作成する必要があります。データセットを準備するには、`bits`の数、量子化を調整するための`dataset`、およびモデルの`Tokenizer`を渡す必要があります。 + +```python +model_id = "facebook/opt-125m" +tokenizer = AutoTokenizer.from_pretrained(model_id) +gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer) +``` + +独自のデータセットを文字列のリストとして渡すことができることに注意してください。ただし、GPTQ 論文のデータセットを使用することを強くお勧めします。 + +```python +dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."] +quantization = GPTQConfig(bits=4, dataset = dataset, tokenizer=tokenizer) +``` + +#### Quantization + +`from_pretrained` を使用し、`quantization_config` を設定することでモデルを量子化できます。 + +```python +from transformers import AutoModelForCausalLM +model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=gptq_config) +``` + 
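量子化が完了したモデルは、通常のモデルと同じように推論に使用できます。以下は、上記で定義した `model` と `tokenizer` をそのまま使うことを想定した簡単なスケッチです(生成されるテキストはモデルと入力に依存します)。

```python
# 量子化済みモデルで簡単なテキスト生成を行う例
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```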
+モデルを量子化するには GPU が必要であることに注意してください。モデルを CPU に配置し、量子化するためにモジュールを GPU に前後に移動させます。 + +CPU オフロードの使用中に GPU の使用量を最大化したい場合は、`device_map = "auto"` を設定できます。 + +```python +from transformers import AutoModelForCausalLM +model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config) +``` + +ディスク オフロードはサポートされていないことに注意してください。さらに、データセットが原因でメモリが不足している場合は、`from_pretained` で `max_memory` を渡す必要がある場合があります。 `device_map`と`max_memory`の詳細については、この [ガイド](https://huggingface.co/docs/accelerate/usage_guides/big_modeling#designing-a-device-map) を参照してください。 + + +GPTQ 量子化は、現時点ではテキスト モデルでのみ機能します。さらに、量子化プロセスはハードウェアによっては長時間かかる場合があります (NVIDIA A100 を使用した場合、175B モデル = 4 gpu 時間)。モデルの GPTQ 量子化バージョンが存在しない場合は、ハブで確認してください。そうでない場合は、github で要求を送信できます。 + + +### Push quantized model to 🤗 Hub + +他の 🤗 モデルと同様に、`push_to_hub` を使用して量子化モデルをハブにプッシュできます。量子化構成は保存され、モデルに沿ってプッシュされます。 + +```python +quantized_model.push_to_hub("opt-125m-gptq") +tokenizer.push_to_hub("opt-125m-gptq") +``` + +量子化されたモデルをローカル マシンに保存したい場合は、`save_pretrained` を使用して行うこともできます。 + + +```python +quantized_model.save_pretrained("opt-125m-gptq") +tokenizer.save_pretrained("opt-125m-gptq") +``` + +`device_map` を使用してモデルを量子化した場合は、保存する前にモデル全体を GPU または `cpu` のいずれかに移動してください。 + +```python +quantized_model.to("cpu") +quantized_model.save_pretrained("opt-125m-gptq") +``` + +### Load a quantized model from the 🤗 Hub + +`from_pretrained`を使用して、量子化されたモデルをハブからロードできます。 +属性 `quantization_config` がモデル設定オブジェクトに存在することを確認して、プッシュされた重みが量子化されていることを確認します。 + +```python +from transformers import AutoModelForCausalLM +model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq") +``` + +必要以上のメモリを割り当てずにモデルをより速くロードしたい場合は、`device_map` 引数は量子化モデルでも機能します。 `accelerate`ライブラリがインストールされていることを確認してください。 + +```python +from transformers import AutoModelForCausalLM +model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto") +``` + +### Exllama kernels for faster inference + +4 ビット モデルの場合、推論速度を高めるために exllama カーネルを使用できます。デフォルトで有効になっています。 [`GPTQConfig`] で `disable_exllama` を渡すことで、その動作を変更できます。これにより、設定に保存されている量子化設定が上書きされます。カーネルに関連する属性のみを上書きできることに注意してください。さらに、exllama カーネルを使用したい場合は、モデル全体を GPU 上に置く必要があります。 + + +```py +import torch +gptq_config = GPTQConfig(bits=4, disable_exllama=False) +model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config) +``` + +現時点では 4 ビット モデルのみがサポートされていることに注意してください。さらに、peft を使用して量子化モデルを微調整している場合は、exllama カーネルを非アクティブ化することをお勧めします。 + +#### Fine-tune a quantized model + +Hugging Face エコシステムのアダプターの公式サポートにより、GPTQ で量子化されたモデルを微調整できます。 +詳細については、[`peft`](https://github.com/huggingface/peft) ライブラリをご覧ください。 + +### Example demo + +GPTQ を使用してモデルを量子化する方法と、peft を使用して量子化されたモデルを微調整する方法については、Google Colab [ノートブック](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb?usp=sharing) を参照してください。 + +### GPTQConfig + +[[autodoc]] GPTQConfig + +## `bitsandbytes` Integration + +🤗 Transformers は、`bitsandbytes` で最もよく使用されるモジュールと緊密に統合されています。数行のコードでモデルを 8 ビット精度でロードできます。 +これは、`bitsandbytes`の `0.37.0`リリース以降、ほとんどの GPU ハードウェアでサポートされています。 + +量子化方法の詳細については、[LLM.int8()](https://arxiv.org/abs/2208.07339) 論文、または [ブログ投稿](https://huggingface.co/blog/hf-bitsandbytes-) をご覧ください。統合)コラボレーションについて。 + +`0.39.0`リリース以降、FP4 データ型を活用し、4 ビット量子化を使用して`device_map`をサポートする任意のモデルをロードできます。 + +独自の pytorch モデルを量子化したい場合は、🤗 Accelerate ライブラリの [ドキュメント](https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization) をチェックしてください。 + +`bitsandbytes`統合を使用してできることは次のとおりです + +### 
General usage + +モデルが 🤗 Accelerate による読み込みをサポートし、`torch.nn.Linear` レイヤーが含まれている限り、 [`~PreTrainedModel.from_pretrained`] メソッドを呼び出すときに `load_in_8bit` または `load_in_4bit` 引数を使用してモデルを量子化できます。これはどのようなモダリティでも同様に機能するはずです。 + +```python +from transformers import AutoModelForCausalLM + +model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True) +model_4bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True) +``` + +デフォルトでは、他のすべてのモジュール (例: `torch.nn.LayerNorm`) は `torch.float16` に変換されますが、その `dtype` を変更したい場合は、`torch_dtype` 引数を上書きできます。 + +```python +>>> import torch +>>> from transformers import AutoModelForCausalLM + +>>> model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, torch_dtype=torch.float32) +>>> model_8bit.model.decoder.layers[-1].final_layer_norm.weight.dtype +torch.float32 +``` + +### FP4 quantization + +#### Requirements + +以下のコード スニペットを実行する前に、以下の要件がインストールされていることを確認してください。 + +- 最新の`bitsandbytes`ライブラリ +`pip install bitsandbytes>=0.39.0` + +- 最新の`accelerate`をインストールする +`pip install --upgrade accelerate` + +- 最新の `transformers` をインストールする +`pip install --upgrade transformers` + +#### Tips and best practices + +- **高度な使用法:** 可能なすべてのオプションを使用した 4 ビット量子化の高度な使用法については、[この Google Colab ノートブック](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf) を参照してください。 + +- **`batch_size=1` による高速推論 :** bitsandbytes の `0.40.0` リリース以降、`batch_size=1` では高速推論の恩恵を受けることができます。 [これらのリリース ノート](https://github.com/TimDettmers/bitsandbytes/releases/tag/0.40.0) を確認し、この機能を活用するには`0.40.0`以降のバージョンを使用していることを確認してください。箱の。 + +- **トレーニング:** [QLoRA 論文](https://arxiv.org/abs/2305.14314) によると、4 ビット基本モデルをトレーニングする場合 (例: LoRA アダプターを使用)、`bnb_4bit_quant_type='nf4'` を使用する必要があります。 。 + +- **推論:** 推論の場合、`bnb_4bit_quant_type` はパフォーマンスに大きな影響を与えません。ただし、モデルの重みとの一貫性を保つために、必ず同じ `bnb_4bit_compute_dtype` および `torch_dtype` 引数を使用してください。 + + +#### Load a large model in 4bit + +`.from_pretrained` メソッドを呼び出すときに `load_in_4bit=True` を使用すると、メモリ使用量を (おおよそ) 4 で割ることができます。 + +```python +# pip install transformers accelerate bitsandbytes +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_id = "bigscience/bloom-1b7" + +tokenizer = AutoTokenizer.from_pretrained(model_id) +model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True) +``` + + + +モデルが 4 ビットでロードされると、現時点では量子化された重みをハブにプッシュすることはできないことに注意してください。 4 ビットの重みはまだサポートされていないため、トレーニングできないことにも注意してください。ただし、4 ビット モデルを使用して追加のパラメーターをトレーニングすることもできます。これについては次のセクションで説明します。 + + + +### Load a large model in 8bit + +`.from_pretrained` メソッドを呼び出すときに `load_in_8bit=True` 引数を使用すると、メモリ要件をおよそ半分にしてモデルをロードできます。 + +```python +# pip install transformers accelerate bitsandbytes +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_id = "bigscience/bloom-1b7" + +tokenizer = AutoTokenizer.from_pretrained(model_id) +model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True) +``` + +次に、通常 [`PreTrainedModel`] を使用するのと同じようにモデルを使用します。 + +`get_memory_footprint` メソッドを使用して、モデルのメモリ フットプリントを確認できます。 + +```python +print(model.get_memory_footprint()) +``` + +この統合により、大きなモデルを小さなデバイスにロードし、問題なく実行できるようになりました。 + + +モデルが 8 ビットでロードされると、最新の `transformers`と`bitsandbytes`を使用する場合を除き、量子化された重みをハブにプッシュすることは現在不可能であることに注意してください。 8 ビットの重みはまだサポートされていないため、トレーニングできないことにも注意してください。ただし、8 ビット モデルを使用して追加のパラメーターをトレーニングすることもできます。これについては次のセクションで説明します。 +また、`device_map` はオプションですが、利用可能なリソース上でモデルを効率的にディスパッチするため、推論には `device_map = 'auto'` を設定することが推奨されます。 + + + +#### Advanced use 
cases + +ここでは、FP4 量子化を使用して実行できるいくつかの高度な使用例について説明します。 + +##### Change the compute dtype + +compute dtype は、計算中に使用される dtype を変更するために使用されます。たとえば、隠し状態は`float32`にありますが、高速化のために計算を bf16 に設定できます。デフォルトでは、compute dtype は `float32` に設定されます。 + +```python +import torch +from transformers import BitsAndBytesConfig + +quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16) +``` + +##### Using NF4 (Normal Float 4) data type + +NF4 データ型を使用することもできます。これは、正規分布を使用して初期化された重みに適合した新しい 4 ビット データ型です。その実行のために: + +```python +from transformers import BitsAndBytesConfig + +nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", +) + +model_nf4 = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=nf4_config) +``` + +##### Use nested quantization for more memory efficient inference + +また、ネストされた量子化手法を使用することをお勧めします。これにより、パフォーマンスを追加することなく、より多くのメモリが節約されます。経験的な観察から、これにより、NVIDIA-T4 16GB 上でシーケンス長 1024、バッチ サイズ 1、勾配累積ステップ 4 の llama-13b モデルを微調整することが可能になります。 + +```python +from transformers import BitsAndBytesConfig + +double_quant_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, +) + +model_double_quant = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=double_quant_config) +``` + + +### Push quantized models on the 🤗 Hub + +`push_to_hub`メソッドを単純に使用することで、量子化されたモデルをハブにプッシュできます。これにより、最初に量子化構成ファイルがプッシュされ、次に量子化されたモデルの重みがプッシュされます。 +この機能を使用できるようにするには、必ず `bitsandbytes>0.37.2` を使用してください (この記事の執筆時点では、`bitsandbytes==0.38.0.post1` でテストしました)。 + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", device_map="auto", load_in_8bit=True) +tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m") + +model.push_to_hub("bloom-560m-8bit") +``` + + + +大規模なモデルでは、ハブ上で 8 ビット モデルをプッシュすることが強く推奨されます。これにより、コミュニティはメモリ フットプリントの削減と、たとえば Google Colab での大規模なモデルの読み込みによる恩恵を受けることができます。 + + + +### Load a quantized model from the 🤗 Hub + +`from_pretrained`メソッドを使用して、ハブから量子化モデルをロードできます。属性 `quantization_config` がモデル設定オブジェクトに存在することを確認して、プッシュされた重みが量子化されていることを確認します。 + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("{your_username}/bloom-560m-8bit", device_map="auto") +``` + +この場合、引数 `load_in_8bit=True` を指定する必要はありませんが、`bitsandbytes` と `accelerate` がインストールされていることを確認する必要があることに注意してください。 +また、`device_map` はオプションですが、利用可能なリソース上でモデルを効率的にディスパッチするため、推論には `device_map = 'auto'` を設定することが推奨されます。 + +### Advanced use cases + +このセクションは、8 ビット モデルのロードと実行以外に何ができるかを探求したい上級ユーザーを対象としています。 + +#### Offload between `cpu` and `gpu` + +この高度な使用例の 1 つは、モデルをロードし、`CPU`と`GPU`の間で重みをディスパッチできることです。 CPU 上でディスパッチされる重みは **8 ビットに変換されない**ため、`float32`に保持されることに注意してください。この機能は、非常に大規模なモデルを適合させ、そのモデルを GPU と CPU の間でディスパッチしたいユーザーを対象としています。 + +まず、`transformers` から [`BitsAndBytesConfig`] をロードし、属性 `llm_int8_enable_fp32_cpu_offload` を `True` に設定します。 + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) +``` + +`bigscience/bloom-1b7`モデルをロードする必要があり、`lm_head`を除くモデル全体に​​適合するのに十分な GPU RAM があるとします。したがって、次のようにカスタム device_map を作成します。 + +```python +device_map = { + "transformer.word_embeddings": 0, + "transformer.word_embeddings_layernorm": 0, + "lm_head": "cpu", + "transformer.h": 0, + "transformer.ln_f": 0, +} +``` + +そして、次のようにモデルをロードします。 +```python +model_8bit = AutoModelForCausalLM.from_pretrained( + 
"bigscience/bloom-1b7", + device_map=device_map, + quantization_config=quantization_config, +) +``` + +以上です!モデルを楽しんでください! + +#### Play with `llm_int8_threshold` + +`llm_int8_threshold` 引数を操作して、外れ値のしきい値を変更できます。 外れ値 とは、特定のしきい値より大きい隠れた状態の値です。 +これは、`LLM.int8()`論文で説明されている外れ値検出の外れ値しきい値に対応します。このしきい値を超える隠し状態の値は外れ値とみなされ、それらの値に対する操作は fp16 で実行されます。通常、値は正規分布します。つまり、ほとんどの値は [-3.5, 3.5] の範囲内にありますが、大規模なモデルでは大きく異なる分布を示す例外的な系統的外れ値がいくつかあります。これらの外れ値は、多くの場合 [-60, -6] または [6, 60] の範囲内にあります。 Int8 量子化は、大きさが 5 程度までの値ではうまく機能しますが、それを超えると、パフォーマンスが大幅に低下します。適切なデフォルトのしきい値は 6 ですが、より不安定なモデル (小規模なモデル、微調整) では、より低いしきい値が必要になる場合があります。 +この引数は、モデルの推論速度に影響を与える可能性があります。このパラメータを試してみて、ユースケースに最適なパラメータを見つけることをお勧めします。 + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +model_id = "bigscience/bloom-1b7" + +quantization_config = BitsAndBytesConfig( + llm_int8_threshold=10, +) + +model_8bit = AutoModelForCausalLM.from_pretrained( + model_id, + device_map=device_map, + quantization_config=quantization_config, +) +tokenizer = AutoTokenizer.from_pretrained(model_id) +``` + +#### Skip the conversion of some modules + +一部のモデルには、安定性を確保するために 8 ビットに変換する必要がないモジュールがいくつかあります。たとえば、ジュークボックス モデルには、スキップする必要があるいくつかの `lm_head` モジュールがあります。 `llm_int8_skip_modules` で遊んでみる + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +model_id = "bigscience/bloom-1b7" + +quantization_config = BitsAndBytesConfig( + llm_int8_skip_modules=["lm_head"], +) + +model_8bit = AutoModelForCausalLM.from_pretrained( + model_id, + device_map=device_map, + quantization_config=quantization_config, +) +tokenizer = AutoTokenizer.from_pretrained(model_id) +``` + +#### Fine-tune a model that has been loaded in 8-bit + +Hugging Face エコシステムのアダプターの公式サポートにより、8 ビットでロードされたモデルを微調整できます。 +これにより、単一の Google Colab で`flan-t5-large`や`facebook/opt-6.7b`などの大規模モデルを微調整することができます。詳細については、[`peft`](https://github.com/huggingface/peft) ライブラリをご覧ください。 + +トレーニング用のモデルをロードするときに `device_map` を渡す必要がないことに注意してください。モデルが GPU に自動的にロードされます。必要に応じて、デバイス マップを特定のデバイスに設定することもできます (例: `cuda:0`、`0`、`torch.device('cuda:0')`)。 `device_map=auto`は推論のみに使用する必要があることに注意してください。 + +### BitsAndBytesConfig + +[[autodoc]] BitsAndBytesConfig + +## Quantization with 🤗 `optimum` + +`optimum`でサポートされている量子化方法の詳細については、[Optimum ドキュメント](https://huggingface.co/docs/optimum/index) を参照し、これらが自分のユースケースに適用できるかどうかを確認してください。 diff --git a/docs/source/ja/main_classes/text_generation.md b/docs/source/ja/main_classes/text_generation.md new file mode 100644 index 000000000000..279d9b40735b --- /dev/null +++ b/docs/source/ja/main_classes/text_generation.md @@ -0,0 +1,63 @@ + + +# Generation + +各フレームワークには、それぞれの `GenerationMixin` クラスに実装されたテキスト生成のための Generate メソッドがあります。 + +- PyTorch [`~generation.GenerationMixin.generate`] は [`~generation.GenerationMixin`] に実装されています。 +- TensorFlow [`~generation.TFGenerationMixin.generate`] は [`~generation.TFGenerationMixin`] に実装されています。 +- Flax/JAX [`~generation.FlaxGenerationMixin.generate`] は [`~generation.FlaxGenerationMixin`] に実装されています。 + +選択したフレームワークに関係なく、[`~generation.GenerationConfig`] を使用して生成メソッドをパラメータ化できます。 +クラスインスタンス。動作を制御する生成パラメータの完全なリストについては、このクラスを参照してください。 +生成方法のこと。 + +モデルの生成構成を検査する方法、デフォルトとは何か、パラメーターをアドホックに変更する方法を学習するには、 +カスタマイズされた生成構成を作成して保存する方法については、「 +[テキスト生成戦略ガイド](../generation_strategies)。このガイドでは、関連機能の使用方法についても説明しています。 +トークンストリーミングのような。 + +## GenerationConfig + +[[autodoc]] generation.GenerationConfig + - from_pretrained + - from_model_config + - save_pretrained + +## GenerationMixin + +[[autodoc]] generation.GenerationMixin + 
- generate + - compute_transition_scores + - greedy_search + - sample + - beam_search + - beam_sample + - contrastive_search + - group_beam_search + - constrained_beam_search + +## TFGenerationMixin + +[[autodoc]] generation.TFGenerationMixin + - generate + - compute_transition_scores + +## FlaxGenerationMixin + +[[autodoc]] generation.FlaxGenerationMixin + - generate diff --git a/docs/source/ja/main_classes/tokenizer.md b/docs/source/ja/main_classes/tokenizer.md new file mode 100644 index 000000000000..1cf5885bc812 --- /dev/null +++ b/docs/source/ja/main_classes/tokenizer.md @@ -0,0 +1,80 @@ + + +# Tokenizer + +トークナイザーは、モデルの入力の準備を担当します。ライブラリには、すべてのモデルのトークナイザーが含まれています。ほとんど +トークナイザーの一部は、完全な Python 実装と、 +Rust ライブラリ [🤗 Tokenizers](https://github.com/huggingface/tokenizers)。 「高速」実装では次のことが可能になります。 + +1. 特にバッチトークン化を行う場合の大幅なスピードアップと +2. 元の文字列 (文字と単語) とトークン空間の間でマッピングする追加のメソッド (例: + 特定の文字を含むトークンのインデックス、または特定のトークンに対応する文字の範囲)。 + +基本クラス [`PreTrainedTokenizer`] および [`PreTrainedTokenizerFast`] +モデル入力の文字列入力をエンコードし (以下を参照)、Python をインスタンス化/保存するための一般的なメソッドを実装します。 +ローカル ファイルまたはディレクトリ、またはライブラリによって提供される事前トレーニング済みトークナイザーからの「高速」トークナイザー +(HuggingFace の AWS S3 リポジトリからダウンロード)。二人とも頼りにしているのは、 +共通メソッドを含む [`~tokenization_utils_base.PreTrainedTokenizerBase`] +[`~tokenization_utils_base.SpecialTokensMixin`]。 + +したがって、[`PreTrainedTokenizer`] と [`PreTrainedTokenizerFast`] はメインを実装します。 +すべてのトークナイザーを使用するためのメソッド: + +- トークン化 (文字列をサブワード トークン文字列に分割)、トークン文字列を ID に変換したり、その逆の変換を行ったりします。 + エンコード/デコード (つまり、トークン化と整数への変換)。 +- 基礎となる構造 (BPE、SentencePiece...) から独立した方法で、語彙に新しいトークンを追加します。 +- 特別なトークン (マスク、文の始まりなど) の管理: トークンの追加、属性への割り当て。 + トークナイザーにより、簡単にアクセスでき、トークン化中に分割されないようにすることができます。 + +[`BatchEncoding`] は、 +[`~tokenization_utils_base.PreTrainedTokenizerBase`] のエンコード メソッド (`__call__`、 +`encode_plus` および `batch_encode_plus`) であり、Python 辞書から派生しています。トークナイザーが純粋な Python の場合 +tokenizer の場合、このクラスは標準の Python 辞書と同じように動作し、によって計算されたさまざまなモデル入力を保持します。 +これらのメソッド (`input_ids`、`attention_mask`...)。トークナイザーが「高速」トークナイザーである場合 (つまり、 +HuggingFace [トークナイザー ライブラリ](https://github.com/huggingface/tokenizers))、このクラスはさらに提供します +元の文字列 (文字と単語) と +トークンスペース (例: 指定された文字または対応する文字の範囲を構成するトークンのインデックスの取得) +与えられたトークンに)。 + +## PreTrainedTokenizer + +[[autodoc]] PreTrainedTokenizer + - __call__ + - apply_chat_template + - batch_decode + - decode + - encode + - push_to_hub + - all + +## PreTrainedTokenizerFast + +[`PreTrainedTokenizerFast`] は [tokenizers](https://huggingface.co/docs/tokenizers) ライブラリに依存します。 🤗 トークナイザー ライブラリから取得したトークナイザーは、 +🤗 トランスに非常に簡単にロードされます。これがどのように行われるかを理解するには、[🤗 tokenizers からの tokenizers を使用する](../fast_tokenizers) ページを参照してください。 + +[[autodoc]] PreTrainedTokenizerFast + - __call__ + - apply_chat_template + - batch_decode + - decode + - encode + - push_to_hub + - all + +## BatchEncoding + +[[autodoc]] BatchEncoding diff --git a/docs/source/ja/main_classes/trainer.md b/docs/source/ja/main_classes/trainer.md new file mode 100644 index 000000000000..4c1ce95ca38a --- /dev/null +++ b/docs/source/ja/main_classes/trainer.md @@ -0,0 +1,728 @@ + + +# Trainer + +[`Trainer`] クラスは、ほとんどの標準的なユースケースに対して、PyTorch で機能を完全にトレーニングするための API を提供します。これは、[サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples) のほとんどで使用されています。 + +[`Trainer`] をインスタンス化する前に、トレーニング中にカスタマイズのすべてのポイントにアクセスするために [`TrainingArguments`] を作成します。 + +この API は、複数の GPU/TPU での分散トレーニング、[NVIDIA Apex](https://github.com/NVIDIA/apex) および PyTorch のネイティブ AMP による混合精度をサポートします。 + +[`Trainer`] には、上記の機能をサポートする基本的なトレーニング ループが含まれています。カスタム動作を挿入するには、それらをサブクラス化し、次のメソッドをオーバーライドします。 + +- **get_train_dataloader** -- 
トレーニング データローダーを作成します。 +- **get_eval_dataloader** -- 評価用データローダーを作成します。 +- **get_test_dataloader** -- テスト データローダーを作成します。 +- **log** -- トレーニングを監視しているさまざまなオブジェクトに関する情報をログに記録します。 +- **create_optimizer_and_scheduler** -- オプティマイザと学習率スケジューラが渡されなかった場合にセットアップします。 + 初期化。 `create_optimizer`メソッドと`create_scheduler`メソッドをサブクラス化またはオーバーライドすることもできることに注意してください。 + 別々に。 +- **create_optimizer** -- init で渡されなかった場合にオプティマイザーをセットアップします。 +- **create_scheduler** -- init で渡されなかった場合、学習率スケジューラを設定します。 +- **compute_loss** - トレーニング入力のバッチの損失を計算します。 +- **training_step** -- トレーニング ステップを実行します。 +- **prediction_step** -- 評価/テスト ステップを実行します。 +- **evaluate** -- 評価ループを実行し、メトリクスを返します。 +- **predict** -- テスト セットの予測 (ラベルが使用可能な場合はメトリクスも含む) を返します。 + + + +[`Trainer`] クラスは 🤗 Transformers モデル用に最適化されており、驚くべき動作をする可能性があります +他の機種で使用する場合。独自のモデルで使用する場合は、次の点を確認してください。 + +- モデルは常に [`~utils.ModelOutput`] のタプルまたはサブクラスを返します。 +- `labels` 引数が指定され、その損失が最初の値として返される場合、モデルは損失を計算できます。 + タプルの要素 (モデルがタプルを返す場合) +- モデルは複数のラベル引数を受け入れることができます ([`TrainingArguments`] で `label_names` を使用して、その名前を [`Trainer`] に示します) が、それらのいずれにも `"label"` という名前を付ける必要はありません。 + + + +以下は、加重損失を使用するように [`Trainer`] をカスタマイズする方法の例です (不均衡なトレーニング セットがある場合に役立ちます)。 + +```python +from torch import nn +from transformers import Trainer + + +class CustomTrainer(Trainer): + def compute_loss(self, model, inputs, return_outputs=False): + labels = inputs.pop("labels") + # forward pass + outputs = model(**inputs) + logits = outputs.get("logits") + # compute custom loss (suppose one has 3 labels with different weights) + loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0], device=model.device)) + loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) + return (loss, outputs) if return_outputs else loss +``` + +PyTorch [`Trainer`] のトレーニング ループの動作をカスタマイズするもう 1 つの方法は、トレーニング ループの状態を検査できる [callbacks](コールバック) を使用することです (進行状況レポート、TensorBoard または他の ML プラットフォームでのログ記録など)。決定(早期停止など)。 + +## Trainer + +[[autodoc]] Trainer + - all + +## Seq2SeqTrainer + +[[autodoc]] Seq2SeqTrainer + - evaluate + - predict + +## TrainingArguments + +[[autodoc]] TrainingArguments + - all + +## Seq2SeqTrainingArguments + +[[autodoc]] Seq2SeqTrainingArguments + - all + +## Checkpoints + +デフォルトでは、[`Trainer`] はすべてのチェックポイントを、 +[`TrainingArguments`] を使用しています。これらは、xxx を含む`checkpoint-xxx`という名前のサブフォルダーに保存されます。 +それはトレーニングの段階でした。 + +チェックポイントからトレーニングを再開するには、次のいずれかを使用して [`Trainer.train`] を呼び出します。 + +- `resume_from_checkpoint=True` は最新のチェックポイントからトレーニングを再開します +- `resume_from_checkpoint=checkpoint_dir` ディレクトリ内の特定のチェックポイントからトレーニングを再開します + 合格した。 + +さらに、`push_to_hub=True` を使用すると、モデル ハブにチェックポイントを簡単に保存できます。デフォルトでは、すべて +中間チェックポイントに保存されたモデルは別のコミットに保存されますが、オプティマイザーの状態は保存されません。適応できます +[`TrainingArguments`] の `hub-strategy` 値を次のいずれかにします。 + +- `"checkpoint"`: 最新のチェックポイントも last-checkpoint という名前のサブフォルダーにプッシュされます。 + `trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")` を使用してトレーニングを簡単に再開します。 +- `"all_checkpoints"`: すべてのチェックポイントは、出力フォルダーに表示されるようにプッシュされます (したがって、1 つのチェックポイントが得られます) + 最終リポジトリ内のフォルダーごとのチェックポイント フォルダー) + +## Logging + +デフォルトでは、[`Trainer`] はメインプロセスに `logging.INFO` を使用し、レプリカがある場合には `logging.WARNING` を使用します。 + +これらのデフォルトは、[`TrainingArguments`] の 5 つの `logging` レベルのいずれかを使用するようにオーバーライドできます。 +引数: + +- `log_level` - メインプロセス用 +- `log_level_replica` - レプリカ用 + +さらに、[`TrainingArguments`] の `log_on_each_node` が `False` に設定されている場合、メイン ノードのみが +メイン プロセスのログ レベル設定を使用すると、他のすべてのノードはレプリカのログ レベル設定を使用します。 + +[`Trainer`] は、`transformers` のログ レベルをノードごとに個別に設定することに注意してください。 
+[`Trainer.__init__`]。したがって、他の機能を利用する場合は、これをより早く設定することをお勧めします (次の例を参照)。 +[`Trainer`] オブジェクトを作成する前の `transformers` 機能。 + +これをアプリケーションで使用する方法の例を次に示します。 + +```python +[...] +logger = logging.getLogger(__name__) + +# Setup logging +logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], +) + +# set the main code and the modules it uses to the same log-level according to the node +log_level = training_args.get_process_log_level() +logger.setLevel(log_level) +datasets.utils.logging.set_verbosity(log_level) +transformers.utils.logging.set_verbosity(log_level) + +trainer = Trainer(...) +``` + +そして、メイン ノードと他のすべてのノードで重複する可能性が高いものを出力しないように警告するだけを表示したい場合は、 +警告: 次のように実行できます。 + +```bash +my_app.py ... --log_level warning --log_level_replica error +``` + +マルチノード環境で、各ノードのメインプロセスのログを繰り返したくない場合は、次のようにします。 +上記を次のように変更します。 + +```bash +my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0 +``` + +その後、最初のノードのメイン プロセスのみが「警告」レベルでログに記録され、メイン ノード上の他のすべてのプロセスはログに記録されます。 +ノードと他のノード上のすべてのプロセスは「エラー」レベルでログに記録されます。 + +アプリケーションをできるだけ静かにする必要がある場合は、次のようにします。 + +```bash +my_app.py ... --log_level error --log_level_replica error --log_on_each_node 0 +``` + +(マルチノード環境の場合は `--log_on_each_node 0` を追加します) + +## Randomness + +[`Trainer`] によって生成されたチェックポイントから再開する場合、すべての努力がその状態を復元するために行われます。 +_python_、_numpy_、および _pytorch_ の RNG 状態は、そのチェックポイントを保存した時点と同じ状態になります。 +これにより、「停止して再開」というスタイルのトレーニングが、ノンストップトレーニングに可能な限り近づけられるはずです。 + +ただし、さまざまなデフォルトの非決定的な pytorch 設定により、これは完全に機能しない可能性があります。フルをご希望の場合は +決定論については、[ランダム性のソースの制御](https://pytorch.org/docs/stable/notes/randomness) を参照してください。ドキュメントで説明されているように、これらの設定の一部は +物事を決定論的にするもの (例: `torch.backends.cudnn.deterministic`) は物事を遅くする可能性があるため、これは +デフォルトでは実行できませんが、必要に応じて自分で有効にすることができます。 + +## Specific GPUs Selection + +どの GPU をどのような順序で使用するかをプログラムに指示する方法について説明します。 + +[`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.Parallel.DistributedDataParallel.html) を使用して GPU のサブセットのみを使用する場合、使用する GPU の数を指定するだけです。 。たとえば、GPU が 4 つあるが、最初の 2 つを使用したい場合は、次のようにします。 + +```bash +python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ... +``` + +[`accelerate`](https://github.com/huggingface/accelerate) または [`deepspeed`](https://github.com/microsoft/DeepSpeed) がインストールされている場合は、次を使用して同じことを達成することもできます。の一つ: + +```bash +accelerate launch --num_processes 2 trainer-program.py ... +``` + +```bash +deepspeed --num_gpus 2 trainer-program.py ... +``` + +これらのランチャーを使用するために、Accelerate または [Deepspeed 統合](Deepspeed) 機能を使用する必要はありません。 + + +これまでは、プログラムに使用する GPU の数を指示できました。次に、特定の GPU を選択し、その順序を制御する方法について説明します。 + +次の環境変数は、使用する GPU とその順序を制御するのに役立ちます。 + +**`CUDA_VISIBLE_DEVICES`** + +複数の GPU があり、そのうちの 1 つまたはいくつかの GPU だけを使用したい場合は、環境変数 `CUDA_VISIBLE_DEVICES` を使用する GPU のリストに設定します。 + +たとえば、4 つの GPU (0、1、2、3) があるとします。物理 GPU 0 と 2 のみで実行するには、次のようにします。 + +```bash +CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ... +``` + +したがって、pytorch は 2 つの GPU のみを認識し、物理 GPU 0 と 2 はそれぞれ `cuda:0` と `cuda:1` にマッピングされます。 + +順序を変更することもできます。 + +```bash +CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ... +``` + +ここでは、物理 GPU 0 と 2 がそれぞれ`cuda:1`と`cuda:0`にマッピングされています。 + +上記の例はすべて `DistributedDataParallel` 使用パターンのものですが、同じ方法が [`DataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html) でも機能します。 + + +```bash +CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ... 
+``` + +GPU のない環境をエミュレートするには、次のようにこの環境変数を空の値に設定するだけです。 + +```bash +CUDA_VISIBLE_DEVICES= python trainer-program.py ... +``` + +他の環境変数と同様に、これらをコマンド ラインに追加する代わりに、次のようにエクスポートすることもできます。 + +```bash +export CUDA_VISIBLE_DEVICES=0,2 +python -m torch.distributed.launch trainer-program.py ... +``` + +ただし、この方法では、以前に環境変数を設定したことを忘れて、なぜ間違った GPU が使用されているのか理解できない可能性があるため、混乱を招く可能性があります。したがって、このセクションのほとんどの例で示されているように、同じコマンド ラインで特定の実行に対してのみ環境変数を設定するのが一般的です。 + +**`CUDA_DEVICE_ORDER`** + +物理デバイスの順序を制御する追加の環境変数 `CUDA_DEVICE_ORDER` があります。選択肢は次の 2 つです。 + +1. PCIe バス ID 順 (`nvidia-smi` の順序と一致) - これがデフォルトです。 + +```bash +export CUDA_DEVICE_ORDER=PCI_BUS_ID +``` + +2. GPU コンピューティング能力順に並べる + +```bash +export CUDA_DEVICE_ORDER=FASTEST_FIRST +``` + +ほとんどの場合、この環境変数を気にする必要はありませんが、古い GPU と新しい GPU が物理的に挿入されているため、遅い古いカードが遅くなっているように見えるような偏ったセットアップを行っている場合には、非常に役立ちます。初め。これを解決する 1 つの方法は、カードを交換することです。ただし、カードを交換できない場合 (デバイスの冷却が影響を受けた場合など)、`CUDA_DEVICE_ORDER=FASTEST_FIRST`を設定すると、常に新しい高速カードが最初に配置されます。ただし、`nvidia-smi`は依然として PCIe の順序でレポートするため、多少混乱するでしょう。 + +順序を入れ替えるもう 1 つの解決策は、以下を使用することです。 + +```bash +export CUDA_VISIBLE_DEVICES=1,0 +``` + +この例では 2 つの GPU だけを使用していますが、もちろん、コンピューターに搭載されている数の GPU にも同じことが当てはまります。 + +また、この環境変数を設定する場合は、`~/.bashrc` ファイルまたはその他の起動設定ファイルに設定して、忘れるのが最善です。 + +## Trainer Integrations + +[`Trainer`] は、トレーニングを劇的に改善する可能性のあるライブラリをサポートするように拡張されました。 +時間とはるかに大きなモデルに適合します。 + +現在、サードパーティのソリューション [DeepSpeed](https://github.com/microsoft/DeepSpeed) および [PyTorch FSDP](https://pytorch.org/docs/stable/fsdp.html) をサポートしています。論文 [ZeRO: メモリの最適化] +兆パラメータ モデルのトレーニングに向けて、Samyam Rajbhandari、Jeff Rasley、Olatunji Ruwase、Yuxiong He 著](https://arxiv.org/abs/1910.02054)。 + +この提供されるサポートは、この記事の執筆時点では新しくて実験的なものです。 DeepSpeed と PyTorch FSDP のサポートはアクティブであり、それに関する問題は歓迎しますが、FairScale 統合は PyTorch メインに統合されているため、もうサポートしていません ([PyTorch FSDP 統合](#pytorch-fully-sharded-data-parallel)) + + + +### CUDA Extension Installation Notes + +この記事の執筆時点では、Deepspeed を使用するには、CUDA C++ コードをコンパイルする必要があります。 + +すべてのインストールの問題は、[Deepspeed](https://github.com/microsoft/DeepSpeed/issues) の対応する GitHub の問題を通じて対処する必要がありますが、ビルド中に発生する可能性のある一般的な問題がいくつかあります。 +CUDA 拡張機能を構築する必要がある PyTorch 拡張機能。 + +したがって、次の操作を実行中に CUDA 関連のビルドの問題が発生した場合は、次のとおりです。 + +```bash +pip install deepspeed +``` + +まず次の注意事項をお読みください。 + +これらのノートでは、`pytorch` が CUDA `10.2` でビルドされた場合に何をすべきかの例を示します。あなたの状況が次のような場合 +異なる場合は、バージョン番号を目的のバージョンに調整することを忘れないでください。 + +#### Possible problem #1 + +Pytorch には独自の CUDA ツールキットが付属していますが、これら 2 つのプロジェクトをビルドするには、同一バージョンの CUDA が必要です。 +システム全体にインストールされます。 + +たとえば、Python 環境に `cudatoolkit==10.2` を指定して `pytorch` をインストールした場合は、次のものも必要です。 +CUDA `10.2` がシステム全体にインストールされました。 + +正確な場所はシステムによって異なる場合がありますが、多くのシステムでは`/usr/local/cuda-10.2`が最も一般的な場所です。 +Unix システム。 CUDA が正しく設定され、`PATH`環境変数に追加されると、 +次のようにしてインストール場所を指定します。 + + +```bash +which nvcc +``` + +CUDA がシステム全体にインストールされていない場合は、最初にインストールしてください。お気に入りを使用して手順を見つけることができます +検索エンジン。たとえば、Ubuntu を使用している場合は、[ubuntu cuda 10.2 install](https://www.google.com/search?q=ubuntu+cuda+10.2+install) を検索するとよいでしょう。 + +#### Possible problem #2 + +もう 1 つの考えられる一般的な問題は、システム全体に複数の CUDA ツールキットがインストールされている可能性があることです。たとえばあなた +がある可能性があり: + +```bash +/usr/local/cuda-10.2 +/usr/local/cuda-11.0 +``` + +この状況では、`PATH` および `LD_LIBRARY_PATH` 環境変数に以下が含まれていることを確認する必要があります。 +目的の CUDA バージョンへの正しいパス。通常、パッケージ インストーラーは、これらに、 +最後のバージョンがインストールされました。適切なパッケージが見つからないためにパッケージのビルドが失敗するという問題が発生した場合は、 +CUDA バージョンがシステム全体にインストールされているにもかかわらず、前述の 2 つを調整する必要があることを意味します +環境変数。 + +まず、その内容を見てみましょう。 + +```bash +echo $PATH +echo $LD_LIBRARY_PATH +``` + +それで、中に何が入っているかがわかります。 + +`LD_LIBRARY_PATH` が空である可能性があります。 + 
+`PATH` は実行可能ファイルが存在する場所をリストし、`LD_LIBRARY_PATH` は共有ライブラリの場所を示します。 +探すことです。どちらの場合も、前のエントリが後のエントリより優先されます。 `:` は複数を区切るために使用されます +エントリ。 + +ここで、ビルド プログラムに特定の CUDA ツールキットの場所を指示するには、最初にリストされる希望のパスを挿入します。 +やっていること: + +```bash +export PATH=/usr/local/cuda-10.2/bin:$PATH +export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH +``` + +既存の値を上書きするのではなく、先頭に追加することに注意してください。 + +もちろん、必要に応じてバージョン番号やフルパスを調整します。割り当てたディレクトリが実際に機能することを確認してください +存在する。 `lib64` サブディレクトリは、`libcudart.so` などのさまざまな CUDA `.so` オブジェクトが存在する場所です。 +システムでは別の名前が付けられますが、現実を反映するように調整してください。 + +#### Possible problem #3 + +一部の古い CUDA バージョンは、新しいコンパイラでのビルドを拒否する場合があります。たとえば、あなたは`gcc-9`を持っていますが、それが必要です +`gcc-7`。 + +それにはさまざまな方法があります。 + +最新の CUDA ツールキットをインストールできる場合は、通常、新しいコンパイラがサポートされているはずです。 + +あるいは、既に所有しているコンパイラに加えて、下位バージョンのコンパイラをインストールすることもできます。 +すでに存在しますが、デフォルトではないため、ビルドシステムはそれを認識できません。 「gcc-7」がインストールされているが、 +ビルドシステムが見つからないというメッセージを表示する場合は、次の方法で解決できる可能性があります。 + +```bash +sudo ln -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc +sudo ln -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++ +``` + +ここでは、`/usr/local/cuda-10.2/bin/gcc` から `gcc-7` へのシンボリックリンクを作成しています。 +`/usr/local/cuda-10.2/bin/` は `PATH` 環境変数内にある必要があります (前の問題の解決策を参照)。 +`gcc-7` (および `g++7`) が見つかるはずで、ビルドは成功します。 + +いつものように、状況に合わせて例のパスを編集してください。 + +### PyTorch Fully Sharded Data parallel + +より大きなバッチ サイズで巨大なモデルのトレーニングを高速化するには、完全にシャード化されたデータ並列モデルを使用できます。 +このタイプのデータ並列パラダイムでは、オプティマイザーの状態、勾配、パラメーターをシャーディングすることで、より多くのデータと大規模なモデルをフィッティングできます。 +この機能とその利点の詳細については、[完全シャーディング データ並列ブログ](https://pytorch.org/blog/introducing-pytorch-full-sharded-data-Parallel-api/) をご覧ください。 +最新の PyTorch の Fully Sharded Data Parallel (FSDP) トレーニング機能を統合しました。 +必要なのは、設定を通じて有効にすることだけです。 + +**FSDP サポートに必要な PyTorch バージョン**: PyTorch Nightly (リリース後にこれを読んだ場合は 1.12.0) +FSDP を有効にしたモデルの保存は、最近の修正でのみ利用できるためです。 + +**使用法**: + +- 配布されたランチャーが追加されていることを確認してください +まだ使用していない場合は、`-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE`を使用します。 + +- **シャーディング戦略**: + - FULL_SHARD : データ並列ワーカー/GPU にわたるシャード オプティマイザーの状態 + 勾配 + モデル パラメーター。 + このためには、コマンドライン引数に`--fsdp full_shard`を追加します。 + - SHARD_GRAD_OP : シャード オプティマイザーの状態 + データ並列ワーカー/GPU 全体の勾配。 + このためには、コマンドライン引数に`--fsdp shard_grad_op`を追加します。 + - NO_SHARD : シャーディングなし。このためには、コマンドライン引数に`--fsdp no_shard`を追加します。 +- パラメータと勾配を CPU にオフロードするには、 + コマンドライン引数に`--fsdp "full_shard offload"`または`--fsdp "shard_grad_op offload"`を追加します。 +- `default_auto_wrap_policy` を使用して FSDP でレイヤーを自動的に再帰的にラップするには、 + コマンドライン引数に`--fsdp "full_shard auto_wrap"`または`--fsdp "shard_grad_op auto_wrap"`を追加します。 +- CPU オフロードと自動ラッピングの両方を有効にするには、 + コマンドライン引数に`--fsdp "full_shard offload auto_wrap"`または`--fsdp "shard_grad_op offload auto_wrap"`を追加します。 +- 残りの FSDP 構成は、`--fsdp_config `を介して渡されます。それは、次のいずれかの場所です。 + FSDP json 構成ファイル (例: `fsdp_config.json`)、またはすでにロードされている json ファイルを `dict` として使用します。 + - 自動ラッピングが有効な場合は、トランスベースの自動ラップ ポリシーまたはサイズ ベースの自動ラップ ポリシーを使用できます。 + - トランスフォーマーベースの自動ラップポリシーの場合、構成ファイルで `fsdp_transformer_layer_cls_to_wrap` を指定することをお勧めします。指定しない場合、使用可能な場合、デフォルト値は `model._no_split_modules` になります。 + これは、ラップするトランスフォーマー層クラス名のリスト (大文字と小文字を区別) を指定します (例: [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] ...)。 + 重みを共有するサブモジュール (埋め込み層など) が異なる FSDP ラップされたユニットにならないようにする必要があるため、これは重要です。 + このポリシーを使用すると、マルチヘッド アテンションとそれに続くいくつかの MLP レイヤーを含むブロックごとにラッピングが発生します。 + 共有埋め込みを含む残りの層は、同じ最も外側の FSDP ユニットにラップされるのが便利です。 + したがって、トランスベースのモデルにはこれを使用してください。 + - サイズベースの自動ラップポリシーの場合は、設定ファイルに`fsdp_min_num_params`を追加してください。 + 自動ラッピングのための FSDP のパラメータの最小数を指定します。 + - 設定ファイルで `fsdp_backward_prefetch` を指定できるようになりました。次のパラメータのセットをいつプリフェッチするかを制御します。 + `backward_pre` と 
`backward_pos` が利用可能なオプションです。 + 詳細については、`torch.distributed.fsdp.full_sharded_data_Parallel.BackwardPrefetch`を参照してください。 + - 設定ファイルで `fsdp_forward_prefetch` を指定できるようになりました。次のパラメータのセットをいつプリフェッチするかを制御します。 + `True`の場合、FSDP はフォワード パスでの実行中に、次に来るオールギャザーを明示的にプリフェッチします。 + - 設定ファイルで `limit_all_gathers` を指定できるようになりました。 + `True`の場合、FSDP は CPU スレッドを明示的に同期して、実行中のオールギャザが多すぎるのを防ぎます。 + - `activation_checkpointing`を設定ファイルで指定できるようになりました。 + `True`の場合、FSDP アクティベーション チェックポイントは、FSDP のアクティベーションをクリアすることでメモリ使用量を削減する手法です。 + 特定のレイヤーを処理し、バックワード パス中にそれらを再計算します。事実上、これは余分な計算時間を犠牲にします + メモリ使用量を削減します。 + +**注意すべき注意点がいくつかあります** +- これは `generate` と互換性がないため、 `--predict_with_generate` とも互換性がありません + すべての seq2seq/clm スクリプト (翻訳/要約/clm など)。 + 問題 [#21667](https://github.com/huggingface/transformers/issues/21667) を参照してください。 + +### PyTorch/XLA Fully Sharded Data parallel + +TPU ユーザーの皆様に朗報です。 PyTorch/XLA は FSDP をサポートするようになりました。 +最新の Fully Sharded Data Parallel (FSDP) トレーニングがすべてサポートされています。 +詳細については、[FSDP を使用した Cloud TPU での PyTorch モデルのスケーリング](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/) および [PyTorch/XLA 実装 を参照してください。 FSDP の](https://github.com/pytorch/xla/tree/master/torch_xla/distributed/fsdp) +必要なのは、設定を通じて有効にすることだけです。 + +**FSDP サポートに必要な PyTorch/XLA バージョン**: >=2.0 + +**使用法**: + +`--fsdp "full shard"` を、`--fsdp_config ` に加えられる次の変更とともに渡します。 +- PyTorch/XLA FSDP を有効にするには、`xla`を`True`に設定する必要があります。 +- `xla_fsdp_settings` 値は、XLA FSDP ラッピング パラメータを格納する辞書です。 + オプションの完全なリストについては、[こちら]( + https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_full_sharded_data_Parallel.py)。 +- `xla_fsdp_grad_ckpt`。 `True`の場合、ネストされた XLA FSDP でラップされた各レイヤー上で勾配チェックポイントを使用します。 + この設定は、xla フラグが true に設定されており、自動ラッピング ポリシーが指定されている場合にのみ使用できます。 + `fsdp_min_num_params` または `fsdp_transformer_layer_cls_to_wrap`。 +- トランスフォーマー ベースの自動ラップ ポリシーまたはサイズ ベースの自動ラップ ポリシーのいずれかを使用できます。 + - トランスフォーマーベースの自動ラップポリシーの場合、構成ファイルで `fsdp_transformer_layer_cls_to_wrap` を指定することをお勧めします。指定しない場合、使用可能な場合、デフォルト値は `model._no_split_modules` になります。 + これは、ラップするトランスフォーマー層クラス名のリスト (大文字と小文字を区別) を指定します (例: [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] ...)。 + 重みを共有するサブモジュール (埋め込み層など) が異なる FSDP ラップされたユニットにならないようにする必要があるため、これは重要です。 + このポリシーを使用すると、マルチヘッド アテンションとそれに続くいくつかの MLP レイヤーを含むブロックごとにラッピングが発生します。 + 共有埋め込みを含む残りの層は、同じ最も外側の FSDP ユニットにラップされるのが便利です。 + したがって、トランスベースのモデルにはこれを使用してください。 + - サイズベースの自動ラップポリシーの場合は、設定ファイルに`fsdp_min_num_params`を追加してください。 + 自動ラッピングのための FSDP のパラメータの最小数を指定します。 + +### Using Trainer for accelerated PyTorch Training on Mac + +PyTorch v1.12 リリースにより、開発者と研究者は Apple シリコン GPU を利用してモデル トレーニングを大幅に高速化できます。 +これにより、プロトタイピングや微調整などの機械学習ワークフローを Mac 上でローカルで実行できるようになります。 +PyTorch のバックエンドとしての Apple の Metal Performance Shaders (MPS) はこれを可能にし、新しい `"mps"` デバイス経由で使用できます。 +これにより、計算グラフとプリミティブが MPS Graph フレームワークと MPS によって提供される調整されたカーネルにマッピングされます。 +詳細については、公式ドキュメント [Mac での Accelerated PyTorch Training の紹介](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/) を参照してください。 +および [MPS バックエンド](https://pytorch.org/docs/stable/notes/mps.html)。 + + + +MacOS マシンに PyTorch >= 1.13 (執筆時点ではナイトリー バージョン) をインストールすることを強くお勧めします。 +トランスベースのモデルのモデルの正確性とパフォーマンスの向上に関連する主要な修正が行われています。 +詳細については、https://github.com/pytorch/pytorch/issues/82707 を参照してください。 + + + +**Apple Silicon チップを使用したトレーニングと推論の利点** + +1. ユーザーがローカルで大規模なネットワークやバッチ サイズをトレーニングできるようにします +2. ユニファイド メモリ アーキテクチャにより、データ取得の遅延が短縮され、GPU がメモリ ストア全体に直接アクセスできるようになります。 +したがって、エンドツーエンドのパフォーマンスが向上します。 +3. 
クラウドベースの開発に関連するコストや追加のローカル GPU の必要性を削減します。 + +**前提条件**: mps サポートを備えたトーチをインストールするには、 +この素晴らしいメディア記事 [GPU アクセラレーションが M1 Mac の PyTorch に登場](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1) に従ってください。 。 + +**使用法**: +`mps` デバイスは、`cuda` デバイスが使用される方法と同様に利用可能な場合、デフォルトで使用されます。 +したがって、ユーザーによるアクションは必要ありません。 +たとえば、以下のコマンドを使用して、Apple Silicon GPU を使用して公式の Glue テキスト分類タスクを (ルート フォルダーから) 実行できます。 + +```bash +export TASK_NAME=mrpc + +python examples/pytorch/text-classification/run_glue.py \ + --model_name_or_path bert-base-cased \ + --task_name $TASK_NAME \ + --do_train \ + --do_eval \ + --max_seq_length 128 \ + --per_device_train_batch_size 32 \ + --learning_rate 2e-5 \ + --num_train_epochs 3 \ + --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir +``` + +**注意すべきいくつかの注意事項** + +1. 一部の PyTorch 操作は mps に実装されていないため、エラーがスローされます。 +これを回避する 1 つの方法は、環境変数 `PYTORCH_ENABLE_MPS_FALLBACK=1` を設定することです。 +これらの操作では CPU にフォールバックします。ただし、それでも UserWarning がスローされます。 +2. 分散セットアップ`gloo`および`nccl`は、`mps`デバイスでは動作しません。 +これは、現在「mps」デバイス タイプの単一 GPU のみを使用できることを意味します。 + +最後に、覚えておいてください。 🤗 `Trainer` は MPS バックエンドのみを統合するため、 +MPS バックエンドの使用に関して問題や質問がある場合は、 +[PyTorch GitHub](https://github.com/pytorch/pytorch/issues) に問題を提出してください。 + +## Using Accelerate Launcher with Trainer + +加速してトレーナーにパワーを与えましょう。ユーザーが期待することに関しては、次のとおりです。 +- トレーナー引数に対して FSDP、DeepSpeed などのトレーナー インテレーションを変更せずに使用し続けることができます。 +- トレーナーで Accelerate Launcher を使用できるようになりました (推奨)。 + +トレーナーで Accelerate Launcher を使用する手順: +1. 🤗 Accelerate がインストールされていることを確認してください。Accelerate がないと `Trainer` を使用することはできません。そうでない場合は、`pip install accelerate`してください。 Accelerate のバージョンを更新する必要がある場合もあります: `pip install activate --upgrade` +2. `accelerate config`を実行し、アンケートに記入します。以下は加速設定の例です。 + a. DDP マルチノード マルチ GPU 構成: + ```yaml + compute_environment: LOCAL_MACHINE + distributed_type: MULTI_GPU + downcast_bf16: 'no' + gpu_ids: all + machine_rank: 0 #change rank as per the node + main_process_ip: 192.168.20.1 + main_process_port: 9898 + main_training_function: main + mixed_precision: fp16 + num_machines: 2 + num_processes: 8 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + + b. 
FSDP config: + ```yaml + compute_environment: LOCAL_MACHINE + distributed_type: FSDP + downcast_bf16: 'no' + fsdp_config: + fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP + fsdp_backward_prefetch_policy: BACKWARD_PRE + fsdp_forward_prefetch: true + fsdp_offload_params: false + fsdp_sharding_strategy: 1 + fsdp_state_dict_type: FULL_STATE_DICT + fsdp_sync_module_states: true + fsdp_transformer_layer_cls_to_wrap: BertLayer + fsdp_use_orig_params: true + machine_rank: 0 + main_training_function: main + mixed_precision: bf16 + num_machines: 1 + num_processes: 2 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + c.ファイルを指す DeepSpeed 構成: + ```yaml + compute_environment: LOCAL_MACHINE + deepspeed_config: + deepspeed_config_file: /home/user/configs/ds_zero3_config.json + zero3_init_flag: true + distributed_type: DEEPSPEED + downcast_bf16: 'no' + machine_rank: 0 + main_training_function: main + num_machines: 1 + num_processes: 4 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + + d.加速プラグインを使用した DeepSpeed 構成: + + ```yaml + compute_environment: LOCAL_MACHINE + deepspeed_config: + gradient_accumulation_steps: 1 + gradient_clipping: 0.7 + offload_optimizer_device: cpu + offload_param_device: cpu + zero3_init_flag: true + zero_stage: 2 + distributed_type: DEEPSPEED + downcast_bf16: 'no' + machine_rank: 0 + main_training_function: main + mixed_precision: bf16 + num_machines: 1 + num_processes: 4 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + +3. 加速設定またはランチャー引数によって上記で処理された引数以外の引数を使用して、トレーナー スクリプトを実行します。 +以下は、上記の FSDP 構成で`accelerate launcher`を使用して`run_glue.py`を実行する例です。 + +```bash +cd transformers + +accelerate launch \ +./examples/pytorch/text-classification/run_glue.py \ +--model_name_or_path bert-base-cased \ +--task_name $TASK_NAME \ +--do_train \ +--do_eval \ +--max_seq_length 128 \ +--per_device_train_batch_size 16 \ +--learning_rate 5e-5 \ +--num_train_epochs 3 \ +--output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir +``` + +4. 
`accelerate launch`するための cmd 引数を直接使用することもできます。上の例は次のようにマッピングされます。 + +```bash +cd transformers + +accelerate launch --num_processes=2 \ +--use_fsdp \ +--mixed_precision=bf16 \ +--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \ +--fsdp_transformer_layer_cls_to_wrap="BertLayer" \ +--fsdp_sharding_strategy=1 \ +--fsdp_state_dict_type=FULL_STATE_DICT \ +./examples/pytorch/text-classification/run_glue.py +--model_name_or_path bert-base-cased \ +--task_name $TASK_NAME \ +--do_train \ +--do_eval \ +--max_seq_length 128 \ +--per_device_train_batch_size 16 \ +--learning_rate 5e-5 \ +--num_train_epochs 3 \ +--output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir +``` + +詳細については、🤗 Accelerate CLI ガイドを参照してください: [🤗 Accelerate スクリプトの起動](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 + +移動されたセクション: + +[ DeepSpeed +| Installation +| Deployment with multiple GPUs +| Deployment with one GPU +| Deployment in Notebooks +| Configuration +| Passing Configuration +| Shared Configuration +| ZeRO +| ZeRO-2 Config +| ZeRO-3 Config +| NVMe Support +| ZeRO-2 vs ZeRO-3 Performance +| ZeRO-2 Example +| ZeRO-3 Example +| Optimizer +| Scheduler +| fp32 Precision +| Automatic Mixed Precision +| Batch Size +| Gradient Accumulation +| Gradient Clipping +| Getting The Model Weights Out +] From 5bbf6712767a84ac4c4de1a5525a1b4e697064f7 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Tue, 31 Oct 2023 02:16:40 +0800 Subject: [PATCH 014/268] Device agnostic trainer testing (#27131) --- src/transformers/testing_utils.py | 39 ++++++++++++++- tests/extended/test_trainer_ext.py | 17 +++---- tests/trainer/test_trainer.py | 77 ++++++++++++++++-------------- 3 files changed, 87 insertions(+), 46 deletions(-) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 3a3adbece0f5..2c13eaf044af 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -629,6 +629,20 @@ def require_torch_multi_gpu(test_case): return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) +def require_torch_multi_accelerator(test_case): + """ + Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine + without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain + multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")( + test_case + ) + + def require_torch_non_multi_gpu(test_case): """ Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch). @@ -641,6 +655,16 @@ def require_torch_non_multi_gpu(test_case): return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case) +def require_torch_non_multi_accelerator(test_case): + """ + Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch). + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case) + + def require_torch_up_to_2_gpus(test_case): """ Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch). 
@@ -653,6 +677,17 @@ def require_torch_up_to_2_gpus(test_case): return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case) +def require_torch_up_to_2_accelerators(test_case): + """ + Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch). + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators") + (test_case) + + def require_torch_tpu(test_case): """ Decorator marking a test that requires a TPU (in PyTorch). @@ -774,7 +809,9 @@ def require_torch_gpu(test_case): def require_torch_accelerator(test_case): """Decorator marking a test that requires an accessible accelerator and PyTorch.""" - return unittest.skipUnless(torch_device != "cpu", "test requires accelerator")(test_case) + return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")( + test_case + ) def require_torch_fp16(test_case): diff --git a/tests/extended/test_trainer_ext.py b/tests/extended/test_trainer_ext.py index 831ffd5feede..eacc9106f2b2 100644 --- a/tests/extended/test_trainer_ext.py +++ b/tests/extended/test_trainer_ext.py @@ -26,16 +26,17 @@ CaptureStderr, ExtendSysPath, TestCasePlus, + backend_device_count, execute_subprocess_async, - get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_torch, require_torch_gpu, - require_torch_multi_gpu, - require_torch_non_multi_gpu, + require_torch_multi_accelerator, + require_torch_non_multi_accelerator, slow, + torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed @@ -89,17 +90,17 @@ def run_seq2seq_quick( assert isinstance(last_step_stats["eval_bleu"], float) assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`" - @require_torch_non_multi_gpu + @require_torch_non_multi_accelerator def test_run_seq2seq_no_dist(self): self.run_seq2seq_quick() # verify that the trainer can handle non-distributed with n_gpu > 1 - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_run_seq2seq_dp(self): self.run_seq2seq_quick(distributed=False) # verify that the trainer can handle distributed with n_gpu > 1 - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_run_seq2seq_ddp(self): self.run_seq2seq_quick(distributed=True) @@ -120,7 +121,7 @@ def test_run_seq2seq_apex(self): self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex") @parameterized.expand(["base", "low", "high", "mixed"]) - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_trainer_log_level_replica(self, experiment_id): # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout experiments = { @@ -331,7 +332,7 @@ def run_trainer( if distributed: if n_gpus_to_use is None: - n_gpus_to_use = get_gpu_count() + n_gpus_to_use = backend_device_count(torch_device) master_port = get_torch_dist_unique_port() distributed_args = f""" -m torch.distributed.run diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 624d3833f4f5..8791e92c7148 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -49,6 +49,7 @@ USER, CaptureLogger, TestCasePlus, + backend_device_count, execute_subprocess_async, get_gpu_count, get_tests_dir, @@ -63,17 +64,19 @@ require_tensorboard, require_tokenizers, require_torch, - 
require_torch_bf16_cpu, - require_torch_bf16_gpu, + require_torch_accelerator, + require_torch_bf16, require_torch_gpu, - require_torch_multi_gpu, + require_torch_multi_accelerator, + require_torch_non_multi_accelerator, require_torch_non_multi_gpu, require_torch_tensorrt_fx, require_torch_tf32, - require_torch_up_to_2_gpus, + require_torch_up_to_2_accelerators, require_torchdynamo, require_wandb, slow, + torch_device, ) from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, HPSearchBackend from transformers.training_args import OptimizerNames @@ -606,7 +609,7 @@ def test_gradient_checkpointing(self): ) def test_training_loss(self): - n_gpus = max(1, get_gpu_count()) + n_gpus = max(1, backend_device_count(torch_device)) # With even logs trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus)) @@ -726,8 +729,8 @@ def test_adafactor_lr_none(self): self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0) - @require_torch_gpu - @require_torch_bf16_gpu + @require_torch_accelerator + @require_torch_bf16 def test_mixed_bf16(self): # very basic test trainer = get_regression_trainer(learning_rate=0.1, bf16=True) @@ -812,25 +815,25 @@ def test_number_of_steps_in_training(self): train_output = trainer.train() self.assertEqual(train_output.global_step, 10) - @require_torch_bf16_cpu + @require_torch_bf16 @require_intel_extension_for_pytorch def test_number_of_steps_in_training_with_ipex(self): for mix_bf16 in [True, False]: # Regular training has n_epochs * len(train_dl) steps - trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, no_cuda=True) + trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, use_cpu=True) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / trainer.args.train_batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer( - learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, no_cuda=True + learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / trainer.args.train_batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer( - learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, no_cuda=True + learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) @@ -861,7 +864,7 @@ def is_any_loss_nan_or_inf(log_history): self.assertFalse(is_any_loss_nan_or_inf(log_history_filter)) def test_train_and_eval_dataloaders(self): - n_gpu = max(1, torch.cuda.device_count()) + n_gpu = max(1, backend_device_count(torch_device)) trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16) self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16 * n_gpu) trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16) @@ -898,7 +901,7 @@ def test_dataloader_without_dataset(self): trainer.train() trainer.evaluate() - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_data_is_not_parallelized_when_model_is_parallel(self): model = RegressionModel() # Make the Trainer believe it's a parallelized model @@ -995,12 +998,12 @@ def test_evaluate_with_jit(self): expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] 
self.assertAlmostEqual(results["eval_accuracy"], expected_acc) - @require_torch_bf16_cpu + @require_torch_bf16 @require_intel_extension_for_pytorch def test_evaluate_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer( - a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, no_cuda=True + a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, use_cpu=True ) results = trainer.evaluate() @@ -1019,7 +1022,7 @@ def test_evaluate_with_ipex(self): eval_len=66, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, - no_cuda=True, + use_cpu=True, ) results = trainer.evaluate() @@ -1038,7 +1041,7 @@ def test_evaluate_with_ipex(self): compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, bf16=mix_bf16, - no_cuda=True, + use_cpu=True, ) results = trainer.evaluate() @@ -1115,24 +1118,24 @@ def test_predict_with_jit(self): self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) - @require_torch_bf16_cpu + @require_torch_bf16 @require_intel_extension_for_pytorch def test_predict_with_ipex(self): for mix_bf16 in [True, False]: - trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, no_cuda=True) + trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, use_cpu=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size - trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, no_cuda=True) + trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, use_cpu=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer( - a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, no_cuda=True + a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, use_cpu=True ) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x @@ -1148,7 +1151,7 @@ def test_predict_with_ipex(self): label_names=["labels", "labels_2"], use_ipex=True, bf16=mix_bf16, - no_cuda=True, + use_cpu=True, ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions @@ -1255,7 +1258,7 @@ def test_safe_checkpoints(self): tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False, safe_weights=save_safetensors ) - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_run_seq2seq_double_train_wrap_once(self): # test that we don't wrap the model more than once # since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for @@ -1268,7 +1271,7 @@ def test_run_seq2seq_double_train_wrap_once(self): model_wrapped_after = trainer.model_wrapped self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice") - @require_torch_up_to_2_gpus + @require_torch_up_to_2_accelerators def test_can_resume_training(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model @@ -1424,7 +1427,7 @@ def test_resume_training_with_randomness(self): @slow @require_accelerate - 
@require_torch_non_multi_gpu + @require_torch_non_multi_accelerator def test_auto_batch_size_finder(self): if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True @@ -1471,7 +1474,7 @@ def test_training_with_resume_from_checkpoint_false(self): trainer.train(resume_from_checkpoint=False) - @require_torch_up_to_2_gpus + @require_torch_up_to_2_accelerators def test_resume_training_with_shard_checkpoint(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model @@ -1497,7 +1500,7 @@ def test_resume_training_with_shard_checkpoint(self): self.check_trainer_state_are_the_same(state, state1) @require_safetensors - @require_torch_up_to_2_gpus + @require_torch_up_to_2_accelerators def test_resume_training_with_safe_checkpoint(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model @@ -1532,7 +1535,7 @@ def test_resume_training_with_safe_checkpoint(self): self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) - @require_torch_up_to_2_gpus + @require_torch_up_to_2_accelerators def test_resume_training_with_gradient_accumulation(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model @@ -1570,7 +1573,7 @@ def test_resume_training_with_gradient_accumulation(self): self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) - @require_torch_up_to_2_gpus + @require_torch_up_to_2_accelerators def test_resume_training_with_frozen_params(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model @@ -1715,7 +1718,7 @@ def test_trainer_eval_mrpc(self): ) eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev") - training_args = TrainingArguments(output_dir="./examples", no_cuda=True) + training_args = TrainingArguments(output_dir="./examples", use_cpu=True) trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset) result = trainer.evaluate() self.assertLess(result["eval_loss"], 0.2) @@ -1920,12 +1923,12 @@ def test_mem_metrics(self): trainer = get_regression_trainer(skip_memory_metrics=True) self.check_mem_metrics(trainer, self.assertNotIn) - @require_torch_gpu + @require_torch_accelerator def test_fp16_full_eval(self): # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. # it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 - n_gpus = get_gpu_count() + n_gpus = backend_device_count(torch_device) bs = 8 eval_len = 16 * n_gpus @@ -2090,15 +2093,15 @@ def forward(self, x): # aggressively fuses the operations and reduce the memory footprint. self.assertGreater(orig_peak_mem, peak_mem * 2) - @require_torch_gpu - @require_torch_bf16_gpu + @require_torch_accelerator + @require_torch_bf16 def test_bf16_full_eval(self): # note: most of the logic is the same as test_fp16_full_eval # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. 
# it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 - n_gpus = get_gpu_count() + n_gpus = backend_device_count(torch_device) bs = 8 eval_len = 16 * n_gpus @@ -2163,7 +2166,7 @@ def test_no_wd_param_group(self): self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params) @slow - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_end_to_end_example(self): # Tests that `translation.py` will run without issues script_path = os.path.abspath( @@ -2302,7 +2305,7 @@ def test_push_to_hub_with_saves_each_epoch(self): self.assertIn(f"Training in progress, epoch {i}", commits) def test_push_to_hub_with_saves_each_n_steps(self): - num_gpus = max(1, get_gpu_count()) + num_gpus = max(1, backend_device_count(torch_device)) if num_gpus > 2: return From f7ea959b96f26fd978e7f5acb2e22c11a58bf20e Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Mon, 30 Oct 2023 19:53:46 +0100 Subject: [PATCH 015/268] [`core`/ `GC` / `tests`] Stronger GC tests (#27124) * stronger GC tests * better tests and skip failing tests * break down into 3 sub-tests * break down into 3 sub-tests * refactor a bit * more refactor * fix * last nit * credits contrib and suggestions * credits contrib and suggestions --------- Co-authored-by: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- tests/models/align/test_modeling_align.py | 24 +++++++ tests/models/altclip/test_modeling_altclip.py | 24 +++++++ .../autoformer/test_modeling_autoformer.py | 18 +++++ tests/models/beit/test_modeling_beit.py | 12 ++++ .../models/big_bird/test_modeling_big_bird.py | 18 +++++ tests/models/blip/test_modeling_blip.py | 36 ++++++++++ tests/models/blip/test_modeling_blip_text.py | 12 ++++ .../models/blip/test_modeling_tf_blip_text.py | 12 ++++ tests/models/blip_2/test_modeling_blip_2.py | 12 ++++ tests/models/canine/test_modeling_canine.py | 18 +++++ .../test_modeling_chinese_clip.py | 24 +++++++ tests/models/clap/test_modeling_clap.py | 24 +++++++ tests/models/clip/test_modeling_clip.py | 24 +++++++ tests/models/clipseg/test_modeling_clipseg.py | 42 ++++++++++++ tests/models/deit/test_modeling_deit.py | 12 ++++ tests/models/dinov2/test_modeling_dinov2.py | 18 +++++ tests/models/dpt/test_modeling_dpt.py | 12 ++++ tests/models/dpt/test_modeling_dpt_hybrid.py | 12 ++++ tests/models/flava/test_modeling_flava.py | 66 +++++++++++++++++++ tests/models/fnet/test_modeling_fnet.py | 18 +++++ tests/models/git/test_modeling_git.py | 12 ++++ tests/models/gpt2/test_modeling_gpt2.py | 18 +++++ .../models/groupvit/test_modeling_groupvit.py | 24 +++++++ tests/models/idefics/test_modeling_idefics.py | 24 +++++++ .../models/imagegpt/test_modeling_imagegpt.py | 18 +++++ .../models/informer/test_modeling_informer.py | 18 +++++ .../test_modeling_instructblip.py | 12 ++++ .../models/layoutlm/test_modeling_layoutlm.py | 18 +++++ tests/models/lilt/test_modeling_lilt.py | 18 +++++ tests/models/luke/test_modeling_luke.py | 18 +++++ tests/models/marian/test_modeling_marian.py | 18 +++++ tests/models/mra/test_modeling_mra.py | 18 +++++ tests/models/owlv2/test_modeling_owlv2.py | 36 ++++++++++ tests/models/owlvit/test_modeling_owlvit.py | 36 ++++++++++ tests/models/pegasus/test_modeling_pegasus.py | 18 +++++ .../pix2struct/test_modeling_pix2struct.py | 24 +++++++ .../models/roformer/test_modeling_roformer.py | 18 +++++ tests/models/sam/test_modeling_sam.py | 12 ++++ 
.../test_modeling_seamless_m4t.py | 36 ++++++++++ .../test_modeling_speech_to_text.py | 12 ++++ .../test_modeling_tf_speech_to_text.py | 12 ++++ .../models/speecht5/test_modeling_speecht5.py | 36 ++++++++++ tests/models/swin2sr/test_modeling_swin2sr.py | 12 ++++ .../test_modeling_time_series_transformer.py | 18 +++++ tests/models/umt5/test_modeling_umt5.py | 18 +++++ tests/models/vilt/test_modeling_vilt.py | 12 ++++ .../visual_bert/test_modeling_visual_bert.py | 18 +++++ .../models/vitmatte/test_modeling_vitmatte.py | 12 ++++ tests/models/whisper/test_modeling_whisper.py | 12 ++++ tests/models/x_clip/test_modeling_x_clip.py | 24 +++++++ tests/test_modeling_common.py | 60 +++++++++++------ 51 files changed, 1061 insertions(+), 19 deletions(-) diff --git a/tests/models/align/test_modeling_align.py b/tests/models/align/test_modeling_align.py index 47918bcd8389..99daeb816d2d 100644 --- a/tests/models/align/test_modeling_align.py +++ b/tests/models/align/test_modeling_align.py @@ -224,6 +224,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @slow def test_model_from_pretrained(self): for model_name in ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: @@ -352,6 +364,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="ALIGN does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/altclip/test_modeling_altclip.py b/tests/models/altclip/test_modeling_altclip.py index e1510c640a7b..f4f7a8fd5237 100755 --- a/tests/models/altclip/test_modeling_altclip.py +++ b/tests/models/altclip/test_modeling_altclip.py @@ -186,6 +186,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="AltCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -320,6 +332,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: 
https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_model_outputs_equivalence(self): pass diff --git a/tests/models/autoformer/test_modeling_autoformer.py b/tests/models/autoformer/test_modeling_autoformer.py index ab62d0e395f5..965e5dcd87b0 100644 --- a/tests/models/autoformer/test_modeling_autoformer.py +++ b/tests/models/autoformer/test_modeling_autoformer.py @@ -238,6 +238,24 @@ def test_encoder_decoder_model_standalone(self): def test_resize_tokens_embeddings(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + # # Input is 'static_categorical_features' not 'input_ids' def test_model_main_input_name(self): model_signature = inspect.signature(getattr(AutoformerModel, "forward")) diff --git a/tests/models/beit/test_modeling_beit.py b/tests/models/beit/test_modeling_beit.py index 2a35cddf4057..8774503e7694 100644 --- a/tests/models/beit/test_modeling_beit.py +++ b/tests/models/beit/test_modeling_beit.py @@ -307,6 +307,18 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/big_bird/test_modeling_big_bird.py b/tests/models/big_bird/test_modeling_big_bird.py index f86c6d0ac70a..e3ae650c5634 100644 --- a/tests/models/big_bird/test_modeling_big_bird.py +++ b/tests/models/big_bird/test_modeling_big_bird.py @@ -609,6 +609,24 @@ def test_for_change_to_full_attn(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_change_to_full_attn(*config_and_inputs) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + 
pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # overwrite from common in order to skip the check on `attentions` def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, diff --git a/tests/models/blip/test_modeling_blip.py b/tests/models/blip/test_modeling_blip.py index cf8c487082c7..e5a5652f6048 100644 --- a/tests/models/blip/test_modeling_blip.py +++ b/tests/models/blip/test_modeling_blip.py @@ -194,6 +194,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -324,6 +336,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -875,6 +899,18 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # override as the `logit_scale` parameter initilization is different for Blip def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/blip/test_modeling_blip_text.py b/tests/models/blip/test_modeling_blip_text.py index 2301b776feb4..c004a8934ef0 100644 --- a/tests/models/blip/test_modeling_blip_text.py +++ b/tests/models/blip/test_modeling_blip_text.py @@ -147,6 +147,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + 
@unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/blip/test_modeling_tf_blip_text.py b/tests/models/blip/test_modeling_tf_blip_text.py index a3da1a7f675d..a21bdd109f89 100644 --- a/tests/models/blip/test_modeling_tf_blip_text.py +++ b/tests/models/blip/test_modeling_tf_blip_text.py @@ -147,6 +147,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index 66d59465a7c5..9138061ee1bf 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -188,6 +188,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="Blip2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass diff --git a/tests/models/canine/test_modeling_canine.py b/tests/models/canine/test_modeling_canine.py index 303d465ca905..f10823fc5664 100644 --- a/tests/models/canine/test_modeling_canine.py +++ b/tests/models/canine/test_modeling_canine.py @@ -507,6 +507,24 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @slow def test_model_from_pretrained(self): for model_name in CANINE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: diff --git a/tests/models/chinese_clip/test_modeling_chinese_clip.py b/tests/models/chinese_clip/test_modeling_chinese_clip.py index 
137c3c2888dd..c2030ebec126 100644 --- a/tests/models/chinese_clip/test_modeling_chinese_clip.py +++ b/tests/models/chinese_clip/test_modeling_chinese_clip.py @@ -395,6 +395,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="ChineseCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -461,6 +473,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="ChineseCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass diff --git a/tests/models/clap/test_modeling_clap.py b/tests/models/clap/test_modeling_clap.py index dc5718850f4e..458290c921bf 100644 --- a/tests/models/clap/test_modeling_clap.py +++ b/tests/models/clap/test_modeling_clap.py @@ -253,6 +253,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -406,6 +418,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="ClapTextModel does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/clip/test_modeling_clip.py b/tests/models/clip/test_modeling_clip.py index 0edd73f7ec60..b96edcc56da7 100644 --- a/tests/models/clip/test_modeling_clip.py +++ b/tests/models/clip/test_modeling_clip.py @@ -227,6 +227,18 @@ def test_training(self): def 
test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="CLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -376,6 +388,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/clipseg/test_modeling_clipseg.py b/tests/models/clipseg/test_modeling_clipseg.py index 0f97f381fc52..0ebf08da89f9 100644 --- a/tests/models/clipseg/test_modeling_clipseg.py +++ b/tests/models/clipseg/test_modeling_clipseg.py @@ -202,6 +202,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -327,6 +339,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -470,6 +494,24 @@ def test_retain_grad_hidden_states_attentions(self): def test_model_common_attributes(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def 
test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # override as the some parameters require custom initialization def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py index 2685900afbb9..7b7de3f320b3 100644 --- a/tests/models/deit/test_modeling_deit.py +++ b/tests/models/deit/test_modeling_deit.py @@ -314,6 +314,18 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/dinov2/test_modeling_dinov2.py b/tests/models/dinov2/test_modeling_dinov2.py index fa20833823e2..a040356fb798 100644 --- a/tests/models/dinov2/test_modeling_dinov2.py +++ b/tests/models/dinov2/test_modeling_dinov2.py @@ -238,6 +238,24 @@ def test_config(self): def test_inputs_embeds(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 247791ed4127..100803bcdebf 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -256,6 +256,18 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/dpt/test_modeling_dpt_hybrid.py 
b/tests/models/dpt/test_modeling_dpt_hybrid.py index 7270f609c2bc..82055b210557 100644 --- a/tests/models/dpt/test_modeling_dpt_hybrid.py +++ b/tests/models/dpt/test_modeling_dpt_hybrid.py @@ -270,6 +270,18 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/flava/test_modeling_flava.py b/tests/models/flava/test_modeling_flava.py index 02241816373a..e4b3990dce85 100644 --- a/tests/models/flava/test_modeling_flava.py +++ b/tests/models/flava/test_modeling_flava.py @@ -311,6 +311,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # skip this test as FlavaImageModel has no base class and is # not available in MODEL_MAPPING def test_save_load_fast_init_from_base(self): @@ -458,6 +470,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass @@ -610,6 +634,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass @@ -728,6 +764,18 @@ def test_retain_grad_hidden_states_attentions(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + 
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass @@ -1190,6 +1238,24 @@ class FlavaForPreTrainingTest(FlavaModelTest): class_for_tester = FlavaForPreTrainingTester test_torchscript = False + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/fnet/test_modeling_fnet.py b/tests/models/fnet/test_modeling_fnet.py index cfc25125c3ec..ba89542ee66f 100644 --- a/tests/models/fnet/test_modeling_fnet.py +++ b/tests/models/fnet/test_modeling_fnet.py @@ -326,6 +326,24 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): def test_attention_outputs(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/git/test_modeling_git.py b/tests/models/git/test_modeling_git.py index 0dde54a398e3..c503abfb89db 100644 --- a/tests/models/git/test_modeling_git.py +++ b/tests/models/git/test_modeling_git.py @@ -174,6 +174,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="GitVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass diff --git a/tests/models/gpt2/test_modeling_gpt2.py b/tests/models/gpt2/test_modeling_gpt2.py index c94103988849..17b32a22fb1f 100644 --- a/tests/models/gpt2/test_modeling_gpt2.py +++ 
b/tests/models/gpt2/test_modeling_gpt2.py @@ -562,6 +562,24 @@ def test_gpt2_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_weight_initialization(*config_and_inputs) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @slow def test_batch_generation(self): model = GPT2LMHeadModel.from_pretrained("gpt2") diff --git a/tests/models/groupvit/test_modeling_groupvit.py b/tests/models/groupvit/test_modeling_groupvit.py index 6d52b6b50185..3d7f50ae6eb6 100644 --- a/tests/models/groupvit/test_modeling_groupvit.py +++ b/tests/models/groupvit/test_modeling_groupvit.py @@ -270,6 +270,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="GroupViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -454,6 +466,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/idefics/test_modeling_idefics.py b/tests/models/idefics/test_modeling_idefics.py index 040a5c2c087d..ffd46dd197dc 100644 --- a/tests/models/idefics/test_modeling_idefics.py +++ b/tests/models/idefics/test_modeling_idefics.py @@ -379,6 +379,18 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def 
test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="""IDEFICS does not support retaining the gradients of the hidden states and attention""") def test_retain_grad_hidden_states_attentions(self): return @@ -496,6 +508,18 @@ def test_for_token_classification(self): def test_retain_grad_hidden_states_attentions(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skipIf(not is_torch_greater_or_equal_than_2_0, reason="pytorch 2.0 or higher is required") @require_torch diff --git a/tests/models/imagegpt/test_modeling_imagegpt.py b/tests/models/imagegpt/test_modeling_imagegpt.py index b4e2cd5ab413..ad8c8d290e67 100644 --- a/tests/models/imagegpt/test_modeling_imagegpt.py +++ b/tests/models/imagegpt/test_modeling_imagegpt.py @@ -316,6 +316,24 @@ def test_imagegpt_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_for_image_classification(*config_and_inputs) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @slow def test_model_from_pretrained(self): for model_name in IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: diff --git a/tests/models/informer/test_modeling_informer.py b/tests/models/informer/test_modeling_informer.py index f3c8539d8450..e68d10241d34 100644 --- a/tests/models/informer/test_modeling_informer.py +++ b/tests/models/informer/test_modeling_informer.py @@ -279,6 +279,24 @@ def test_model_outputs_equivalence(self): def test_determinism(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # # Input is 'static_categorical_features' not 'input_ids' def test_model_main_input_name(self): model_signature = inspect.signature(getattr(InformerModel, "forward")) diff --git 
a/tests/models/instructblip/test_modeling_instructblip.py b/tests/models/instructblip/test_modeling_instructblip.py index f0fd193b6488..3b7dc002aff7 100644 --- a/tests/models/instructblip/test_modeling_instructblip.py +++ b/tests/models/instructblip/test_modeling_instructblip.py @@ -199,6 +199,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="InstructBlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index aafa53969dfb..38dd86eb8b82 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -279,6 +279,24 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: diff --git a/tests/models/lilt/test_modeling_lilt.py b/tests/models/lilt/test_modeling_lilt.py index 1bb92300c3db..653178e2ad66 100644 --- a/tests/models/lilt/test_modeling_lilt.py +++ b/tests/models/lilt/test_modeling_lilt.py @@ -275,6 +275,24 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @slow def test_model_from_pretrained(self): for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: diff 
--git a/tests/models/luke/test_modeling_luke.py b/tests/models/luke/test_modeling_luke.py index 95cb4f5d01a6..4f1ed2e2e3d3 100644 --- a/tests/models/luke/test_modeling_luke.py +++ b/tests/models/luke/test_modeling_luke.py @@ -855,6 +855,24 @@ def test_retain_grad_entity_hidden_states(self): self.assertIsNotNone(entity_hidden_states.grad) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @require_torch class LukeModelIntegrationTests(unittest.TestCase): diff --git a/tests/models/marian/test_modeling_marian.py b/tests/models/marian/test_modeling_marian.py index 0f3acbcf4078..c590b216cc47 100644 --- a/tests/models/marian/test_modeling_marian.py +++ b/tests/models/marian/test_modeling_marian.py @@ -347,6 +347,24 @@ def test_tie_word_embeddings_decoder(self): def test_pipeline_conversational(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" diff --git a/tests/models/mra/test_modeling_mra.py b/tests/models/mra/test_modeling_mra.py index aac9ce5bc160..02c61fa14012 100644 --- a/tests/models/mra/test_modeling_mra.py +++ b/tests/models/mra/test_modeling_mra.py @@ -360,6 +360,24 @@ def test_model_from_pretrained(self): def test_attention_outputs(self): return + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @require_torch class MraModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py index 
98bae3c77e8f..a45df63c39d3 100644 --- a/tests/models/owlv2/test_modeling_owlv2.py +++ b/tests/models/owlv2/test_modeling_owlv2.py @@ -190,6 +190,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="Owlv2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -322,6 +334,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="OWLV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -660,6 +684,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index 8360b9f2a232..ae419c69f0fe 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -188,6 +188,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="OwlViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -318,6 +330,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + 
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="OWLVIT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -653,6 +677,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return diff --git a/tests/models/pegasus/test_modeling_pegasus.py b/tests/models/pegasus/test_modeling_pegasus.py index 4011fe2c6824..bae10d18ff8c 100644 --- a/tests/models/pegasus/test_modeling_pegasus.py +++ b/tests/models/pegasus/test_modeling_pegasus.py @@ -290,6 +290,24 @@ def test_generate_fp16(self): model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" diff --git a/tests/models/pix2struct/test_modeling_pix2struct.py b/tests/models/pix2struct/test_modeling_pix2struct.py index 34ca767d6b01..002b287fd9f6 100644 --- a/tests/models/pix2struct/test_modeling_pix2struct.py +++ b/tests/models/pix2struct/test_modeling_pix2struct.py @@ -199,6 +199,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_retain_grad_hidden_states_attentions(self): pass @@ -336,6 +348,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: 
https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="Pix2Struct does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/roformer/test_modeling_roformer.py b/tests/models/roformer/test_modeling_roformer.py index e54d31d15468..6c130ae1746c 100644 --- a/tests/models/roformer/test_modeling_roformer.py +++ b/tests/models/roformer/test_modeling_roformer.py @@ -486,6 +486,24 @@ def test_model_from_pretrained(self): model = RoFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @require_torch class RoFormerModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/sam/test_modeling_sam.py b/tests/models/sam/test_modeling_sam.py index a0f39a401355..3d5ec22c035e 100644 --- a/tests/models/sam/test_modeling_sam.py +++ b/tests/models/sam/test_modeling_sam.py @@ -421,6 +421,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass diff --git a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py index d75629efc351..2abedb6dd708 100644 --- a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py +++ b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py @@ -490,6 +490,24 @@ def test_forward_signature(self): def test_save_load_fast_init_from_base(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, 
check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_attention_outputs(self): # expected length is subsampled so need to change a bit this test if not self.has_attentions: @@ -735,6 +753,24 @@ def test_save_load_fast_init_to_base(self): def test_save_load_fast_init_from_base(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @require_torch class SeamlessM4TGenerationTest(unittest.TestCase): diff --git a/tests/models/speech_to_text/test_modeling_speech_to_text.py b/tests/models/speech_to_text/test_modeling_speech_to_text.py index d86fc43a8268..061e17c3e7f5 100644 --- a/tests/models/speech_to_text/test_modeling_speech_to_text.py +++ b/tests/models/speech_to_text/test_modeling_speech_to_text.py @@ -324,6 +324,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_features = input_dict["input_features"] diff --git a/tests/models/speech_to_text/test_modeling_tf_speech_to_text.py b/tests/models/speech_to_text/test_modeling_tf_speech_to_text.py index c874d5c5c3ce..badde2485df1 100644 --- a/tests/models/speech_to_text/test_modeling_tf_speech_to_text.py +++ b/tests/models/speech_to_text/test_modeling_tf_speech_to_text.py @@ -246,6 +246,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_generate_fp16(self): pass diff --git a/tests/models/speecht5/test_modeling_speecht5.py b/tests/models/speecht5/test_modeling_speecht5.py index fed01a94449b..65c1a340ad05 100644 --- a/tests/models/speecht5/test_modeling_speecht5.py +++ b/tests/models/speecht5/test_modeling_speecht5.py @@ -702,6 +702,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not 
compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: @@ -987,6 +999,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: @@ -1421,6 +1445,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: diff --git a/tests/models/swin2sr/test_modeling_swin2sr.py b/tests/models/swin2sr/test_modeling_swin2sr.py index e43c45d0d47a..f94e11ad6460 100644 --- a/tests/models/swin2sr/test_modeling_swin2sr.py +++ b/tests/models/swin2sr/test_modeling_swin2sr.py @@ -207,6 +207,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/time_series_transformer/test_modeling_time_series_transformer.py b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py index 42319a1dd0a2..c5a3646a5beb 100644 --- a/tests/models/time_series_transformer/test_modeling_time_series_transformer.py +++ b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py @@ -369,6 +369,24 @@ def test_attention_outputs(self): [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when 
using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @parameterized.expand( [ (1, 5, [1]), diff --git a/tests/models/umt5/test_modeling_umt5.py b/tests/models/umt5/test_modeling_umt5.py index d9fd852c884a..3cf9df9703fb 100644 --- a/tests/models/umt5/test_modeling_umt5.py +++ b/tests/models/umt5/test_modeling_umt5.py @@ -537,6 +537,24 @@ def test_generate_with_head_masking(self): def test_disk_offload(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @require_torch @require_sentencepiece diff --git a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py index 399f0710c779..853701e3a8ea 100644 --- a/tests/models/vilt/test_modeling_vilt.py +++ b/tests/models/vilt/test_modeling_vilt.py @@ -320,6 +320,18 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip( reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic hidden states""" diff --git a/tests/models/visual_bert/test_modeling_visual_bert.py b/tests/models/visual_bert/test_modeling_visual_bert.py index 9000be33ab7e..c366e9145ea7 100644 --- a/tests/models/visual_bert/test_modeling_visual_bert.py +++ b/tests/models/visual_bert/test_modeling_visual_bert.py @@ -555,6 +555,24 @@ def test_model_from_pretrained(self): model = VisualBertModel.from_pretrained(model_name) self.assertIsNotNone(model) + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def 
test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @require_torch class VisualBertModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/vitmatte/test_modeling_vitmatte.py b/tests/models/vitmatte/test_modeling_vitmatte.py index 09e3f60966b0..fcc99de0ba93 100644 --- a/tests/models/vitmatte/test_modeling_vitmatte.py +++ b/tests/models/vitmatte/test_modeling_vitmatte.py @@ -173,6 +173,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="ViTMatte does not support input and output embeddings") def test_model_common_attributes(self): pass diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 337d33485210..6bb73468ad07 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -414,6 +414,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + def test_generate_with_head_masking(self): pass diff --git a/tests/models/x_clip/test_modeling_x_clip.py b/tests/models/x_clip/test_modeling_x_clip.py index 5c602d3d3ef7..db28b41c0b39 100644 --- a/tests/models/x_clip/test_modeling_x_clip.py +++ b/tests/models/x_clip/test_modeling_x_clip.py @@ -194,6 +194,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @@ -416,6 +428,18 @@ def test_training(self): def test_training_gradient_checkpointing(self): pass + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + 
@unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + @unittest.skip(reason="X-CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 7e1c471badf4..634d7631dff2 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -539,56 +539,78 @@ def test_forward_signature(self): expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_training(self): + def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.use_cache = False config.return_dict = True - if model_class.__name__ in [ - *get_values(MODEL_MAPPING_NAMES), - *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), - ]: + if ( + model_class.__name__ + in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] + or not model_class.supports_gradient_checkpointing + ): continue + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) + model.to(torch_device) + model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() + + # unfreeze additional layers + for p in model.parameters(): + p.requires_grad_(True) + + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() + optimizer.step() - def test_training_gradient_checkpointing(self): + for k, v in model.named_parameters(): + if v.requires_grad: + self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!") + + def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - config.use_cache = False config.return_dict = True - if ( - model_class.__name__ - in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] - or not model_class.supports_gradient_checkpointing - ): + if model_class.__name__ in [ + *get_values(MODEL_MAPPING_NAMES), + *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), + ]: continue + model = model_class(config) model.to(torch_device) - model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() - model.gradient_checkpointing_disable() - model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": True}) - model.train() - inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) - loss = model(**inputs).loss - loss.backward() + def test_training_gradient_checkpointing(self): + # Scenario - 1 default behaviour + self.check_training_gradient_checkpointing() + + def test_training_gradient_checkpointing_use_reentrant(self): + # Scenario - 2 with `use_reentrant=True` - this is the default value that is used in pytorch's + # torch.utils.checkpoint.checkpoint + self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True}) + + 
def test_training_gradient_checkpointing_use_reentrant_false(self): + # Scenario - 3 with `use_reentrant=False` pytorch suggests users to use this value for + # future releases: https://pytorch.org/docs/stable/checkpoint.html + self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": False}) def test_attention_outputs(self): if not self.has_attentions: From e971486d891a7a580ba3f84d8a4b525c0c51850a Mon Sep 17 00:00:00 2001 From: MD FAIZAN KHAN <124388165+THEFZNKHAN@users.noreply.github.com> Date: Tue, 31 Oct 2023 00:42:09 +0530 Subject: [PATCH 016/268] Fix: typos in README.md (#27154) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8f4496059fee..b7e4b4135677 100644 --- a/README.md +++ b/README.md @@ -244,7 +244,7 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta - This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files. - The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library (possibly, [Accelerate](https://huggingface.co/docs/accelerate)). -- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/main/examples) are just that: examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. +- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/main/examples) are just that: examples. It is expected that they won't work out-of-the-box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. ## Installation @@ -283,7 +283,7 @@ Follow the installation pages of Flax, PyTorch or TensorFlow to see how to insta ## Model architectures -**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co/models) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations). +**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co/models), where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations). 
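The `check_training_gradient_checkpointing` refactor in `tests/test_modeling_common.py` above drives all three scenarios (default, `use_reentrant=True`, `use_reentrant=False`) through one helper that enables checkpointing via `gradient_checkpointing_enable(gradient_checkpointing_kwargs=...)` and then asserts that every trainable parameter received a gradient. The sketch below restates that pattern outside the test harness; it is illustrative only, assumes a transformers version that includes the `gradient_checkpointing_kwargs` argument introduced in this patch series, and uses the `gpt2` checkpoint purely as an example rather than anything referenced by these patches.

```python
# Minimal sketch of the pattern exercised by check_training_gradient_checkpointing.
# Assumptions: a transformers build with gradient_checkpointing_kwargs support and
# the "gpt2" checkpoint, chosen only for illustration.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Scenario 3 from the refactor: non-reentrant checkpointing, the variant PyTorch
# recommends going forward (https://pytorch.org/docs/stable/checkpoint.html).
model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
model.train()

inputs = tokenizer("gradient checkpointing smoke test", return_tensors="pt")
loss = model(**inputs, labels=inputs["input_ids"]).loss
loss.backward()

# This is the check the skipped architectures above currently fail: no trainable
# parameter should be left without a gradient after backward().
for name, param in model.named_parameters():
    if param.requires_grad:
        assert param.grad is not None, f"{name} has no gradient!"
```

Scenario 2 differs only in passing `{"use_reentrant": True}`, and the bare call with no kwargs covers the default behaviour.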
Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen) From d39352d12c3f6a301e6c9d99218d1770eb513aca Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 30 Oct 2023 21:08:29 +0100 Subject: [PATCH 017/268] Fix import of torch.utils.checkpoint (#27155) * Fix import * Apply suggestions from code review Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> --------- Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> --- src/transformers/modeling_utils.py | 9 +++------ .../models/seamless_m4t/modeling_seamless_m4t.py | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 0df52e0f45ea..ff533c93a9d2 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -33,6 +33,7 @@ from packaging import version from torch import Tensor, nn from torch.nn import CrossEntropyLoss, Identity +from torch.utils.checkpoint import checkpoint from .activations import get_activation from .configuration_utils import PretrainedConfig @@ -1869,9 +1870,7 @@ def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {} - gradient_checkpointing_func = functools.partial( - torch.utils.checkpoint.checkpoint, **gradient_checkpointing_kwargs - ) + gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs) self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) @@ -1882,9 +1881,7 @@ def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): # the gradients to make sure the gradient flows. 
self.enable_input_require_grads() - def _set_gradient_checkpointing( - self, enable: bool = True, gradient_checkpointing_func: Callable = torch.utils.checkpoint.checkpoint - ): + def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func: Callable = checkpoint): is_gradient_checkpointing_set = False # Apply it on the top-level module in case the top-level modules supports it diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index 29d8cf94e3d6..3fe519d2d259 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -1813,7 +1813,7 @@ def forward( layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: - layer_outputs = torch.utils.checkpoint.checkpoint( + layer_outputs = self._gradient_checkpointing_func( encoder_layer.forward, hidden_states, attention_mask, From 8211c59b9a8fe84d2861446b26542f89a0260e64 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 30 Oct 2023 21:42:19 +0100 Subject: [PATCH 018/268] [KOSMOS-2] Update docs (#27157) Update docs --- docs/source/en/_toctree.yml | 4 ++-- docs/source/en/model_doc/kosmos-2.md | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 141398c02e4e..1a76762d160f 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -368,8 +368,6 @@ title: I-BERT - local: model_doc/jukebox title: Jukebox - - local: model_doc/kosmos-2 - title: KOSMOS-2 - local: model_doc/led title: LED - local: model_doc/llama @@ -685,6 +683,8 @@ title: IDEFICS - local: model_doc/instructblip title: InstructBLIP + - local: model_doc/kosmos-2 + title: KOSMOS-2 - local: model_doc/layoutlm title: LayoutLM - local: model_doc/layoutlmv2 diff --git a/docs/source/en/model_doc/kosmos-2.md b/docs/source/en/model_doc/kosmos-2.md index 8153ee300924..f799751cce84 100644 --- a/docs/source/en/model_doc/kosmos-2.md +++ b/docs/source/en/model_doc/kosmos-2.md @@ -18,8 +18,7 @@ rendered properly in your Markdown viewer. ## Overview -The KOSMOS-2 model was proposed in [Kosmos-2: Grounding Multimodal Large Language Models to the World] -(https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei +The KOSMOS-2 model was proposed in [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. KOSMOS-2 is a Transformer-based causal language model and is trained using the next-word prediction task on a web-scale dataset of grounded image-text pairs [GRIT](https://huggingface.co/datasets/zzliang/GRIT). The spatial coordinates of @@ -31,6 +30,11 @@ The abstract from the paper is the following: *We introduce Kosmos-2, a Multimodal Large Language Model (MLLM), enabling new capabilities of perceiving object descriptions (e.g., bounding boxes) and grounding text to the visual world. Specifically, we represent refer expressions as links in Markdown, i.e., ``[text span](bounding boxes)'', where object descriptions are sequences of location tokens. Together with multimodal corpora, we construct large-scale data of grounded image-text pairs (called GrIT) to train the model. 
In addition to the existing capabilities of MLLMs (e.g., perceiving general modalities, following instructions, and performing in-context learning), Kosmos-2 integrates the grounding capability into downstream applications. We evaluate Kosmos-2 on a wide range of tasks, including (i) multimodal grounding, such as referring expression comprehension, and phrase grounding, (ii) multimodal referring, such as referring expression generation, (iii) perception-language tasks, and (iv) language understanding and generation. This work lays out the foundation for the development of Embodiment AI and sheds light on the big convergence of language, multimodal perception, action, and world modeling, which is a key step toward artificial general intelligence. Code and pretrained models are available at https://aka.ms/kosmos-2.* + + + Overview of tasks that KOSMOS-2 can handle. Taken from the original paper. + ## Example ```python From df6f36a171af2468aeed0de54d8b1d5dced42810 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Tue, 31 Oct 2023 17:15:39 +0800 Subject: [PATCH 019/268] deprecate function `get_default_device` in `tools/base.py` (#26774) * get default device through `PartialState().default_device` as is has been officially released * apply code review suggestion * apply code review suggestion Co-authored-by: Zach Mueller --------- Co-authored-by: Zach Mueller --- src/transformers/tools/base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/transformers/tools/base.py b/src/transformers/tools/base.py index f7ce384e9263..c781a0b270ba 100644 --- a/src/transformers/tools/base.py +++ b/src/transformers/tools/base.py @@ -599,6 +599,10 @@ def fn(*args, **kwargs): # TODO: Migrate to Accelerate for this once `PartialState.default_device` makes its way into a release. def get_default_device(): + logger.warning( + "`get_default_device` is deprecated and will be replaced with `accelerate`'s `PartialState().default_device` " + "in version 4.36 of 🤗 Transformers. " + ) if not is_torch_available(): raise ImportError("Please install torch in order to use this tool.") From b5c8e23f0f263269404e1f00da84a9790b5ca845 Mon Sep 17 00:00:00 2001 From: Clifford Ressel Date: Tue, 31 Oct 2023 06:17:54 -0400 Subject: [PATCH 020/268] Remove broken links to s-JoL/Open-Llama (#27164) --- README.md | 2 +- README_es.md | 2 +- README_hd.md | 2 +- README_ja.md | 2 +- README_ko.md | 2 +- README_pt-br.md | 13 +------------ README_ru.md | 2 +- README_te.md | 2 +- README_zh-hans.md | 2 +- README_zh-hant.md | 2 +- docs/source/en/model_doc/open-llama.md | 6 ++---- docs/source/ms/index.md | 2 +- 12 files changed, 13 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index b7e4b4135677..f761e0d149f7 100644 --- a/README.md +++ b/README.md @@ -434,7 +434,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (from Meta AI) released with the paper [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) by Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic. 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. 1. 
**[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi. -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. diff --git a/README_es.md b/README_es.md index eeb0990fe58b..55a1fcf24abe 100644 --- a/README_es.md +++ b/README_es.md @@ -409,7 +409,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (from Meta AI) released with the paper [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) by Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic. 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi. -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. 
**[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. diff --git a/README_hd.md b/README_hd.md index bbc6b45f43e8..6862966d7b07 100644 --- a/README_hd.md +++ b/README_hd.md @@ -383,7 +383,7 @@ conda install -c huggingface transformers 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (Meta AI से) Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic. द्वाराअनुसंधान पत्र [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) के साथ जारी किया गया 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (विस्कॉन्सिन विश्वविद्यालय - मैडिसन से) साथ में कागज [Nyströmformer: A Nyström- आधारित एल्गोरिथम आत्म-ध्यान का अनुमान लगाने के लिए ](https://arxiv.org/abs/2102.03902) युनयांग ज़िओंग, झानपेंग ज़ेंग, रुद्रसिस चक्रवर्ती, मिंगक्सिंग टैन, ग्लेन फंग, यिन ली, विकास सिंह द्वारा पोस्ट किया गया। 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (SHI Labs से) पेपर [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) जितेश जैन, जिआचेन ली, मांगटिक चिउ, अली हसनी, निकिता ओरलोव, हम्फ्री शि के द्वारा जारी किया गया है। -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI से) साथ में कागज [विज़न ट्रांसफॉर्मर्स के साथ सिंपल ओपन-वोकैबुलरी ऑब्जेक्ट डिटेक्शन](https:/ /arxiv.org/abs/2205.06230) मैथियास मिंडरर, एलेक्सी ग्रिट्सेंको, ऑस्टिन स्टोन, मैक्सिम न्यूमैन, डिर्क वीसेनबोर्न, एलेक्सी डोसोवित्स्की, अरविंद महेंद्रन, अनुराग अर्नब, मुस्तफा देहघानी, ज़ुओरन शेन, जिओ वांग, ज़ियाओहुआ झाई, थॉमस किफ़, और नील हॉल्सबी द्वारा पोस्ट किया गया। 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (Google AI से) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. द्वाराअनुसंधान पत्र [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) के साथ जारी किया गया diff --git a/README_ja.md b/README_ja.md index b37098c491bb..83f6126dea49 100644 --- a/README_ja.md +++ b/README_ja.md @@ -443,7 +443,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (Meta AI から) Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic. 
から公開された研究論文 [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (the University of Wisconsin - Madison から) Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh から公開された研究論文: [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (SHI Labs から) Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi から公開された研究論文: [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI から) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al から公開された研究論文: [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby から公開された研究論文: [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. から公開された研究論文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) diff --git a/README_ko.md b/README_ko.md index ed67748854c4..3de95b9cfc8f 100644 --- a/README_ko.md +++ b/README_ko.md @@ -358,7 +358,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (Meta AI 에서 제공)은 Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic.의 [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418)논문과 함께 발표했습니다. 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (the University of Wisconsin - Madison 에서) Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh 의 [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) 논문과 함께 발표했습니다. 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (SHI Labs 에서) Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi 의 [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) 논문과 함께 발표했습니다. -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. 
**[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI 에서) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 의 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 논문과 함께 발표했습니다. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI 에서) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 의 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 논문과 함께 발표했습니다. 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (Google AI 에서 제공)은 Matthias Minderer, Alexey Gritsenko, Neil Houlsby.의 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683)논문과 함께 발표했습니다. diff --git a/README_pt-br.md b/README_pt-br.md index c15cd4867bd9..0e5f638bc5f6 100644 --- a/README_pt-br.md +++ b/README_pt-br.md @@ -441,7 +441,7 @@ Número atual de pontos de verificação: ![](https://img.shields.io/endpoint?ur 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (from Meta AI) released with the paper [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) by Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic. 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi. -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 
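A few patches back, PATCH 019 deprecated `get_default_device` in `tools/base.py` and pointed users at Accelerate's `PartialState().default_device` instead. A small sketch of that suggested migration follows; it assumes `accelerate` is installed and is illustrative rather than part of any diff in this series.

```python
# Sketch of the migration suggested by the deprecation warning added in
# src/transformers/tools/base.py: query Accelerate for the default device
# instead of calling transformers' deprecated get_default_device helper.
import torch
from accelerate import PartialState

device = PartialState().default_device  # cuda, mps or cpu depending on the machine
tensor = torch.ones(2, 2, device=device)
print(f"default device: {device}, tensor lives on: {tensor.device}")
```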
@@ -563,14 +563,3 @@ Agora temos um [artigo](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) qu pages = "38--45" } ``` - - - - - - - - - - - diff --git a/README_ru.md b/README_ru.md index 4cb1bf924ee8..9945b47fc25a 100644 --- a/README_ru.md +++ b/README_ru.md @@ -429,7 +429,7 @@ conda install -c huggingface transformers 1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team. 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi. -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. diff --git a/README_te.md b/README_te.md index 8bf4f5082740..1b6a1812fa04 100644 --- a/README_te.md +++ b/README_te.md @@ -434,7 +434,7 @@ Flax, PyTorch లేదా TensorFlow యొక్క ఇన్‌స్టా 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (from Meta AI) released with the paper [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) by Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic. 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. 1. 
**[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi. -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. diff --git a/README_zh-hans.md b/README_zh-hans.md index b9ffb8ae8416..4a6d50da3c39 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -382,7 +382,7 @@ conda install -c huggingface transformers 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (来自 Meta AI) 伴随论文 [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) 由 Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic 发布。 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (来自 the University of Wisconsin - Madison) 伴随论文 [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) 由 Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh 发布。 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (来自 SHI Labs) 伴随论文 [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) 由 Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi 发布。 -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (来自 [s-JoL](https://huggingface.co/s-JoL)) 由 [Open-Llama](https://github.com/s-JoL/Open-Llama) 发布. +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (来自 [s-JoL](https://huggingface.co/s-JoL)) 由 GitHub (现已删除). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。 1. 
**[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (来自 Google AI) 伴随论文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) 由 Matthias Minderer, Alexey Gritsenko, Neil Houlsby 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 8d47ef4ef072..ff6f14df4ad3 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -394,7 +394,7 @@ conda install -c huggingface transformers 1. **[Nougat](https://huggingface.co/docs/transformers/model_doc/nougat)** (from Meta AI) released with the paper [Nougat: Neural Optical Understanding for Academic Documents](https://arxiv.org/abs/2308.13418) by Lukas Blecher, Guillem Cucurull, Thomas Scialom, Robert Stojnic. 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. 1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi. -1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. 
diff --git a/docs/source/en/model_doc/open-llama.md b/docs/source/en/model_doc/open-llama.md index c20ecb7f88ca..9663170c4083 100644 --- a/docs/source/en/model_doc/open-llama.md +++ b/docs/source/en/model_doc/open-llama.md @@ -33,15 +33,13 @@ This model differs from the [OpenLLaMA models](https://huggingface.co/models?sea ## Overview -The Open-Llama model was proposed in [Open-Llama project](https://github.com/s-JoL/Open-Llama) by community developer s-JoL. +The Open-Llama model was proposed in the open source Open-Llama project by community developer s-JoL. The model is mainly based on LLaMA with some modifications, incorporating memory-efficient attention from Xformers, stable embedding from Bloom, and shared input-output embedding from PaLM. And the model is pre-trained on both Chinese and English, which gives it better performance on Chinese language tasks. This model was contributed by [s-JoL](https://huggingface.co/s-JoL). -The original code can be found [Open-Llama](https://github.com/s-JoL/Open-Llama). -Checkpoint and usage can be found at [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1). - +The original code was released on GitHub by [s-JoL](https://github.com/s-JoL), but is now removed. ## OpenLlamaConfig diff --git a/docs/source/ms/index.md b/docs/source/ms/index.md index 562840d82048..e57b65fc40c6 100644 --- a/docs/source/ms/index.md +++ b/docs/source/ms/index.md @@ -180,7 +180,7 @@ Dokumentasi disusun kepada lima bahagian: 1. **[NLLB-MOE](model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team. 1. **[Nyströmformer](model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. 1. **[OneFormer](model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi. -1. **[OpenLlama](model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama). +1. **[OpenLlama](model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 
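Since the upstream Open-Llama repository and its published checkpoint are gone, the `OpenLlama*` classes that remain in the library can only be exercised from a config or from a locally saved copy of the weights. A minimal sketch under that assumption, using the `OpenLlamaConfig` documented on the page above together with the matching causal-LM class (the config values are illustrative, not the original checkpoint's):

```python
# Illustrative only: instantiate the OpenLlama architecture without the removed upstream checkpoint.
from transformers import OpenLlamaConfig, OpenLlamaForCausalLM

config = OpenLlamaConfig(
    vocab_size=32000,
    hidden_size=512,        # small, illustrative sizes
    intermediate_size=1024,
    num_hidden_layers=4,
    num_attention_heads=8,
)
model = OpenLlamaForCausalLM(config)  # randomly initialized weights

# A locally saved copy of the original weights could still be loaded the usual way:
# model = OpenLlamaForCausalLM.from_pretrained("/path/to/local/open-llama")
```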
From 9234caefb0241939f7b2b0ee3d73ed5ebf842ae9 Mon Sep 17 00:00:00 2001 From: Akshar Goyal Date: Tue, 31 Oct 2023 06:20:14 -0400 Subject: [PATCH 021/268] [docstring] Fix docstring for AltCLIPTextConfig, AltCLIPVisionConfig and AltCLIPConfig (#27128) * [docstring] Fix docstring for AltCLIPVisionConfig, AltCLIPTextConfig + cleaned some docstring * Removed entries from check_docstring.py * Removed entries from check_docstring.py * Removed entry from check_docstring.py * [docstring] Fix docstring for AltCLIPTextConfig, AltCLIPVisionConfig and AltCLIPConfig --- .../models/altclip/configuration_altclip.py | 19 +++++++++++++++---- .../bridgetower/configuration_bridgetower.py | 6 +++--- .../configuration_chinese_clip.py | 2 +- .../models/clipseg/configuration_clipseg.py | 2 +- .../mask2former/configuration_mask2former.py | 2 +- .../models/owlv2/configuration_owlv2.py | 2 +- .../models/owlvit/configuration_owlvit.py | 2 +- .../pix2struct/configuration_pix2struct.py | 2 +- .../models/x_clip/configuration_x_clip.py | 4 ++-- utils/check_docstrings.py | 2 -- 10 files changed, 26 insertions(+), 17 deletions(-) diff --git a/src/transformers/models/altclip/configuration_altclip.py b/src/transformers/models/altclip/configuration_altclip.py index 431c61565ba4..032006452099 100755 --- a/src/transformers/models/altclip/configuration_altclip.py +++ b/src/transformers/models/altclip/configuration_altclip.py @@ -61,12 +61,19 @@ class AltCLIPTextConfig(PretrainedConfig): max_position_embeddings (`int`, *optional*, defaults to 514): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (`int`, *optional*, defaults to 2): + type_vocab_size (`int`, *optional*, defaults to 1): The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`] initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - layer_norm_eps (`float`, *optional*, defaults to 1e-5): + initializer_factor (`float`, *optional*, defaults to 0.02): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. + pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token. + bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token. + eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to @@ -154,10 +161,14 @@ class AltCLIPVisionConfig(PretrainedConfig): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. 
num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): @@ -165,13 +176,13 @@ class AltCLIPVisionConfig(PretrainedConfig): hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. - layer_norm_eps (`float`, *optional*, defaults to 1e-5): + layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1): + initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). diff --git a/src/transformers/models/bridgetower/configuration_bridgetower.py b/src/transformers/models/bridgetower/configuration_bridgetower.py index 30b6bf28795a..e44373cba59c 100644 --- a/src/transformers/models/bridgetower/configuration_bridgetower.py +++ b/src/transformers/models/bridgetower/configuration_bridgetower.py @@ -49,7 +49,7 @@ class BridgeTowerVisionConfig(PretrainedConfig): The size (resolution) of each patch. image_size (`int`, *optional*, defaults to 288): The size (resolution) of each image. - initializer_factor (`float``, *optional*, defaults to 1): + initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). layer_norm_eps (`float`, *optional*, defaults to 1e-05): @@ -151,7 +151,7 @@ class BridgeTowerTextConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids`. - initializer_factor (`float``, *optional*, defaults to 1): + initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). layer_norm_eps (`float`, *optional*, defaults to 1e-05): @@ -255,7 +255,7 @@ class BridgeTowerConfig(PretrainedConfig): The non-linear activation function (function or string) in the encoder and pooler. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. - initializer_factor (`float``, *optional*, defaults to 1): + initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). 
layer_norm_eps (`float`, *optional*, defaults to 1e-05): diff --git a/src/transformers/models/chinese_clip/configuration_chinese_clip.py b/src/transformers/models/chinese_clip/configuration_chinese_clip.py index 7bcfc73799d0..0e91200ce601 100644 --- a/src/transformers/models/chinese_clip/configuration_chinese_clip.py +++ b/src/transformers/models/chinese_clip/configuration_chinese_clip.py @@ -203,7 +203,7 @@ class ChineseCLIPVisionConfig(PretrainedConfig): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1.0): + initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: diff --git a/src/transformers/models/clipseg/configuration_clipseg.py b/src/transformers/models/clipseg/configuration_clipseg.py index 56b90f721e1a..cb178514b293 100644 --- a/src/transformers/models/clipseg/configuration_clipseg.py +++ b/src/transformers/models/clipseg/configuration_clipseg.py @@ -62,7 +62,7 @@ class CLIPSegTextConfig(PretrainedConfig): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1.0): + initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 1): diff --git a/src/transformers/models/mask2former/configuration_mask2former.py b/src/transformers/models/mask2former/configuration_mask2former.py index ccc1c9c2cffc..1fe241473934 100644 --- a/src/transformers/models/mask2former/configuration_mask2former.py +++ b/src/transformers/models/mask2former/configuration_mask2former.py @@ -93,7 +93,7 @@ class Mask2FormerConfig(PretrainedConfig): Ratio of points that are sampled via importance sampling. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - init_xavier_std (`float``, *optional*, defaults to 1.0): + init_xavier_std (`float`, *optional*, defaults to 1.0): The scaling factor used for the Xavier initialization gain in the HM Attention map module. use_auxiliary_loss (`boolean``, *optional*, defaults to `True`): If `True` [`Mask2FormerForUniversalSegmentationOutput`] will contain the auxiliary losses computed using diff --git a/src/transformers/models/owlv2/configuration_owlv2.py b/src/transformers/models/owlv2/configuration_owlv2.py index b4d75261281e..54611c4daf64 100644 --- a/src/transformers/models/owlv2/configuration_owlv2.py +++ b/src/transformers/models/owlv2/configuration_owlv2.py @@ -180,7 +180,7 @@ class Owlv2VisionConfig(PretrainedConfig): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1.0): + initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). 
diff --git a/src/transformers/models/owlvit/configuration_owlvit.py b/src/transformers/models/owlvit/configuration_owlvit.py index 424c4adc17c1..f9d3914ed8b5 100644 --- a/src/transformers/models/owlvit/configuration_owlvit.py +++ b/src/transformers/models/owlvit/configuration_owlvit.py @@ -183,7 +183,7 @@ class OwlViTVisionConfig(PretrainedConfig): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1.0): + initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). diff --git a/src/transformers/models/pix2struct/configuration_pix2struct.py b/src/transformers/models/pix2struct/configuration_pix2struct.py index feb5397a2a08..c23d2cdcfe63 100644 --- a/src/transformers/models/pix2struct/configuration_pix2struct.py +++ b/src/transformers/models/pix2struct/configuration_pix2struct.py @@ -203,7 +203,7 @@ class Pix2StructVisionConfig(PretrainedConfig): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1.0): + initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). seq_len (`int`, *optional*, defaults to 4096): diff --git a/src/transformers/models/x_clip/configuration_x_clip.py b/src/transformers/models/x_clip/configuration_x_clip.py index 183b66439b36..fbccdf1167d9 100644 --- a/src/transformers/models/x_clip/configuration_x_clip.py +++ b/src/transformers/models/x_clip/configuration_x_clip.py @@ -63,7 +63,7 @@ class XCLIPTextConfig(PretrainedConfig): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1): + initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). @@ -176,7 +176,7 @@ class XCLIPVisionConfig(PretrainedConfig): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float``, *optional*, defaults to 1): + initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). 
drop_path_rate (`float`, *optional*, defaults to 0.0): diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py index 42223e586fe1..3d8a2881bf20 100644 --- a/utils/check_docstrings.py +++ b/utils/check_docstrings.py @@ -79,8 +79,6 @@ "AlbertTokenizerFast", "AlignTextModel", "AlignVisionConfig", - "AltCLIPTextConfig", - "AltCLIPVisionConfig", "AudioClassificationPipeline", "AutoformerConfig", "AutomaticSpeechRecognitionPipeline", From 14bb196cc8ab62bdc029b065558786d0d97ed98c Mon Sep 17 00:00:00 2001 From: "Seungwoo, Jeong" <142411895+Hangsiin@users.noreply.github.com> Date: Tue, 31 Oct 2023 19:41:56 +0900 Subject: [PATCH 022/268] [doctring] Fix docstring for BlipTextConfig, BlipVisionConfig (#27173) Update configuration_blip.py edit docstrings --- src/transformers/models/blip/configuration_blip.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/blip/configuration_blip.py b/src/transformers/models/blip/configuration_blip.py index 39760a7e22a9..2482fcaf9ec2 100644 --- a/src/transformers/models/blip/configuration_blip.py +++ b/src/transformers/models/blip/configuration_blip.py @@ -55,7 +55,7 @@ class BlipTextConfig(PretrainedConfig): Args: - vocab_size (`int`, *optional*, defaults to 30522): + vocab_size (`int`, *optional*, defaults to 30524): Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BlipModel`]. hidden_size (`int`, *optional*, defaults to 768): @@ -68,7 +68,7 @@ class BlipTextConfig(PretrainedConfig): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. - max_position_embeddings (`int`, *optional*, defaults to 77): + max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): @@ -90,7 +90,7 @@ class BlipTextConfig(PretrainedConfig): The id of the `padding` token. sep_token_id (`int`, *optional*, defaults to 102): The id of the `separator` token. - is_decoder (`bool`, *optional*, defaults to `False`): + is_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as a decoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). @@ -197,9 +197,9 @@ class BlipVisionConfig(PretrainedConfig): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. - image_size (`int`, *optional*, defaults to 224): + image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. - patch_size (`int`, *optional*, defaults to 32): + patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, @@ -208,7 +208,7 @@ class BlipVisionConfig(PretrainedConfig): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
- initializer_range (`float`, *optional*, defaults to 0.02): + initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: From 9dc4ce9ea73dd46c4f63a3182a8cbd1cef1b886f Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 31 Oct 2023 11:59:21 +0100 Subject: [PATCH 023/268] Disable CI runner check (#27170) Disable runner check Co-authored-by: ydshieh --- .github/workflows/self-nightly-scheduled.yml | 34 ------------------ .github/workflows/self-past.yml | 34 ------------------ .github/workflows/self-push.yml | 34 ------------------ .github/workflows/self-scheduled.yml | 36 -------------------- utils/notification_service.py | 11 +++--- 5 files changed, 7 insertions(+), 142 deletions(-) diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 713e004d8e58..f5d96828183e 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -21,36 +21,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci] - container: - image: huggingface/transformers-all-latest-torch-nightly-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -276,8 +248,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -288,8 +258,6 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v3 @@ -303,8 +271,6 @@ jobs: CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} CI_EVENT: Nightly CI - RUNNER_STATUS: ${{ needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. 
diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index 71f904c831e9..ad624fb2581b 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -32,36 +32,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci] - container: - image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -319,8 +291,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -331,8 +301,6 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v3 @@ -351,8 +319,6 @@ jobs: CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }} - RUNNER_STATUS: ${{ needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. 
diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index e4b1b3b4b235..15035704d0ae 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -27,36 +27,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci] - container: - image: huggingface/transformers-all-latest-gpu-push-ci - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -521,8 +493,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -534,9 +504,7 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" echo "Setup status: ${{ needs.setup.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" # Necessary to get the correct branch name and commit SHA for `workflow_run` event # We also take into account the `push` event (we might want to test some changes in a branch) @@ -589,8 +557,6 @@ jobs: CI_TITLE_PUSH: ${{ github.event.head_commit.message }} CI_TITLE_WORKFLOW_RUN: ${{ github.event.workflow_run.head_commit.message }} CI_SHA: ${{ env.CI_SHA }} - RUNNER_STATUS: ${{ needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. 
A processing in `notification_service.py` to change diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 2bd6bbade1cb..09ea3af0d44c 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -25,36 +25,8 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - check_runner_status: - name: Check Runner Status - runs-on: ubuntu-latest - steps: - - name: Checkout transformers - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-scheduled-ci-runner-docker,multi-gpu-scheduled-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} - - check_runners: - name: Check Runners - needs: check_runner_status - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci] - container: - image: huggingface/transformers-all-latest-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - setup: name: Setup - needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -430,8 +402,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -480,8 +450,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - check_runner_status, - check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -496,8 +464,6 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner availability: ${{ needs.check_runner_status.result }}" - echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v3 @@ -513,8 +479,6 @@ jobs: CI_EVENT: scheduled CI_SHA: ${{ github.sha }} CI_WORKFLOW_REF: ${{ github.workflow_ref }} - RUNNER_STATUS: ${{ needs.check_runner_status.result }} - RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. 
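The `check_runner_status` jobs deleted from the workflows above ran `utils/check_self_hosted_runner.py` against the GitHub API with a `--target_runners` list and an access token. The rough shape of such a check is sketched below; this is an illustration of the idea only (it assumes a token that can read the repository's self-hosted runners), not the contents of the actual script:

```python
# Illustrative sketch, not utils/check_self_hosted_runner.py itself.
import os
import sys

import requests


def offline_runners(token, target_runners):
    # GitHub REST API: list the self-hosted runners registered for a repository.
    url = "https://api.github.com/repos/huggingface/transformers/actions/runners"
    headers = {"Authorization": f"Bearer {token}", "Accept": "application/vnd.github+json"}
    runners = requests.get(url, headers=headers, timeout=30).json().get("runners", [])
    return [r["name"] for r in runners if r["name"] in target_runners and r["status"] != "online"]


if __name__ == "__main__":
    offline = offline_runners(
        token=os.environ["ACCESS_REPO_INFO_TOKEN"],
        target_runners=["single-gpu-scheduled-ci-runner-docker", "multi-gpu-scheduled-ci-runner-docker"],
    )
    if offline:
        print(f"Offline runners found: {offline}")
        sys.exit(1)
```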
diff --git a/utils/notification_service.py b/utils/notification_service.py index 610d597d2307..4df3b2994484 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -769,12 +769,15 @@ def prepare_reports(title, header, reports, to_truncate=True): if __name__ == "__main__": - runner_status = os.environ.get("RUNNER_STATUS") - runner_env_status = os.environ.get("RUNNER_ENV_STATUS") + # runner_status = os.environ.get("RUNNER_STATUS") + # runner_env_status = os.environ.get("RUNNER_ENV_STATUS") setup_status = os.environ.get("SETUP_STATUS") - runner_not_available = True if runner_status is not None and runner_status != "success" else False - runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False + # runner_not_available = True if runner_status is not None and runner_status != "success" else False + # runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False + # Let's keep the lines regardig runners' status (we might be able to use them again in the future) + runner_not_available = False + runner_failed = False setup_failed = True if setup_status is not None and setup_status != "success" else False org = "huggingface" From b5db8ca66fad3a39ea80d3e5609686d0933bde87 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 31 Oct 2023 16:51:02 +0530 Subject: [PATCH 024/268] Add flash attention for `gpt_bigcode` (#26479) * added flash attention of gpt_bigcode * changed docs * Update src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py * add FA-2 docs * oops * Update docs/source/en/perf_infer_gpu_one.md Last Nit Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * fix * oops * remove padding_mask * change getattr->hasattr logic * changed .md file --------- Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Co-authored-by: younesbelkada Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- docs/source/en/model_doc/gpt_bigcode.md | 39 +++ docs/source/en/perf_infer_gpu_one.md | 1 + .../gpt_bigcode/modeling_gpt_bigcode.py | 311 ++++++++++++++++-- 3 files changed, 328 insertions(+), 23 deletions(-) diff --git a/docs/source/en/model_doc/gpt_bigcode.md b/docs/source/en/model_doc/gpt_bigcode.md index 6965d5837d8e..8cc77a825de7 100644 --- a/docs/source/en/model_doc/gpt_bigcode.md +++ b/docs/source/en/model_doc/gpt_bigcode.md @@ -42,6 +42,45 @@ The main differences compared to GPT2. You can read more about the optimizations in the [original pull request](https://github.com/huggingface/transformers/pull/22575) +## Combining Starcoder and Flash Attention 2 + +First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature. + +```bash +pip install -U flash-attn --no-build-isolation +``` + +Make also sure that you have a hardware that is compatible with Flash-Attention 2. Read more about it in the official documentation of flash-attn repository. Make also sure to load your model in half-precision (e.g. 
`torch.float16``) + +To load and run a model using Flash Attention 2, refer to the snippet below: + +```python +>>> import torch +>>> from transformers import AutoModelForCausalLM, AutoTokenizer +>>> device = "cuda" # the device to load the model onto + +>>> model = AutoModelForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder", torch_dtype=torch.float16, use_flash_attention_2=True) +>>> tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder") + +>>> prompt = "def hello_world():" + +>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device) +>>> model.to(device) + +>>> generated_ids = model.generate(**model_inputs, max_new_tokens=30, do_sample=False) +>>> tokenizer.batch_decode(generated_ids)[0] +'def hello_world():\n print("hello world")\n\nif __name__ == "__main__":\n print("hello world")\n<|endoftext|>' +``` + +### Expected speedups + +Below is a expected speedup diagram that compares pure inference time between the native implementation in transformers using `bigcode/starcoder` checkpoint and the Flash Attention 2 version of the model using two different sequence lengths. + +
+ +
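Flash Attention 2 can also be combined with quantized loading; the snippet below is a sketch only (it assumes `bitsandbytes` is installed and a CUDA device is available, and it was not part of the benchmark above):

```python
>>> import torch
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

>>> quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)

>>> model = AutoModelForCausalLM.from_pretrained(
...     "bigcode/gpt_bigcode-santacoder",
...     quantization_config=quantization_config,
...     device_map="auto",
...     use_flash_attention_2=True,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")

>>> inputs = tokenizer("def hello_world():", return_tensors="pt").to(model.device)
>>> outputs = model.generate(**inputs, max_new_tokens=30, do_sample=False)
>>> print(tokenizer.decode(outputs[0]))
```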
+ + ## GPTBigCodeConfig [[autodoc]] GPTBigCodeConfig diff --git a/docs/source/en/perf_infer_gpu_one.md b/docs/source/en/perf_infer_gpu_one.md index d24299012e9f..39f2ca22b1f0 100644 --- a/docs/source/en/perf_infer_gpu_one.md +++ b/docs/source/en/perf_infer_gpu_one.md @@ -34,6 +34,7 @@ We natively support Flash Attention 2 for the following models: - Llama - Mistral - Falcon +- [GPTBigCode (Starcoder)](model_doc/gpt_bigcode#) You can request to add Flash Attention 2 support for more models by opening an issue on GitHub, and even open a Pull Request to integrate the changes. The supported models can be used for inference and training, including training with padding tokens - *which is currently not supported for `BetterTransformer` API below.* diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index f8e52b6510a0..fcbbfca5ceda 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -16,6 +16,7 @@ from typing import List, Optional, Tuple, Union import torch +import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -32,11 +33,17 @@ add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, + is_flash_attn_2_available, logging, ) from .configuration_gpt_bigcode import GPTBigCodeConfig +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "bigcode/gpt_bigcode-santacoder" @@ -78,11 +85,25 @@ def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor return x +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + class GPTBigCodeAttention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() - self.mask_value = None + self.config = config + self.mask_value = None self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads @@ -90,6 +111,8 @@ def __init__(self, config, is_cross_attention=False, layer_idx=None): self.kv_heads = 1 if self.multi_query else self.num_heads self.kv_dim = self.kv_heads * self.head_dim self.split_size = self.embed_dim + self.is_causal = True + if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" @@ -212,10 +235,16 @@ def forward( encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, + **kwargs, ) -> Union[ Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], ]: + if "padding_mask" in kwargs: + logger.warning_once( + "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" + ) + if encoder_hidden_states is not None: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( @@ -262,6 +291,223 @@ def forward( return outputs # a, present, (attentions) +class GPTBigCodeFlashAttention2(GPTBigCodeAttention): + """ + GPTBigCode flash attention module. This module inherits from `GPTBigCodeAttention` as the weights of the module + stays untouched. The only required change would be on the forward pass where it needs to correctly call the public + API of flash attention and deal with padding tokens in case the input contains any of them. + """ + + def forward( + self, + hidden_states: torch.Tensor, + layer_past: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + **kwargs, + ) -> Union[ + Tuple[torch.Tensor, Optional[torch.Tensor]], + Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], + ]: + if "padding_mask" in kwargs: + logger.warning_once( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop("padding_mask") + + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn") or not self.is_cross_attention: + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." + ) + + query = self.q_attn(hidden_states) + key_value = self.c_attn(encoder_hidden_states) + attention_mask = encoder_attention_mask + elif self.multi_query: + query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) + else: + # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim), + # i.e., the memory layout is not the same as GPT2. + # This makes the concatenation with past_key_value more efficient. 
+ query, key_value = ( + self.c_attn(hidden_states) + .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) + .transpose(1, 2) + .split((self.head_dim, 2 * self.head_dim), dim=3) + ) + + if layer_past is not None: + key_value = torch.cat((layer_past, key_value), dim=-2) + present = key_value if use_cache else None + + key, value = key_value.split((self.head_dim, self.head_dim), dim=-1) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + if self.multi_query: + batch_size, query_length, _ = query.shape + query = query.reshape(batch_size, query_length, self.num_heads, self.head_dim) + key = key.unsqueeze(2) + value = value.unsqueeze(2) + else: + query_length = query.shape[2] + batch_size, _, tgt, _ = key.shape + query = query.transpose(1, 2).reshape(batch_size, query_length, self.num_heads, self.head_dim) + key = key.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) + value = value.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) + + attn_dropout = self.dropout if self.training else 0.0 + + softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else query.dtype + upcast = query.dtype != softmax_dtype + softmax_scale = self.layer_idx + 1 if self.scale_attention_softmax_in_fp32 and upcast else 1 + softmax_scale = softmax_scale**-1 + if self.scale_attn_weights: + softmax_scale /= self.head_dim**0.5 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. + input_dtype = query.dtype + if input_dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.c_attn.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + query = query.to(target_dtype) + key = key.to(target_dtype) + value = value.to(target_dtype) + + attn_output = self._flash_attention_forward( + query, key, value, attention_mask, query_length, dropout=attn_dropout, softmax_scale=softmax_scale + ) + + attn_weights_reshaped = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) + attn_output = self.c_proj(attn_weights_reshaped) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + + if output_attentions: + if self.multi_query: + # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length) + attn_weights_reshaped = attn_weights_reshaped.transpose(1, 2) + else: + attn_weights_reshaped = None + + outputs += (attn_weights_reshaped,) + + return outputs # a, present, (attentions) + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. 
+ + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + class GPTBigCodeMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() @@ -287,13 +533,21 @@ def __init__(self, config, layer_idx=None): self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.attn = GPTBigCodeAttention(config, layer_idx=layer_idx) + self.attn = ( + GPTBigCodeAttention(config, layer_idx=layer_idx) + if not getattr(config, "_flash_attn_2_enabled", False) + else GPTBigCodeFlashAttention2(config, layer_idx=layer_idx) + ) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: if config.multi_query: raise NotImplementedError("Cross-attention not implemented for MQA") - self.crossattention = GPTBigCodeAttention(config, is_cross_attention=True, layer_idx=layer_idx) + self.crossattention = ( + GPTBigCodeAttention(config, is_cross_attention=True, layer_idx=layer_idx) + if not getattr(config, "_flash_attn_2_enabled", False) + else GPTBigCodeFlashAttention2(config, is_cross_attention=True, layer_idx=layer_idx) + ) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTBigCodeMLP(self.inner_dim, config) @@ -373,6 +627,7 @@ class GPTBigCodePreTrainedModel(PreTrainedModel): supports_gradient_checkpointing = True _no_split_modules = ["GPTBigCodeBlock"] _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @@ -594,28 +849,38 @@ def forward( key_length = past_length + query_length self_attention_mask = self.bias[None, key_length - query_length : key_length, :key_length] - if attention_mask is not None: - self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to( - dtype=torch.bool, device=self_attention_mask.device + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask.bool() if (attention_mask is not None and 0 in attention_mask) else None + encoder_attention_mask = ( + encoder_attention_mask.bool() + if (encoder_attention_mask is not None and 0 in encoder_attention_mask) + else None ) - - # MQA models: (batch_size, query_length, n_heads, key_length) - # MHA models: (batch_size, n_heads, query_length, key_length) - attention_mask = self_attention_mask.unsqueeze(2 if self.multi_query else 1) - - # If a 2D or 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if ( - self.config.add_cross_attention - and encoder_hidden_states is not None - and encoder_attention_mask is not None - ): - if encoder_attention_mask.dim() == 2: - encoder_attention_mask.unsqueeze(1) - assert encoder_attention_mask.dim() == 3 - encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1) else: - encoder_attention_mask = None + # 4d mask is passed through the layers + if attention_mask is not None: + self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to( + dtype=torch.bool, device=self_attention_mask.device + ) + + # MQA models: (batch_size, 
query_length, n_heads, key_length) + # MHA models: (batch_size, n_heads, query_length, key_length) + attention_mask = self_attention_mask.unsqueeze(2 if self.multi_query else 1) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if ( + self.config.add_cross_attention + and encoder_hidden_states is not None + and encoder_attention_mask is not None + ): + if encoder_attention_mask.dim() == 2: + encoder_attention_mask.unsqueeze(1) + assert encoder_attention_mask.dim() == 3 + encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1) + else: + encoder_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head From 3cd3eaf96048cb76e67a432e72a7cecbdd1630a8 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Tue, 31 Oct 2023 06:09:56 -0700 Subject: [PATCH 025/268] fix: Fix typical_p behaviour broken in recent change (#27165) A recent PR https://github.com/huggingface/transformers/pull/26579 fixed an edge case out-of-bounds tensor indexing error in TypicalLogitsWarper, and a related behaviour change was made that we thought fixed a long-standing bug w.r.t. the token inclusion cutoff. However after looking more closely, I am pretty certain that the original logic was correct and that the OOB fix should have been made differently. Specifically the docs state that it should include the "smallest set of tokens that add up to P or higher" and so `last_ind` should actually be one more than the index of the last token satisfying (cumulative_probs < self.mass). We still need a max clamp in case that last token is the very last one in the tensor. --- src/transformers/generation/logits_process.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation/logits_process.py b/src/transformers/generation/logits_process.py index f9af4f7ffc08..60d50a7efa27 100644 --- a/src/transformers/generation/logits_process.py +++ b/src/transformers/generation/logits_process.py @@ -492,8 +492,8 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> to cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) # Remove tokens with cumulative mass above the threshold - last_ind = (cumulative_probs < self.mass).sum(dim=1) - 1 - last_ind.clamp_(min=0) + last_ind = (cumulative_probs < self.mass).sum(dim=1) + last_ind.clamp_(max=sorted_scores.shape[-1] - 1) sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1)) sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0 indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) From 2963e196ee1fb9ab2c677221ca5e135568604662 Mon Sep 17 00:00:00 2001 From: Vivek Khandelwal Date: Tue, 31 Oct 2023 19:15:23 +0530 Subject: [PATCH 026/268] Add support for loading GPTQ models on CPU (#26719) * Add support for loading GPTQ models on CPU Right now, we can only load the GPTQ Quantized model on the CUDA device. The attribute `gptq_supports_cpu` checks if the current auto_gptq version is the one which has the cpu support for the model or not. The larger variants of the model are hard to load/run/trace on the GPU and that's the rationale behind adding this attribute. 
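For illustration only (this snippet is not part of the patch), a minimal sketch of what CPU loading could look like once this version check lands. It assumes an already GPTQ-quantized checkpoint (the repo id below is a placeholder, matching the one used in the quantization docs) and `auto-gptq > 0.4.2`; the exllama kernels are GPU-only, so they are disabled for CPU inference:

```py
from transformers import AutoModelForCausalLM, GPTQConfig

# Placeholder repo id; any GPTQ-quantized checkpoint would do.
quantization_config = GPTQConfig(bits=4, disable_exllama=True)
model = AutoModelForCausalLM.from_pretrained(
    "{your_username}/opt-125m-gptq",
    device_map="cpu",
    quantization_config=quantization_config,
)
```

Generation then runs on CPU as usual, just noticeably slower than on GPU.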
Signed-Off By: Vivek Khandelwal * Update quantization.md * Update quantization.md * Update quantization.md --- docs/source/en/main_classes/quantization.md | 2 +- src/transformers/modeling_utils.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md index 5a822f37135f..2a2c6c21adb0 100644 --- a/docs/source/en/main_classes/quantization.md +++ b/docs/source/en/main_classes/quantization.md @@ -132,7 +132,7 @@ model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", de ### Exllama kernels for faster inference -For 4-bit model, you can use the exllama kernels in order to a faster inference speed. It is activated by default. You can change that behavior by passing `disable_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the config. Note that you will only be able to overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on gpus if you want to use exllama kernels. +For 4-bit model, you can use the exllama kernels in order to a faster inference speed. It is activated by default. You can change that behavior by passing `disable_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the config. Note that you will only be able to overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on gpus if you want to use exllama kernels. Also, you can perform CPU inference using Auto-GPTQ for Auto-GPTQ version > 0.4.2 by passing `device_map` = "cpu". For CPU inference, you have to pass `disable_exallama = True` in the `GPTQConfig.` ```py import torch diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index ff533c93a9d2..7d02d53fc350 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -2788,7 +2788,8 @@ def from_pretrained( quantization_method_from_args == QuantizationMethod.GPTQ or quantization_method_from_config == QuantizationMethod.GPTQ ): - if not torch.cuda.is_available(): + gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2") + if not gptq_supports_cpu and not torch.cuda.is_available(): raise RuntimeError("GPU is required to quantize or run quantize model.") elif not (is_optimum_available() and is_auto_gptq_available()): raise ImportError( From a8e74ebdc5e63175b722880f8e4555dad5935a1c Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 31 Oct 2023 14:49:02 +0100 Subject: [PATCH 027/268] Trigger CI if `tiny_model_summary.json` is modified (#27175) fix Co-authored-by: ydshieh --- utils/tests_fetcher.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index 2fd866e9d8da..b2f9d2f8cec7 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -949,6 +949,10 @@ def infer_tests_to_run( if "setup.py" in modified_files: test_files_to_run = ["tests", "examples"] repo_utils_launch = True + # in order to trigger pipeline tests even if no code change at all + elif "tests/utils/tiny_model_summary.json" in modified_files: + test_files_to_run = ["tests"] + repo_utils_launch = any(f.split(os.path.sep)[0] == "utils" for f in modified_files) else: # All modified tests need to be run. 
test_files_to_run = [ From 08fadc8085610f7616a993dee8109a1bdc222d58 Mon Sep 17 00:00:00 2001 From: Matt Date: Tue, 31 Oct 2023 14:20:04 +0000 Subject: [PATCH 028/268] Shorten the conversation tests for speed + fixing position overflows (#26960) * Shorten the conversation tests for speed + fixing position overflows * Put max_new_tokens back to 5 * Remove test skips * Increase max_position_embeddings in blenderbot tests * Add skips for blenderbot_small * Correct TF test skip * make fixup * Reformat skips to use is_pipeline_test_to_skip * Update tests/models/blenderbot_small/test_modeling_blenderbot_small.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../blenderbot/test_modeling_blenderbot.py | 2 +- .../blenderbot/test_modeling_flax_blenderbot.py | 2 +- .../blenderbot/test_modeling_tf_blenderbot.py | 2 +- .../test_modeling_blenderbot_small.py | 9 ++------- .../test_modeling_flax_blenderbot_small.py | 7 ++++++- .../test_modeling_tf_blenderbot_small.py | 16 ++++++---------- tests/pipelines/test_pipelines_conversational.py | 8 ++++---- tests/test_pipeline_mixin.py | 2 +- 8 files changed, 22 insertions(+), 26 deletions(-) diff --git a/tests/models/blenderbot/test_modeling_blenderbot.py b/tests/models/blenderbot/test_modeling_blenderbot.py index ca1630b3cfd3..f3416396cea8 100644 --- a/tests/models/blenderbot/test_modeling_blenderbot.py +++ b/tests/models/blenderbot/test_modeling_blenderbot.py @@ -85,7 +85,7 @@ def __init__( hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, - max_position_embeddings=20, + max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, diff --git a/tests/models/blenderbot/test_modeling_flax_blenderbot.py b/tests/models/blenderbot/test_modeling_flax_blenderbot.py index ffcc9a7d04e6..a39fcada0cab 100644 --- a/tests/models/blenderbot/test_modeling_flax_blenderbot.py +++ b/tests/models/blenderbot/test_modeling_flax_blenderbot.py @@ -87,7 +87,7 @@ def __init__( hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, - max_position_embeddings=32, + max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, diff --git a/tests/models/blenderbot/test_modeling_tf_blenderbot.py b/tests/models/blenderbot/test_modeling_tf_blenderbot.py index 26b03a5d6a3f..0fdfef1cbdad 100644 --- a/tests/models/blenderbot/test_modeling_tf_blenderbot.py +++ b/tests/models/blenderbot/test_modeling_tf_blenderbot.py @@ -53,7 +53,7 @@ def __init__( intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, - max_position_embeddings=20, + max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, diff --git a/tests/models/blenderbot_small/test_modeling_blenderbot_small.py b/tests/models/blenderbot_small/test_modeling_blenderbot_small.py index 2397b6fee972..8c622b802810 100644 --- a/tests/models/blenderbot_small/test_modeling_blenderbot_small.py +++ b/tests/models/blenderbot_small/test_modeling_blenderbot_small.py @@ -85,7 +85,7 @@ def __init__( hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, - max_position_embeddings=20, + 
max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, @@ -242,12 +242,7 @@ class BlenderbotSmallModelTest(ModelTesterMixin, GenerationTesterMixin, Pipeline def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): - if pipeline_test_casse_name == "TextGenerationPipelineTests": - return True - # TODO @Rocketnight1 to fix - if pipeline_test_casse_name == "ConversationalPipelineTests": - return True - return False + return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") def setUp(self): self.model_tester = BlenderbotSmallModelTester(self) diff --git a/tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py b/tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py index d417ac3073d5..332cb69ec7c3 100644 --- a/tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py +++ b/tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py @@ -86,7 +86,7 @@ def __init__( hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, - max_position_embeddings=32, + max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, @@ -320,6 +320,11 @@ class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, Flax ) all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () + def is_pipeline_test_to_skip( + self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name + ): + return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") + def setUp(self): self.model_tester = FlaxBlenderbotSmallModelTester(self) diff --git a/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py b/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py index 6fde705082d1..2c70a7507b29 100644 --- a/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py +++ b/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py @@ -53,7 +53,7 @@ def __init__( intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, - max_position_embeddings=20, + max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, @@ -198,6 +198,11 @@ class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unitte test_pruning = False test_onnx = False + def is_pipeline_test_to_skip( + self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name + ): + return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") + def setUp(self): self.model_tester = TFBlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) @@ -209,15 +214,6 @@ def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) - # TODO: Fix the failed tests when this model gets more usage - def is_pipeline_test_to_skip( - self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name - ): - # TODO @Rocketnight1 to fix - if pipeline_test_casse_name == "ConversationalPipelineTests": - return True - return False - @require_tokenizers @require_tf diff --git a/tests/pipelines/test_pipelines_conversational.py b/tests/pipelines/test_pipelines_conversational.py index 
c85eb04c1957..2970c4afe5ec 100644 --- a/tests/pipelines/test_pipelines_conversational.py +++ b/tests/pipelines/test_pipelines_conversational.py @@ -77,14 +77,14 @@ def get_test_pipeline(self, model, tokenizer, processor): def run_pipeline_test(self, conversation_agent, _): # Simple - outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=20) + outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), ) # Single list - outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=20) + outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), @@ -96,7 +96,7 @@ def run_pipeline_test(self, conversation_agent, _): self.assertEqual(len(conversation_1), 1) self.assertEqual(len(conversation_2), 1) - outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=20) + outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=5) self.assertEqual(outputs, [conversation_1, conversation_2]) self.assertEqual( outputs, @@ -118,7 +118,7 @@ def run_pipeline_test(self, conversation_agent, _): # One conversation with history conversation_2.add_message({"role": "user", "content": "Why do you recommend it?"}) - outputs = conversation_agent(conversation_2, max_new_tokens=20) + outputs = conversation_agent(conversation_2, max_new_tokens=5) self.assertEqual(outputs, conversation_2) self.assertEqual( outputs, diff --git a/tests/test_pipeline_mixin.py b/tests/test_pipeline_mixin.py index 0c07248ab065..bd4b9eb39343 100644 --- a/tests/test_pipeline_mixin.py +++ b/tests/test_pipeline_mixin.py @@ -313,7 +313,7 @@ def data(n): out = [] if task == "conversational": - for item in pipeline(data(10), batch_size=4, max_new_tokens=20): + for item in pipeline(data(10), batch_size=4, max_new_tokens=5): out.append(item) else: for item in pipeline(data(10), batch_size=4): From f53041a753ed3bc4fbc6eed0eb84b68a13475127 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Tue, 31 Oct 2023 22:46:31 +0800 Subject: [PATCH 029/268] device agnostic pipelines testing (#27129) * device agnostic pipelines testing * pass torch_device --- ..._pipelines_automatic_speech_recognition.py | 17 ++++++++-------- tests/pipelines/test_pipelines_common.py | 20 ++++++++++--------- .../test_pipelines_conversational.py | 14 +++++-------- tests/pipelines/test_pipelines_fill_mask.py | 17 ++++++++++------ .../pipelines/test_pipelines_summarization.py | 5 +---- .../test_pipelines_text_classification.py | 6 ++---- .../test_pipelines_text_generation.py | 12 ++++++++--- .../pipelines/test_pipelines_text_to_audio.py | 7 ++++--- .../test_pipelines_token_classification.py | 9 +++++---- ...est_pipelines_visual_question_answering.py | 15 +++++++------- 10 files changed, 64 insertions(+), 58 deletions(-) diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py index 50bce7785d67..0343c32939d0 100644 --- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py +++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py @@ -39,9 +39,10 @@ require_pyctcdecode, require_tf, require_torch, - require_torch_gpu, + require_torch_accelerator, require_torchaudio, slow, + torch_device, ) from .test_pipelines_common import ANY @@ -166,13 
+167,11 @@ def test_small_model_pt(self): _ = speech_recognizer(waveform, return_timestamps="char") @slow - @require_torch + @require_torch_accelerator def test_whisper_fp16(self): - if not torch.cuda.is_available(): - self.skipTest("Cuda is necessary for this test") speech_recognizer = pipeline( model="openai/whisper-base", - device=0, + device=torch_device, torch_dtype=torch.float16, ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) @@ -904,12 +903,12 @@ def test_speech_to_text_leveraged(self): self.assertEqual(output, {"text": "a man said to the universe sir i exist"}) @slow - @require_torch_gpu + @require_torch_accelerator def test_wav2vec2_conformer_float16(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-conformer-rope-large-960h-ft", - device="cuda:0", + device=torch_device, torch_dtype=torch.float16, framework="pt", ) @@ -1304,14 +1303,14 @@ def test_stride(self): self.assertEqual(output, {"text": "XB"}) @slow - @require_torch_gpu + @require_torch_accelerator def test_slow_unfinished_sequence(self): from transformers import GenerationConfig pipe = pipeline( "automatic-speech-recognition", model="vasista22/whisper-hindi-large-v2", - device="cuda:0", + device=torch_device, ) # Original model wasn't trained with timestamps and has incorrect generation config pipe.model.generation_config = GenerationConfig.from_pretrained("openai/whisper-large-v2") diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 8c7c66939c33..56467bdc4b8b 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -40,15 +40,17 @@ USER, CaptureLogger, RequestCounter, + backend_empty_cache, is_pipeline_test, is_staging_test, nested_simplify, require_tensorflow_probability, require_tf, require_torch, - require_torch_gpu, + require_torch_accelerator, require_torch_or_tf, slow, + torch_device, ) from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available from transformers.utils import logging as transformers_logging @@ -511,7 +513,7 @@ def test_load_default_pipelines_pt(self): # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) @slow @require_tf @@ -541,20 +543,20 @@ def test_load_default_pipelines_pt_table_qa(self): # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) @slow @require_torch - @require_torch_gpu - def test_pipeline_cuda(self): - pipe = pipeline("text-generation", device="cuda") + @require_torch_accelerator + def test_pipeline_accelerator(self): + pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_torch - @require_torch_gpu - def test_pipeline_cuda_indexed(self): - pipe = pipeline("text-generation", device="cuda:0") + @require_torch_accelerator + def test_pipeline_accelerator_indexed(self): + pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow diff --git a/tests/pipelines/test_pipelines_conversational.py b/tests/pipelines/test_pipelines_conversational.py index 2970c4afe5ec..ba3b37055fd1 100644 --- a/tests/pipelines/test_pipelines_conversational.py +++ b/tests/pipelines/test_pipelines_conversational.py @@ -31,6 +31,7 @@ pipeline, ) from transformers.testing_utils import ( + backend_empty_cache, is_pipeline_test, is_torch_available, require_tf, @@ -42,9 +43,6 @@ from 
.test_pipelines_common import ANY -DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0 - - @is_pipeline_test class ConversationalPipelineTests(unittest.TestCase): def tearDown(self): @@ -52,9 +50,7 @@ def tearDown(self): # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): - import torch - - torch.cuda.empty_cache() + backend_empty_cache(torch_device) model_mapping = dict( list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) @@ -136,7 +132,7 @@ def run_pipeline_test(self, conversation_agent, _): @slow def test_integration_torch_conversation(self): # When - conversation_agent = pipeline(task="conversational", device=DEFAULT_DEVICE_NUM) + conversation_agent = pipeline(task="conversational", device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") # Then @@ -168,7 +164,7 @@ def test_integration_torch_conversation(self): @slow def test_integration_torch_conversation_truncated_history(self): # When - conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=DEFAULT_DEVICE_NUM) + conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") # Then self.assertEqual(len(conversation_1.past_user_inputs), 1) @@ -374,7 +370,7 @@ def test_integration_torch_conversation_encoder_decoder(self): # When tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M") - conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=DEFAULT_DEVICE_NUM) + conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=torch_device) conversation_1 = Conversation("My name is Sarah and I live in London") conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? 
") diff --git a/tests/pipelines/test_pipelines_fill_mask.py b/tests/pipelines/test_pipelines_fill_mask.py index 3794e88613d4..c85797fbb6eb 100644 --- a/tests/pipelines/test_pipelines_fill_mask.py +++ b/tests/pipelines/test_pipelines_fill_mask.py @@ -18,13 +18,15 @@ from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( + backend_empty_cache, is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, - require_torch_gpu, + require_torch_accelerator, slow, + torch_device, ) from .test_pipelines_common import ANY @@ -40,9 +42,7 @@ def tearDown(self): # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): - import torch - - torch.cuda.empty_cache() + backend_empty_cache(torch_device) @require_tf def test_small_model_tf(self): @@ -148,9 +148,14 @@ def test_small_model_pt(self): ], ) - @require_torch_gpu + @require_torch_accelerator def test_fp16_casting(self): - pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt") + pipe = pipeline( + "fill-mask", + model="hf-internal-testing/tiny-random-distilbert", + device=torch_device, + framework="pt", + ) # convert model to fp16 pipe.model.half() diff --git a/tests/pipelines/test_pipelines_summarization.py b/tests/pipelines/test_pipelines_summarization.py index e6aaebb31d02..7b75842081c5 100644 --- a/tests/pipelines/test_pipelines_summarization.py +++ b/tests/pipelines/test_pipelines_summarization.py @@ -27,9 +27,6 @@ from .test_pipelines_common import ANY -DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0 - - @is_pipeline_test class SummarizationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING @@ -106,7 +103,7 @@ def test_small_model_tf(self): @require_torch @slow def test_integration_torch_summarization(self): - summarizer = pipeline(task="summarization", device=DEFAULT_DEVICE_NUM) + summarizer = pipeline(task="summarization", device=torch_device) cnn_article = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. 
The" diff --git a/tests/pipelines/test_pipelines_text_classification.py b/tests/pipelines/test_pipelines_text_classification.py index d203fd5bcdd5..7a33a41c0650 100644 --- a/tests/pipelines/test_pipelines_text_classification.py +++ b/tests/pipelines/test_pipelines_text_classification.py @@ -20,7 +20,7 @@ TextClassificationPipeline, pipeline, ) -from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow +from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY @@ -96,13 +96,11 @@ def test_small_model_pt(self): @require_torch def test_accepts_torch_device(self): - import torch - text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", - device=torch.device("cpu"), + device=torch_device, ) outputs = text_classifier("This is great !") diff --git a/tests/pipelines/test_pipelines_text_generation.py b/tests/pipelines/test_pipelines_text_generation.py index 44a29a673d81..b9a5febb5609 100644 --- a/tests/pipelines/test_pipelines_text_generation.py +++ b/tests/pipelines/test_pipelines_text_generation.py @@ -27,8 +27,10 @@ require_accelerate, require_tf, require_torch, + require_torch_accelerator, require_torch_gpu, require_torch_or_tf, + torch_device, ) from .test_pipelines_common import ANY @@ -319,16 +321,20 @@ def test_small_model_pt_bloom_accelerate(self): ) @require_torch - @require_torch_gpu + @require_torch_accelerator def test_small_model_fp16(self): import torch - pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16) + pipe = pipeline( + model="hf-internal-testing/tiny-random-bloom", + device=torch_device, + torch_dtype=torch.float16, + ) pipe("This is a test") @require_torch @require_accelerate - @require_torch_gpu + @require_torch_accelerator def test_pipeline_accelerate_top_p(self): import torch diff --git a/tests/pipelines/test_pipelines_text_to_audio.py b/tests/pipelines/test_pipelines_text_to_audio.py index 04acd8fdf822..6aca34ed98a0 100644 --- a/tests/pipelines/test_pipelines_text_to_audio.py +++ b/tests/pipelines/test_pipelines_text_to_audio.py @@ -25,9 +25,10 @@ from transformers.testing_utils import ( is_pipeline_test, require_torch, - require_torch_gpu, + require_torch_accelerator, require_torch_or_tf, slow, + torch_device, ) from .test_pipelines_common import ANY @@ -115,9 +116,9 @@ def test_small_bark_pt(self): self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow - @require_torch_gpu + @require_torch_accelerator def test_conversion_additional_tensor(self): - speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=0) + speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device) processor = AutoProcessor.from_pretrained("suno/bark-small") forward_params = { diff --git a/tests/pipelines/test_pipelines_token_classification.py b/tests/pipelines/test_pipelines_token_classification.py index 7f6658c93a2e..f6f47accc2bb 100644 --- a/tests/pipelines/test_pipelines_token_classification.py +++ b/tests/pipelines/test_pipelines_token_classification.py @@ -30,8 +30,9 @@ nested_simplify, require_tf, require_torch, - require_torch_gpu, + require_torch_accelerator, slow, + torch_device, ) from .test_pipelines_common import ANY @@ -391,13 +392,13 @@ def test_spanish_bert(self): ], ) - @require_torch_gpu + @require_torch_accelerator 
@slow - def test_gpu(self): + def test_accelerator(self): sentence = "This is dummy sentence" ner = pipeline( "token-classification", - device=0, + device=torch_device, aggregation_strategy=AggregationStrategy.SIMPLE, ) diff --git a/tests/pipelines/test_pipelines_visual_question_answering.py b/tests/pipelines/test_pipelines_visual_question_answering.py index 55ad44ef8d1d..15db1ce714b6 100644 --- a/tests/pipelines/test_pipelines_visual_question_answering.py +++ b/tests/pipelines/test_pipelines_visual_question_answering.py @@ -22,9 +22,10 @@ nested_simplify, require_tf, require_torch, - require_torch_gpu, + require_torch_accelerator, require_vision, slow, + torch_device, ) from .test_pipelines_common import ANY @@ -91,7 +92,7 @@ def test_small_model_pt(self): ) @require_torch - @require_torch_gpu + @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" @@ -112,9 +113,9 @@ def test_small_model_pt_blip2(self): "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"torch_dtype": torch.float16}, - device=0, + device=torch_device, ) - self.assertEqual(vqa_pipeline.model.device, torch.device(0)) + self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) @@ -148,15 +149,15 @@ def test_large_model_pt(self): @slow @require_torch - @require_torch_gpu + @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"torch_dtype": torch.float16}, - device=0, + device=torch_device, ) - self.assertEqual(vqa_pipeline.model.device, torch.device(0)) + self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" From 309a90664f8d05c1203ec8925e8c37e715ebdc23 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 31 Oct 2023 16:03:59 +0100 Subject: [PATCH 030/268] [FEAT] Add Neftune into transformers Trainer (#27141) * add v1 neftune * use `unwrap_model` instead * add test + docs * Apply suggestions from code review Co-authored-by: Zach Mueller * more details * fixup * Update docs/source/en/main_classes/trainer.md Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * refactor a bit * more elaborated test * fix unwrap issue --------- Co-authored-by: Zach Mueller Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docs/source/en/main_classes/trainer.md | 24 +++++++++++++ src/transformers/trainer.py | 48 ++++++++++++++++++++++++++ src/transformers/trainer_utils.py | 26 ++++++++++++++ src/transformers/training_args.py | 12 +++++++ tests/trainer/test_trainer.py | 44 +++++++++++++++++++++++ 5 files changed, 154 insertions(+) diff --git a/docs/source/en/main_classes/trainer.md b/docs/source/en/main_classes/trainer.md index 462cea55dc46..e9a93bbff751 100644 --- a/docs/source/en/main_classes/trainer.md +++ b/docs/source/en/main_classes/trainer.md @@ -740,3 +740,27 @@ Sections that were moved: | Gradient Clipping | Getting The Model Weights Out ] + +## Boost your fine-tuning performances using 
NEFTune + + +NEFTune is a technique to boost the performance of chat models and was introduced by the paper “NEFTune: Noisy Embeddings Improve Instruction Finetuning” from Jain et al. it consists of adding noise to the embedding vectors during training. According to the abstract of the paper: + +> Standard finetuning of LLaMA-2-7B using Alpaca achieves 29.79% on AlpacaEval, which rises to 64.69% using noisy embeddings. NEFTune also improves over strong baselines on modern instruction datasets. Models trained with Evol-Instruct see a 10% improvement, with ShareGPT an 8% improvement, and with OpenPlatypus an 8% improvement. Even powerful models further refined with RLHF such as LLaMA-2-Chat benefit from additional training with NEFTune. + +
+ +
+ +To use it in `Trainer` simply pass `neftune_noise_alpha` when creating your `TrainingArguments` instance. Note that to avoid any surprising behaviour, NEFTune is disabled after training to retrieve back the original behaviour of the embedding layer. + +```python +from transformers import Trainer, TrainingArguments + +args = TrainingArguments(..., neftune_noise_alpha=0.1) +trainer = Trainer(..., args=args) + +... + +trainer.train() +``` diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 945b557021c7..aa5e372bdc24 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -113,6 +113,7 @@ find_executable_batch_size, get_last_checkpoint, has_length, + neftune_post_forward_hook, number_of_arguments, seed_worker, set_seed, @@ -486,6 +487,8 @@ def __init__( self.model_wrapped = model self.model = model + self.neftune_noise_alpha = args.neftune_noise_alpha + self.compute_metrics = compute_metrics self.preprocess_logits_for_metrics = preprocess_logits_for_metrics self.optimizer, self.lr_scheduler = optimizers @@ -634,6 +637,42 @@ def __init__( if args.torch_compile and not is_torch_compile_available(): raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.") + def _activate_neftune(self, model): + r""" + Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: + https://arxiv.org/abs/2310.05914 + """ + unwrapped_model = unwrap_model(model) + + if is_peft_available() and isinstance(unwrapped_model, PeftModel): + embeddings = unwrapped_model.base_model.get_input_embeddings() + else: + embeddings = unwrapped_model.get_input_embeddings() + + del unwrapped_model + + embeddings.neftune_noise_alpha = self.neftune_noise_alpha + hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook) + self.neftune_hook_handle = hook_handle + return model + + def _deactivate_neftune(self, model): + """ + Deactivates the neftune method. Make sure to call `_activate_neftune` first. + """ + if not hasattr(self, "neftune_hook_handle"): + raise ValueError("Neftune is not activated make sure to call `trainer._activate_neftune()` first") + + unwrapped_model = unwrap_model(model) + + if is_peft_available() and isinstance(unwrapped_model, PeftModel): + embeddings = unwrapped_model.base_model.get_input_embeddings() + else: + embeddings = unwrapped_model.get_input_embeddings() + + self.neftune_hook_handle.remove() + del embeddings.neftune_noise_alpha, unwrapped_model + def add_callback(self, callback): """ Add a callback to the current list of [`~transformer.TrainerCallback`]. @@ -1444,6 +1483,10 @@ def train( self.is_in_train = True + # Attach NEFTune hooks if necessary + if self.neftune_noise_alpha is not None: + self.model = self._activate_neftune(self.model) + # do_train is not a reliable argument, as it might not be set and .train() still called, so # the following is a workaround: if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train: @@ -1956,6 +1999,11 @@ def _inner_training_loop( # Wait for the checkpoint to be uploaded. self._finish_current_push() + # After training we make sure to retrieve back the original forward pass method + # for the embedding layer by removing the forward post hook. 
+ if self.neftune_noise_alpha is not None: + self._deactivate_neftune(self.model) + return TrainOutput(self.state.global_step, train_loss, metrics) def _get_output_dir(self, trial): diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index 5bf29efffa8f..dd793c02036e 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -105,6 +105,32 @@ def set_seed(seed: int): tf.random.set_seed(seed) +def neftune_post_forward_hook(module, input, output): + """ + Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding + layers. This method is slightly adapted from the original source code that can be found here: + https://github.com/neelsjain/NEFTune Simply add it to your model as follows: + ```python + model = ... + model.embed_tokens.neftune_noise_alpha = 0.1 + model.embed_tokens.register_forward_hook(neftune_post_forward_hook) + ``` + Args: + module (`torch.nn.Module`): + The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to + the desired noise alpha value. + input (`torch.Tensor`): + The input tensor to the model. + output (`torch.Tensor`): + The output tensor of the model (i.e. the embeddings). + """ + if module.training: + dims = torch.tensor(output.size(1) * output.size(2)) + mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) + output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) + return output + + class EvalPrediction: """ Evaluation output (always contains labels), to be used to compute metrics. diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 507515c696af..8a6d7255f500 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -627,6 +627,11 @@ class TrainingArguments: This will iterate over the entire training dataloader once beforehand, and will slow down the entire process. + neftune_noise_alpha (`Optional[float]`): + If not `None`, this will activate NEFTune noise embeddings. This can drastically improve model performance + for instruction fine-tuning. Check out the [original paper](https://arxiv.org/abs/2310.05914) and the + [original code](https://github.com/neelsjain/NEFTune). Support transformers `PreTrainedModel` and also + `PeftModel` from peft. """ framework = "pt" @@ -1226,6 +1231,13 @@ class TrainingArguments: metadata={"help": "If set to `True`, the speed metrics will include `tgs` (tokens per second per device)."}, ) + neftune_noise_alpha: float = field( + default=None, + metadata={ + "help": "Activates neftune noise embeddings into the model. NEFTune has been proven to drastically improve model performances for instrcution fine-tuning. Check out the original paper here: https://arxiv.org/abs/2310.05914 and the original code here: https://github.com/neelsjain/NEFTune. Only supported for `PreTrainedModel` and `PeftModel` classes." 
+ }, + ) + def __post_init__(self): # expand paths, if not os.makedirs("~/bar") will make directory # in the current directory instead of the actual home diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 8791e92c7148..6c208d0de019 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -838,6 +838,50 @@ def test_number_of_steps_in_training_with_ipex(self): train_output = trainer.train() self.assertEqual(train_output.global_step, 10) + def test_neftune(self): + config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) + tiny_gpt2 = GPT2LMHeadModel(config) + x = torch.randint(0, 100, (128,)) + train_dataset = RepeatDataset(x) + + # Trainer without inf/nan filter + args = TrainingArguments( + "./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4 + ) + trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) + + trainer.model = trainer._activate_neftune(trainer.model) + + dummy_input = torch.LongTensor([[1, 0, 1]]).to(torch_device) + + emb1 = trainer.model.get_input_embeddings()(dummy_input) + emb2 = trainer.model.get_input_embeddings()(dummy_input) + + self.assertFalse(torch.allclose(emb1, emb2), "Neftune noise is not applied!") + + # redefine the model + tiny_gpt2 = GPT2LMHeadModel(config) + # Trainer without inf/nan filter + args = TrainingArguments( + "./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4 + ) + trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) + + # Check that it trains without errors + trainer.train() + + # Make sure forward pass works fine + _ = trainer.model(dummy_input) + self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0) + + trainer.model.eval() + + # Check that we get identical embeddings just in case + emb1 = trainer.model.get_input_embeddings()(dummy_input) + emb2 = trainer.model.get_input_embeddings()(dummy_input) + + self.assertTrue(torch.allclose(emb1, emb2), "Neftune noise is still applied!") + def test_logging_inf_nan_filter(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) From 05f2290114def72a3e20643fd4359c4c2d3abafe Mon Sep 17 00:00:00 2001 From: Matt Date: Tue, 31 Oct 2023 15:12:06 +0000 Subject: [PATCH 031/268] Backward compatibility fix for the Conversation class (#27176) * Backward compatibility fix for the Conversation class * Explain what's going on in the conditional --- src/transformers/pipelines/conversational.py | 20 +++++++++++++++---- .../test_pipelines_conversational.py | 10 +++++----- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/src/transformers/pipelines/conversational.py b/src/transformers/pipelines/conversational.py index 2beaf8cc2eaf..96a16e5b0f8f 100644 --- a/src/transformers/pipelines/conversational.py +++ b/src/transformers/pipelines/conversational.py @@ -54,6 +54,7 @@ def __init__( # This block deals with the legacy args - new code should just totally # avoid past_user_inputs and generated_responses + self._num_processed_user_inputs = 0 generated_responses = deprecated_kwargs.pop("generated_responses", None) past_user_inputs = deprecated_kwargs.pop("past_user_inputs", None) if generated_responses is not None and past_user_inputs is None: @@ -114,10 +115,11 @@ def append_response(self, response: str): def mark_processed(self): """ - This is a legacy method that no longer has any effect, as the Conversation no longer 
distinguishes between - processed and unprocessed user input. + This is a legacy method, as the Conversation no longer distinguishes between processed and unprocessed user + input. We set a counter here to keep behaviour mostly backward-compatible, but in general you should just read + the messages directly when writing new code. """ - pass + self._num_processed_user_inputs = len(self._user_messages) def __iter__(self): for message in self.messages: @@ -163,7 +165,17 @@ def _user_messages(self): @property def past_user_inputs(self): # This is a legacy property for backwards compatibility. It is recommended to just directly access - # conversation.messages instead. + # conversation.messages instead. The modern class does not care about which messages are "processed" + # or not. + if not self._user_messages: + return [] + # In the past, the most recent user message had to be mark_processed() before being included + # in past_user_messages. The class essentially had a single-message buffer, representing messages that + # had not yet been replied to. This is no longer the case, but we mimic the behaviour in this property + # for backward compatibility. + if self.messages[-1]["role"] != "user" or self._num_processed_user_inputs == len(self._user_messages): + return self._user_messages + return self._user_messages[:-1] @property diff --git a/tests/pipelines/test_pipelines_conversational.py b/tests/pipelines/test_pipelines_conversational.py index ba3b37055fd1..6ba2d8379d2a 100644 --- a/tests/pipelines/test_pipelines_conversational.py +++ b/tests/pipelines/test_pipelines_conversational.py @@ -136,8 +136,8 @@ def test_integration_torch_conversation(self): conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") # Then - self.assertEqual(len(conversation_1.past_user_inputs), 1) - self.assertEqual(len(conversation_2.past_user_inputs), 1) + self.assertEqual(len(conversation_1.past_user_inputs), 0) + self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then @@ -167,7 +167,7 @@ def test_integration_torch_conversation_truncated_history(self): conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") # Then - self.assertEqual(len(conversation_1.past_user_inputs), 1) + self.assertEqual(len(conversation_1.past_user_inputs), 0) # When result = conversation_agent(conversation_1, do_sample=False, max_length=36) # Then @@ -375,8 +375,8 @@ def test_integration_torch_conversation_encoder_decoder(self): conversation_1 = Conversation("My name is Sarah and I live in London") conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? 
") # Then - self.assertEqual(len(conversation_1.past_user_inputs), 1) - self.assertEqual(len(conversation_2.past_user_inputs), 1) + self.assertEqual(len(conversation_1.past_user_inputs), 0) + self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then From 4bb50aa212032bda01ed8a60d7b11307ef8265a5 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 31 Oct 2023 16:25:53 +0100 Subject: [PATCH 032/268] [`Quantization` / `tests` ] Fix bnb MPT test (#27178) fix bnb mpt test --- tests/quantization/bnb/test_mixed_int8.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/quantization/bnb/test_mixed_int8.py b/tests/quantization/bnb/test_mixed_int8.py index 4666fe3576c2..3be1e5582ac4 100644 --- a/tests/quantization/bnb/test_mixed_int8.py +++ b/tests/quantization/bnb/test_mixed_int8.py @@ -124,7 +124,6 @@ def tearDown(self): gc.collect() torch.cuda.empty_cache() - @unittest.skip("Un-skip once https://github.com/mosaicml/llm-foundry/issues/703 is resolved") def test_get_keys_to_not_convert_trust_remote_code(self): r""" Test the `get_keys_to_not_convert` function with `trust_remote_code` models. @@ -135,11 +134,11 @@ def test_get_keys_to_not_convert_trust_remote_code(self): model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained( - model_id, trust_remote_code=True, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7" + model_id, trust_remote_code=True, revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( - config, trust_remote_code=True, code_revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7" + config, trust_remote_code=True, code_revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"]) From e22b7ced9a7cb6cd9bc2effab7e520ce83cee2a9 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 31 Oct 2023 21:14:57 +0530 Subject: [PATCH 033/268] Fix dropout in `StarCoder` (#27182) fix dropout in modeling_gpt_bigcode.py --- src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index fcbbfca5ceda..14db9efb4bca 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -364,7 +364,7 @@ def forward( key = key.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) value = value.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) - attn_dropout = self.dropout if self.training else 0.0 + attn_dropout = self.config.attn_pdrop if self.training else 0.0 softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else query.dtype upcast = query.dtype != softmax_dtype From 6b7f8ff1f3db0b21afbedb322770c292bc8dedae Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Tue, 31 Oct 2023 10:57:37 -0500 Subject: [PATCH 034/268] translate traning.md to chinese (#27122) * translate traning.md * update _tocree.yml * update _tocree.yml * update _tocree.yml --- docs/source/zh/_toctree.yml | 14 +- docs/source/zh/training.md | 407 ++++++++++++++++++++++++++++++++++++ 2 files changed, 415 insertions(+), 6 deletions(-) create mode 100644 
docs/source/zh/training.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 4dc8920a1900..6aafffa59849 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -7,14 +7,16 @@ title: 安装 title: 开始使用 - sections: - - local: accelerate - title: 加速分布式训练 - - local: preprocessing - title: 预处理 - local: pipeline_tutorial - title: pipeline教程 + title: 使用pipelines进行推理 + - local: preprocessing + title: 预处理数据 + - local: training + title: 微调预训练模型 + - local: accelerate + title: 使用🤗Accelerate进行分布式训练 - local: transformers_agents - title: transformers_agents教程 + title: agents教程 title: 教程 - sections: - local: fast_tokenizers diff --git a/docs/source/zh/training.md b/docs/source/zh/training.md new file mode 100644 index 000000000000..4ef49b459f95 --- /dev/null +++ b/docs/source/zh/training.md @@ -0,0 +1,407 @@ + + +# 微调预训练模型 + +[[open-in-colab]] + +使用预训练模型有许多显著的好处。它降低了计算成本,减少了碳排放,同时允许您使用最先进的模型,而无需从头开始训练一个。🤗 Transformers 提供了涉及各种任务的成千上万的预训练模型。当您使用预训练模型时,您需要在与任务相关的数据集上训练该模型。这种操作被称为微调,是一种非常强大的训练技术。在本教程中,您将使用您选择的深度学习框架来微调一个预训练模型: + +* 使用 🤗 Transformers 的 [`Trainer`] 来微调预训练模型。 +* 在 TensorFlow 中使用 Keras 来微调预训练模型。 +* 在原生 PyTorch 中微调预训练模型。 + + + +## 准备数据集 + + + +在您进行预训练模型微调之前,需要下载一个数据集并为训练做好准备。之前的教程向您展示了如何处理训练数据,现在您有机会将这些技能付诸实践! + +首先,加载[Yelp评论](https://huggingface.co/datasets/yelp_review_full)数据集: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("yelp_review_full") +>>> dataset["train"][100] +{'label': 0, + 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} +``` + +正如您现在所知,您需要一个`tokenizer`来处理文本,包括填充和截断操作以处理可变的序列长度。如果要一次性处理您的数据集,可以使用 🤗 Datasets 的 [`map`](https://huggingface.co/docs/datasets/process.html#map) 方法,将预处理函数应用于整个数据集: + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + + +>>> def tokenize_function(examples): +... 
return tokenizer(examples["text"], padding="max_length", truncation=True) + + +>>> tokenized_datasets = dataset.map(tokenize_function, batched=True) +``` +如果愿意的话,您可以从完整数据集提取一个较小子集来进行微调,以减少训练所需的时间: + +```py +>>> small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) +>>> small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000)) +``` + + + +## 训练 + +此时,您应该根据您训练所用的框架来选择对应的教程章节。您可以使用右侧的链接跳转到您想要的章节 - 如果您想隐藏某个框架对应的所有教程内容,只需使用右上角的按钮! + + + + + + +## 使用 PyTorch Trainer 进行训练 + +🤗 Transformers 提供了一个专为训练 🤗 Transformers 模型而优化的 [`Trainer`] 类,使您无需手动编写自己的训练循环步骤而更轻松地开始训练模型。[`Trainer`] API 支持各种训练选项和功能,如日志记录、梯度累积和混合精度。 + +首先加载您的模型并指定期望的标签数量。根据 Yelp Review [数据集卡片](https://huggingface.co/datasets/yelp_review_full#data-fields),您知道有五个标签: + + +```py +>>> from transformers import AutoModelForSequenceClassification + +>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5) +``` + + + +您将会看到一个警告,提到一些预训练权重未被使用,以及一些权重被随机初始化。不用担心,这是完全正常的!BERT 模型的预训练`head`被丢弃,并替换为一个随机初始化的分类`head`。您将在您的序列分类任务上微调这个新模型`head`,将预训练模型的知识转移给它。 + + + +### 训练超参数 + +接下来,创建一个 [`TrainingArguments`] 类,其中包含您可以调整的所有超参数以及用于激活不同训练选项的标志。对于本教程,您可以从默认的训练[超参数](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments)开始,但随时可以尝试不同的设置以找到最佳设置。 + +指定保存训练检查点的位置: + +```py +>>> from transformers import TrainingArguments + +>>> training_args = TrainingArguments(output_dir="test_trainer") +``` + +### 评估 + +[`Trainer`] 在训练过程中不会自动评估模型性能。您需要向 [`Trainer`] 传递一个函数来计算和展示指标。[🤗 Evaluate](https://huggingface.co/docs/evaluate/index) 库提供了一个简单的 [`accuracy`](https://huggingface.co/spaces/evaluate-metric/accuracy) 函数,您可以使用 [`evaluate.load`] 函数加载它(有关更多信息,请参阅此[快速入门](https://huggingface.co/docs/evaluate/a_quick_tour)): + +```py +>>> import numpy as np +>>> import evaluate + +>>> metric = evaluate.load("accuracy") +``` +在 `metric` 上调用 [`~evaluate.compute`] 来计算您的预测的准确性。在将预测传递给 `compute` 之前,您需要将预测转换为`logits`(请记住,所有 🤗 Transformers 模型都返回对`logits`): + +```py +>>> def compute_metrics(eval_pred): +... logits, labels = eval_pred +... predictions = np.argmax(logits, axis=-1) +... return metric.compute(predictions=predictions, references=labels) +``` + +如果您希望在微调过程中监视评估指标,请在您的训练参数中指定 `evaluation_strategy` 参数,以在每个`epoch`结束时展示评估指标: + +```py +>>> from transformers import TrainingArguments, Trainer + +>>> training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch") +``` + +### 训练器 + +创建一个包含您的模型、训练参数、训练和测试数据集以及评估函数的 [`Trainer`] 对象: + + +```py +>>> trainer = Trainer( +... model=model, +... args=training_args, +... train_dataset=small_train_dataset, +... eval_dataset=small_eval_dataset, +... compute_metrics=compute_metrics, +... ) +``` +然后调用[`~transformers.Trainer.train`]以微调模型: + +```py +>>> trainer.train() +``` + + + + + + +## 使用keras训练TensorFlow模型 + +您也可以使用 Keras API 在 TensorFlow 中训练 🤗 Transformers 模型! + +### 加载用于 Keras 的数据 + +当您希望使用 Keras API 训练 🤗 Transformers 模型时,您需要将您的数据集转换为 Keras 可理解的格式。如果您的数据集很小,您可以将整个数据集转换为NumPy数组并传递给 Keras。在进行更复杂的操作之前,让我们先尝试这种方法。 + +首先,加载一个数据集。我们将使用 [GLUE benchmark](https://huggingface.co/datasets/glue) 中的 CoLA 数据集,因为它是一个简单的二元文本分类任务。现在只使用训练数据集。 + + +```py +from datasets import load_dataset + +dataset = load_dataset("glue", "cola") +dataset = dataset["train"] # Just take the training split for now +``` +接下来,加载一个`tokenizer`并将数据标记为 NumPy 数组。请注意,标签已经是由 0 和 1 组成的`list`,因此我们可以直接将其转换为 NumPy 数组而无需进行分词处理! 
+ +```py +from transformers import AutoTokenizer + +tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") +tokenized_data = tokenizer(dataset["sentence"], return_tensors="np", padding=True) +# Tokenizer returns a BatchEncoding, but we convert that to a dict for Keras +tokenized_data = dict(tokenized_data) + +labels = np.array(dataset["label"]) # Label is already an array of 0 and 1 +``` +最后,加载、[`compile`](https://keras.io/api/models/model_training_apis/#compile-method) 和 [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) 模型。请注意,Transformers 模型都有一个默认的与任务相关的损失函数,因此除非您希望自定义,否则无需指定一个损失函数: + +```py +from transformers import TFAutoModelForSequenceClassification +from tensorflow.keras.optimizers import Adam + +# Load and compile our model +model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased") +# Lower learning rates are often better for fine-tuning transformers +model.compile(optimizer=Adam(3e-5)) # No loss argument! + +model.fit(tokenized_data, labels) +``` + + + +当您使用 `compile()` 编译模型时,无需传递损失参数!如果不指定损失参数,Hugging Face 模型会自动选择适合其任务和模型架构的损失函数。如果需要,您始终可以自己指定损失函数以覆盖默认配置。 + + + +这种方法对于较小的数据集效果很好,但对于较大的数据集,您可能会发现它开始变得有问题。为什么呢?因为分词后的数组和标签必须完全加载到内存中,而且由于 NumPy 无法处理“不规则”数组,因此每个分词后的样本长度都必须被填充到数据集中最长样本的长度。这将使您的数组变得更大,而所有这些`padding tokens`也会减慢训练速度! + + +### 将数据加载为 tf.data.Dataset + +如果您想避免训练速度减慢,可以将数据加载为 `tf.data.Dataset`。虽然您可以自己编写自己的 `tf.data` 流水线,但我们有两种方便的方法来实现这一点: + +- [`~TFPreTrainedModel.prepare_tf_dataset`]:这是我们在大多数情况下推荐的方法。因为它是模型上的一个方法,它可以检查模型以自动确定哪些列可用作模型输入,并丢弃其他列以创建一个更简单、性能更好的数据集。 +- [`~datasets.Dataset.to_tf_dataset`]:这个方法更低级,但当您希望完全控制数据集的创建方式时非常有用,可以通过指定要包括的确切 `columns` 和 `label_cols` 来实现。 + +在使用 [`~TFPreTrainedModel.prepare_tf_dataset`] 之前,您需要将`tokenizer`的输出添加到数据集作为列,如下面的代码示例所示: + +```py +def tokenize_dataset(data): + # Keys of the returned dictionary will be added to the dataset as columns + return tokenizer(data["text"]) + + +dataset = dataset.map(tokenize_dataset) +``` +请记住,默认情况下,Hugging Face 数据集存储在硬盘上,因此这不会增加您的内存使用!一旦列已经添加,您可以从数据集中流式的传输批次数据,并为每个批次添加`padding tokens`,这与为整个数据集添加`padding tokens`相比,大大减少了`padding tokens`的数量。 + +```py +>>> tf_dataset = model.prepare_tf_dataset(dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer) +``` +请注意,在上面的代码示例中,您需要将`tokenizer`传递给`prepare_tf_dataset`,以便它可以在加载批次时正确填充它们。如果数据集中的所有样本都具有相同的长度而且不需要填充,您可以跳过此参数。如果需要执行比填充样本更复杂的操作(例如,用于掩码语言模型的`tokens` 替换),则可以使用 `collate_fn` 参数,而不是传递一个函数来将样本列表转换为批次并应用任何所需的预处理。请查看我们的[示例](https://github.com/huggingface/transformers/tree/main/examples)或[笔记](https://huggingface.co/docs/transformers/notebooks)以了解此方法的实际操作。 + +一旦创建了 `tf.data.Dataset`,您可以像以前一样编译和训练模型: + +```py +model.compile(optimizer=Adam(3e-5)) # No loss argument! + +model.fit(tf_dataset) +``` + + + + + + +## 在原生 PyTorch 中训练 + + + + + +[`Trainer`] 负责训练循环,允许您在一行代码中微调模型。对于喜欢编写自己训练循环的用户,您也可以在原生 PyTorch 中微调 🤗 Transformers 模型。 + +现在,您可能需要重新启动您的`notebook`,或执行以下代码以释放一些内存: + +```py +del model +del trainer +torch.cuda.empty_cache() +``` + +接下来,手动处理 `tokenized_dataset` 以准备进行训练。 + +1. 移除 text 列,因为模型不接受原始文本作为输入: + + ```py + >>> tokenized_datasets = tokenized_datasets.remove_columns(["text"]) + ``` + +2. 将 label 列重命名为 labels,因为模型期望参数的名称为 labels: + + ```py + >>> tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + ``` + +3. 
设置数据集的格式以返回 PyTorch 张量而不是`lists`: + + ```py + >>> tokenized_datasets.set_format("torch") + ``` + +接着,创建一个先前展示的数据集的较小子集,以加速微调过程 + +```py +>>> small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) +>>> small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000)) +``` + +### DataLoader + +您的训练和测试数据集创建一个`DataLoader`类,以便可以迭代处理数据批次 + +```py +>>> from torch.utils.data import DataLoader + +>>> train_dataloader = DataLoader(small_train_dataset, shuffle=True, batch_size=8) +>>> eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) +``` + +加载您的模型,并指定期望的标签数量: + +```py +>>> from transformers import AutoModelForSequenceClassification + +>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5) +``` + +### Optimizer and learning rate scheduler + +创建一个`optimizer`和`learning rate scheduler`以进行模型微调。让我们使用 PyTorch 中的 [AdamW](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html) 优化器: + +```py +>>> from torch.optim import AdamW + +>>> optimizer = AdamW(model.parameters(), lr=5e-5) +``` + +创建来自 [`Trainer`] 的默认`learning rate scheduler`: + + +```py +>>> from transformers import get_scheduler + +>>> num_epochs = 3 +>>> num_training_steps = num_epochs * len(train_dataloader) +>>> lr_scheduler = get_scheduler( +... name="linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps +... ) +``` + +最后,指定 `device` 以使用 GPU(如果有的话)。否则,使用 CPU 进行训练可能需要几个小时,而不是几分钟。 + + +```py +>>> import torch + +>>> device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") +>>> model.to(device) +``` + + + +如果没有 GPU,可以通过notebook平台如 [Colaboratory](https://colab.research.google.com/) 或 [SageMaker StudioLab](https://studiolab.sagemaker.aws/) 来免费获得云端GPU使用。 + + + +现在您已经准备好训练了!🥳 + +### 训练循环 + +为了跟踪训练进度,使用 [tqdm](https://tqdm.github.io/) 库来添加一个进度条,显示训练步数的进展: + +```py +>>> from tqdm.auto import tqdm + +>>> progress_bar = tqdm(range(num_training_steps)) + +>>> model.train() +>>> for epoch in range(num_epochs): +... for batch in train_dataloader: +... batch = {k: v.to(device) for k, v in batch.items()} +... outputs = model(**batch) +... loss = outputs.loss +... loss.backward() + +... optimizer.step() +... lr_scheduler.step() +... optimizer.zero_grad() +... progress_bar.update(1) +``` + +### 评估 + +就像您在 [`Trainer`] 中添加了一个评估函数一样,当您编写自己的训练循环时,您需要做同样的事情。但与在每个`epoch`结束时计算和展示指标不同,这一次您将使用 [`~evaluate.add_batch`] 累积所有批次,并在最后计算指标。 + +```py +>>> import evaluate + +>>> metric = evaluate.load("accuracy") +>>> model.eval() +>>> for batch in eval_dataloader: +... batch = {k: v.to(device) for k, v in batch.items()} +... with torch.no_grad(): +... outputs = model(**batch) + +... logits = outputs.logits +... predictions = torch.argmax(logits, dim=-1) +... 
metric.add_batch(predictions=predictions, references=batch["labels"]) + +>>> metric.compute() +``` + + + + + +## 附加资源 + +更多微调例子可参考如下链接: + +- [🤗 Transformers 示例](https://github.com/huggingface/transformers/tree/main/examples) 包含用于在 PyTorch 和 TensorFlow 中训练常见自然语言处理任务的脚本。 + +- [🤗 Transformers 笔记](notebooks) 包含针对特定任务在 PyTorch 和 TensorFlow 中微调模型的各种`notebook`。 \ No newline at end of file From 77930f8a01d5a18af88335c60b86068f10b647f7 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Tue, 31 Oct 2023 09:44:51 -0700 Subject: [PATCH 035/268] [docs] Update CPU/GPU inference docs (#26881) * first draft * remove non-existent paths * edits * feedback * feedback and optimum * Apply suggestions from code review Co-authored-by: regisss <15324346+regisss@users.noreply.github.com> Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> * redirect to correct doc * _redirects.yml --------- Co-authored-by: regisss <15324346+regisss@users.noreply.github.com> Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- docs/source/en/_redirects.yml | 3 + docs/source/en/_toctree.yml | 8 +- docs/source/en/perf_infer_cpu.md | 108 +++++--- docs/source/en/perf_infer_gpu_many.md | 124 ---------- docs/source/en/perf_infer_gpu_one.md | 342 ++++++++++---------------- docs/source/en/perf_infer_special.md | 18 -- docs/source/en/performance.md | 2 +- src/transformers/utils/logging.py | 4 +- utils/not_doctested.txt | 2 - 9 files changed, 215 insertions(+), 396 deletions(-) create mode 100644 docs/source/en/_redirects.yml delete mode 100644 docs/source/en/perf_infer_gpu_many.md delete mode 100644 docs/source/en/perf_infer_special.md diff --git a/docs/source/en/_redirects.yml b/docs/source/en/_redirects.yml new file mode 100644 index 000000000000..0dd4d2bfb34b --- /dev/null +++ b/docs/source/en/_redirects.yml @@ -0,0 +1,3 @@ +# Optimizing inference + +perf_infer_gpu_many: perf_infer_gpu_one \ No newline at end of file diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 1a76762d160f..4d434b7a18c1 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -155,13 +155,9 @@ title: Efficient training techniques - sections: - local: perf_infer_cpu - title: Inference on CPU + title: CPU inference - local: perf_infer_gpu_one - title: Inference on one GPU - - local: perf_infer_gpu_many - title: Inference on many GPUs - - local: perf_infer_special - title: Inference on Specialized Hardware + title: GPU inference title: Optimizing inference - local: big_models title: Instantiating a big model diff --git a/docs/source/en/perf_infer_cpu.md b/docs/source/en/perf_infer_cpu.md index a7a524ae1ef0..f10fc01e7ca6 100644 --- a/docs/source/en/perf_infer_cpu.md +++ b/docs/source/en/perf_infer_cpu.md @@ -13,46 +13,48 @@ rendered properly in your Markdown viewer. --> -# Efficient Inference on CPU +# CPU inference -This guide focuses on inferencing large models efficiently on CPU. +With some optimizations, it is possible to efficiently run large model inference on a CPU. One of these optimization techniques involves compiling the PyTorch code into an intermediate format for high-performance environments like C++. The other technique fuses multiple operations into one kernel to reduce the overhead of running each operation separately. 
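+As a rough illustration of the first technique, the minimal sketch below traces a model into TorchScript with `torch.jit.trace`. The checkpoint name and the dummy input shapes here are only placeholder assumptions for this example; the sections that follow cover the recommended ways to enable these optimizations through 🤗 Transformers.
+
+```py
+import torch
+from transformers import AutoModelForSequenceClassification
+
+# Load the model with `torchscript=True` so its forward pass returns tuples that can be traced
+model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", torchscript=True)
+model.eval()
+
+# Trace with dummy inputs to compile the model into a serializable intermediate representation
+dummy_input_ids = torch.randint(0, model.config.vocab_size, (1, 16))
+dummy_attention_mask = torch.ones(1, 16, dtype=torch.long)
+traced_model = torch.jit.trace(model, (dummy_input_ids, dummy_attention_mask))
+torch.jit.save(traced_model, "traced_model.pt")
+```
+
+The saved artifact can later be reloaded with `torch.jit.load` and run without the original Python modeling code.
+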
-## `BetterTransformer` for faster inference +You'll learn how to use [BetterTransformer](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/) for faster inference, and how to convert your PyTorch code to [TorchScript](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html). If you're using an Intel CPU, you can also use [graph optimizations](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features.html#graph-optimization) from [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/index.html) to boost inference speed even more. Finally, learn how to use 🤗 Optimum to accelerate inference with ONNX Runtime or OpenVINO (if you're using an Intel CPU). -We have recently integrated `BetterTransformer` for faster inference on CPU for text, image and audio models. Check the documentation about this integration [here](https://huggingface.co/docs/optimum/bettertransformer/overview) for more details. +## BetterTransformer -## PyTorch JIT-mode (TorchScript) -TorchScript is a way to create serializable and optimizable models from PyTorch code. Any TorchScript program can be saved from a Python process and loaded in a process where there is no Python dependency. -Comparing to default eager mode, jit mode in PyTorch normally yields better performance for model inference from optimization methodologies like operator fusion. +BetterTransformer accelerates inference with its fastpath (native PyTorch specialized implementation of Transformer functions) execution. The two optimizations in the fastpath execution are: -For a gentle introduction to TorchScript, see the Introduction to [PyTorch TorchScript tutorial](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#tracing-modules). +1. fusion, which combines multiple sequential operations into a single "kernel" to reduce the number of computation steps +2. skipping the inherent sparsity of padding tokens to avoid unnecessary computation with nested tensors -### IPEX Graph Optimization with JIT-mode -Intel® Extension for PyTorch provides further optimizations in jit mode for Transformers series models. It is highly recommended for users to take advantage of Intel® Extension for PyTorch with jit mode. Some frequently used operator patterns from Transformers models are already supported in Intel® Extension for PyTorch with jit mode fusions. Those fusion patterns like Multi-head-attention fusion, Concat Linear, Linear+Add, Linear+Gelu, Add+LayerNorm fusion and etc. are enabled and perform well. The benefit of the fusion is delivered to users in a transparent fashion. According to the analysis, ~70% of most popular NLP tasks in question-answering, text-classification, and token-classification can get performance benefits with these fusion patterns for both Float32 precision and BFloat16 Mixed precision. +BetterTransformer also converts all attention operations to use the more memory-efficient [scaled dot product attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention). -Check more detailed information for [IPEX Graph Optimization](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html). + -#### IPEX installation: +BetterTransformer is not supported for all models. Check this [list](https://huggingface.co/docs/optimum/bettertransformer/overview#supported-models) to see if a model supports BetterTransformer. 
-IPEX release is following PyTorch, check the approaches for [IPEX installation](https://intel.github.io/intel-extension-for-pytorch/). + -### Usage of JIT-mode -To enable JIT-mode in Trainer for evaluaion or prediction, users should add `jit_mode_eval` in Trainer command arguments. +Before you start, make sure you have 🤗 Optimum [installed](https://huggingface.co/docs/optimum/installation). - +Enable BetterTransformer with the [`PreTrainedModel.to_bettertransformer`] method: -for PyTorch >= 1.14.0. JIT-mode could benefit any models for prediction and evaluaion since dict input is supported in jit.trace +```py +from transformers import AutoModelForCausalLM -for PyTorch < 1.14.0. JIT-mode could benefit models whose forward parameter order matches the tuple input order in jit.trace, like question-answering model -In the case where the forward parameter order does not match the tuple input order in jit.trace, like text-classification models, jit.trace will fail and we are capturing this with the exception here to make it fallback. Logging is used to notify users. +model = AutoModelForCausalLM.from_pretrained("bigcode/starcoder") +model.to_bettertransformer() +``` - +## TorchScript + +TorchScript is an intermediate PyTorch model representation that can be run in production environments where performance is important. You can train a model in PyTorch and then export it to TorchScript to free the model from Python performance constraints. PyTorch [traces](https://pytorch.org/docs/stable/generated/torch.jit.trace.html) a model to return a [`ScriptFunction`] that is optimized with just-in-time compilation (JIT). Compared to the default eager mode, JIT mode in PyTorch typically yields better performance for inference using optimization techniques like operator fusion. -Take an example of the use cases on [Transformers question-answering](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) +For a gentle introduction to TorchScript, see the [Introduction to PyTorch TorchScript](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) tutorial. +With the [`Trainer`] class, you can enable JIT mode for CPU inference by setting the `--jit_mode_eval` flag: -- Inference using jit mode on CPU: -
python run_qa.py \
+```bash
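+# Evaluate a SQuAD-fine-tuned BERT checkpoint on CPU with TorchScript (JIT) mode enabled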
+python run_qa.py \
 --model_name_or_path csarron/bert-base-uncased-squad-v1 \
 --dataset_name squad \
 --do_eval \
@@ -60,10 +62,31 @@ Take an example of the use cases on [Transformers question-answering](https://gi
 --doc_stride 128 \
 --output_dir /tmp/ \
 --no_cuda \
---jit_mode_eval 
+--jit_mode_eval
+```
+
+
+
+For PyTorch >= 1.14.0, JIT-mode could benefit any model for prediction and evaluation since the dict input is supported in `jit.trace`.
+
+For PyTorch < 1.14.0, JIT-mode could benefit a model if its forward parameter order matches the tuple input order in `jit.trace`, such as a question-answering model. If the forward parameter order does not match the tuple input order in `jit.trace`, like a text classification model, `jit.trace` will fail; this failure is caught with an exception so the model falls back to regular eager-mode evaluation, and logging is used to notify users.
+
+
+
+## IPEX graph optimization
+
+Intel® Extension for PyTorch (IPEX) provides further optimizations in JIT mode for Intel CPUs, and we recommend combining it with TorchScript for even faster performance. The IPEX [graph optimization](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html) fuses operations like Multi-head attention, Concat Linear, Linear + Add, Linear + Gelu, Add + LayerNorm, and more.
+
+To take advantage of these graph optimizations, make sure you have IPEX [installed](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html):
+
+```bash
+pip install intel_extension_for_pytorch
+```
-- Inference with IPEX using jit mode on CPU:
-
python run_qa.py \
+Set the `--use_ipex` and `--jit_mode_eval` flags in the [`Trainer`] class to enable JIT mode with the graph optimizations:
+
+```bash
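+# Same CPU evaluation, additionally enabling IPEX graph optimizations on top of JIT mode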
+python run_qa.py \
 --model_name_or_path csarron/bert-base-uncased-squad-v1 \
 --dataset_name squad \
 --do_eval \
@@ -71,5 +94,34 @@ Take an example of the use cases on [Transformers question-answering](https://gi
 --doc_stride 128 \
 --output_dir /tmp/ \
 --no_cuda \
---use_ipex \
---jit_mode_eval
+--use_ipex \ +--jit_mode_eval +``` + +## 🤗 Optimum + + + +Learn more details about using ORT with 🤗 Optimum in the [Optimum Inference with ONNX Runtime](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models) guide. This section only provides a brief and simple example. + + + +ONNX Runtime (ORT) is a model accelerator that runs inference on CPUs by default. ORT is supported by 🤗 Optimum which can be used in 🤗 Transformers, without making too many changes to your code. You only need to replace the 🤗 Transformers `AutoClass` with its equivalent [`~optimum.onnxruntime.ORTModel`] for the task you're solving, and load a checkpoint in the ONNX format. + +For example, if you're running inference on a question answering task, load the [optimum/roberta-base-squad2](https://huggingface.co/optimum/roberta-base-squad2) checkpoint which contains a `model.onnx` file: + +```py +from transformers import AutoTokenizer, pipeline +from optimum.onnxruntime import ORTModelForQuestionAnswering + +model = ORTModelForQuestionAnswering.from_pretrained("optimum/roberta-base-squad2") +tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2") + +onnx_qa = pipeline("question-answering", model=model, tokenizer=tokenizer) + +question = "What's my name?" +context = "My name is Philipp and I live in Nuremberg." +pred = onnx_qa(question, context) +``` + +If you have an Intel CPU, take a look at 🤗 [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) which supports a variety of compression techniques (quantization, pruning, knowledge distillation) and tools for converting models to the [OpenVINO](https://huggingface.co/docs/optimum/intel/inference) format for higher performance inference. diff --git a/docs/source/en/perf_infer_gpu_many.md b/docs/source/en/perf_infer_gpu_many.md deleted file mode 100644 index 2118b5ddb404..000000000000 --- a/docs/source/en/perf_infer_gpu_many.md +++ /dev/null @@ -1,124 +0,0 @@ - - -# Efficient Inference on a Multiple GPUs - -This document contains information on how to efficiently infer on a multiple GPUs. - - -Note: A multi GPU setup can use the majority of the strategies described in the [single GPU section](./perf_infer_gpu_one). You must be aware of simple techniques, though, that can be used for a better usage. - - - -## Flash Attention 2 - -Flash Attention 2 integration also works in a multi-GPU setup, check out the appropriate section in the [single GPU section](./perf_infer_gpu_one#Flash-Attention-2) - -## BetterTransformer - -[BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview) converts 🤗 Transformers models to use the PyTorch-native fastpath execution, which calls optimized kernels like Flash Attention under the hood. - -BetterTransformer is also supported for faster inference on single and multi-GPU for text, image, and audio models. - - - -Flash Attention can only be used for models using fp16 or bf16 dtype. Make sure to cast your model to the appropriate dtype before using BetterTransformer. - - - -### Decoder models - -For text models, especially decoder-based models (GPT, T5, Llama, etc.), the BetterTransformer API converts all attention operations to use the [`torch.nn.functional.scaled_dot_product_attention` operator](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) (SDPA) that is only available in PyTorch 2.0 and onwards. 
- -To convert a model to BetterTransformer: - -```python -from transformers import AutoModelForCausalLM - -model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") -# convert the model to BetterTransformer -model.to_bettertransformer() - -# Use it for training or inference -``` - -SDPA can also call [Flash Attention](https://arxiv.org/abs/2205.14135) kernels under the hood. To enable Flash Attention or to check that it is available in a given setting (hardware, problem size), use [`torch.backends.cuda.sdp_kernel`](https://pytorch.org/docs/master/backends.html#torch.backends.cuda.sdp_kernel) as a context manager: - - -```diff -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer - -tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") -model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m").to("cuda") -# convert the model to BetterTransformer -model.to_bettertransformer() - -input_text = "Hello my dog is cute and" -inputs = tokenizer(input_text, return_tensors="pt").to("cuda") - -+ with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): - outputs = model.generate(**inputs) - -print(tokenizer.decode(outputs[0], skip_special_tokens=True)) -``` - -If you see a bug with a traceback saying - -```bash -RuntimeError: No available kernel. Aborting execution. -``` - -try using the PyTorch nightly version, which may have a broader coverage for Flash Attention: - -```bash -pip3 install -U --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118 -``` - -Have a look at this [blog post](https://pytorch.org/blog/out-of-the-box-acceleration/) to learn more about what is possible with the BetterTransformer + SDPA API. - -### Encoder models - -For encoder models during inference, BetterTransformer dispatches the forward call of encoder layers to an equivalent of [`torch.nn.TransformerEncoderLayer`](https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html) that will execute the fastpath implementation of the encoder layers. - -Because `torch.nn.TransformerEncoderLayer` fastpath does not support training, it is dispatched to `torch.nn.functional.scaled_dot_product_attention` instead, which does not leverage nested tensors but can use Flash Attention or Memory-Efficient Attention fused kernels. - -More details about BetterTransformer performance can be found in this [blog post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2), and you can learn more about BetterTransformer for encoder models in this [blog](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/). - - -## Advanced usage: mixing FP4 (or Int8) and BetterTransformer - -You can combine the different methods described above to get the best performance for your model. 
For example, you can use BetterTransformer with FP4 mixed-precision inference + flash attention: - -```py -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig - -quantization_config = BitsAndBytesConfig( - load_in_4bit=True, - bnb_4bit_compute_dtype=torch.float16 -) - -tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") -model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config) - -input_text = "Hello my dog is cute and" -inputs = tokenizer(input_text, return_tensors="pt").to("cuda") - -with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): - outputs = model.generate(**inputs) - -print(tokenizer.decode(outputs[0], skip_special_tokens=True)) -``` \ No newline at end of file diff --git a/docs/source/en/perf_infer_gpu_one.md b/docs/source/en/perf_infer_gpu_one.md index 39f2ca22b1f0..06e91c550226 100644 --- a/docs/source/en/perf_infer_gpu_one.md +++ b/docs/source/en/perf_infer_gpu_one.md @@ -13,40 +13,38 @@ rendered properly in your Markdown viewer. --> -# Efficient Inference on a Single GPU +# GPU inference -In addition to this guide, relevant information can be found as well in [the guide for training on a single GPU](perf_train_gpu_one) and [the guide for inference on CPUs](perf_infer_cpu). - -## Flash Attention 2 +GPUs are the standard choice of hardware for machine learning, unlike CPUs, because they are optimized for memory bandwidth and parallelism. To keep up with the larger sizes of modern models or to run these large models on existing and older hardware, there are several optimizations you can use to speed up GPU inference. In this guide, you'll learn how to use FlashAttention-2 (a more memory-efficient attention mechanism), BetterTransformer (a PyTorch native fastpath execution), and bitsandbytes to quantize your model to a lower precision. Finally, learn how to use 🤗 Optimum to accelerate inference with ONNX Runtime on Nvidia GPUs. -Note that this feature is experimental and might considerably change in future versions. For instance, the Flash Attention 2 API might migrate to `BetterTransformer` API in the near future. +The majority of the optimizations described here also apply to multi-GPU setups! -Flash Attention 2 can considerably speed up transformer-based models' training and inference speed. Flash Attention 2 has been introduced in the [official Flash Attention repository](https://github.com/Dao-AILab/flash-attention) by Tri Dao et al. The scientific paper on Flash Attention can be found [here](https://arxiv.org/abs/2205.14135). +## FlashAttention-2 -Make sure to follow the installation guide on the repository mentioned above to properly install Flash Attention 2. Once that package is installed, you can benefit from this feature. + -We natively support Flash Attention 2 for the following models: +FlashAttention-2 is experimental and may change considerably in future versions. -- Llama -- Mistral -- Falcon -- [GPTBigCode (Starcoder)](model_doc/gpt_bigcode#) + -You can request to add Flash Attention 2 support for more models by opening an issue on GitHub, and even open a Pull Request to integrate the changes. 
The supported models can be used for inference and training, including training with padding tokens - *which is currently not supported for `BetterTransformer` API below.* +[FlashAttention-2](https://huggingface.co/papers/2205.14135) is a faster and more efficient implementation of the standard attention mechanism that can significantly speedup inference by: - +1. additionally parallelizing the attention computation over sequence length +2. partitioning the work between GPU threads to reduce communication and shared memory reads/writes between them -Flash Attention 2 can only be used when the models' dtype is `fp16` or `bf16` and runs only on NVIDIA-GPU devices. Make sure to cast your model to the appropriate dtype and load them on a supported device before using that feature. - - +FlashAttention-2 supports inference with Llama, Mistral, and Falcon models. You can request to add FlashAttention-2 support for another model by opening a GitHub Issue or Pull Request. -### Quick usage +Before you begin, make sure you have FlashAttention-2 installed (see the [installation](https://github.com/Dao-AILab/flash-attention?tab=readme-ov-file#installation-and-features) guide for more details about prerequisites): -To enable Flash Attention 2 in your model, add `use_flash_attention_2` in the `from_pretrained` arguments: +```bash +pip install flash-attn --no-build-isolation +``` + +To enable FlashAttention-2, add the `use_flash_attention_2` parameter to [`~AutoModelForCausalLM.from_pretrained`]: ```python import torch @@ -62,74 +60,29 @@ model = AutoModelForCausalLM.from_pretrained( ) ``` -And use it for generation or fine-tuning. - -### Expected speedups - -You can benefit from considerable speedups for fine-tuning and inference, especially for long sequences. However, since Flash Attention does not support computing attention scores with padding tokens under the hood, we must manually pad / unpad the attention scores for batched inference when the sequence contains padding tokens. This leads to a significant slowdown for batched generations with padding tokens. - -To overcome this, one should use Flash Attention without padding tokens in the sequence for training (e.g., by packing a dataset, i.e., concatenating sequences until reaching the maximum sequence length. An example is provided [here](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm.py#L516). - -Below is the expected speedup you can get for a simple forward pass on [tiiuae/falcon-7b](https://hf.co/tiiuae/falcon-7b) with a sequence length of 4096 and various batch sizes, without padding tokens: - -
- -
- -Below is the expected speedup you can get for a simple forward pass on [`meta-llama/Llama-7b-hf`](https://hf.co/meta-llama/Llama-7b-hf) with a sequence length of 4096 and various batch sizes, without padding tokens: - -
- -
- -For sequences with padding tokens (training with padding tokens or generating with padding tokens), we need to unpad / pad the input sequences to compute correctly the attention scores. For relatively small sequence length, on pure forward pass, this creates an overhead leading to a small speedup (below 30% of the input has been filled with padding tokens). - -
- -
- -But for large sequence length you can benefit from interesting speedup for pure inference (also training) - -Note that Flash Attention makes the attention computation more memory efficient, meaning you can train with much larger sequence lengths without facing CUDA OOM issues. It can lead up to memory reduction up to 20 for large sequence length. Check out [the official flash attention repository](https://github.com/Dao-AILab/flash-attention) for more details. - -
- -
- - -### Advanced usage - -You can combine this feature with many exisiting feature for model optimization. Check out few examples below: + -### Combining Flash Attention 2 and 8-bit models +FlashAttention-2 can only be used when the model's dtype is `fp16` or `bf16`, and it only runs on Nvidia GPUs. Make sure to cast your model to the appropriate dtype and load them on a supported device before using FlashAttention-2. + + -You can combine this feature together with 8-bit quantization: +FlashAttention-2 can be combined with other optimization techniques like quantization to further speedup inference. For example, you can combine FlashAttention-2 with 8-bit or 4-bit quantization: -```python +```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM model_id = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) +# load in 8bit model = AutoModelForCausalLM.from_pretrained( model_id, load_in_8bit=True, use_flash_attention_2=True, ) -``` - -### Combining Flash Attention 2 and 4-bit models - -You can combine this feature together with 4-bit quantization: - -```python -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM - -model_id = "tiiuae/falcon-7b" -tokenizer = AutoTokenizer.from_pretrained(model_id) +# load in 4bit model = AutoModelForCausalLM.from_pretrained( model_id, load_in_4bit=True, @@ -137,85 +90,77 @@ model = AutoModelForCausalLM.from_pretrained( ) ``` -### Combining Flash Attention 2 and PEFT - -You can combine this feature together with PEFT for training adapters using Flash Attention 2 under the hood: +### Expected speedups -```python -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM -from peft import LoraConfig +You can benefit from considerable speedups for inference, especially for inputs with long sequences. However, since FlashAttention-2 does not support computing attention scores with padding tokens, you must manually pad/unpad the attention scores for batched inference when the sequence contains padding tokens. This leads to a significant slowdown for batched generations with padding tokens. -model_id = "tiiuae/falcon-7b" -tokenizer = AutoTokenizer.from_pretrained(model_id) +To overcome this, you should use FlashAttention-2 without padding tokens in the sequence during training (by packing a dataset or [concatenating sequences](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm.py#L516) until reaching the maximum sequence length). -model = AutoModelForCausalLM.from_pretrained( - model_id, - load_in_4bit=True, - use_flash_attention_2=True, -) +For a single forward pass on [tiiuae/falcon-7b](https://hf.co/tiiuae/falcon-7b) with a sequence length of 4096 and various batch sizes without padding tokens, the expected speedup is: -lora_config = LoraConfig( - r=8, - task_type="CAUSAL_LM" -) +
+ +
-model.add_adapter(lora_config) +For a single forward pass on [meta-llama/Llama-7b-hf](https://hf.co/meta-llama/Llama-7b-hf) with a sequence length of 4096 and various batch sizes without padding tokens, the expected speedup is: -... # train your model -``` +
+ +
-## BetterTransformer +For sequences with padding tokens (generating with padding tokens), you need to unpad/pad the input sequences to correctly compute the attention scores. With a relatively small sequence length, a single forward pass creates overhead leading to a small speedup (in the example below, 30% of the input is filled with padding tokens): -[BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview) converts 🤗 Transformers models to use the PyTorch-native fastpath execution, which calls optimized kernels like Flash Attention under the hood. +
+ +
-BetterTransformer is also supported for faster inference on single and multi-GPU for text, image, and audio models. +But for larger sequence lengths, you can expect even more speedup benefits: -Flash Attention can only be used for models using fp16 or bf16 dtype. Make sure to cast your model to the appropriate dtype before using BetterTransformer. - - +FlashAttention is more memory efficient, meaning you can train on much larger sequence lengths without running into out-of-memory issues. You can potentially reduce memory usage up to 20x for larger sequence lengths. Take a look at the [flash-attention](https://github.com/Dao-AILab/flash-attention) repository for more details. -### Encoder models +
-PyTorch-native [`nn.MultiHeadAttention`](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/) attention fastpath, called BetterTransformer, can be used with Transformers through the integration in the [🤗 Optimum library](https://huggingface.co/docs/optimum/bettertransformer/overview). +
+ +
-PyTorch's attention fastpath allows to speed up inference through kernel fusions and the use of [nested tensors](https://pytorch.org/docs/stable/nested.html). Detailed benchmarks can be found in [this blog post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2). +## BetterTransformer -After installing the [`optimum`](https://github.com/huggingface/optimum) package, to use Better Transformer during inference, the relevant internal modules are replaced by calling [`~PreTrainedModel.to_bettertransformer`]: + -```python -model = model.to_bettertransformer() -``` +Check out our benchmarks with BetterTransformer and scaled dot product attention in the [Out of the box acceleration and memory savings of 🤗 decoder models with PyTorch 2.0](https://pytorch.org/blog/out-of-the-box-acceleration/) and learn more about the fastpath execution in the [BetterTransformer](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2) blog post. -The method [`~PreTrainedModel.reverse_bettertransformer`] allows to go back to the original modeling, which should be used before saving the model in order to use the canonical transformers modeling: + -```python -model = model.reverse_bettertransformer() -model.save_pretrained("saved_model") -``` +BetterTransformer accelerates inference with its fastpath (native PyTorch specialized implementation of Transformer functions) execution. The two optimizations in the fastpath execution are: -Have a look at this [blog post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2) to learn more about what is possible to do with `BetterTransformer` API for encoder models. +1. fusion, which combines multiple sequential operations into a single "kernel" to reduce the number of computation steps +2. skipping the inherent sparsity of padding tokens to avoid unnecessary computation with nested tensors -### Decoder models +BetterTransformer also converts all attention operations to use the more memory-efficient [scaled dot product attention (SDPA)](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention), and it calls optimized kernels like [FlashAttention](https://huggingface.co/papers/2205.14135) under the hood. -For text models, especially decoder-based models (GPT, T5, Llama, etc.), the BetterTransformer API converts all attention operations to use the [`torch.nn.functional.scaled_dot_product_attention` operator](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) (SDPA) that is only available in PyTorch 2.0 and onwards. +Before you start, make sure you have 🤗 Optimum [installed](https://huggingface.co/docs/optimum/installation). -To convert a model to BetterTransformer: +Then you can enable BetterTransformer with the [`PreTrainedModel.to_bettertransformer`] method: ```python -from transformers import AutoModelForCausalLM +model = model.to_bettertransformer() +``` -model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") -# convert the model to BetterTransformer -model.to_bettertransformer() +You can return the original Transformers model with the [`~PreTrainedModel.reverse_bettertransformer`] method. 
You should use this before saving your model to use the canonical Transformers modeling: -# Use it for training or inference +```py +model = model.reverse_bettertransformer() +model.save_pretrained("saved_model") ``` -SDPA can also call [Flash Attention](https://arxiv.org/abs/2205.14135) kernels under the hood. To enable Flash Attention or to check that it is available in a given setting (hardware, problem size), use [`torch.backends.cuda.sdp_kernel`](https://pytorch.org/docs/master/backends.html#torch.backends.cuda.sdp_kernel) as a context manager: +### FlashAttention +SDPA can also call FlashAttention kernels under the hood. FlashAttention can only be used for models using the `fp16` or `bf16` dtype, so make sure to cast your model to the appropriate dtype before using it. + +To enable FlashAttention or to check whether it is available in a given setting (hardware, problem size), use [`torch.backends.cuda.sdp_kernel`](https://pytorch.org/docs/master/backends.html#torch.backends.cuda.sdp_kernel) as a context manager: ```diff import torch @@ -235,47 +180,32 @@ inputs = tokenizer(input_text, return_tensors="pt").to("cuda") print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` -If you see a bug with a traceback saying +If you see a bug with the traceback below, try using nightly version of PyTorch which may have broader coverage for FlashAttention: ```bash -RuntimeError: No available kernel. Aborting execution. -``` - -try using the PyTorch nightly version, which may have a broader coverage for Flash Attention: +RuntimeError: No available kernel. Aborting execution. -```bash +# install PyTorch nightly pip3 install -U --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118 ``` -Or make sure your model is correctly casted in float16 or bfloat16 - - -Have a look at [this detailed blogpost](https://pytorch.org/blog/out-of-the-box-acceleration/) to read more about what is possible to do with `BetterTransformer` + SDPA API. - -## `bitsandbytes` integration for FP4 mixed-precision inference - -You can install `bitsandbytes` and benefit from easy model compression on GPUs. Using FP4 quantization you can expect to reduce up to 8x the model size compared to its native full precision version. Check out below how to get started. - - - -Note that this feature can also be used in a multi GPU setup. - - +## bitsandbytes -### Requirements [[requirements-for-fp4-mixedprecision-inference]] +bitsandbytes is a quantization library that includes support for 4-bit and 8-bit quantization. Quantization reduces your model size compared to its native full precision version, making it easier to fit large models onto GPUs with limited memory. -- Latest `bitsandbytes` library -`pip install bitsandbytes>=0.39.0` +Make sure you have bitsnbytes and 🤗 Accelerate installed: -- Install latest `accelerate` from source -`pip install git+https://github.com/huggingface/accelerate.git` +```bash +# these versions support 8-bit and 4-bit +pip install bitsandbytes>=0.39.0 accelerate>=0.20.0 -- Install latest `transformers` from source -`pip install git+https://github.com/huggingface/transformers.git` +# install Transformers +pip install transformers +``` -### Running FP4 models - single GPU setup - Quickstart +### 4-bit -You can quickly run a FP4 model on a single GPU by running the following code: +To load a model in 4-bit for inference, use the `load_in_4bit` parameter. 
The `device_map` parameter is optional, but we recommend setting it to `"auto"` to allow 🤗 Accelerate to automatically and efficiently allocate the model given the available resources in the environment. ```py from transformers import AutoModelForCausalLM @@ -283,16 +213,8 @@ from transformers import AutoModelForCausalLM model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` -Note that `device_map` is optional but setting `device_map = 'auto'` is prefered for inference as it will dispatch efficiently the model on the available ressources. -### Running FP4 models - multi GPU setup - -The way to load your mixed 4-bit model in multiple GPUs is as follows (same command as single GPU setup): -```py -model_name = "bigscience/bloom-2b5" -model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) -``` -But you can control the GPU RAM you want to allocate on each GPU using `accelerate`. Use the `max_memory` argument as follows: +To load a model in 4-bit for inference with multiple GPUs, you can control how much GPU RAM you want to allocate to each GPU. For example, to distribute 600MB of memory to the first GPU and 1GB of memory to the second GPU: ```py max_memory_mapping = {0: "600MB", 1: "1GB"} @@ -301,44 +223,16 @@ model_4bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_4bit=True, max_memory=max_memory_mapping ) ``` -In this example, the first GPU will use 600MB of memory and the second 1GB. -### Advanced usage - -For more advanced usage of this method, please have a look at the [quantization](main_classes/quantization) documentation page. - -## `bitsandbytes` integration for Int8 mixed-precision matrix decomposition +### 8-bit -Note that this feature can also be used in a multi GPU setup. +If you're curious and interested in learning more about the concepts underlying 8-bit quantization, read the [Gentle Introduction to 8-bit Matrix Multiplication for transformers at scale using Hugging Face Transformers, Accelerate and bitsandbytes](https://huggingface.co/blog/hf-bitsandbytes-integration) blog post. -From the paper [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339), we support Hugging Face integration for all models in the Hub with a few lines of code. -The method reduces `nn.Linear` size by 2 for `float16` and `bfloat16` weights and by 4 for `float32` weights, with close to no impact to the quality by operating on the outliers in half-precision. - -![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) - -Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) a systematic feature outlier stream matrix multiplied in fp16 (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models. -For more details regarding the method, check out the [paper](https://arxiv.org/abs/2208.07339) or our [blogpost about the integration](https://huggingface.co/blog/hf-bitsandbytes-integration). - -![MixedInt8.gif](https://cdn-uploads.huggingface.co/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) - -Note, that you would require a GPU to run mixed-8bit models as the kernels have been compiled for GPUs only. 
Make sure that you have enough GPU memory to store the quarter (or half if your model weights are in half precision) of the model before using this feature. -Below are some notes to help you use this module, or follow the demos on [Google colab](#colab-demos). - -### Requirements [[requirements-for-int8-mixedprecision-matrix-decomposition]] - -- If you have `bitsandbytes<0.37.0`, make sure you run on NVIDIA GPUs that support 8-bit tensor cores (Turing, Ampere or newer architectures - e.g. T4, RTX20s RTX30s, A40-A100). For `bitsandbytes>=0.37.0`, all GPUs should be supported. -- Install the correct version of `bitsandbytes` by running: -`pip install bitsandbytes>=0.31.5` -- Install `accelerate` -`pip install accelerate>=0.12.0` - -### Running mixed-Int8 models - single GPU setup - -After installing the required libraries, the way to load your mixed 8-bit model is as follows: +To load a model in 8-bit for inference, use the `load_in_8bit` parameter. The `device_map` parameter is optional, but we recommend setting it to `"auto"` to allow 🤗 Accelerate to automatically and efficiently allocate the model given the available resources in the environment: ```py from transformers import AutoModelForCausalLM @@ -347,12 +241,7 @@ model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) ``` -For text generation, we recommend: - -* using the model's `generate()` method instead of the `pipeline()` function. Although inference is possible with the `pipeline()` function, it is not optimized for mixed-8bit models, and will be slower than using the `generate()` method. Moreover, some sampling strategies are like nucleaus sampling are not supported by the `pipeline()` function for mixed-8bit models. -* placing all inputs on the same device as the model. - -Here is a simple example: +If you're loading a model in 8-bit for text generation, you should use the [`~transformers.GenerationMixin.generate`] method instead of the [`Pipeline`] function which is not optimized for 8-bit models and will be slower. Some sampling strategies, like nucleus sampling, are also not supported by the [`Pipeline`] for 8-bit models. You should also place all inputs on the same device as the model: ```py from transformers import AutoModelForCausalLM, AutoTokenizer @@ -367,15 +256,7 @@ generated_ids = model.generate(**inputs) outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ``` - -### Running mixed-int8 models - multi GPU setup - -The way to load your mixed 8-bit model in multiple GPUs is as follows (same command as single GPU setup): -```py -model_name = "bigscience/bloom-2b5" -model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) -``` -But you can control the GPU RAM you want to allocate on each GPU using `accelerate`. Use the `max_memory` argument as follows: +To load a model in 4-bit for inference with multiple GPUs, you can control how much GPU RAM you want to allocate to each GPU. For example, to distribute 1GB of memory to the first GPU and 2GB of memory to the second GPU: ```py max_memory_mapping = {0: "1GB", 1: "2GB"} @@ -384,27 +265,56 @@ model_8bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping ) ``` -In this example, the first GPU will use 1GB of memory and the second 2GB. -### Colab demos + -With this method you can infer on models that were not possible to infer on a Google Colab before. 
-Check out the demo for running T5-11b (42GB in fp32)! Using 8-bit quantization on Google Colab: +Feel free to try running a 11 billion parameter [T5 model](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) or the 3 billion parameter [BLOOM model](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) for inference on Google Colab's free tier GPUs! -[![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) + -Or this demo for BLOOM-3B: +## 🤗 Optimum -[![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) + + +Learn more details about using ORT with 🤗 Optimum in the [Accelerated inference on NVIDIA GPUs](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#accelerated-inference-on-nvidia-gpus) guide. This section only provides a brief and simple example. + + + +ONNX Runtime (ORT) is a model accelerator that supports accelerated inference on Nvidia GPUs. ORT uses optimization techniques like fusing common operations into a single node and constant folding to reduce the number of computations performed and speedup inference. ORT also places the most computationally intensive operations on the GPU and the rest on the CPU to intelligently distribute the workload between the two devices. -## Advanced usage: mixing FP4 (or Int8) and BetterTransformer +ORT is supported by 🤗 Optimum which can be used in 🤗 Transformers. You'll need to use an [`~optimum.onnxruntime.ORTModel`] for the task you're solving, and specify the `provider` parameter which can be set to either [`CUDAExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#cudaexecutionprovider) or [`TensorrtExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#tensorrtexecutionprovider). If you want to load a model that was not yet exported to ONNX, you can set `export=True` to convert your model on-the-fly to the ONNX format : + +```py +from optimum.onnxruntime import ORTModelForSequenceClassification + +ort_model = ORTModelForSequenceClassification.from_pretrained( + "distilbert-base-uncased-finetuned-sst-2-english", + export=True, + provider="CUDAExecutionProvider", +) +``` -You can combine the different methods described above to get the best performance for your model. For example, you can use BetterTransformer with FP4 mixed-precision inference + flash attention: +Now you're free to use the model for inference: + +```py +from optimum.pipelines import pipeline +from transformers import AutoTokenizer + +tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english") + +pipeline = pipeline(task="text-classification", model=ort_model, tokenizer=tokenizer, device="cuda:0") +result = pipeline("Both the music and visual were astounding, not to mention the actors performance.") +``` + +## Combine optimizations + +It is often possible to combine several of the optimization techniques described above to get the best inference performance possible for your model. 
For example, you can load a model in 4-bit, and then enable BetterTransformer with FlashAttention: ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig +# load model in 4-bit quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16 @@ -413,9 +323,13 @@ quantization_config = BitsAndBytesConfig( tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config) +# enable BetterTransformer +model = model.to_bettertransformer() + input_text = "Hello my dog is cute and" inputs = tokenizer(input_text, return_tensors="pt").to("cuda") +# enable FlashAttention with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): outputs = model.generate(**inputs) diff --git a/docs/source/en/perf_infer_special.md b/docs/source/en/perf_infer_special.md deleted file mode 100644 index e5744754b88e..000000000000 --- a/docs/source/en/perf_infer_special.md +++ /dev/null @@ -1,18 +0,0 @@ - - -# Inference on Specialized Hardware - -This document will be completed soon with information on how to infer on specialized hardware. In the meantime you can check out [the guide for inference on CPUs](perf_infer_cpu). \ No newline at end of file diff --git a/docs/source/en/performance.md b/docs/source/en/performance.md index a1661a6ba5a8..ccd78d326d52 100644 --- a/docs/source/en/performance.md +++ b/docs/source/en/performance.md @@ -53,7 +53,7 @@ sections we go through the steps to run inference on CPU and single/multi-GPU se * [Inference on a single CPU](perf_infer_cpu) * [Inference on a single GPU](perf_infer_gpu_one) -* [Multi-GPU inference](perf_infer_gpu_many) +* [Multi-GPU inference](perf_infer_gpu_one) * [XLA Integration for TensorFlow Models](tf_xla) diff --git a/src/transformers/utils/logging.py b/src/transformers/utils/logging.py index d732e5bd37a9..276fa6e8f855 100644 --- a/src/transformers/utils/logging.py +++ b/src/transformers/utils/logging.py @@ -30,9 +30,7 @@ WARN, # NOQA WARNING, # NOQA ) -from logging import ( - captureWarnings as _captureWarnings, -) +from logging import captureWarnings as _captureWarnings from typing import Optional import huggingface_hub.utils as hf_hub_utils diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index fe25c2e59a23..31cda5fd76c6 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -272,9 +272,7 @@ docs/source/en/pad_truncation.md docs/source/en/peft.md docs/source/en/perf_hardware.md docs/source/en/perf_infer_cpu.md -docs/source/en/perf_infer_gpu_many.md docs/source/en/perf_infer_gpu_one.md -docs/source/en/perf_infer_special.md docs/source/en/perf_torch_compile.md docs/source/en/perf_train_cpu.md docs/source/en/perf_train_cpu_many.md From 50378cbf6c1fd8717a74b36c352f57f9a73e7282 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Wed, 1 Nov 2023 01:12:14 +0800 Subject: [PATCH 036/268] device agnostic models testing (#27146) * device agnostic models testing * add decorator `require_torch_fp16` * make style * apply review suggestion * Oops, the fp16 decorator was misused --- tests/generation/test_utils.py | 10 ++++++++-- tests/models/bark/test_modeling_bark.py | 20 ++++++++++++------- tests/models/bart/test_modeling_bart.py | 17 +++++++++++----- tests/models/bert/test_modeling_bert.py | 4 ++-- .../test_modeling_bigbird_pegasus.py | 13 +++++++++--- .../blenderbot/test_modeling_blenderbot.py | 16 +++++++++++---- 
.../test_modeling_blenderbot_small.py | 11 +++++++--- tests/models/blip/test_modeling_blip.py | 12 +++++++++-- tests/models/blip_2/test_modeling_blip_2.py | 16 ++++++++++----- .../models/convbert/test_modeling_convbert.py | 4 ++-- tests/models/ctrl/test_modeling_ctrl.py | 6 +++--- .../test_modeling_deformable_detr.py | 8 ++++---- tests/models/deit/test_modeling_deit.py | 6 ++++-- .../distilbert/test_modeling_distilbert.py | 4 ++-- tests/models/ernie/test_modeling_ernie.py | 4 ++-- .../models/flaubert/test_modeling_flaubert.py | 4 ++-- tests/models/fsmt/test_modeling_fsmt.py | 16 ++++++++++----- tests/models/fuyu/test_modeling_fuyu.py | 5 ++--- tests/models/gpt2/test_modeling_gpt2.py | 6 +++--- tests/models/gptj/test_modeling_gptj.py | 3 ++- tests/models/jukebox/test_modeling_jukebox.py | 13 +++++++++--- tests/models/led/test_modeling_led.py | 13 +++++++++--- tests/models/llama/test_modeling_llama.py | 11 ++++++++-- tests/models/m2m_100/test_modeling_m2m_100.py | 13 +++++++++--- tests/models/marian/test_modeling_marian.py | 17 +++++++++++----- .../mask2former/test_modeling_mask2former.py | 6 ++++-- .../maskformer/test_modeling_maskformer.py | 6 ++++-- tests/models/mbart/test_modeling_mbart.py | 13 +++++++++--- tests/models/mega/test_modeling_mega.py | 12 ++++++++--- tests/models/mistral/test_modeling_mistral.py | 13 +++++++++--- .../models/musicgen/test_modeling_musicgen.py | 12 ++++++++--- tests/models/mvp/test_modeling_mvp.py | 17 +++++++++++----- .../models/nllb_moe/test_modeling_nllb_moe.py | 5 +++-- .../oneformer/test_modeling_oneformer.py | 6 ++++-- tests/models/opt/test_modeling_opt.py | 5 +++-- tests/models/owlv2/test_modeling_owlv2.py | 12 +++++++++-- tests/models/owlvit/test_modeling_owlvit.py | 12 +++++++++-- tests/models/pegasus/test_modeling_pegasus.py | 17 ++++++++++------ .../pegasus_x/test_modeling_pegasus_x.py | 13 +++++++++--- .../persimmon/test_modeling_persimmon.py | 16 +++++++++++---- tests/models/plbart/test_modeling_plbart.py | 13 +++++++++--- tests/models/pvt/test_modeling_pvt.py | 6 ++++-- tests/models/sam/test_modeling_sam.py | 8 +++----- .../test_modeling_speech_to_text.py | 7 ++++--- .../test_modeling_switch_transformers.py | 12 +++++++++-- tests/models/vit/test_modeling_vit.py | 6 ++++-- .../models/wav2vec2/test_modeling_wav2vec2.py | 3 ++- .../test_modeling_wav2vec2_conformer.py | 15 +++++++++++--- tests/models/whisper/test_modeling_whisper.py | 15 ++++++++++---- tests/models/xglm/test_modeling_xglm.py | 11 ++++++++-- tests/test_modeling_utils.py | 10 +++++----- 51 files changed, 369 insertions(+), 154 deletions(-) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 86a3d5efd90b..42b67be91a1c 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -22,7 +22,13 @@ import numpy as np from transformers import is_torch_available, pipeline -from transformers.testing_utils import require_accelerate, require_torch, require_torch_multi_gpu, slow, torch_device +from transformers.testing_utils import ( + require_accelerate, + require_torch, + require_torch_multi_accelerator, + slow, + torch_device, +) from ..test_modeling_common import floats_tensor, ids_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin @@ -1019,7 +1025,7 @@ def test_beam_search_generate_dict_outputs_use_cache(self): ) @require_accelerate - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_model_parallel_beam_search(self): for model_class in self.all_generative_model_classes: if 
model_class._no_split_modules is None: diff --git a/tests/models/bark/test_modeling_bark.py b/tests/models/bark/test_modeling_bark.py index d80ee24a1610..4186a72628e3 100644 --- a/tests/models/bark/test_modeling_bark.py +++ b/tests/models/bark/test_modeling_bark.py @@ -32,7 +32,13 @@ BarkFineGenerationConfig, BarkSemanticGenerationConfig, ) -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_fp16, + require_torch_gpu, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -570,13 +576,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -636,13 +642,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -700,14 +706,14 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] # take first codebook channel model = self.all_model_classes[0](config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() # toy generation_configs semantic_generation_config = BarkSemanticGenerationConfig(semantic_vocab_size=0) diff --git a/tests/models/bart/test_modeling_bart.py b/tests/models/bart/test_modeling_bart.py index d91ecf4cf5d4..5e79de87c4c0 100644 --- a/tests/models/bart/test_modeling_bart.py +++ b/tests/models/bart/test_modeling_bart.py @@ -22,7 +22,14 @@ import timeout_decorator # noqa from transformers import BartConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -383,12 +390,12 @@ def test_tokenization(self): bart_toks = tokenizer.encode(ex, return_tensors="pt").squeeze() assert_tensors_close(desired_result.long(), bart_toks, prefix=ex) + @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = BartForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, 
attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -497,13 +504,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BartForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/bert/test_modeling_bert.py b/tests/models/bert/test_modeling_bert.py index 9aec91367d8d..2601c92cfb76 100644 --- a/tests/models/bert/test_modeling_bert.py +++ b/tests/models/bert/test_modeling_bert.py @@ -18,7 +18,7 @@ from transformers import BertConfig, is_torch_available from transformers.models.auto import get_values -from transformers.testing_utils import CaptureLogger, require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import CaptureLogger, require_torch, require_torch_accelerator, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -601,7 +601,7 @@ def test_model_from_pretrained(self): self.assertIsNotNone(model) @slow - @require_torch_gpu + @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: diff --git a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py index aedbbb46341e..90b71a7b8292 100644 --- a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py +++ b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py @@ -20,7 +20,14 @@ import unittest from transformers import BigBirdPegasusConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -376,13 +383,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_dict.pop("decoder_attention_mask") input_dict.pop("decoder_input_ids") model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(**input_dict) model.generate(**input_dict, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/blenderbot/test_modeling_blenderbot.py b/tests/models/blenderbot/test_modeling_blenderbot.py index f3416396cea8..da7d8cc12480 100644 --- a/tests/models/blenderbot/test_modeling_blenderbot.py +++ b/tests/models/blenderbot/test_modeling_blenderbot.py @@ -18,7 +18,15 @@ import unittest from transformers import BlenderbotConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from 
transformers.testing_utils import ( + backend_empty_cache, + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -269,13 +277,13 @@ def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BlenderbotForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -315,7 +323,7 @@ def test_generation_from_short_input_same_as_parlai_3B(self): FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} - torch.cuda.empty_cache() + backend_empty_cache(torch_device) model = BlenderbotForConditionalGeneration.from_pretrained(self.ckpt).half().to(torch_device) src_text = ["Sam"] diff --git a/tests/models/blenderbot_small/test_modeling_blenderbot_small.py b/tests/models/blenderbot_small/test_modeling_blenderbot_small.py index 8c622b802810..7bb45bdabd87 100644 --- a/tests/models/blenderbot_small/test_modeling_blenderbot_small.py +++ b/tests/models/blenderbot_small/test_modeling_blenderbot_small.py @@ -18,7 +18,12 @@ import unittest from transformers import BlenderbotSmallConfig, is_torch_available -from transformers.testing_utils import require_torch, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -269,13 +274,13 @@ def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BlenderbotSmallForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/blip/test_modeling_blip.py b/tests/models/blip/test_modeling_blip.py index e5a5652f6048..4792757f9118 100644 --- a/tests/models/blip/test_modeling_blip.py +++ b/tests/models/blip/test_modeling_blip.py @@ -24,7 +24,14 @@ import requests from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig -from transformers.testing_utils import require_torch, require_torch_gpu, require_vision, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_accelerator, + require_torch_fp16, + require_vision, + slow, + torch_device, +) from transformers.utils import is_torch_available, is_vision_available from 
...test_configuration_common import ConfigTester @@ -1280,7 +1287,8 @@ def test_inference_image_captioning(self): [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_image_captioning_fp16(self): model = BlipForConditionalGeneration.from_pretrained( "Salesforce/blip-image-captioning-base", torch_dtype=torch.float16 diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index 9138061ee1bf..dd87961372d2 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -23,7 +23,13 @@ import requests from transformers import CONFIG_MAPPING, Blip2Config, Blip2QFormerConfig, Blip2VisionConfig -from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_multi_accelerator, + require_vision, + slow, + torch_device, +) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester @@ -934,8 +940,8 @@ def test_inference_t5_batched_beam_search(self): self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1]) self.assertEqual(predictions[1].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1]) - @require_torch_multi_gpu - def test_inference_opt_multi_gpu(self): + @require_torch_multi_accelerator + def test_inference_opt_multi_accelerator(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16, device_map="balanced" @@ -966,8 +972,8 @@ def test_inference_opt_multi_gpu(self): ) self.assertEqual(generated_text, "it's not a city, it's a beach") - @require_torch_multi_gpu - def test_inference_t5_multi_gpu(self): + @require_torch_multi_accelerator + def test_inference_t5_multi_accelerator(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") device_map = device_map = { "query_tokens": 0, diff --git a/tests/models/convbert/test_modeling_convbert.py b/tests/models/convbert/test_modeling_convbert.py index 754967ce0039..281a8e477b0b 100644 --- a/tests/models/convbert/test_modeling_convbert.py +++ b/tests/models/convbert/test_modeling_convbert.py @@ -19,7 +19,7 @@ from transformers import ConvBertConfig, is_torch_available from transformers.models.auto import get_values -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask @@ -427,7 +427,7 @@ def test_attention_outputs(self): ) @slow - @require_torch_gpu + @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: diff --git a/tests/models/ctrl/test_modeling_ctrl.py b/tests/models/ctrl/test_modeling_ctrl.py index 65d3cbebc4f1..13b35926117d 100644 --- a/tests/models/ctrl/test_modeling_ctrl.py +++ b/tests/models/ctrl/test_modeling_ctrl.py @@ -17,7 +17,7 @@ import unittest from transformers import CTRLConfig, is_torch_available -from transformers.testing_utils import 
require_torch, slow, torch_device +from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -230,7 +230,7 @@ def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_config(self): self.config_tester.run_common_tests() @@ -260,7 +260,7 @@ def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) @slow def test_lm_generate_ctrl(self): diff --git a/tests/models/deformable_detr/test_modeling_deformable_detr.py b/tests/models/deformable_detr/test_modeling_deformable_detr.py index b44564f69193..8cfe6ca451d9 100644 --- a/tests/models/deformable_detr/test_modeling_deformable_detr.py +++ b/tests/models/deformable_detr/test_modeling_deformable_detr.py @@ -25,7 +25,7 @@ from transformers.testing_utils import ( require_timm, require_torch, - require_torch_gpu, + require_torch_accelerator, require_vision, slow, torch_device, @@ -648,7 +648,7 @@ def test_inference_object_detection_head_with_box_refine_two_stage(self): self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) - @require_torch_gpu + @require_torch_accelerator def test_inference_object_detection_head_equivalence_cpu_gpu(self): image_processor = self.default_image_processor image = prepare_img() @@ -663,10 +663,10 @@ def test_inference_object_detection_head_equivalence_cpu_gpu(self): cpu_outputs = model(pixel_values, pixel_mask) # 2. run model on GPU - model.to("cuda") + model.to(torch_device) with torch.no_grad(): - gpu_outputs = model(pixel_values.to("cuda"), pixel_mask.to("cuda")) + gpu_outputs = model(pixel_values.to(torch_device), pixel_mask.to(torch_device)) # 3. assert equivalence for key in cpu_outputs.keys(): diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py index 7b7de3f320b3..4a9945a731fd 100644 --- a/tests/models/deit/test_modeling_deit.py +++ b/tests/models/deit/test_modeling_deit.py @@ -24,7 +24,8 @@ from transformers.testing_utils import ( require_accelerate, require_torch, - require_torch_gpu, + require_torch_accelerator, + require_torch_fp16, require_vision, slow, torch_device, @@ -424,7 +425,8 @@ def test_inference_image_classification_head(self): @slow @require_accelerate - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. 
diff --git a/tests/models/distilbert/test_modeling_distilbert.py b/tests/models/distilbert/test_modeling_distilbert.py index ff56afd0a981..22e976535369 100644 --- a/tests/models/distilbert/test_modeling_distilbert.py +++ b/tests/models/distilbert/test_modeling_distilbert.py @@ -17,7 +17,7 @@ import unittest from transformers import DistilBertConfig, is_torch_available -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask @@ -264,7 +264,7 @@ def test_model_from_pretrained(self): self.assertIsNotNone(model) @slow - @require_torch_gpu + @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: diff --git a/tests/models/ernie/test_modeling_ernie.py b/tests/models/ernie/test_modeling_ernie.py index f0bdec3efb91..6fc557219c85 100644 --- a/tests/models/ernie/test_modeling_ernie.py +++ b/tests/models/ernie/test_modeling_ernie.py @@ -18,7 +18,7 @@ from transformers import ErnieConfig, is_torch_available from transformers.models.auto import get_values -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -574,7 +574,7 @@ def test_model_from_pretrained(self): self.assertIsNotNone(model) @slow - @require_torch_gpu + @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: diff --git a/tests/models/flaubert/test_modeling_flaubert.py b/tests/models/flaubert/test_modeling_flaubert.py index 61806182bb7f..f21695e39c56 100644 --- a/tests/models/flaubert/test_modeling_flaubert.py +++ b/tests/models/flaubert/test_modeling_flaubert.py @@ -17,7 +17,7 @@ import unittest from transformers import FlaubertConfig, is_torch_available -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask @@ -463,7 +463,7 @@ def test_model_from_pretrained(self): self.assertIsNotNone(model) @slow - @require_torch_gpu + @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: diff --git a/tests/models/fsmt/test_modeling_fsmt.py b/tests/models/fsmt/test_modeling_fsmt.py index f533da772783..da73b8d41d99 100644 --- a/tests/models/fsmt/test_modeling_fsmt.py +++ b/tests/models/fsmt/test_modeling_fsmt.py @@ -20,7 +20,14 @@ from parameterized import parameterized from transformers import FSMTConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + 
require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -398,12 +405,12 @@ def test_shift_tokens_right(self): self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) + @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = FSMTForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -538,8 +545,7 @@ def test_translation_direct(self, pair): @slow def test_translation_pipeline(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) - device = 0 if torch_device == "cuda" else -1 - pipeline = TranslationPipeline(model, tokenizer, framework="pt", device=device) + pipeline = TranslationPipeline(model, tokenizer, framework="pt", device=torch_device) output = pipeline([src_text]) self.assertEqual([tgt_text], [x["translation_text"] for x in output]) diff --git a/tests/models/fuyu/test_modeling_fuyu.py b/tests/models/fuyu/test_modeling_fuyu.py index 58d671bd5701..b9c061e7a004 100644 --- a/tests/models/fuyu/test_modeling_fuyu.py +++ b/tests/models/fuyu/test_modeling_fuyu.py @@ -4,7 +4,7 @@ import requests from transformers import AutoTokenizer, FuyuConfig, is_torch_available, is_vision_available -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_modeling_common import ids_tensor, random_attention_mask @@ -257,7 +257,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch -@require_torch_gpu +@require_torch_accelerator @slow class FuyuIntegrationTest(unittest.TestCase): # , ModelTesterMixin) """ @@ -279,7 +279,6 @@ def setUp(self): self.bus_image_pil = Image.open(io.BytesIO(requests.get(self.bus_image_url).content)) @slow - @require_torch_gpu def test_model_8b_chat_greedy_generation_bus_captioning(self): EXPECTED_TEXT_COMPLETION = """A bus parked on the side of a road.|ENDOFTEXT|""" text_prompt_coco_captioning = "Generate a coco-style caption.\n" diff --git a/tests/models/gpt2/test_modeling_gpt2.py b/tests/models/gpt2/test_modeling_gpt2.py index 17b32a22fb1f..4c49c0b746dc 100644 --- a/tests/models/gpt2/test_modeling_gpt2.py +++ b/tests/models/gpt2/test_modeling_gpt2.py @@ -20,7 +20,7 @@ import unittest from transformers import GPT2Config, is_torch_available -from transformers.testing_utils import require_torch, slow, torch_device +from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -505,7 +505,7 @@ def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_config(self): self.config_tester.run_common_tests() @@ -712,7 +712,7 @@ def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def _test_lm_generate_gpt2_helper( self, diff --git 
a/tests/models/gptj/test_modeling_gptj.py b/tests/models/gptj/test_modeling_gptj.py index f0e02700700c..4fc41ba6d310 100644 --- a/tests/models/gptj/test_modeling_gptj.py +++ b/tests/models/gptj/test_modeling_gptj.py @@ -562,7 +562,8 @@ def test_gptj_sample(self): output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True) output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True) - if torch_device == "cuda": + if torch_device != "cpu": + # currently this expect value is only for `cuda` EXPECTED_OUTPUT_STR = ( "Today is a nice day and I've already been enjoying it. I walked to work with my wife" ) diff --git a/tests/models/jukebox/test_modeling_jukebox.py b/tests/models/jukebox/test_modeling_jukebox.py index 8de0696c04b1..ea0ee1397773 100644 --- a/tests/models/jukebox/test_modeling_jukebox.py +++ b/tests/models/jukebox/test_modeling_jukebox.py @@ -16,7 +16,13 @@ from unittest import skip from transformers import is_torch_available -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_accelerator, + require_torch_fp16, + slow, + torch_device, +) from transformers.trainer_utils import set_seed @@ -363,7 +369,7 @@ def test_sampling(self): self.assertIn(zs[2][0].detach().cpu().tolist(), [self.EXPECTED_OUTPUT_0, self.EXPECTED_OUTPUT_0_PT_2]) @slow - @require_torch_gpu + @require_torch_accelerator @skip("Not enough GPU memory on CI runners") def test_slow_sampling(self): model = JukeboxModel.from_pretrained(self.model_id, min_duration=0).eval() @@ -388,7 +394,8 @@ def test_slow_sampling(self): torch.testing.assert_allclose(zs[2][0].cpu(), torch.tensor(self.EXPECTED_GPU_OUTPUTS_0)) @slow - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_fp16_slow_sampling(self): prior_id = "ArthurZ/jukebox_prior_0" model = JukeboxPrior.from_pretrained(prior_id, min_duration=0).eval().half().to(torch_device) diff --git a/tests/models/led/test_modeling_led.py b/tests/models/led/test_modeling_led.py index b6dfc3256b05..120308db90d8 100644 --- a/tests/models/led/test_modeling_led.py +++ b/tests/models/led/test_modeling_led.py @@ -21,7 +21,14 @@ from transformers import LEDConfig, is_torch_available from transformers.models.auto import get_values -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -363,13 +370,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = LEDForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index 2402986900fd..4d6b363e4a75 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -21,7 +21,14 @@ from 
pytest import mark from transformers import LlamaConfig, is_torch_available, set_seed -from transformers.testing_utils import require_flash_attn, require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + require_flash_attn, + require_torch, + require_torch_accelerator, + require_torch_gpu, + slow, + torch_device, +) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -534,7 +541,7 @@ def main(): """, ] - @require_torch_gpu + @require_torch_accelerator @slow def test_model_7b_logits(self): model = LlamaForCausalLM.from_pretrained("codellama/CodeLlama-7b-hf").to(torch_device) diff --git a/tests/models/m2m_100/test_modeling_m2m_100.py b/tests/models/m2m_100/test_modeling_m2m_100.py index d081041978c0..39790917488d 100644 --- a/tests/models/m2m_100/test_modeling_m2m_100.py +++ b/tests/models/m2m_100/test_modeling_m2m_100.py @@ -20,7 +20,14 @@ import unittest from transformers import M2M100Config, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -312,13 +319,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = M2M100ForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/marian/test_modeling_marian.py b/tests/models/marian/test_modeling_marian.py index c590b216cc47..53a67c20459f 100644 --- a/tests/models/marian/test_modeling_marian.py +++ b/tests/models/marian/test_modeling_marian.py @@ -20,7 +20,14 @@ from huggingface_hub.hf_api import list_models from transformers import MarianConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -281,13 +288,13 @@ def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MarianMTModel(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -620,9 +627,9 @@ def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected() @slow + @require_torch 
def test_pipeline(self): - device = 0 if torch_device == "cuda" else -1 - pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=device) + pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=torch_device) output = pipeline(self.src_text) self.assertEqual(self.expected_text, [x["translation_text"] for x in output]) diff --git a/tests/models/mask2former/test_modeling_mask2former.py b/tests/models/mask2former/test_modeling_mask2former.py index c05901a9dccd..b2fc84e7d324 100644 --- a/tests/models/mask2former/test_modeling_mask2former.py +++ b/tests/models/mask2former/test_modeling_mask2former.py @@ -23,7 +23,8 @@ from transformers import Mask2FormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_torch, - require_torch_gpu, + require_torch_accelerator, + require_torch_fp16, require_torch_multi_gpu, require_vision, slow, @@ -427,7 +428,8 @@ def test_inference_universal_segmentation_head(self): ).to(torch_device) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_fp16(self): model = ( Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) diff --git a/tests/models/maskformer/test_modeling_maskformer.py b/tests/models/maskformer/test_modeling_maskformer.py index a2e3ee306a82..fe1cc3423e0f 100644 --- a/tests/models/maskformer/test_modeling_maskformer.py +++ b/tests/models/maskformer/test_modeling_maskformer.py @@ -24,7 +24,8 @@ from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_torch, - require_torch_gpu, + require_torch_accelerator, + require_torch_fp16, require_torch_multi_gpu, require_vision, slow, @@ -516,7 +517,8 @@ def test_inference_instance_segmentation_head_resnet_backbone(self): ).to(torch_device) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_fp16(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff") diff --git a/tests/models/mbart/test_modeling_mbart.py b/tests/models/mbart/test_modeling_mbart.py index deaa8b5dafe6..3cabf7d999aa 100644 --- a/tests/models/mbart/test_modeling_mbart.py +++ b/tests/models/mbart/test_modeling_mbart.py @@ -20,7 +20,14 @@ import unittest from transformers import MBartConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -317,13 +324,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MBartForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, 
early_stopping=False, num_return_sequences=3) diff --git a/tests/models/mega/test_modeling_mega.py b/tests/models/mega/test_modeling_mega.py index 10df7a555e5d..a67ee0d00328 100644 --- a/tests/models/mega/test_modeling_mega.py +++ b/tests/models/mega/test_modeling_mega.py @@ -17,7 +17,13 @@ import unittest from transformers import MegaConfig, is_torch_available -from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device +from transformers.testing_utils import ( + TestCasePlus, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -619,12 +625,12 @@ def test_for_sequence_length_beyond_max_positions(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_sequence_length_beyond_max_positions(*config_and_inputs) + @require_torch_fp16 def test_generate_fp16(self): config, input_ids, _, attention_mask, *_ = self.model_tester.prepare_config_and_inputs_for_decoder() # attention_mask = torch.LongTensor(input_ids.ne(1)).to(torch_device) model = MegaForCausalLM(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index a795ebceef46..4bcb722c144e 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -22,7 +22,14 @@ from pytest import mark from transformers import AutoTokenizer, MistralConfig, is_torch_available -from transformers.testing_utils import require_flash_attn, require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + backend_empty_cache, + require_flash_attn, + require_torch, + require_torch_gpu, + slow, + torch_device, +) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -450,7 +457,7 @@ def test_model_7b_logits(self): torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4) del model - torch.cuda.empty_cache() + backend_empty_cache(torch_device) gc.collect() @slow @@ -467,5 +474,5 @@ def test_model_7b_generation(self): self.assertEqual(EXPECTED_TEXT_COMPLETION, text) del model - torch.cuda.empty_cache() + backend_empty_cache(torch_device) gc.collect() diff --git a/tests/models/musicgen/test_modeling_musicgen.py b/tests/models/musicgen/test_modeling_musicgen.py index 02ab3b538c26..2cd662bfe576 100644 --- a/tests/models/musicgen/test_modeling_musicgen.py +++ b/tests/models/musicgen/test_modeling_musicgen.py @@ -28,7 +28,13 @@ PretrainedConfig, T5Config, ) -from transformers.testing_utils import is_torch_available, require_torch, slow, torch_device +from transformers.testing_utils import ( + is_torch_available, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -1082,13 +1088,13 @@ def test_generate_without_input_ids(self): output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() for model_class in 
self.greedy_sample_model_classes: model = model_class(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() # greedy model.generate(input_dict["input_ids"], attention_mask=input_dict["attention_mask"], max_new_tokens=10) # sampling diff --git a/tests/models/mvp/test_modeling_mvp.py b/tests/models/mvp/test_modeling_mvp.py index 8e6143529a80..ab9dbd95f751 100644 --- a/tests/models/mvp/test_modeling_mvp.py +++ b/tests/models/mvp/test_modeling_mvp.py @@ -22,7 +22,14 @@ import timeout_decorator # noqa from transformers import MvpConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -374,12 +381,12 @@ def test_tokenization(self): mvp_toks = tokenizer.encode(ex, return_tensors="pt").squeeze() assert_tensors_close(desired_result.long(), mvp_toks, prefix=ex) + @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -505,13 +512,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/nllb_moe/test_modeling_nllb_moe.py b/tests/models/nllb_moe/test_modeling_nllb_moe.py index 409db2207e20..0ba66ff6b338 100644 --- a/tests/models/nllb_moe/test_modeling_nllb_moe.py +++ b/tests/models/nllb_moe/test_modeling_nllb_moe.py @@ -24,6 +24,7 @@ require_sentencepiece, require_tokenizers, require_torch, + require_torch_fp16, slow, torch_device, ) @@ -327,13 +328,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = NllbMoeForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/oneformer/test_modeling_oneformer.py b/tests/models/oneformer/test_modeling_oneformer.py index 1fa400a22855..cb00170799f9 100644 --- a/tests/models/oneformer/test_modeling_oneformer.py +++ b/tests/models/oneformer/test_modeling_oneformer.py @@ -24,7 +24,8 @@ from transformers import OneFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_torch, - 
require_torch_gpu, + require_torch_accelerator, + require_torch_fp16, require_torch_multi_gpu, require_vision, slow, @@ -540,7 +541,8 @@ def test_inference_universal_segmentation_head(self): ).to(torch_device) self.assertTrue(torch.allclose(class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_fp16(self): model = ( OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) diff --git a/tests/models/opt/test_modeling_opt.py b/tests/models/opt/test_modeling_opt.py index 18c0c9a7efee..9845fcd4695f 100644 --- a/tests/models/opt/test_modeling_opt.py +++ b/tests/models/opt/test_modeling_opt.py @@ -22,7 +22,7 @@ import timeout_decorator # noqa from transformers import OPTConfig, is_torch_available -from transformers.testing_utils import require_torch, require_torch_fp16, require_torch_gpu, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -514,7 +514,8 @@ def test_generation_post_attn_layer_norm(self): self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_batched_nan_fp16(self): # a bug manifested starting at models facebook/opt-1.3 and larger when running batched generations, # therefore not using a tiny model, but the smallest model the problem was seen with which is opt-1.3b. diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py index a45df63c39d3..d9913596ac5c 100644 --- a/tests/models/owlv2/test_modeling_owlv2.py +++ b/tests/models/owlv2/test_modeling_owlv2.py @@ -24,7 +24,14 @@ import requests from transformers import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig -from transformers.testing_utils import require_torch, require_torch_gpu, require_vision, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_accelerator, + require_torch_fp16, + require_vision, + slow, + torch_device, +) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester @@ -869,7 +876,8 @@ def test_inference_one_shot_object_detection(self): self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_one_shot_object_detection_fp16(self): model_name = "google/owlv2-base-patch16" model = Owlv2ForObjectDetection.from_pretrained(model_name, torch_dtype=torch.float16).to(torch_device) diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index ae419c69f0fe..8edbf411f7b9 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -24,7 +24,14 @@ import requests from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig -from transformers.testing_utils import require_torch, require_torch_gpu, require_vision, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_accelerator, + require_torch_fp16, + require_vision, + slow, + torch_device, +) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester @@ -860,7 +867,8 @@ def 
test_inference_one_shot_object_detection(self): self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_one_shot_object_detection_fp16(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name, torch_dtype=torch.float16).to(torch_device) diff --git a/tests/models/pegasus/test_modeling_pegasus.py b/tests/models/pegasus/test_modeling_pegasus.py index bae10d18ff8c..fbf79650f45e 100644 --- a/tests/models/pegasus/test_modeling_pegasus.py +++ b/tests/models/pegasus/test_modeling_pegasus.py @@ -18,7 +18,14 @@ import unittest from transformers import PegasusConfig, is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -280,13 +287,13 @@ def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = PegasusForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @@ -352,6 +359,7 @@ def model(self): return AutoModelForSeq2SeqLM.from_pretrained(self.checkpoint_name).to(torch_device) @slow + @require_torch_fp16 def test_pegasus_xsum_summary(self): assert self.tokenizer.model_max_length == 512 inputs = self.tokenizer(self.src_text, return_tensors="pt", truncation=True, max_length=512, padding=True).to( @@ -362,9 +370,6 @@ def test_pegasus_xsum_summary(self): decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True) assert self.tgt_text == decoded - if "cuda" not in torch_device: - return - # Demonstrate fp16 issue, Contributions welcome! 
self.model.half() translated_tokens_fp16 = self.model.generate(**inputs, max_length=10) decoded_fp16 = self.tokenizer.batch_decode(translated_tokens_fp16, skip_special_tokens=True) diff --git a/tests/models/pegasus_x/test_modeling_pegasus_x.py b/tests/models/pegasus_x/test_modeling_pegasus_x.py index 22d7b0c8634a..106a8b39e84f 100644 --- a/tests/models/pegasus_x/test_modeling_pegasus_x.py +++ b/tests/models/pegasus_x/test_modeling_pegasus_x.py @@ -21,7 +21,14 @@ import unittest from transformers import is_torch_available -from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -274,13 +281,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = PegasusXForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/persimmon/test_modeling_persimmon.py b/tests/models/persimmon/test_modeling_persimmon.py index aa092f3870d2..0ffb999145be 100644 --- a/tests/models/persimmon/test_modeling_persimmon.py +++ b/tests/models/persimmon/test_modeling_persimmon.py @@ -21,7 +21,14 @@ from parameterized import parameterized from transformers import PersimmonConfig, is_torch_available, set_seed -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + backend_empty_cache, + require_torch, + require_torch_accelerator, + require_torch_fp16, + slow, + torch_device, +) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -413,12 +420,13 @@ def test_model_8b_chat_logits(self): # fmt: on torch.testing.assert_close(out.cpu()[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5) - torch.cuda.empty_cache() + backend_empty_cache(torch_device) del model gc.collect() @slow - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_model_8b_chat_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """human: Simply put, the theory of relativity states that?\n\nadept: The theory of relativity states that the laws of physics are the same for all observers, regardless of their relative motion.""" prompt = "human: Simply put, the theory of relativity states that?\n\nadept:" @@ -433,6 +441,6 @@ def test_model_8b_chat_greedy_generation(self): text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) - torch.cuda.empty_cache() + backend_empty_cache(torch_device) del model gc.collect() diff --git a/tests/models/plbart/test_modeling_plbart.py b/tests/models/plbart/test_modeling_plbart.py index 4fbbb6835bfa..0d5274b01819 100644 --- a/tests/models/plbart/test_modeling_plbart.py +++ b/tests/models/plbart/test_modeling_plbart.py @@ -20,7 +20,14 @@ import unittest from transformers import PLBartConfig, is_torch_available -from 
transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.testing_utils import ( + require_sentencepiece, + require_tokenizers, + require_torch, + require_torch_fp16, + slow, + torch_device, +) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin @@ -304,13 +311,13 @@ def test_inputs_embeds(self): with torch.no_grad(): model(**inputs)[0] + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = PLBartForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - model.half() + model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/pvt/test_modeling_pvt.py b/tests/models/pvt/test_modeling_pvt.py index eb1370d0bc29..04ce21530531 100644 --- a/tests/models/pvt/test_modeling_pvt.py +++ b/tests/models/pvt/test_modeling_pvt.py @@ -23,7 +23,8 @@ from transformers.testing_utils import ( require_accelerate, require_torch, - require_torch_gpu, + require_torch_accelerator, + require_torch_fp16, slow, torch_device, ) @@ -318,7 +319,8 @@ def test_inference_model(self): @slow @require_accelerate - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. diff --git a/tests/models/sam/test_modeling_sam.py b/tests/models/sam/test_modeling_sam.py index 3d5ec22c035e..eb8f265c2946 100644 --- a/tests/models/sam/test_modeling_sam.py +++ b/tests/models/sam/test_modeling_sam.py @@ -22,7 +22,7 @@ import requests from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig, pipeline -from transformers.testing_utils import require_torch, slow, torch_device +from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester @@ -478,7 +478,7 @@ def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_inference_mask_generation_no_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") @@ -772,9 +772,7 @@ def test_inference_mask_generation_three_boxes_point_batch(self): torch.testing.assert_allclose(iou_scores, EXPECTED_IOU, atol=1e-4, rtol=1e-4) def test_dummy_pipeline_generation(self): - generator = pipeline( - "mask-generation", model="facebook/sam-vit-base", device=0 if torch.cuda.is_available() else -1 - ) + generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=torch_device) raw_image = prepare_image() _ = generator(raw_image, points_per_batch=64) diff --git a/tests/models/speech_to_text/test_modeling_speech_to_text.py b/tests/models/speech_to_text/test_modeling_speech_to_text.py index 061e17c3e7f5..2c4fc268e8d9 100644 --- a/tests/models/speech_to_text/test_modeling_speech_to_text.py +++ b/tests/models/speech_to_text/test_modeling_speech_to_text.py @@ -26,6 +26,7 @@ require_sentencepiece, require_tokenizers, require_torch, + require_torch_fp16, require_torchaudio, slow, torch_device, @@ -336,14 +337,14 @@ 
def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_features = input_dict["input_features"] attention_mask = input_dict["attention_mask"] model = Speech2TextForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - input_features = input_features.half() - model.half() + input_features = input_features.half() + model.half() model.generate(input_features, attention_mask=attention_mask) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/switch_transformers/test_modeling_switch_transformers.py b/tests/models/switch_transformers/test_modeling_switch_transformers.py index 54e17b91b7b2..7c2fb88acda4 100644 --- a/tests/models/switch_transformers/test_modeling_switch_transformers.py +++ b/tests/models/switch_transformers/test_modeling_switch_transformers.py @@ -19,7 +19,14 @@ import unittest from transformers import SwitchTransformersConfig, is_torch_available -from transformers.testing_utils import require_tokenizers, require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + require_tokenizers, + require_torch, + require_torch_accelerator, + require_torch_bf16, + slow, + torch_device, +) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -1017,7 +1024,8 @@ def test_max_routing_capacity(self): @require_torch @require_tokenizers class SwitchTransformerModelIntegrationTests(unittest.TestCase): - @require_torch_gpu + @require_torch_accelerator + @require_torch_bf16 def test_small_logits(self): r""" Logits testing to check implementation consistency between `t5x` implementation diff --git a/tests/models/vit/test_modeling_vit.py b/tests/models/vit/test_modeling_vit.py index 82ba910ec869..d1e887183329 100644 --- a/tests/models/vit/test_modeling_vit.py +++ b/tests/models/vit/test_modeling_vit.py @@ -22,7 +22,8 @@ from transformers.testing_utils import ( require_accelerate, require_torch, - require_torch_gpu, + require_torch_accelerator, + require_torch_fp16, require_vision, slow, torch_device, @@ -316,7 +317,8 @@ def test_inference_interpolate_pos_encoding(self): @slow @require_accelerate - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. 
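
For the deit, pvt and vit files the hunks above only touch the decorators; the body of the fp16 inference test sits outside the shown context. Purely as an illustration of the pattern those decorators guard, and not the repository's actual test code, a half-precision inference smoke test generally follows the sketch below (assumptions: the model class exposes `from_pretrained` with a `torch_dtype` argument and returns an output with a `logits` tensor).

import torch


def run_fp16_smoke_test(model_class, checkpoint, inputs, device):
    # Load the weights directly in float16 and move them onto the accelerator under test.
    model = model_class.from_pretrained(checkpoint, torch_dtype=torch.float16).to(device)
    model.eval()
    with torch.no_grad():
        # Cast floating-point inputs to half so their dtype matches the fp16 weights.
        outputs = model(
            **{
                name: tensor.to(device=device, dtype=torch.float16)
                if tensor.is_floating_point()
                else tensor.to(device)
                for name, tensor in inputs.items()
            }
        )
    # The point is robustness, not numerical parity with fp32: a forward pass that
    # completes without NaNs or Infs counts as a pass.
    assert torch.isfinite(outputs.logits).all()
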
diff --git a/tests/models/wav2vec2/test_modeling_wav2vec2.py b/tests/models/wav2vec2/test_modeling_wav2vec2.py index 096246fe62b1..cb943520db68 100644 --- a/tests/models/wav2vec2/test_modeling_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_wav2vec2.py @@ -29,6 +29,7 @@ from transformers import Wav2Vec2Config, is_torch_available from transformers.testing_utils import ( CaptureLogger, + backend_empty_cache, is_pt_flax_cross_test, is_pyctcdecode_available, is_torchaudio_available, @@ -1455,7 +1456,7 @@ def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") diff --git a/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py b/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py index 33d37a073be9..5c7bfd0a9bee 100644 --- a/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py +++ b/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py @@ -21,7 +21,14 @@ from datasets import load_dataset from transformers import Wav2Vec2ConformerConfig, is_torch_available -from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + is_pt_flax_cross_test, + require_torch, + require_torch_accelerator, + require_torch_fp16, + slow, + torch_device, +) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( @@ -468,12 +475,14 @@ def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_model_float16_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model_float16(*config_and_inputs) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_model_float16_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model_float16(*config_and_inputs) diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 6bb73468ad07..9bb835360887 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -24,7 +24,14 @@ import transformers from transformers import WhisperConfig -from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_torchaudio, slow, torch_device +from transformers.testing_utils import ( + is_pt_flax_cross_test, + require_torch, + require_torch_fp16, + require_torchaudio, + slow, + torch_device, +) from transformers.utils import cached_property, is_flax_available, is_torch_available from transformers.utils.import_utils import is_datasets_available @@ -429,14 +436,14 @@ def test_training_gradient_checkpointing_use_reentrant_false(self): def test_generate_with_head_masking(self): pass + @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = 
WhisperForConditionalGeneration(config).eval().to(torch_device) - if torch_device == "cuda": - input_features = input_features.half() - model.half() + input_features = input_features.half() + model.half() model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) diff --git a/tests/models/xglm/test_modeling_xglm.py b/tests/models/xglm/test_modeling_xglm.py index 105ad5c44e99..235d9e19001a 100644 --- a/tests/models/xglm/test_modeling_xglm.py +++ b/tests/models/xglm/test_modeling_xglm.py @@ -19,7 +19,13 @@ import unittest from transformers import XGLMConfig, is_torch_available -from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.testing_utils import ( + require_torch, + require_torch_accelerator, + require_torch_fp16, + slow, + torch_device, +) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -492,7 +498,8 @@ def test_xglm_sample_max_time(self): duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=1.25 * MAX_TIME)) - @require_torch_gpu + @require_torch_accelerator + @require_torch_fp16 def test_batched_nan_fp16(self): model_name = "facebook/xglm-564M" tokenizer = XGLMTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left") diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index ffdb2ae7d0e6..9e824e8efa03 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -44,8 +44,8 @@ require_accelerate, require_safetensors, require_torch, - require_torch_gpu, - require_torch_multi_gpu, + require_torch_accelerator, + require_torch_multi_accelerator, require_usr_bin_time, slow, torch_device, @@ -681,7 +681,7 @@ def test_from_pretrained_low_cpu_mem_usage_measured(self): @require_accelerate @mark.accelerate_tests - @require_torch_multi_gpu + @require_torch_multi_accelerator @slow def test_model_parallelism_gpt2(self): device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1} @@ -699,7 +699,7 @@ def test_model_parallelism_gpt2(self): @require_accelerate @mark.accelerate_tests - @require_torch_gpu + @require_torch_accelerator def test_from_pretrained_disk_offload_task_model(self): model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { @@ -1036,7 +1036,7 @@ def f(input_ids): opt_fn(input_ids) self.assertEqual(compile_counter.frame_count, 0) - @require_torch_gpu + @require_torch_accelerator @slow def test_pretrained_low_mem_new_config(self): # Checking for 1 model(the same one which was described in the issue) . 
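[Editor's note, not part of the patch series] The hunks above consistently replace the CUDA-only `@require_torch_gpu` marker and the `if torch_device == "cuda":` guards with the device-agnostic `@require_torch_accelerator`, `@require_torch_fp16` / `@require_torch_bf16` markers and `backend_empty_cache(torch_device)`. As a rough sketch of that pattern, a standalone test could look like the code below. The test class name, the tiny `Speech2TextConfig` values, and the random input features are invented for illustration; only the decorators and `torch_device` come from `transformers.testing_utils` as imported in the hunks above.

import unittest

import torch

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
from transformers.testing_utils import require_torch_accelerator, require_torch_fp16, torch_device


class Fp16GenerateTest(unittest.TestCase):
    # Hypothetical standalone test illustrating the migration pattern; the real
    # tests build their config and inputs through per-model model testers.
    @require_torch_accelerator  # replaces @require_torch_gpu: any supported accelerator qualifies
    @require_torch_fp16         # replaces the `if torch_device == "cuda":` guard around .half()
    def test_generate_fp16(self):
        config = Speech2TextConfig(
            vocab_size=99,
            d_model=16,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=16,
            decoder_ffn_dim=16,
            input_feat_per_channel=24,
            max_source_positions=32,
            max_target_positions=32,
        )
        model = Speech2TextForConditionalGeneration(config).eval().to(torch_device)
        model.half()  # safe to call unconditionally: the decorators skip the test without fp16 support
        input_features = torch.randn(1, 20, 24, device=torch_device).half()
        model.generate(input_features, max_length=5)


if __name__ == "__main__":
    unittest.main()

Because the skip decorators already guarantee an accelerator with fp16 support, the `.half()` calls no longer need a device check, which is exactly what the diffs above remove.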
From 25e6e9418cc03bde2051f20f3ce0765c268e5943 Mon Sep 17 00:00:00 2001 From: Dong-geon Lee Date: Wed, 1 Nov 2023 03:12:14 +0900 Subject: [PATCH 037/268] Unify warning styles for better readability (#27184) --- .../pytorch/audio-classification/run_audio_classification.py | 2 +- examples/pytorch/contrastive-image-text/run_clip.py | 2 +- .../pytorch/image-classification/run_image_classification.py | 2 +- examples/pytorch/image-pretraining/run_mae.py | 2 +- examples/pytorch/image-pretraining/run_mim.py | 2 +- examples/pytorch/language-modeling/run_clm.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- examples/pytorch/question-answering/run_qa_beam_search.py | 2 +- examples/pytorch/question-answering/run_seq2seq_qa.py | 2 +- .../pytorch/semantic-segmentation/run_semantic_segmentation.py | 2 +- .../pytorch/speech-recognition/run_speech_recognition_ctc.py | 2 +- .../speech-recognition/run_speech_recognition_ctc_adapter.py | 2 +- .../speech-recognition/run_speech_recognition_seq2seq.py | 2 +- examples/pytorch/summarization/run_summarization.py | 2 +- examples/pytorch/text-classification/run_classification.py | 2 +- examples/pytorch/text-classification/run_glue.py | 2 +- examples/pytorch/text-classification/run_xnli.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- examples/pytorch/translation/run_translation.py | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py index d1e76a189e8e..a3c24fd8ad63 100644 --- a/examples/pytorch/audio-classification/run_audio_classification.py +++ b/examples/pytorch/audio-classification/run_audio_classification.py @@ -246,7 +246,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 3224bee9c4f8..76e36964ba09 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -284,7 +284,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 11c316a787eb..65e71a2635c4 100755 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -226,7 +226,7 @@ def main(): # Log on each process the 
small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index 8d9f70457e1b..ade4db6f43ee 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -214,7 +214,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index 3b0365743c5c..06b3590bec38 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -288,7 +288,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 34c1e51a16bb..65cf2330d4b5 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -288,7 +288,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 18a2a410d507..a91b6c577afe 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -288,7 +288,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 
e7d33a86e5f2..169622a4cca7 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -269,7 +269,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index 82ccfc367db3..e5632d1677b0 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -275,7 +275,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 4887e04a80e5..4b00ba80518a 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -277,7 +277,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 124ecfc86480..d410a515885a 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -266,7 +266,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index 7b4a0832292a..c41184ce6323 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -323,7 +323,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value 
== 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index 9622f4942583..d3c033b625e1 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -314,7 +314,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index 083bd18b34fd..cab1945db90c 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -433,7 +433,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py index 998add172774..9a1f37fc888c 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py @@ -429,7 +429,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 7793dbb74a43..7db11d27e7c2 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -325,7 +325,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git 
a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index ad58a14ffc44..582f78b77fd9 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -362,7 +362,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py index c7021ca7d74e..6075a3403045 100755 --- a/examples/pytorch/text-classification/run_classification.py +++ b/examples/pytorch/text-classification/run_classification.py @@ -318,7 +318,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index c6d4501682eb..7d41788c1187 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -266,7 +266,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index a7fbd0962d08..33dea5e25ff8 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -225,7 +225,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index c2f80a50bda9..bc91df51af00 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -267,7 +267,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: 
{training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index a373acbb95cb..6270f737350e 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -311,7 +311,7 @@ def main(): # Log on each process the small summary: logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") From 113ebf80ac9bdb74037239847cd906d7ea986a18 Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Tue, 31 Oct 2023 19:16:49 +0100 Subject: [PATCH 038/268] Safetensors serialization by default (#27064) * Safetensors serialization by default * First pass on the tests * Second pass on the tests * Third pass on the tests * Fix TF weight loading from TF-format safetensors * Specific encoder-decoder fixes for weight crossloading * Add VisionEncoderDecoder fixes for TF too * Change filename test for pt-to-tf * One missing fix for TFVisionEncoderDecoder * Fix the other crossload test * Support for flax + updated tests * Apply suggestions from code review Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> * Sanchit's comments * Sanchit's comments 2 * Nico's comments * Fix tests * cleanup * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: Matt Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../modeling_flax_pytorch_utils.py | 28 +++- src/transformers/modeling_flax_utils.py | 126 ++++++++++++++---- src/transformers/modeling_tf_utils.py | 57 ++++---- src/transformers/modeling_utils.py | 34 +++-- .../modeling_encoder_decoder.py | 4 +- .../modeling_tf_encoder_decoder.py | 22 +-- .../modeling_tf_vision_encoder_decoder.py | 22 +-- .../modeling_vision_encoder_decoder.py | 4 +- src/transformers/pipelines/base.py | 4 +- src/transformers/training_args.py | 4 +- src/transformers/utils/hub.py | 4 +- tests/models/auto/test_modeling_tf_auto.py | 5 + .../test_modeling_tf_encoder_decoder.py | 13 +- ...test_modeling_tf_vision_encoder_decoder.py | 10 +- tests/test_modeling_common.py | 5 +- tests/test_modeling_flax_utils.py | 92 ++++++++++++- tests/test_modeling_tf_utils.py | 41 +++++- tests/test_modeling_utils.py | 88 +++++++++--- tests/trainer/test_trainer.py | 6 +- tests/utils/test_cli.py | 1 - 20 files changed, 433 insertions(+), 137 deletions(-) diff --git a/src/transformers/modeling_flax_pytorch_utils.py b/src/transformers/modeling_flax_pytorch_utils.py index 79d91da49729..5a0f52a995e8 100644 --- a/src/transformers/modeling_flax_pytorch_utils.py +++ b/src/transformers/modeling_flax_pytorch_utils.py @@ -27,9 +27,15 @@ import transformers +from . 
import is_safetensors_available from .utils import logging +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.flax import load_file as safe_load_file + + logger = logging.get_logger(__name__) @@ -56,7 +62,13 @@ def load_pytorch_checkpoint_in_flax_state_dict( pt_path = os.path.abspath(pytorch_checkpoint_path) logger.info(f"Loading PyTorch weights from {pt_path}") - pt_state_dict = torch.load(pt_path, map_location="cpu") + if pt_path.endswith(".safetensors"): + pt_state_dict = {} + with safe_open(pt_path, framework="pt") as f: + for k in f.keys(): + pt_state_dict[k] = f.get_tensor(k) + else: + pt_state_dict = torch.load(pt_path, map_location="cpu") logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.") flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model) @@ -319,11 +331,15 @@ def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path): flax_cls = getattr(transformers, "Flax" + model.__class__.__name__) # load flax weight dict - with open(flax_checkpoint_path, "rb") as state_f: - try: - flax_state_dict = from_bytes(flax_cls, state_f.read()) - except UnpicklingError: - raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ") + if flax_checkpoint_path.endswith(".safetensors"): + flax_state_dict = safe_load_file(flax_checkpoint_path) + flax_state_dict = unflatten_dict(flax_state_dict, sep=".") + else: + with open(flax_checkpoint_path, "rb") as state_f: + try: + flax_state_dict = from_bytes(flax_cls, state_f.read()) + except UnpicklingError: + raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ") return load_flax_weights_in_pytorch_model(model, flax_state_dict) diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index b05fa4d72a6c..9e63cb0cb961 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -39,6 +39,8 @@ from .utils import ( FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, PushToHubMixin, @@ -54,8 +56,14 @@ replace_return_docstrings, ) from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files +from .utils.import_utils import is_safetensors_available +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.flax import load_file as safe_load_file + from safetensors.flax import save_file as safe_save_file + logger = logging.get_logger(__name__) @@ -422,6 +430,31 @@ def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None): ```""" return self._cast_floating_to(params, jnp.float16, mask) + @classmethod + def load_flax_weights(cls, resolved_archive_file): + try: + if resolved_archive_file.endswith(".safetensors"): + state = safe_load_file(resolved_archive_file) + state = unflatten_dict(state, sep=".") + else: + with open(resolved_archive_file, "rb") as state_f: + state = from_bytes(cls, state_f.read()) + except (UnpicklingError, msgpack.exceptions.ExtraData) as e: + try: + with open(resolved_archive_file) as f: + if f.read().startswith("version"): + raise OSError( + "You seem to have cloned a repository without having git-lfs installed. Please" + " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" + " folder you cloned." 
+ ) + else: + raise ValueError from e + except (UnicodeDecodeError, ValueError): + raise EnvironmentError(f"Unable to convert {resolved_archive_file} to Flax deserializable object. ") + + return state + @classmethod def load_flax_sharded_weights(cls, shard_files): """ @@ -688,7 +721,12 @@ def from_pretrained( pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): - if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): + if is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) + ): + # Load from a safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) + elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME) elif from_pt and os.path.isfile( @@ -705,6 +743,13 @@ def from_pretrained( archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME) is_sharded = True # At this stage we don't have a weight file so we will raise an error. + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + is_sharded = True + raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!") elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} " @@ -723,7 +768,13 @@ def from_pretrained( filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: - filename = WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME + if from_pt: + filename = WEIGHTS_NAME + elif is_safetensors_available(): + filename = SAFE_WEIGHTS_NAME + else: + filename = FLAX_WEIGHTS_NAME + try: # Load from URL or cache if already cached cached_file_kwargs = { @@ -741,8 +792,15 @@ def from_pretrained( } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) - # Since we set _raise_exceptions_for_missing_entries=False, we don't get an expection but a None + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: + # Did not find the safetensors file, let's fallback to Flax. + # No support for sharded safetensors yet, so we'll raise an error if that's all we find. + filename = FLAX_WEIGHTS_NAME + resolved_archive_file = cached_file( + pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **cached_file_kwargs + ) if resolved_archive_file is None and filename == FLAX_WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( @@ -751,21 +809,26 @@ def from_pretrained( if resolved_archive_file is not None: is_sharded = True # Maybe the checkpoint is pytorch sharded, we try to grab the pytorch index name in this case. 
- elif resolved_archive_file is None and from_pt: + if resolved_archive_file is None and from_pt: resolved_archive_file = cached_file( pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None: - # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error + # Otherwise, maybe there is a TF or Torch model file. We try those to give a helpful error # message. has_file_kwargs = { "revision": revision, "proxies": proxies, "token": token, } - if has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): + if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs): + is_sharded = True + raise NotImplementedError( + "Support for sharded checkpoints using safetensors is coming soon!" + ) + elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {FLAX_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to" @@ -798,6 +861,7 @@ def from_pretrained( if is_local: logger.info(f"loading weights file {archive_file}") resolved_archive_file = archive_file + filename = resolved_archive_file.split(os.path.sep)[-1] else: logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: @@ -821,31 +885,27 @@ def from_pretrained( _commit_hash=commit_hash, ) + safetensors_from_pt = False + if filename == SAFE_WEIGHTS_NAME: + with safe_open(resolved_archive_file, framework="flax") as f: + safetensors_metadata = f.metadata() + if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax"]: + raise OSError( + f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." + " Make sure you save your model with the `save_pretrained` method." + ) + safetensors_from_pt = safetensors_metadata.get("format") == "pt" + # init random models model = cls(config, *model_args, _do_init=_do_init, **model_kwargs) - if from_pt: + if from_pt or safetensors_from_pt: state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file, is_sharded) else: if is_sharded: state = cls.load_flax_sharded_weights(resolved_archive_file) else: - try: - with open(resolved_archive_file, "rb") as state_f: - state = from_bytes(cls, state_f.read()) - except (UnpicklingError, msgpack.exceptions.ExtraData) as e: - try: - with open(resolved_archive_file) as f: - if f.read().startswith("version"): - raise OSError( - "You seem to have cloned a repository without having git-lfs installed. Please" - " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" - " folder you cloned." - ) - else: - raise ValueError from e - except (UnicodeDecodeError, ValueError): - raise EnvironmentError(f"Unable to convert {archive_file} to Flax deserializable object. ") + state = cls.load_flax_weights(resolved_archive_file) # make sure all arrays are stored as jnp.arrays # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4: # https://github.com/google/flax/issues/1261 @@ -1030,6 +1090,7 @@ def save_pretrained( push_to_hub=False, max_shard_size="10GB", token: Optional[Union[str, bool]] = None, + safe_serialization: bool = False, **kwargs, ): """ @@ -1059,6 +1120,8 @@ def save_pretrained( the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). 
kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + safe_serialization (`bool`, *optional*, defaults to `False`): + Whether to save the model using `safetensors` or through msgpack. """ use_auth_token = kwargs.pop("use_auth_token", None) @@ -1103,24 +1166,31 @@ def save_pretrained( self.generation_config.save_pretrained(save_directory) # save model - output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME) + weights_name = SAFE_WEIGHTS_NAME if safe_serialization else FLAX_WEIGHTS_NAME + output_model_file = os.path.join(save_directory, weights_name) shards, index = flax_shard_checkpoint(params if params is not None else self.params, max_shard_size) # Clean the folder from a previous save for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) + weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "") if ( - filename.startswith(FLAX_WEIGHTS_NAME[:-4]) + filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and filename not in shards.keys() ): os.remove(full_filename) if index is None: - with open(output_model_file, "wb") as f: + if safe_serialization: params = params if params is not None else self.params - model_bytes = to_bytes(params) - f.write(model_bytes) + flat_dict = flatten_dict(params, sep=".") + safe_save_file(flat_dict, output_model_file, metadata={"format": "flax"}) + else: + with open(output_model_file, "wb") as f: + params = params if params is not None else self.params + model_bytes = to_bytes(params) + f.write(model_bytes) else: save_index_file = os.path.join(save_directory, FLAX_WEIGHTS_INDEX_NAME) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index bea3edfa2289..c342b5059c4f 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -626,11 +626,13 @@ def dtype_byte_size(dtype): return bit_size // 8 -def format_weight_name(name, _prefix=None): +def strip_model_name_and_prefix(name, _prefix=None): + if _prefix is not None and name.startswith(_prefix): + name = name[len(_prefix) :] + if name.startswith("/"): + name = name[1:] if "model." 
not in name and len(name.split("/")) > 1: name = "/".join(name.split("/")[1:]) - if _prefix is not None: - name = _prefix + "/" + name return name @@ -986,7 +988,7 @@ def load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismat # Read the safetensors file with safe_open(resolved_archive_file, framework="tf") as safetensors_archive: mismatched_layers = [] - weight_names = [format_weight_name(w.name, _prefix=_prefix) for w in model.weights] + weight_names = [strip_model_name_and_prefix(w.name, _prefix=_prefix) for w in model.weights] loaded_weight_names = list(safetensors_archive.keys()) # Find the missing layers from the high level list of layers missing_layers = list(set(weight_names) - set(loaded_weight_names)) @@ -994,7 +996,7 @@ def load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismat unexpected_layers = list(set(loaded_weight_names) - set(weight_names)) for weight in model.weights: - weight_name = format_weight_name(weight.name, _prefix=_prefix) + weight_name = strip_model_name_and_prefix(weight.name, _prefix=_prefix) if weight_name in loaded_weight_names: weight_value = safetensors_archive.get_tensor(weight_name) # Check if the shape of the current weight and the one from the H5 file are different @@ -1003,7 +1005,7 @@ def load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismat # If the two shapes are not compatible we raise an issue try: weight_value = tf.reshape(weight_value, K.int_shape(weight)) - except ValueError as e: + except (ValueError, tf.errors.InvalidArgumentError) as e: if ignore_mismatched_sizes: mismatched_layers.append((weight_name, weight_value.shape, K.int_shape(weight))) continue @@ -2367,7 +2369,7 @@ def save_pretrained( create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `False`): - Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + Whether to save the model using `safetensors` or the traditional TensorFlow way (that uses `h5`). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). 
@@ -2457,7 +2459,7 @@ def save_pretrained( if index is None: if safe_serialization: - state_dict = {format_weight_name(w.name): w.value() for w in self.weights} + state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in self.weights} safe_save_file(state_dict, output_model_file, metadata={"format": "tf"}) else: self.save_weights(output_model_file) @@ -2718,13 +2720,6 @@ def from_pretrained( ): # Load from a safetensors checkpoint archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) - elif is_safetensors_available() and os.path.isfile( - os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) - ): - # Load from a sharded safetensors checkpoint - archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) - is_sharded = True - raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!") elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) @@ -2732,6 +2727,13 @@ def from_pretrained( # Load from a sharded TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME) is_sharded = True + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) + is_sharded = True + raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!") # At this stage we don't have a weight file so we will raise an error. elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)) or os.path.isfile( os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) @@ -2784,21 +2786,12 @@ def from_pretrained( # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: - # Maybe the checkpoint is sharded, we try to grab the index name in this case. + # Did not find the safetensors file, let's fallback to TF. + # No support for sharded safetensors yet, so we'll raise an error if that's all we find. + filename = TF2_WEIGHTS_NAME resolved_archive_file = cached_file( - pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **cached_file_kwargs + pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs ) - if resolved_archive_file is not None: - is_sharded = True - raise NotImplementedError( - "Support for sharded checkpoints using safetensors is coming soon!" - ) - else: - # This repo has no safetensors file of any kind, we switch to TensorFlow. - filename = TF2_WEIGHTS_NAME - resolved_archive_file = cached_file( - pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs - ) if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. 
resolved_archive_file = cached_file( @@ -2821,7 +2814,12 @@ def from_pretrained( "proxies": proxies, "token": token, } - if has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): + if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs): + is_sharded = True + raise NotImplementedError( + "Support for sharded checkpoints using safetensors is coming soon!" + ) + elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {TF2_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to" @@ -2928,6 +2926,7 @@ def from_pretrained( output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, + tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) # 'by_name' allow us to do transfer learning by skipping/adding layers diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 7d02d53fc350..ccb9073aef12 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -470,10 +470,6 @@ def load_state_dict(checkpoint_file: Union[str, os.PathLike]): f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " "you save your model with the `save_pretrained` method." ) - elif metadata["format"] != "pt": - raise NotImplementedError( - f"Conversion from a {metadata['format']} safetensors archive to PyTorch is not implemented yet." - ) return safe_load_file(checkpoint_file) try: if ( @@ -1934,7 +1930,7 @@ def save_pretrained( save_function: Callable = torch.save, push_to_hub: bool = False, max_shard_size: Union[int, str] = "5GB", - safe_serialization: bool = False, + safe_serialization: bool = True, variant: Optional[str] = None, token: Optional[Union[str, bool]] = None, save_peft_format: bool = True, @@ -1975,7 +1971,7 @@ def save_pretrained( - safe_serialization (`bool`, *optional*, defaults to `False`): + safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). variant (`str`, *optional*): If specified, weights are saved in the format pytorch_model..bin. @@ -2736,8 +2732,6 @@ def from_pretrained( " sure the weights are in PyTorch format." ) - from_pt = not (from_tf | from_flax) - user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline @@ -3103,6 +3097,29 @@ def from_pretrained( _commit_hash=commit_hash, ) + if ( + is_safetensors_available() + and isinstance(resolved_archive_file, str) + and resolved_archive_file.endswith(".safetensors") + ): + with safe_open(resolved_archive_file, framework="pt") as f: + metadata = f.metadata() + + if metadata.get("format") == "pt": + pass + elif metadata.get("format") == "tf": + from_tf = True + logger.info("A TensorFlow safetensors file is being loaded in a PyTorch model.") + elif metadata.get("format") == "flax": + from_flax = True + logger.info("A Flax safetensors file is being loaded in a PyTorch model.") + else: + raise ValueError( + f"Incompatible safetensors file. 
File metadata is not ['pt', 'tf', 'flax'] but {metadata.get('format')}" + ) + + from_pt = not (from_tf | from_flax) + # load pt weights early so that we know which dtype to init the model under if from_pt: if not is_sharded and state_dict is None: @@ -3391,7 +3408,6 @@ def from_pretrained( # restore default dtype if dtype_orig is not None: torch.set_default_dtype(dtype_orig) - ( model, missing_keys, diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py index ff5a56749fac..27a213707c76 100644 --- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py @@ -366,8 +366,8 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): model.config = config if hasattr(model, "enc_to_dec_proj"): - model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight - model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias + model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous() + model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous() return model diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py index 19fc47546b0f..14653410b029 100644 --- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py @@ -306,17 +306,21 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name! - if kwargs.get("from_pt", False): - config = AutoConfig.from_pretrained(pretrained_model_name_or_path) - encoder_model_type = config.encoder.model_type + # This override is only needed in the case where we're crossloading weights from PT. However, since weights are + # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file. + # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it + # or not. 
- def tf_to_pt_weight_rename(tf_weight): - if "encoder" in tf_weight and "decoder" not in tf_weight: - return re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight) - else: - return tf_weight + config = AutoConfig.from_pretrained(pretrained_model_name_or_path) + encoder_model_type = config.encoder.model_type - kwargs["tf_to_pt_weight_rename"] = tf_to_pt_weight_rename + def tf_to_pt_weight_rename(tf_weight): + if "encoder" in tf_weight and "decoder" not in tf_weight: + return re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight) + else: + return tf_weight + + kwargs["tf_to_pt_weight_rename"] = tf_to_pt_weight_rename return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @classmethod diff --git a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py index a0fae071a1b7..dea1aaaf59c0 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py @@ -322,17 +322,21 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name! - if kwargs.get("from_pt", False): - config = AutoConfig.from_pretrained(pretrained_model_name_or_path) - encoder_model_type = config.encoder.model_type + # This override is only needed in the case where we're crossloading weights from PT. However, since weights are + # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file. + # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it + # or not. 
- def tf_to_pt_weight_rename(tf_weight): - if "encoder" in tf_weight and "decoder" not in tf_weight: - return re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight) - else: - return tf_weight + config = AutoConfig.from_pretrained(pretrained_model_name_or_path) + encoder_model_type = config.encoder.model_type - kwargs["tf_to_pt_weight_rename"] = tf_to_pt_weight_rename + def tf_to_pt_weight_rename(tf_weight): + if "encoder" in tf_weight and "decoder" not in tf_weight: + return re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight) + else: + return tf_weight + + kwargs["tf_to_pt_weight_rename"] = tf_to_pt_weight_rename return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @classmethod diff --git a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py index 60646809a621..f9c6c25cd8d1 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -342,8 +342,8 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): model.config = config if hasattr(model, "enc_to_dec_proj"): - model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight - model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias + model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous() + model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous() return model diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py index 36c9585a69d7..2d18384d1b3b 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -836,7 +836,7 @@ def __init__( # then we should keep working self.image_processor = self.feature_extractor - def save_pretrained(self, save_directory: str, safe_serialization: bool = False): + def save_pretrained(self, save_directory: str, safe_serialization: bool = True): """ Save the pipeline's model and tokenizer. @@ -844,7 +844,7 @@ def save_pretrained(self, save_directory: str, safe_serialization: bool = False) save_directory (`str`): A path to the directory where to saved. It will be created if it doesn't exist. safe_serialization (`str`): - Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow + Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow. """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 8a6d7255f500..147d1e6b1c63 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -293,7 +293,7 @@ class TrainingArguments: `save_total_limit=5` and `load_best_model_at_end`, the four last checkpoints will always be retained alongside the best model. When `save_total_limit=1` and `load_best_model_at_end`, it is possible that two checkpoints are saved: the last one and the best one (if they are different). - save_safetensors (`bool`, *optional*, defaults to `False`): + save_safetensors (`bool`, *optional*, defaults to `True`): Use [safetensors](https://huggingface.co/docs/safetensors) saving and loading for state dicts instead of default `torch.load` and `torch.save`. 
save_on_each_node (`bool`, *optional*, defaults to `False`): @@ -797,7 +797,7 @@ class TrainingArguments: }, ) save_safetensors: Optional[bool] = field( - default=False, + default=True, metadata={ "help": "Use safetensors saving and loading for state dicts instead of default torch.load and torch.save." }, diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 2dcfd7f3c838..0d58211da835 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -797,7 +797,7 @@ def push_to_hub( token: Optional[Union[bool, str]] = None, max_shard_size: Optional[Union[int, str]] = "5GB", create_pr: bool = False, - safe_serialization: bool = False, + safe_serialization: bool = True, revision: str = None, commit_description: str = None, **deprecated_kwargs, @@ -827,7 +827,7 @@ def push_to_hub( Google Colab instances without any CPU OOM issues. create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. - safe_serialization (`bool`, *optional*, defaults to `False`): + safe_serialization (`bool`, *optional*, defaults to `True`): Whether or not to convert the model weights in safetensors format for safer serialization. revision (`str`, *optional*): Branch to push the uploaded files to. diff --git a/tests/models/auto/test_modeling_tf_auto.py b/tests/models/auto/test_modeling_tf_auto.py index c8754ca42702..2f6fe476158f 100644 --- a/tests/models/auto/test_modeling_tf_auto.py +++ b/tests/models/auto/test_modeling_tf_auto.py @@ -211,6 +211,8 @@ def test_from_pretrained_with_tuple_values(self): config = copy.deepcopy(model.config) config.architectures = ["FunnelBaseModel"] model = TFAutoModel.from_config(config) + model.build() + self.assertIsInstance(model, TFFunnelBaseModel) with tempfile.TemporaryDirectory() as tmp_dir: @@ -245,7 +247,10 @@ def test_new_model_registration(self): # Now that the config is registered, it can be used as any other config with the auto-API tiny_config = BertModelTester(self).get_config() config = NewModelConfig(**tiny_config.to_dict()) + model = auto_class.from_config(config) + model.build() + self.assertIsInstance(model, TFNewModel) with tempfile.TemporaryDirectory() as tmp_dir: diff --git a/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py b/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py index ab5da3d41e6c..1d8d4e985b6e 100644 --- a/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py +++ b/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py @@ -525,7 +525,7 @@ def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict): # PT -> TF with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) - tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) + tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @@ -542,7 +542,7 @@ def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict): with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) - tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) + tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) @@ -560,7 +560,8 @@ def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict): tf_model(**tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: - tf_model.save_pretrained(tmpdirname) + 
# TODO Matt: PT doesn't support loading TF safetensors - remove the arg and from_tf=True when it does + tf_model.save_pretrained(tmpdirname, safe_serialization=False) pt_model = EncoderDecoderModel.from_pretrained(tmpdirname, from_tf=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) @@ -1129,9 +1130,7 @@ def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): with tempfile.TemporaryDirectory() as tmp_dirname_1, tempfile.TemporaryDirectory() as tmp_dirname_2: encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1) encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2) - encoder_decoder_tf = TFEncoderDecoderModel.from_encoder_decoder_pretrained( - tmp_dirname_1, tmp_dirname_2, encoder_from_pt=True, decoder_from_pt=True - ) + encoder_decoder_tf = TFEncoderDecoderModel.from_encoder_decoder_pretrained(tmp_dirname_1, tmp_dirname_2) logits_tf = encoder_decoder_tf(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits @@ -1150,7 +1149,7 @@ def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): # TensorFlow => PyTorch with tempfile.TemporaryDirectory() as tmp_dirname: - encoder_decoder_tf.save_pretrained(tmp_dirname) + encoder_decoder_tf.save_pretrained(tmp_dirname, safe_serialization=False) encoder_decoder_pt = EncoderDecoderModel.from_pretrained(tmp_dirname, from_tf=True) max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) diff --git a/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py index e173e21a9b5d..2cb5e446724f 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py @@ -458,7 +458,7 @@ def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict): # PT -> TF with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) - tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) + tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @@ -473,7 +473,7 @@ def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict): with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) - tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) + tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) @@ -489,7 +489,7 @@ def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict): tf_model(**tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: - tf_model.save_pretrained(tmpdirname) + tf_model.save_pretrained(tmpdirname, safe_serialization=False) pt_model = VisionEncoderDecoderModel.from_pretrained(tmpdirname, from_tf=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) @@ -803,7 +803,7 @@ def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1) encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( - tmp_dirname_1, tmp_dirname_2, encoder_from_pt=True, decoder_from_pt=True + tmp_dirname_1, tmp_dirname_2 ) logits_tf = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits @@ -814,7 
+814,7 @@ def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): # Make sure `from_pretrained` following `save_pretrained` work and give the same result # (See https://github.com/huggingface/transformers/pull/14016) with tempfile.TemporaryDirectory() as tmp_dirname: - encoder_decoder_tf.save_pretrained(tmp_dirname) + encoder_decoder_tf.save_pretrained(tmp_dirname, safe_serialization=False) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) logits_tf_2 = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 634d7631dff2..3c4810074728 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -91,6 +91,7 @@ if is_torch_available(): import torch + from safetensors.torch import save_file as safe_save_file from torch import nn from transformers import MODEL_MAPPING, AdaptiveEmbedding @@ -1751,8 +1752,8 @@ def test_model_weights_reload_no_missing_tied_weights(self): # We are nuking ALL weights on file, so every parameter should # yell on load. We're going to detect if we yell too much, or too little. - with open(os.path.join(tmp_dir, "pytorch_model.bin"), "wb") as f: - torch.save({}, f) + placeholder_dict = {"tensor": torch.tensor([1, 2])} + safe_save_file(placeholder_dict, os.path.join(tmp_dir, "model.safetensors"), metadata={"format": "pt"}) model_reloaded, infos = model_class.from_pretrained(tmp_dir, output_loading_info=True) prefix = f"{model_reloaded.base_model_prefix}." diff --git a/tests/test_modeling_flax_utils.py b/tests/test_modeling_flax_utils.py index d8fb71a6104c..06ed30f8afa1 100644 --- a/tests/test_modeling_flax_utils.py +++ b/tests/test_modeling_flax_utils.py @@ -16,11 +16,12 @@ import unittest import numpy as np -from huggingface_hub import HfFolder, delete_repo +from huggingface_hub import HfFolder, delete_repo, snapshot_download from requests.exceptions import HTTPError -from transformers import BertConfig, is_flax_available -from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax +from transformers import BertConfig, BertModel, is_flax_available +from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax, require_safetensors, require_torch +from transformers.utils import FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_NAME if is_flax_available(): @@ -184,3 +185,88 @@ def test_model_from_pretrained_hub_subfolder_sharded(self): model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) + + @require_safetensors + def test_safetensors_save_and_load(self): + model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + + # No msgpack file, only a model.safetensors + self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) + self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) + + new_model = FlaxBertModel.from_pretrained(tmp_dir) + + self.assertTrue(check_models_equal(model, new_model)) + + @require_flax + @require_torch + def test_safetensors_save_and_load_pt_to_flax(self): + model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) + pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") + with tempfile.TemporaryDirectory() as tmp_dir: + pt_model.save_pretrained(tmp_dir) + + # Check we have a model.safetensors 
file + self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) + + new_model = FlaxBertModel.from_pretrained(tmp_dir) + + # Check models are equal + self.assertTrue(check_models_equal(model, new_model)) + + @require_safetensors + def test_safetensors_load_from_hub(self): + flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + + # Can load from the Flax-formatted checkpoint + safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-only") + self.assertTrue(check_models_equal(flax_model, safetensors_model)) + + @require_torch + @require_safetensors + def test_safetensors_load_from_hub_flax_and_pt(self): + flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + + # Can load from the PyTorch-formatted checkpoint + safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only", from_pt=True) + self.assertTrue(check_models_equal(flax_model, safetensors_model)) + + @require_safetensors + def test_safetensors_flax_from_flax(self): + model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + new_model = FlaxBertModel.from_pretrained(tmp_dir) + + self.assertTrue(check_models_equal(model, new_model)) + + @require_safetensors + @require_torch + def test_safetensors_flax_from_torch(self): + hub_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + new_model = FlaxBertModel.from_pretrained(tmp_dir) + + self.assertTrue(check_models_equal(hub_model, new_model)) + + @require_safetensors + def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_local(self): + with tempfile.TemporaryDirectory() as tmp_dir: + path = snapshot_download( + "hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded", cache_dir=tmp_dir + ) + + # This should not raise even if there are two types of sharded weights + FlaxBertModel.from_pretrained(path) + + @require_safetensors + def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_hub(self): + # This should not raise even if there are two types of sharded weights + # This should discard the safetensors weights in favor of the msgpack sharded weights + FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded") diff --git a/tests/test_modeling_tf_utils.py b/tests/test_modeling_tf_utils.py index 862a2cffa8a0..6d0ed8640772 100644 --- a/tests/test_modeling_tf_utils.py +++ b/tests/test_modeling_tf_utils.py @@ -24,7 +24,7 @@ import unittest import unittest.mock as mock -from huggingface_hub import HfFolder, Repository, delete_repo +from huggingface_hub import HfFolder, Repository, delete_repo, snapshot_download from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError @@ -39,6 +39,7 @@ is_staging_test, require_safetensors, require_tf, + require_torch, slow, ) from transformers.utils import SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging @@ -496,6 +497,44 @@ def test_safetensors_load_from_hub(self): for p1, p2 in zip(safetensors_model.weights, tf_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + @require_safetensors + def 
test_safetensors_tf_from_tf(self): + model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + new_model = TFBertModel.from_pretrained(tmp_dir) + + for p1, p2 in zip(model.weights, new_model.weights): + self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + + @require_safetensors + @is_pt_tf_cross_test + def test_safetensors_tf_from_torch(self): + hub_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") + model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + new_model = TFBertModel.from_pretrained(tmp_dir) + + for p1, p2 in zip(hub_model.weights, new_model.weights): + self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + + @require_safetensors + def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_local(self): + with tempfile.TemporaryDirectory() as tmp_dir: + path = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded", cache_dir=tmp_dir) + + # This should not raise even if there are two types of sharded weights + TFBertModel.from_pretrained(path) + + @require_safetensors + def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_hub(self): + # This should not raise even if there are two types of sharded weights + # This should discard the safetensors weights in favor of the .h5 sharded weights + TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded") + @require_tf @is_staging_test diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index 9e824e8efa03..8456871df620 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import copy import glob import json import os @@ -42,7 +42,9 @@ TestCasePlus, is_staging_test, require_accelerate, + require_flax, require_safetensors, + require_tf, require_torch, require_torch_accelerator, require_torch_multi_accelerator, @@ -56,7 +58,7 @@ WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) -from transformers.utils.import_utils import is_torchdynamo_available +from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torchdynamo_available sys.path.append(str(Path(__file__).parent.parent / "utils")) @@ -66,6 +68,7 @@ if is_torch_available(): import torch + from safetensors.torch import save_file as safe_save_file from test_module.custom_modeling import CustomModel, NoSuperInitModel from torch import nn @@ -146,6 +149,13 @@ def tie_weights(self): self.decoder.weight = self.base.linear.weight +if is_flax_available(): + from transformers import FlaxBertModel + +if is_tf_available(): + from transformers import TFBertModel + + TINY_T5 = "patrickvonplaten/t5-tiny-random" TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification" @@ -420,13 +430,13 @@ def test_shard_checkpoint(self): }, ) - def test_checkpoint_sharding_local(self): + def test_checkpoint_sharding_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. 
for max_size in ["50kB", "50kiB", "100kB", "100kiB", "200kB", "200kiB"]: - model.save_pretrained(tmp_dir, max_shard_size=max_size) + model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=False) # Get each shard file and its size shard_to_size = {} @@ -472,11 +482,11 @@ def test_checkpoint_sharding_from_hub(self): for p1, p2 in zip(model.parameters(), ref_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) - def test_checkpoint_variant_local(self): + def test_checkpoint_variant_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: - model.save_pretrained(tmp_dir, variant="v2") + model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) @@ -492,11 +502,11 @@ def test_checkpoint_variant_local(self): for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) - def test_checkpoint_variant_local_sharded(self): + def test_checkpoint_variant_local_sharded_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: - model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB") + model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=False) weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) @@ -604,18 +614,18 @@ def test_checkpoint_variant_hub_sharded_safe(self): ) self.assertIsNotNone(model) - def test_checkpoint_variant_save_load(self): + def test_checkpoint_variant_save_load_bin(self): with tempfile.TemporaryDirectory() as tmp_dir: model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) - model.save_pretrained(tmp_dir, variant="v2") + model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) # saving will create a variant checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) - model.save_pretrained(tmp_dir) + model.save_pretrained(tmp_dir, safe_serialization=False) # saving shouldn't delete variant checkpoints weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) @@ -874,7 +884,7 @@ def test_safetensors_load_from_hub_sharded(self): def test_base_model_to_head_model_load(self): base_model = BaseModel(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: - base_model.save_pretrained(tmp_dir) + base_model.save_pretrained(tmp_dir, safe_serialization=False) # Can load a base model in a model with head model = ModelWithHead.from_pretrained(tmp_dir) @@ -886,7 +896,7 @@ def test_base_model_to_head_model_load(self): head_state_dict = model.state_dict() base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"] base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"] - torch.save(base_state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) + safe_save_file(base_state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with self.assertRaisesRegex( ValueError, "The state dictionary of the model you are trying to load is corrupted." 
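# --- Illustrative sketch (editorial aside, not part of this patch) ---
# The test renames in this file track the new default: `save_pretrained` now writes
# `model.safetensors` unless `safe_serialization=False` is passed, which keeps the
# legacy `pytorch_model.bin` layout exercised by the `*_bin` tests above.
from transformers import BertModel

sketch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
sketch_model.save_pretrained("out_safetensors")  # new default: writes model.safetensors
sketch_model.save_pretrained("out_bin", safe_serialization=False)  # legacy: writes pytorch_model.bin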
@@ -934,8 +944,8 @@ def test_unexpected_keys_warnings(self): # Loading the model with the same class, we do get a warning for unexpected weights state_dict = model.state_dict() - state_dict["added_key"] = state_dict["linear.weight"] - torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) + state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"]) + safe_save_file(state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with CaptureLogger(logger) as cl: _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True) self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out) @@ -1072,6 +1082,54 @@ def test_generation_config_is_loaded_with_model(self): ) self.assertEqual(model.generation_config.transformers_version, "foo") + @require_safetensors + def test_safetensors_torch_from_torch(self): + model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + new_model = BertModel.from_pretrained(tmp_dir) + + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + @require_safetensors + @require_flax + def test_safetensors_torch_from_flax(self): + hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") + model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + new_model = BertModel.from_pretrained(tmp_dir) + + for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + @require_tf + @require_safetensors + def test_safetensors_torch_from_tf(self): + hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") + model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + new_model = BertModel.from_pretrained(tmp_dir) + + for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + @require_safetensors + def test_safetensors_torch_from_torch_sharded(self): + model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") + new_model = BertModel.from_pretrained(tmp_dir) + + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + @require_torch @is_staging_test diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 6c208d0de019..ae6d8f7ae388 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -403,7 +403,7 @@ def get_regression_trainer( class TrainerIntegrationCommon: - def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, safe_weights=False): + def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, safe_weights=True): weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"] if is_pretrained: @@ -415,7 +415,7 @@ def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, s self.assertTrue(os.path.isfile(os.path.join(checkpoint, 
filename))) def check_best_model_has_been_loaded( - self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=False + self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=True ): checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}") log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history @@ -456,7 +456,7 @@ def check_trainer_state_are_the_same(self, trainer_state, trainer_state1): _ = log1.pop(key, None) self.assertEqual(log, log1) - def convert_to_sharded_checkpoint(self, folder, save_safe=False, load_safe=False): + def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True): # Converts a checkpoint of a regression model to a sharded checkpoint. if load_safe: loader = safetensors.torch.load_file diff --git a/tests/utils/test_cli.py b/tests/utils/test_cli.py index fc7b8ebb5e02..b208ff19f1a4 100644 --- a/tests/utils/test_cli.py +++ b/tests/utils/test_cli.py @@ -43,7 +43,6 @@ def test_cli_pt_to_tf(self): shutil.rmtree("/tmp/hf-internal-testing/tiny-random-gptj", ignore_errors=True) # cleans potential past runs transformers.commands.transformers_cli.main() - # The original repo has no TF weights -- if they exist, they were created by the CLI self.assertTrue(os.path.exists("/tmp/hf-internal-testing/tiny-random-gptj/tf_model.h5")) @require_torch From 7d8ff3629b2725ec43ace99c1a6e87ac1978d433 Mon Sep 17 00:00:00 2001 From: Yeyang <76979429+yyLeaves@users.noreply.github.com> Date: Wed, 1 Nov 2023 03:50:48 +0800 Subject: [PATCH 039/268] =?UTF-8?q?=F0=9F=8C=90=20[i18n-ZH]=20Translate=20?= =?UTF-8?q?tflite.md=20into=20Chinese=20(#27134)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs(zh): translate tflite.md * docs(zh): add space around links * Update docs/source/zh/tflite.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/zh/_toctree.yml | 2 ++ docs/source/zh/tflite.md | 54 +++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 docs/source/zh/tflite.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 6aafffa59849..659c81d7582b 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -29,4 +29,6 @@ title: 共享自定义模型 - local: serialization title: 导出为 ONNX + - local: tflite + title: 导出为 TFLite title: 开发者指南 diff --git a/docs/source/zh/tflite.md b/docs/source/zh/tflite.md new file mode 100644 index 000000000000..bf47d411447a --- /dev/null +++ b/docs/source/zh/tflite.md @@ -0,0 +1,54 @@ + + +# 导出为 TFLite + +[TensorFlow Lite](https://www.tensorflow.org/lite/guide) 是一个轻量级框架,用于资源受限的设备上,如手机、嵌入式系统和物联网(IoT)设备,部署机器学习模型。TFLite 旨在在计算能力、内存和功耗有限的设备上优化和高效运行模型。模型以一种特殊的高效可移植格式表示,其文件扩展名为 `.tflite`。 + +🤗 Optimum 通过 `exporters.tflite` 模块提供将 🤗 Transformers 模型导出至 TFLite 格式的功能。请参考 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/exporters/tflite/overview) 以获取支持的模型架构列表。 + +要将模型导出为 TFLite 格式,请安装所需的依赖项: + +```bash +pip install optimum[exporters-tf] +``` + +请参阅 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/main/en/exporters/tflite/usage_guides/export_a_model) 以查看所有可用参数,或者在命令行中查看帮助: + +```bash +optimum-cli export tflite --help +``` + +运行以下命令,以从 🤗 Hub 导出模型的检查点(checkpoint),以 `bert-base-uncased` 为例: + +```bash +optimum-cli export tflite --model bert-base-uncased --sequence_length 128 
bert_tflite/ +``` + +你应该能在日志中看到导出进度以及生成的 `model.tflite` 文件的保存位置,如下所示: + +```bash +Validating TFLite model... + -[✓] TFLite model output names match reference model (logits) + - Validating TFLite Model output "logits": + -[✓] (1, 128, 30522) matches (1, 128, 30522) + -[x] values not close enough, max diff: 5.817413330078125e-05 (atol: 1e-05) +The TensorFlow Lite export succeeded with the warning: The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance 1e-05: +- logits: max diff = 5.817413330078125e-05. + The exported model was saved at: bert_tflite +``` + +上面的示例说明了从 🤗 Hub 导出检查点的过程。导出本地模型时,首先需要确保将模型的权重和分词器文件保存在同一目录(`local_path`)中。在使用 CLI(命令行)时,将 `local_path` 传递给 `model` 参数,而不是 🤗 Hub 上的检查点名称。 \ No newline at end of file From 82c7e879876822864b5ceaf2c99eb01159266bcd Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Wed, 1 Nov 2023 14:17:06 +0800 Subject: [PATCH 040/268] device agnostic fsdp testing (#27120) * make fsdp test cases device agnostic * make style --- tests/fsdp/test_fsdp.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py index 69103dcd8c3f..2a9473c862ff 100644 --- a/tests/fsdp/test_fsdp.py +++ b/tests/fsdp/test_fsdp.py @@ -24,18 +24,19 @@ from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, + backend_device_count, execute_subprocess_async, - get_gpu_count, mockenv_context, require_accelerate, require_fsdp, - require_torch_gpu, - require_torch_multi_gpu, + require_torch_accelerator, + require_torch_multi_accelerator, slow, + torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import FSDPOption, set_seed -from transformers.utils import is_accelerate_available, is_torch_bf16_gpu_available +from transformers.utils import is_accelerate_available, is_torch_bf16_available_on_device if is_torch_available(): @@ -46,7 +47,7 @@ # default torch.distributed port DEFAULT_MASTER_PORT = "10999" dtypes = ["fp16"] -if is_torch_bf16_gpu_available(): +if is_torch_bf16_available_on_device(torch_device): dtypes += ["bf16"] sharding_strategies = ["full_shard", "shard_grad_op"] state_dict_types = ["FULL_STATE_DICT", "SHARDED_STATE_DICT"] @@ -100,7 +101,7 @@ def get_launcher(distributed=False, use_accelerate=False): # - it won't be able to handle that # 2. 
for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) - num_gpus = min(2, get_gpu_count()) if distributed else 1 + num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) if use_accelerate: return f"""accelerate launch @@ -121,7 +122,7 @@ def _parameterized_custom_name_func(func, param_num, param): @require_accelerate -@require_torch_gpu +@require_torch_accelerator @require_fsdp_version class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon): def setUp(self): @@ -170,7 +171,7 @@ def test_fsdp_config(self, sharding_strategy, dtype): self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true") @parameterized.expand(params, name_func=_parameterized_custom_name_func) - @require_torch_multi_gpu + @require_torch_multi_accelerator @slow def test_basic_run(self, sharding_strategy, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) @@ -182,7 +183,7 @@ def test_basic_run(self, sharding_strategy, dtype): execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(dtypes) - @require_torch_multi_gpu + @require_torch_multi_accelerator @slow @unittest.skipIf(not is_torch_greater_or_equal_than_2_1, reason="This test on pytorch 2.0 takes 4 hours.") def test_basic_run_with_cpu_offload(self, dtype): @@ -195,7 +196,7 @@ def test_basic_run_with_cpu_offload(self, dtype): execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(state_dict_types, name_func=_parameterized_custom_name_func) - @require_torch_multi_gpu + @require_torch_multi_accelerator @slow def test_training_and_can_resume_normally(self, state_dict_type): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) From ae093eef016533a3670561fa9e26addb42d446d1 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Wed, 1 Nov 2023 09:06:31 +0100 Subject: [PATCH 041/268] [`core` / `Quantization` ] AWQ integration (#27045) * working v1 * oops * Update src/transformers/modeling_utils.py Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> * fixup * oops * push * more changes * add docs * some fixes * fix copies * add v1 doc * added installation guide * relax constraints * revert * attempt llm-awq * oops * oops * fixup * raise error when incorrect cuda compute capability * nit * add instructions for llm-awq * fixup * fix copies * fixup and docs * change * few changes + add demo * add v1 tests * add autoawq in dockerfile * finalize * Update tests/quantization/autoawq/test_awq.py * fix test * fix * fix issue * Update src/transformers/integrations/awq.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Update docs/source/en/main_classes/quantization.md Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Update docs/source/en/main_classes/quantization.md Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Update src/transformers/integrations/awq.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Update src/transformers/integrations/awq.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * add link to example script * Update docs/source/en/main_classes/quantization.md Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * add more content * add more details * add link to quantization docs * camel case + change backend class name * 
change to string * fixup * raise errors if libs not installed * change to `bits` and `group_size` * nit * nit * Apply suggestions from code review Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> * disable training * address some comments and fix nits * fix * final nits and fix tests * adapt to our new runners * make fix-copies * Update src/transformers/utils/quantization_config.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/utils/quantization_config.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/integrations/awq.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/integrations/awq.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * move to top * add conversion test * final nit * add more elaborated test --------- Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docker/transformers-all-latest-gpu/Dockerfile | 3 + docs/source/en/main_classes/quantization.md | 91 ++++++++ src/transformers/__init__.py | 4 +- src/transformers/integrations/__init__.py | 2 + src/transformers/integrations/awq.py | 104 +++++++++ src/transformers/modeling_utils.py | 59 ++++- src/transformers/testing_utils.py | 8 + src/transformers/utils/__init__.py | 1 + src/transformers/utils/import_utils.py | 6 + src/transformers/utils/quantization_config.py | 75 ++++++ tests/quantization/autoawq/__init__.py | 0 tests/quantization/autoawq/test_awq.py | 221 ++++++++++++++++++ 12 files changed, 571 insertions(+), 3 deletions(-) create mode 100644 src/transformers/integrations/awq.py create mode 100644 tests/quantization/autoawq/__init__.py create mode 100644 tests/quantization/autoawq/test_awq.py diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index d531e130c02e..843c06cbd745 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -55,6 +55,9 @@ RUN python3 -m pip install --no-cache-dir auto-gptq --extra-index-url https://hu # Add einops for additional model testing RUN python3 -m pip install --no-cache-dir einops +# Add autoawq for quantization testing +RUN python3 -m pip install --no-cache-dir autoawq + # For bettertransformer + gptq RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md index 2a2c6c21adb0..1ab20f1fa110 100644 --- a/docs/source/en/main_classes/quantization.md +++ b/docs/source/en/main_classes/quantization.md @@ -16,6 +16,97 @@ rendered properly in your Markdown viewer. # Quantize 🤗 Transformers models +## AWQ integration + +AWQ method has been introduced in the [*AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration* paper](https://arxiv.org/abs/2306.00978). With AWQ you can run models in 4-bit precision, while preserving its original quality (i.e. no performance degradation) with a superior throughput that other quantization methods presented below - reaching similar throughput as pure `float16` inference. + +We now support inference with any AWQ model, meaning anyone can load and use AWQ weights that are pushed on the Hub or saved locally. 
Note that using AWQ requires to have access to a NVIDIA GPU. CPU inference is not supported yet. + +### Quantizing a model + +We advise users to look at different existing tools in the ecosystem to quantize their models with AWQ algorithm, such as: + +- [`llm-awq`](https://github.com/mit-han-lab/llm-awq) from MIT Han Lab +- [`autoawq`](https://github.com/casper-hansen/AutoAWQ) from [`casper-hansen`](https://github.com/casper-hansen) +- Intel neural compressor from Intel - through [`optimum-intel`](https://huggingface.co/docs/optimum/main/en/intel/optimization_inc) + +Many other tools might exist in the ecosystem, please feel free to open a PR to add them to the list. +Currently the integration with 🤗 Transformers is only available for models that have been quantized using `autoawq` library and `llm-awq`. Most of the models quantized with `auto-awq` can be found under [`TheBloke`](https://huggingface.co/TheBloke) namespace of 🤗 Hub, and to quantize models with `llm-awq` please refer to the [`convert_to_hf.py`](https://github.com/mit-han-lab/llm-awq/blob/main/examples/convert_to_hf.py) script in the examples folder of [`llm-awq`](https://github.com/mit-han-lab/llm-awq/). + +### Load a quantized model + +You can load a quantized model from the Hub using the `from_pretrained` method. Make sure that the pushed weights are quantized, by checking that the attribute `quantization_config` is present in the model's configuration file (`configuration.json`). You can confirm that the model is quantized in the AWQ format by checking the field `quantization_config.quant_method` which should be set to `"awq"`. Note that loading the model will set other weights in `float16` by default for performance reasons. If you want to change that behavior, you can pass `torch_dtype` argument to `torch.float32` or `torch.bfloat16`. You can find in the sections below some example snippets and notebook. + +## Example usage + +First, you need to install [`autoawq`](https://github.com/casper-hansen/AutoAWQ) library + +```bash +pip install autoawq +``` + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_id = "TheBloke/zephyr-7B-alpha-AWQ" +model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0") +``` + +In case you first load your model on CPU, make sure to move it to your GPU device before using + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_id = "TheBloke/zephyr-7B-alpha-AWQ" +model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda:0") +``` + +### Combining AWQ and Flash Attention + +You can combine AWQ quantization with Flash Attention to get a model that is both quantized and faster. Simply load the model using `from_pretrained` and pass `use_flash_attention_2=True` argument. + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +model = AutoModelForCausalLM.from_pretrained("TheBloke/zephyr-7B-alpha-AWQ", use_flash_attention_2=True, device_map="cuda:0") +``` + +### Benchmarks + +We performed some speed, throughput and latency benchmarks using [`optimum-benchmark`](https://github.com/huggingface/optimum-benchmark) library. + +Note at that time of writing this documentation section, the available quantization methods were: `awq`, `gptq` and `bitsandbytes`. 
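+Checkpoints like the one benchmarked below are typically produced with `autoawq` (see the "Quantizing a model" section above). As a rough, hedged sketch, quantizing a checkpoint yourself looks like the snippet below; the `AutoAWQForCausalLM.quantize` / `save_quantized` calls and the config keys reflect the `autoawq` API at the time of writing, and the model and output paths are placeholders:
+
+```python
+from awq import AutoAWQForCausalLM
+from transformers import AutoTokenizer
+
+model_path = "mistralai/Mistral-7B-v0.1"  # placeholder source checkpoint
+quant_path = "mistral-7b-v0.1-awq"        # placeholder output directory
+quant_config = {"zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM"}
+
+model = AutoAWQForCausalLM.from_pretrained(model_path)
+tokenizer = AutoTokenizer.from_pretrained(model_path)
+
+# Run the AWQ calibration / packing step, then save the quantized weights and tokenizer
+model.quantize(tokenizer, quant_config=quant_config)
+model.save_quantized(quant_path)
+tokenizer.save_pretrained(quant_path)
+```
+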
+ +The benchmark was run on a NVIDIA-A100 instance and the model used was [`TheBloke/Mistral-7B-v0.1-AWQ`](https://huggingface.co/TheBloke/Mistral-7B-v0.1-AWQ) for the AWQ model, [`TheBloke/Mistral-7B-v0.1-GPTQ`](https://huggingface.co/TheBloke/Mistral-7B-v0.1-GPTQ) for the GPTQ model. We also benchmarked it against `bitsandbytes` quantization methods and native `float16` model. Some results are shown below: + +
+*[Benchmark plots omitted: forward latency, generation throughput and peak memory comparisons; see the full results link below.]*
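+
+Since the plots cannot be reproduced here, a quick way to get a rough throughput number on your own hardware is a plain PyTorch timing of `generate` (a minimal sketch, not the `optimum-benchmark` setup used for the full results; it reuses the AWQ checkpoint mentioned above):
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_id = "TheBloke/Mistral-7B-v0.1-AWQ"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0")
+
+inputs = tokenizer("Hello my name is", return_tensors="pt").to("cuda:0")
+model.generate(**inputs, max_new_tokens=32)  # warmup
+
+start, end = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
+start.record()
+out = model.generate(**inputs, max_new_tokens=128)
+end.record()
+torch.cuda.synchronize()
+
+new_tokens = out.shape[-1] - inputs["input_ids"].shape[-1]
+print(f"~{new_tokens / (start.elapsed_time(end) / 1000):.1f} tokens/s")
+```
+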
+ +You can find the full results together with packages versions in [this link](https://github.com/huggingface/optimum-benchmark/tree/main/examples/running-mistral). + +From the results it appears that AWQ quantization method is the fastest quantization method for inference, text generation and among the lowest peak memory for text generation. However, AWQ seems to have the largest forward latency per batch size. + +### Google colab demo + +Check out how to use this integration throughout this [Google Colab demo](https://colab.research.google.com/drive/1HzZH89yAXJaZgwJDhQj9LqSBux932BvY)! + +### AwqConfig + +[[autodoc]] AwqConfig + ## `AutoGPTQ` Integration 🤗 Transformers has integrated `optimum` API to perform GPTQ quantization on language models. You can load and quantize your model in 8, 4, 3 or even 2 bits without a big drop of performance and faster inference speed! This is supported by most GPU hardwares. diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 1fc1ff38d06d..a784cff7c839 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -778,7 +778,7 @@ "is_vision_available", "logging", ], - "utils.quantization_config": ["BitsAndBytesConfig", "GPTQConfig"], + "utils.quantization_config": ["AwqConfig", "BitsAndBytesConfig", "GPTQConfig"], } # sentencepiece-backed objects @@ -4943,7 +4943,7 @@ ) # bitsandbytes config - from .utils.quantization_config import BitsAndBytesConfig, GPTQConfig + from .utils.quantization_config import AwqConfig, BitsAndBytesConfig, GPTQConfig try: if not is_sentencepiece_available(): diff --git a/src/transformers/integrations/__init__.py b/src/transformers/integrations/__init__.py index ddd36955b3bf..7596555984a3 100644 --- a/src/transformers/integrations/__init__.py +++ b/src/transformers/integrations/__init__.py @@ -17,6 +17,7 @@ _import_structure = { + "awq": ["replace_with_awq_linear"], "bitsandbytes": [ "get_keys_to_not_convert", "replace_8bit_linear", @@ -77,6 +78,7 @@ } if TYPE_CHECKING: + from .awq import replace_with_awq_linear from .bitsandbytes import ( get_keys_to_not_convert, replace_8bit_linear, diff --git a/src/transformers/integrations/awq.py b/src/transformers/integrations/awq.py new file mode 100644 index 000000000000..94d996b0fffd --- /dev/null +++ b/src/transformers/integrations/awq.py @@ -0,0 +1,104 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"AWQ (Activation aware Weight Quantization) integration file" +from ..utils import is_auto_awq_available, is_torch_available +from ..utils.quantization_config import AwqBackendPackingMethod, AWQLinearVersion + + +if is_torch_available(): + import torch.nn as nn + + +def replace_with_awq_linear( + model, + modules_to_not_convert=None, + quantization_config=None, + current_key_name=None, + has_been_replaced=False, +) -> bool: + """ + Public method that recursively replaces the Linear layers of the given model with AWQ quantized layers. + `accelerate` is needed to use this method. 
Returns the converted model and a boolean that indicates if the + conversion has been successfull or not. + + During the module replacement, we also infer the backend to use through the `quantization_config` object. + + Args: + model (`torch.nn.Module`): + The model to convert, can be any `torch.nn.Module` instance. + quantization_config (`AwqConfig`): + The quantization config object that contains the quantization parameters. + modules_to_not_convert (`list`, *optional*): + A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be + converted. + current_key_name (`list`, *optional*): + A list that contains the current key name. This is used for recursion and should not be passed by the user. + has_been_replaced (`bool`, *optional*): + A boolean that indicates if the conversion has been successful or not. This is used for recursion and + should not be passed by the user. + """ + if modules_to_not_convert is None: + modules_to_not_convert = [] + + backend = quantization_config.backend + + if not is_auto_awq_available(): + raise ValueError( + "AWQ (either `autoawq` or `llmawq`) is not available. Please install it with `pip install autoawq` or check out the installation guide in https://github.com/mit-han-lab/llm-awq" + ) + + if backend == AwqBackendPackingMethod.AUTOAWQ: + from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV + elif backend == AwqBackendPackingMethod.LLMAWQ: + from awq.quantize.qmodule import WQLinear + + if backend == AwqBackendPackingMethod.AUTOAWQ: + target_cls = WQLinear_GEMM if quantization_config.version == AWQLinearVersion.GEMM else WQLinear_GEMV + else: + target_cls = WQLinear + + for name, module in model.named_children(): + if current_key_name is None: + current_key_name = [] + current_key_name.append(name) + + if isinstance(module, nn.Linear) and name not in modules_to_not_convert: + # Check if the current key is not in the `modules_to_not_convert` + if not any(key in ".".join(current_key_name) for key in modules_to_not_convert): + in_features = module.in_features + out_features = module.out_features + + model._modules[name] = target_cls( + w_bit=quantization_config.bits, + group_size=quantization_config.group_size, + in_features=in_features, + out_features=out_features, + bias=module.bias is not None, + dev=module.weight.device, + ) + has_been_replaced = True + + # Force requires grad to False to avoid unexpected errors + model._modules[name].requires_grad_(False) + if len(list(module.children())) > 0: + _, has_been_replaced = replace_with_awq_linear( + module, + modules_to_not_convert=modules_to_not_convert, + current_key_name=current_key_name, + quantization_config=quantization_config, + has_been_replaced=has_been_replaced, + ) + # Remove the last key for recursion + current_key_name.pop(-1) + return model, has_been_replaced diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index ccb9073aef12..be88dfd204d7 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -70,6 +70,7 @@ extract_commit_hash, has_file, is_accelerate_available, + is_auto_awq_available, is_auto_gptq_available, is_bitsandbytes_available, is_flash_attn_2_available, @@ -90,7 +91,7 @@ is_torch_fx_proxy, is_torchdynamo_compiling, ) -from .utils.quantization_config import BitsAndBytesConfig, GPTQConfig, QuantizationMethod +from .utils.quantization_config import AwqConfig, BitsAndBytesConfig, GPTQConfig, QuantizationMethod from .utils.versions import require_version_core @@ -2674,6 
+2675,13 @@ def from_pretrained( quantization_config, "quant_method", QuantizationMethod.BITS_AND_BYTES ) + if quantization_method_from_args == QuantizationMethod.AWQ: + raise ValueError( + "You cannot pass an `AwqConfig` when loading a model as you can only use AWQ models" + " for inference. To quantize transformers models with AWQ algorithm, please refer to our" + " quantization docs: https://huggingface.co/docs/transformers/main_classes/quantization " + ) + if quantization_config is None and (load_in_8bit or load_in_4bit): quantization_method_from_args = QuantizationMethod.BITS_AND_BYTES quantization_config, kwargs = BitsAndBytesConfig.from_dict( @@ -2805,6 +2813,36 @@ def from_pretrained( logger.info("We suggest you to set `torch_dtype=torch.float16` for better efficiency with GPTQ.") quantizer = GPTQQuantizer.from_dict(quantization_config.to_dict()) + elif quantization_method_from_config == QuantizationMethod.AWQ: + if not torch.cuda.is_available(): + raise RuntimeError("GPU is required to run AWQ quantized model.") + + if not is_auto_awq_available(): + raise ImportError("Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)") + + if not is_accelerate_available(): + raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)") + + if device_map is None: + logger.warning( + "You have loaded an AWQ model on CPU and have a CUDA device available, make sure to set " + "your model on a GPU device in order to run your model." + ) + elif device_map is not None: + if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): + raise ValueError( + "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device." + " This is not supported. Please remove the CPU or disk device from the device_map." + ) + + if torch_dtype is None: + torch_dtype = torch.float16 + else: + logger.info("We suggest you to set `torch_dtype=torch.float16` for better efficiency with AWQ.") + + # Force-set to `True` for more mem efficiency + if low_cpu_mem_usage is None: + low_cpu_mem_usage = True if ( is_8bit_serializable @@ -3265,6 +3303,25 @@ def from_pretrained( if quantization_method_from_config == QuantizationMethod.GPTQ: model = quantizer.convert_model(model) model._is_quantized_training_enabled = True + elif quantization_method_from_config == QuantizationMethod.AWQ: + from .integrations import get_keys_to_not_convert, replace_with_awq_linear + + modules_to_not_convert = get_keys_to_not_convert(model) + + if quantization_config is None: + quantization_config = AwqConfig.from_dict(config.quantization_config) + + model, has_been_replaced = replace_with_awq_linear( + model, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert + ) + model._is_quantized_training_enabled = False + + if not has_been_replaced: + logger.warning( + "You are loading an AWQ model but no linear modules were found in your model." + " Please double check your model architecture, or submit an issue on github if you think this is" + " a bug." 
+ ) if quantization_method_from_config is not None: model.quantization_method = quantization_method_from_config diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 2c13eaf044af..815a13c9e96d 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -52,6 +52,7 @@ from .utils import ( is_accelerate_available, is_apex_available, + is_auto_awq_available, is_auto_gptq_available, is_bitsandbytes_available, is_bs4_available, @@ -963,6 +964,13 @@ def require_auto_gptq(test_case): return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case) +def require_auto_awq(test_case): + """ + Decorator for auto_awq dependency + """ + return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case) + + def require_phonemizer(test_case): """ Decorator marking a test that requires phonemizer diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 25dfa53d8d7e..1a0d68f700ec 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -104,6 +104,7 @@ get_torch_version, is_accelerate_available, is_apex_available, + is_auto_awq_available, is_auto_gptq_available, is_bitsandbytes_available, is_bs4_available, diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 3a92ad75e953..9ad802a48982 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -107,6 +107,8 @@ def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[ _openai_available = _is_package_available("openai") _optimum_available = _is_package_available("optimum") _auto_gptq_available = _is_package_available("auto_gptq") +# `importlib.metadata.version` doesn't work with `awq` +_auto_awq_available = importlib.util.find_spec("awq") is not None _pandas_available = _is_package_available("pandas") _peft_available = _is_package_available("peft") _phonemizer_available = _is_package_available("phonemizer") @@ -675,6 +677,10 @@ def is_optimum_available(): return _optimum_available +def is_auto_awq_available(): + return _auto_awq_available + + def is_auto_gptq_available(): return _auto_gptq_available diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index 13f81a5a2cfa..51830aab5744 100644 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -37,6 +37,17 @@ class QuantizationMethod(str, Enum): BITS_AND_BYTES = "bitsandbytes" GPTQ = "gptq" + AWQ = "awq" + + +class AWQLinearVersion(str, Enum): + GEMM = "gemm" + GEMV = "gemv" + + +class AwqBackendPackingMethod(str, Enum): + AUTOAWQ = "autoawq" + LLMAWQ = "llm-awq" @dataclass @@ -418,3 +429,67 @@ def post_init(self): f"""dataset needs to be either a list of string or a value in ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" ) + + +@dataclass +class AwqConfig(QuantizationConfigMixin): + """ + This is a wrapper class about all possible attributes and features that you can play with a model that has been + loaded using `auto-awq` library awq quantization relying on auto_awq backend. + + Args: + bits (`int`, *optional*, defaults to 4): + The number of bits to quantize to. + group_size (`int`, *optional*, defaults to 128): + The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. 
+ zero_point (`bool`, *optional*, defaults to `True`): + Whether to use zero point quantization. + version (`AWQLinearVersion`, *optional*, defaults to `AWQLinearVersion.GEMM`): + The version of the quantization algorithm to use. GEMM is better for big batch_size (e.g. >= 8) otherwise, + GEMV is better (e.g. < 8 ) + backend (`AwqBackendPackingMethod`, *optional*, defaults to `AwqBackendPackingMethod.AUTOAWQ`): + The quantization backend. Some models might be quantized using `llm-awq` backend. This is useful for users + that quantize their own models using `llm-awq` library. + """ + + def __init__( + self, + bits: int = 4, + group_size: int = 128, + zero_point: bool = True, + version: AWQLinearVersion = AWQLinearVersion.GEMM, + backend: AwqBackendPackingMethod = AwqBackendPackingMethod.AUTOAWQ, + **kwargs, + ): + self.quant_method = QuantizationMethod.AWQ + + self.bits = bits + self.group_size = group_size + self.zero_point = zero_point + self.version = version + self.backend = backend + + self.post_init() + + def post_init(self): + r""" + Safety checker that arguments are correct + """ + if not torch.cuda.is_available(): + raise ValueError("AWQ is only available on GPU") + + if self.backend not in [AwqBackendPackingMethod.AUTOAWQ, AwqBackendPackingMethod.LLMAWQ]: + raise ValueError( + f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and {AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}" + ) + + if self.version not in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV]: + raise ValueError( + f"Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV] - not recognized version {self.version}" + ) + + if self.backend == AwqBackendPackingMethod.LLMAWQ: + compute_capability = torch.cuda.get_device_capability() + major, minor = compute_capability + if major < 8: + raise ValueError("LLM-AWQ backend is only supported on GPUs with compute capability >= 8.0") diff --git a/tests/quantization/autoawq/__init__.py b/tests/quantization/autoawq/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/quantization/autoawq/test_awq.py b/tests/quantization/autoawq/test_awq.py new file mode 100644 index 000000000000..2b3622d82321 --- /dev/null +++ b/tests/quantization/autoawq/test_awq.py @@ -0,0 +1,221 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import tempfile +import unittest + +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AwqConfig, OPTForCausalLM +from transformers.testing_utils import ( + require_accelerate, + require_auto_awq, + require_torch_gpu, + require_torch_multi_gpu, + slow, + torch_device, +) +from transformers.utils import is_accelerate_available, is_torch_available + + +if is_torch_available(): + import torch + +if is_accelerate_available(): + from accelerate import init_empty_weights + + +@require_torch_gpu +class AwqConfigTest(unittest.TestCase): + def test_wrong_backend(self): + """ + Simple test that checks if a user passes a wrong backend an error is raised + """ + # This should work fine + _ = AwqConfig(bits=4) + + with self.assertRaises(ValueError): + AwqConfig(bits=4, backend="") + + # LLMAWQ does not work on a T4 + with self.assertRaises(ValueError): + AwqConfig(bits=4, backend="llm-awq") + + def test_to_dict(self): + """ + Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object + """ + quantization_config = AwqConfig(bits=4) + config_to_dict = quantization_config.to_dict() + + for key in config_to_dict: + self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) + + def test_from_dict(self): + """ + Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict + """ + dict = {"bits": 2, "zero_point": False, "backend": "autoawq"} + quantization_config = AwqConfig.from_dict(dict) + + self.assertEqual(dict["bits"], quantization_config.bits) + self.assertEqual(dict["zero_point"], quantization_config.zero_point) + self.assertEqual(dict["backend"], quantization_config.backend) + + +@slow +@require_torch_gpu +@require_auto_awq +@require_accelerate +class AwqTest(unittest.TestCase): + # TODO: @younesbelkada change it to `TheBloke/Mistral-7B-v0.1-AWQ` in the future + model_name = "ybelkada/test-mistral-7b-v0.1-awq" + dummy_transformers_model_name = "bigscience/bloom-560m" + + input_text = "Hello my name is" + + EXPECTED_OUTPUT = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish" + EXPECTED_OUTPUT_BF16 = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. 
I am a junior and I am majoring in Exercise and Sport Science with a" + + device_map = "cuda" + + # called only once for all test in this class + @classmethod + def setUpClass(cls): + """ + Setup quantized model + """ + cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) + cls.quantized_model = AutoModelForCausalLM.from_pretrained( + cls.model_name, + device_map=cls.device_map, + ) + + def test_quantized_model_conversion(self): + """ + Simple test that checks if the quantized model has been converted properly + """ + from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV + + from transformers.integrations.awq import replace_with_awq_linear + + model_id = "facebook/opt-350m" + config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") + quantization_config = AwqConfig(bits=4) + + with init_empty_weights(): + model = OPTForCausalLM(config) + + nb_linears = 0 + for module in model.modules(): + if isinstance(module, torch.nn.Linear): + nb_linears += 1 + + model, _ = replace_with_awq_linear(model, quantization_config=quantization_config) + nb_awq_linear = 0 + for module in model.modules(): + if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): + nb_awq_linear += 1 + + self.assertEqual(nb_linears, nb_awq_linear) + + # Try with `modules_not_to_convert` + with init_empty_weights(): + model = OPTForCausalLM(config) + + model, _ = replace_with_awq_linear( + model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] + ) + nb_awq_linear = 0 + for module in model.modules(): + if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): + nb_awq_linear += 1 + + self.assertEqual(nb_linears - 1, nb_awq_linear) + + def test_quantized_model(self): + """ + Simple test that checks if the quantized model is working properly + """ + input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) + + output = self.quantized_model.generate(**input_ids, max_new_tokens=40) + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) + + def test_quantized_model_bf16(self): + """ + Simple test that checks if the quantized model is working properly with bf16 + """ + input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) + + quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.bfloat16).to( + torch_device + ) + + output = quantized_model.generate(**input_ids, max_new_tokens=40) + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16) + + def test_quantized_model_no_device_map(self): + """ + Simple test that checks if the quantized model is working properly + """ + input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) + + quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device) + output = quantized_model.generate(**input_ids, max_new_tokens=40) + + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) + + def test_save_pretrained(self): + """ + Simple test that checks if the quantized model is working properly after being saved and loaded + """ + with tempfile.TemporaryDirectory() as tmpdirname: + self.quantized_model.save_pretrained(tmpdirname) + model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) + + input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) + + output = model.generate(**input_ids, max_new_tokens=40) 
+ self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) + + def test_raise_quantization(self): + """ + Simple test that checks if one passes a quantization config to quantize a model, it raises an error + """ + quantization_config = AwqConfig(bits=4) + + with self.assertRaises(ValueError) as context: + _ = AutoModelForCausalLM.from_pretrained( + self.dummy_transformers_model_name, quantization_config=quantization_config + ) + + self.assertEqual( + str(context.exception), + "You cannot pass an `AwqConfig` when loading a model as you can only use AWQ models for inference. To quantize transformers models with AWQ algorithm, please refer to our quantization docs: https://huggingface.co/docs/transformers/main_classes/quantization ", + ) + + @require_torch_multi_gpu + def test_quantized_model_multi_gpu(self): + """ + Simple test that checks if the quantized model is working properly with multiple GPUs + """ + input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) + + quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") + + self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1, 2, 3}) + + output = quantized_model.generate(**input_ids, max_new_tokens=40) + + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) From 71025520bc980eec1825ec0716d086bdd9180e5a Mon Sep 17 00:00:00 2001 From: Wesley L Passos <33898112+wesleylp@users.noreply.github.com> Date: Wed, 1 Nov 2023 09:26:14 -0300 Subject: [PATCH 042/268] Fix docstring get maskformer resize output image size (#27196) * fix docstring in get_maskformer_resize_output_image_size * fix functions docstring * fix 'copied from' functions docstring * fix docstring * fix return type * fix docstring resize --- .../image_processing_mask2former.py | 20 ++++++++++--------- .../maskformer/image_processing_maskformer.py | 20 ++++++++++--------- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/src/transformers/models/mask2former/image_processing_mask2former.py b/src/transformers/models/mask2former/image_processing_mask2former.py index af3591e192e1..4b541125646c 100644 --- a/src/transformers/models/mask2former/image_processing_mask2former.py +++ b/src/transformers/models/mask2former/image_processing_mask2former.py @@ -304,21 +304,23 @@ def get_mask2former_resize_output_image_size( size_divisor: int = 0, default_to_square: bool = True, input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> tuple: +) -> Tuple[int, int]: """ Computes the output size given the desired size. Args: - input_image (`np.ndarray`): + image (`np.ndarray`): The input image. - size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): + size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`): The size of the output image. - default_to_square (`bool`, *optional*, defaults to `True`): - Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. - size_divisible (`int`, *optional*, defaults to 0): - If size_divisible is given, the output image size will be divisible by the number. + size_divisor (`int`, *optional*, defaults to 0): + If `size_divisor` is given, the output image size will be divisible by the number. + default_to_square (`bool`, *optional*, defaults to `True`): + Whether to default to square if no size is provided. 
+ input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `Tuple[int, int]`: The output size. @@ -471,10 +473,10 @@ def resize( size (`Dict[str, int]`): The size of the output image. size_divisor (`int`, *optional*, defaults to 0): - If size_divisor is given, the output image size will be divisible by the number. + If `size_divisor` is given, the output image size will be divisible by the number. resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resizing the image. - data_format (`str` or `ChannelDimension`, *optional*): + data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): diff --git a/src/transformers/models/maskformer/image_processing_maskformer.py b/src/transformers/models/maskformer/image_processing_maskformer.py index e071c45e0cc8..eb93250532e4 100644 --- a/src/transformers/models/maskformer/image_processing_maskformer.py +++ b/src/transformers/models/maskformer/image_processing_maskformer.py @@ -306,21 +306,23 @@ def get_maskformer_resize_output_image_size( size_divisor: int = 0, default_to_square: bool = True, input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> tuple: +) -> Tuple[int, int]: """ Computes the output size given the desired size. Args: - input_image (`np.ndarray`): + image (`np.ndarray`): The input image. - size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): + size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`): The size of the output image. - default_to_square (`bool`, *optional*, defaults to `True`): - Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. - size_divisible (`int`, *optional*, defaults to 0): - If size_divisible is given, the output image size will be divisible by the number. + size_divisor (`int`, *optional*, defaults to 0): + If `size_divisor` is given, the output image size will be divisible by the number. + default_to_square (`bool`, *optional*, defaults to `True`): + Whether to default to square if no size is provided. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `Tuple[int, int]`: The output size. @@ -479,10 +481,10 @@ def resize( size (`Dict[str, int]`): The size of the output image. size_divisor (`int`, *optional*, defaults to 0): - If size_divisor is given, the output image size will be divisible by the number. + If `size_divisor` is given, the output image size will be divisible by the number. resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resizing the image. - data_format (`str` or `ChannelDimension`, *optional*): + data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. 
input_data_format (`ChannelDimension` or `str`, *optional*): From 636f704d0ba16998a85924c33c104bc6dfcd44c2 Mon Sep 17 00:00:00 2001 From: MD FAIZAN KHAN <124388165+THEFZNKHAN@users.noreply.github.com> Date: Wed, 1 Nov 2023 18:12:22 +0530 Subject: [PATCH 043/268] Fix the typos and grammar mistakes in CONTRIBUTING.md. (#27193) Fix the typos and grammar mistakes in CONTRIBUTING.md --- CONTRIBUTING.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6cfa3e47398c..9ccfc46c2c14 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,8 +40,8 @@ There are several ways you can contribute to 🤗 Transformers: If you don't know where to start, there is a special [Good First Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of -open issues that are beginner-friendly and help you start contributing to open-source. Just comment in the issue that you'd like to work -on it. +open issues that are beginner-friendly and help you start contributing to open-source. Just comment on the issue that you'd like to work +on. For something slightly more challenging, you can also take a look at the [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) list. In general though, if you feel like you know what you're doing, go for it and we'll help you get there! 🚀 @@ -62,7 +62,7 @@ feedback. The 🤗 Transformers library is robust and reliable thanks to users who report the problems they encounter. Before you report an issue, we would really appreciate it if you could **make sure the bug was not -already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask on the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions. +already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions. Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it: @@ -105,7 +105,7 @@ We have added [templates](https://github.com/huggingface/transformers/tree/main/ New models are constantly released and if you want to implement a new model, please provide the following information -* A short description of the model and link to the paper. +* A short description of the model and a link to the paper. * Link to the implementation if it is open-sourced. * Link to the model weights if they are available. @@ -172,7 +172,7 @@ You'll need **[Python 3.8]((https://github.com/huggingface/transformers/blob/mai which should be enough for most use cases. -5. Develop the features on your branch. +5. Develop the features in your branch. As you work on your code, you should make sure the test suite passes. 
Run the tests impacted by your changes like this: @@ -208,7 +208,7 @@ You'll need **[Python 3.8]((https://github.com/huggingface/transformers/blob/mai make quality ``` - Finally, we have a lot of scripts to make sure we didn't forget to update + Finally, we have a lot of scripts to make sure we don't forget to update some files when adding a new model. You can run these scripts with: ```bash @@ -218,7 +218,7 @@ You'll need **[Python 3.8]((https://github.com/huggingface/transformers/blob/mai To learn more about those checks and how to fix any issues with them, check out the [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide. - If you're modifying documents under `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check + If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check make sure you install the documentation builder: ```bash @@ -234,7 +234,7 @@ You'll need **[Python 3.8]((https://github.com/huggingface/transformers/blob/mai This will build the documentation in the `~/tmp/test-build` folder where you can inspect the generated Markdown files with your favorite editor. You can also preview the docs on GitHub when you open a pull request. - Once you're happy with your changes, add changed files with `git add` and + Once you're happy with your changes, add the changed files with `git add` and record your changes locally with `git commit`: ```bash @@ -261,7 +261,7 @@ You'll need **[Python 3.8]((https://github.com/huggingface/transformers/blob/mai If you've already opened a pull request, you'll need to force push with the `--force` flag. Otherwise, if the pull request hasn't been opened yet, you can just push your changes normally. -6. Now you can go to your fork of the repository on GitHub and click on **Pull request** to open a pull request. Make sure you tick off all the boxes in our [checklist](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#pull-request-checklist) below. When you're ready, you can send your changes to the project maintainers for review. +6. Now you can go to your fork of the repository on GitHub and click on **Pull Request** to open a pull request. Make sure you tick off all the boxes on our [checklist](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#pull-request-checklist) below. When you're ready, you can send your changes to the project maintainers for review. 7. It's ok if maintainers request changes, it happens to our core contributors too! 
So everyone can see the changes in the pull request, work in your local From f3c1a172bb7a8b67250343dba6b506cc6b45e0ee Mon Sep 17 00:00:00 2001 From: Wesley L Passos <33898112+wesleylp@users.noreply.github.com> Date: Wed, 1 Nov 2023 09:42:41 -0300 Subject: [PATCH 044/268] Fixing docstring in get_resize_output_image_size function (#27191) --- .../conditional_detr/image_processing_conditional_detr.py | 6 +++--- .../deformable_detr/image_processing_deformable_detr.py | 6 +++--- src/transformers/models/deta/image_processing_deta.py | 6 +++--- src/transformers/models/detr/image_processing_detr.py | 6 +++--- src/transformers/models/yolos/image_processing_yolos.py | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py index 626911a94bcf..998cb7419174 100644 --- a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py +++ b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py @@ -136,9 +136,9 @@ def get_resize_output_image_size( image size is computed by keeping the aspect ratio of the input image size. Args: - image_size (`Tuple[int, int]`): - The input image size. - size (`int`): + input_image (`np.ndarray`): + The image to resize. + size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. diff --git a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py index 5bbde326eaa0..4e83488d75b4 100644 --- a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py +++ b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py @@ -135,9 +135,9 @@ def get_resize_output_image_size( image size is computed by keeping the aspect ratio of the input image size. Args: - image_size (`Tuple[int, int]`): - The input image size. - size (`int`): + input_image (`np.ndarray`): + The image to resize. + size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. diff --git a/src/transformers/models/deta/image_processing_deta.py b/src/transformers/models/deta/image_processing_deta.py index f12627286268..36c2c2e38127 100644 --- a/src/transformers/models/deta/image_processing_deta.py +++ b/src/transformers/models/deta/image_processing_deta.py @@ -127,9 +127,9 @@ def get_resize_output_image_size( image size is computed by keeping the aspect ratio of the input image size. Args: - image_size (`Tuple[int, int]`): - The input image size. - size (`int`): + input_image (`np.ndarray`): + The image to resize. + size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. diff --git a/src/transformers/models/detr/image_processing_detr.py b/src/transformers/models/detr/image_processing_detr.py index e48536463b76..995c33fcdb9a 100644 --- a/src/transformers/models/detr/image_processing_detr.py +++ b/src/transformers/models/detr/image_processing_detr.py @@ -133,9 +133,9 @@ def get_resize_output_image_size( image size is computed by keeping the aspect ratio of the input image size. Args: - image_size (`Tuple[int, int]`): - The input image size. - size (`int`): + input_image (`np.ndarray`): + The image to resize. 
+ size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. diff --git a/src/transformers/models/yolos/image_processing_yolos.py b/src/transformers/models/yolos/image_processing_yolos.py index 317eda9ce831..937d3c429877 100644 --- a/src/transformers/models/yolos/image_processing_yolos.py +++ b/src/transformers/models/yolos/image_processing_yolos.py @@ -152,9 +152,9 @@ def get_resize_output_image_size( image size is computed by keeping the aspect ratio of the input image size. Args: - image_size (`Tuple[int, int]`): - The input image size. - size (`int`): + input_image (`np.ndarray`): + The image to resize. + size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. From 037fb7d0e1086146612a716ef914305160134e9c Mon Sep 17 00:00:00 2001 From: Shashank Rajput <144760128+ShashankMosaicML@users.noreply.github.com> Date: Wed, 1 Nov 2023 07:16:57 -0700 Subject: [PATCH 045/268] added unsqueeze_dim to apply_rotary_pos_emb (#27117) * added unsqueeze_dim to apply_rotary_pos_emb * Added docstring * Modified docstring * Modified docstring * Modified docstring * Modified docstring * Modified docstring * ran make fix-copies and make fixup * Update src/transformers/models/llama/modeling_llama.py Accepting the proposed changes in formatting. Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * incorporating PR suggestions * incorporating PR suggestions * incorporating PR suggestions * incorporating PR suggestions * .. --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../open_llama/modeling_open_llama.py | 26 +++++++++++++++--- .../models/gpt_neox/modeling_gpt_neox.py | 26 +++++++++++++++--- .../models/idefics/modeling_idefics.py | 26 +++++++++++++++--- .../models/llama/modeling_llama.py | 27 ++++++++++++++++--- .../models/mistral/modeling_mistral.py | 26 +++++++++++++++--- .../models/persimmon/modeling_persimmon.py | 26 +++++++++++++++--- 6 files changed, 138 insertions(+), 19 deletions(-) diff --git a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py index 609bb39df1c7..f0558edf6b53 100644 --- a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py +++ b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py @@ -155,9 +155,29 @@ def rotate_half(x): # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] - sin = sin[position_ids].unsqueeze(1) +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. 
+ unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index ac59011b2811..cc62d7fe4963 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -374,9 +374,29 @@ def rotate_half(x): # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] - sin = sin[position_ids].unsqueeze(1) +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 29deae594b3b..aba0b43f695b 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -514,9 +514,29 @@ def rotate_half(x): # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] - sin = sin[position_ids].unsqueeze(1) +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 4bf9da26394e..d9e3907fb971 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -191,10 +191,29 @@ def rotate_half(x): return torch.cat((-x2, x1), dim=-1) -# Copied from transformers.models.gpt_neox.modeling_gpt_neox.apply_rotary_pos_emb -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] - sin = sin[position_ids].unsqueeze(1) +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. 
+ unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 7cdea6f053aa..f1699a8f48ad 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -132,9 +132,29 @@ def rotate_half(x): # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] - sin = sin[position_ids].unsqueeze(1) +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed diff --git a/src/transformers/models/persimmon/modeling_persimmon.py b/src/transformers/models/persimmon/modeling_persimmon.py index 2ffcf8748979..36be8761871f 100644 --- a/src/transformers/models/persimmon/modeling_persimmon.py +++ b/src/transformers/models/persimmon/modeling_persimmon.py @@ -132,9 +132,29 @@ def rotate_half(x): # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] - sin = sin[position_ids].unsqueeze(1) +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed From f9b4bea0a63062f8b8bfaf9d88f9529a806bb8e4 Mon Sep 17 00:00:00 2001 From: Alexander Kozlov Date: Wed, 1 Nov 2023 18:37:19 +0400 Subject: [PATCH 046/268] Added cache_block_outputs option to enable GPTQ for non-regular models (#27032) * Added cache_block_outputs option to enable GPTQ for non-regular models * Update src/transformers/utils/quantization_config.py Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> * Update src/transformers/utils/quantization_config.py Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> * Fixed style * Update src/transformers/utils/quantization_config.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/utils/quantization_config.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index 51830aab5744..35542d3f78fd 100644 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -360,6 +360,8 @@ class GPTQConfig(QuantizationConfigMixin): max_input_length (`int`, *optional*): The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input length. It is specific to the exllama backend with act-order. + cache_block_outputs (`bool`, *optional*, defaults to `True`): + Whether to cache block outputs to reuse as inputs for the succeeding block. 
""" def __init__( @@ -380,6 +382,7 @@ def __init__( pad_token_id: Optional[int] = None, disable_exllama: bool = False, max_input_length: Optional[int] = None, + cache_block_outputs: bool = True, **kwargs, ): self.quant_method = QuantizationMethod.GPTQ @@ -399,6 +402,7 @@ def __init__( self.pad_token_id = pad_token_id self.disable_exllama = disable_exllama self.max_input_length = max_input_length + self.cache_block_outputs = cache_block_outputs self.post_init() def get_loading_attributes(self): From 391d14e8105cf877797e95a92534db89077ccb7e Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 1 Nov 2023 16:01:53 +0100 Subject: [PATCH 047/268] [WhisperForCausalLM] Add WhisperForCausalLM for speculative decoding (#27195) * finish * add tests * fix all tests * [Assistant Decoding] Add test * fix more * better * finish * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * finish --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docs/source/en/model_doc/whisper.md | 5 + docs/source/en/tasks/language_modeling.md | 2 +- src/transformers/__init__.py | 2 + src/transformers/generation/utils.py | 35 ++- src/transformers/models/auto/modeling_auto.py | 1 + src/transformers/models/whisper/__init__.py | 2 + .../models/whisper/modeling_whisper.py | 247 +++++++++++++++++- src/transformers/utils/dummy_pt_objects.py | 7 + tests/generation/test_utils.py | 61 +++++ tests/models/whisper/test_modeling_whisper.py | 244 +++++++++++++++++ 10 files changed, 601 insertions(+), 5 deletions(-) diff --git a/docs/source/en/model_doc/whisper.md b/docs/source/en/model_doc/whisper.md index 382246e43d31..2f1cfc5e22b3 100644 --- a/docs/source/en/model_doc/whisper.md +++ b/docs/source/en/model_doc/whisper.md @@ -88,6 +88,11 @@ The original code can be found [here](https://github.com/openai/whisper). 
- forward - generate +## WhisperForCausalLM + +[[autodoc]] WhisperForCausalLM + - forward + ## WhisperForAudioClassification [[autodoc]] WhisperForAudioClassification diff --git a/docs/source/en/tasks/language_modeling.md b/docs/source/en/tasks/language_modeling.md index c509899882e3..9c35b7293d75 100644 --- a/docs/source/en/tasks/language_modeling.md +++ b/docs/source/en/tasks/language_modeling.md @@ -37,7 +37,7 @@ You can finetune other architectures for causal language modeling following the Choose one of the following architectures: -[BART](../model_doc/bart), [BERT](../model_doc/bert), [Bert Generation](../model_doc/bert-generation), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CodeLlama](../model_doc/code_llama), [CodeGen](../model_doc/codegen), [CPM-Ant](../model_doc/cpmant), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [Falcon](../model_doc/falcon), [Fuyu](../model_doc/fuyu), [GIT](../model_doc/git), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT NeoX Japanese](../model_doc/gpt_neox_japanese), [GPT-J](../model_doc/gptj), [LLaMA](../model_doc/llama), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [MPT](../model_doc/mpt), [MusicGen](../model_doc/musicgen), [MVP](../model_doc/mvp), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), [Pegasus](../model_doc/pegasus), [Persimmon](../model_doc/persimmon), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [QDQBert](../model_doc/qdqbert), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [RWKV](../model_doc/rwkv), [Speech2Text2](../model_doc/speech_to_text_2), [Transformer-XL](../model_doc/transfo-xl), [TrOCR](../model_doc/trocr), [XGLM](../model_doc/xglm), [XLM](../model_doc/xlm), [XLM-ProphetNet](../model_doc/xlm-prophetnet), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod) +[BART](../model_doc/bart), [BERT](../model_doc/bert), [Bert Generation](../model_doc/bert-generation), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CodeLlama](../model_doc/code_llama), [CodeGen](../model_doc/codegen), [CPM-Ant](../model_doc/cpmant), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [Falcon](../model_doc/falcon), [Fuyu](../model_doc/fuyu), [GIT](../model_doc/git), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT NeoX 
Japanese](../model_doc/gpt_neox_japanese), [GPT-J](../model_doc/gptj), [LLaMA](../model_doc/llama), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [MPT](../model_doc/mpt), [MusicGen](../model_doc/musicgen), [MVP](../model_doc/mvp), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), [Pegasus](../model_doc/pegasus), [Persimmon](../model_doc/persimmon), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [QDQBert](../model_doc/qdqbert), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [RWKV](../model_doc/rwkv), [Speech2Text2](../model_doc/speech_to_text_2), [Transformer-XL](../model_doc/transfo-xl), [TrOCR](../model_doc/trocr), [Whisper](../model_doc/whisper), [XGLM](../model_doc/xglm), [XLM](../model_doc/xlm), [XLM-ProphetNet](../model_doc/xlm-prophetnet), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index a784cff7c839..01127a5651d3 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -3090,6 +3090,7 @@ [ "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "WhisperForAudioClassification", + "WhisperForCausalLM", "WhisperForConditionalGeneration", "WhisperModel", "WhisperPreTrainedModel", @@ -6845,6 +6846,7 @@ from .models.whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, + WhisperForCausalLM, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 1c412f8185dc..df4239d05421 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -1626,6 +1626,10 @@ def generate( if not model_kwargs["use_cache"]: raise ValueError("assisted generate requires `use_cache=True`") + assistant_accepts_encoder_outputs = "encoder_outputs" in set( + inspect.signature(assistant_model.forward).parameters.keys() + ) + # 11. If the assistant model is an encoder-decoder, prepare its encoder outputs if assistant_model.config.is_encoder_decoder and "assistant_encoder_outputs" not in model_kwargs: assistant_model_kwargs = copy.deepcopy(model_kwargs) @@ -1637,6 +1641,17 @@ def generate( ) model_kwargs["assistant_encoder_outputs"] = assistant_model_kwargs["encoder_outputs"] + if ( + not assistant_model.config.is_encoder_decoder + and assistant_accepts_encoder_outputs + and "encoder_outputs" in model_kwargs + ): + # some assistants might be assymetric (many more enc layers than dec layers) + # encoder-decoder models that share the exact same encoder as the teacher + # in this case the assistant only needs to load the light-weight decoder, + # but still requires `encoder_outputs` to be passed + model_kwargs["assistant_encoder_outputs"] = model_kwargs["encoder_outputs"] + # 12. 
run assisted generate return self.assisted_decoding( input_ids, @@ -4368,6 +4383,11 @@ def assisted_decoding( else: num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens + # check if assistant model accepts encoder_outputs + assistant_accepts_encoder_outputs = "encoder_outputs" in set( + inspect.signature(assistant_model.forward).parameters.keys() + ) + # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() @@ -4454,9 +4474,13 @@ def assisted_decoding( encoder_outputs=model_kwargs["assistant_encoder_outputs"], ) else: + encoder_kwargs = {} + + if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: + encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] + assistant_model_outputs = assistant_model( - assist_inputs, - past_key_values=model_kwargs["assistant_past_key_values"], + assist_inputs, past_key_values=model_kwargs["assistant_past_key_values"], **encoder_kwargs ) else: if assistant_model.config.is_encoder_decoder: @@ -4465,7 +4489,12 @@ def assisted_decoding( encoder_outputs=model_kwargs["assistant_encoder_outputs"], ) else: - assistant_model_outputs = assistant_model(candidate_input_ids) + encoder_kwargs = {} + + if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: + encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] + + assistant_model_outputs = assistant_model(candidate_input_ids, **encoder_kwargs) # 1.2. greedily select the next candidate token model_kwargs["assistant_past_key_values"] = assistant_model_outputs.past_key_values diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 3c622c815827..5387809ca483 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -438,6 +438,7 @@ ("speech_to_text_2", "Speech2Text2ForCausalLM"), ("transfo-xl", "TransfoXLLMHeadModel"), ("trocr", "TrOCRForCausalLM"), + ("whisper", "WhisperForCausalLM"), ("xglm", "XGLMForCausalLM"), ("xlm", "XLMWithLMHeadModel"), ("xlm-prophetnet", "XLMProphetNetForCausalLM"), diff --git a/src/transformers/models/whisper/__init__.py b/src/transformers/models/whisper/__init__.py index cd962478e34d..d87828da69f5 100644 --- a/src/transformers/models/whisper/__init__.py +++ b/src/transformers/models/whisper/__init__.py @@ -46,6 +46,7 @@ else: _import_structure["modeling_whisper"] = [ "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", + "WhisperForCausalLM", "WhisperForConditionalGeneration", "WhisperModel", "WhisperPreTrainedModel", @@ -102,6 +103,7 @@ from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, + WhisperForCausalLM, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index 8df937e3e681..48f47fe12df7 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -29,6 +29,7 @@ from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, SequenceClassifierOutput, @@ -945,6 +946,8 @@ class WhisperDecoder(WhisperPreTrainedModel): config: WhisperConfig """ + main_input_name = "input_ids" + 
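+    # token ids, not audio features, are the main input when the decoder is used on its own (e.g. inside WhisperForCausalLM)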
def __init__(self, config: WhisperConfig): super().__init__(config) self.dropout = config.dropout @@ -1028,7 +1031,8 @@ def forward( If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of - all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal @@ -1811,6 +1815,247 @@ def _extract_token_timestamps(self, generate_outputs, alignment_heads, time_prec return timestamps +class WhisperDecoderWrapper(WhisperPreTrainedModel): + """ + This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is + used in combination with the [`EncoderDecoderModel`] framework. + """ + + def __init__(self, config): + super().__init__(config) + config.is_encoder_decoder = False + self.decoder = WhisperDecoder(config) + + def get_input_embeddings(self): + return self.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.decoder.embed_tokens = value + + def forward(self, *args, **kwargs): + return self.decoder(*args, **kwargs) + + +@add_start_docstrings( + """ + Whisper decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings). + """, + WHISPER_START_DOCSTRING, +) +class WhisperForCausalLM(WhisperPreTrainedModel): + _tied_weights_keys = ["proj_out.weight"] + main_input_name = "input_ids" + + def __init__(self, config): + super().__init__(config) + config.is_encoder_decoder = False + self.model = WhisperDecoderWrapper(config) + + self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.proj_out + + def set_output_embeddings(self, new_embeddings): + self.proj_out = new_embeddings + + def get_input_embeddings(self) -> nn.Module: + return self.model.get_input_embeddings() + + def set_input_embeddings(self, value): + self.model.set_input_embeddings(value) + + def set_decoder(self, decoder): + self.model.decoder = decoder + + def get_decoder(self): + return self.model.decoder + + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you + provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + [What are attention masks?](../glossary#attention-mask) + encoder_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional + tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains + pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If + `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + + Returns: + + Example: + + ```python + >>> from transformers import WhisperForCausalLM, WhisperForConditionalGeneration, WhisperProcessor + >>> import torch + >>> from datasets import load_dataset + + >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2") + + >>> assistant_model = WhisperForCausalLM.from_pretrained("distil-whisper/distil-large-v2") + + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> sample = ds[0]["audio"] + >>> input_features = processor( + ... sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt" + ... ).input_features + + >>> predicted_ids = model.generate(input_features, assistant_model=assistant_model) + + >>> # decode token ids to text + >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) + >>> transcription + ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.' + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # If the user passed a tuple or `BaseModelOutput` for encoder_outputs, we extract only the hidden states + if isinstance(encoder_outputs, (BaseModelOutput, tuple, list)): + encoder_outputs = encoder_outputs[0] + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_outputs, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + logits = self.proj_out(outputs[0]) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + use_cache=None, + encoder_outputs=None, + attention_mask=None, + **kwargs, + ): + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + 
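+        # hand only the trimmed input_ids, together with the cache and the encoder outputs, back to the forward pass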
+ return { + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "input_ids": input_ids, + "use_cache": use_cache, + "attention_mask": attention_mask, + } + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + @add_start_docstrings( """ Whisper Encoder Model with a sequence classification head on top (a linear layer over the pooled output) for tasks diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 1310312519cc..8ce211b21f87 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -8413,6 +8413,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class WhisperForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class WhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 42b67be91a1c..6468973d6758 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -43,6 +43,7 @@ AutoModelForSpeechSeq2Seq, AutoModelForVision2Seq, AutoTokenizer, + BartForCausalLM, BartForConditionalGeneration, BartTokenizer, GPT2LMHeadModel, @@ -3010,3 +3011,63 @@ def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, assistant_encoder_outputs=encoder_outputs, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) + + def test_assisted_decoding_encoder_decoder_shared_encoder(self): + # PT-only test: TF doesn't support assisted decoding yet. 
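+        # the assistant used later in this test is a decoder-only BartForCausalLM loaded from the same tiny checkpoint as the main model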
+ # Bart subclass with a kwarg called foo that distorts the output + class FakeBart(BartForConditionalGeneration): + def forward(self, input_ids, foo=False, **kwargs): + outs = super().forward(input_ids, **kwargs) + + if foo: + outs["logits"][:, :, :] = 0.0 + + return outs + + def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): + kwargs["encoder_outputs"] = encoder_outputs + inputs = super().prepare_inputs_for_generation(*args, **kwargs) + + inputs["foo"] = foo + return inputs + + model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( + torch_device + ) + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") + + text = "Hello world" + tokenized_inputs = tokenizer([text], return_tensors="pt") + input_ids = tokenized_inputs.input_ids.to(torch_device) + + # Traditional way of generating text + outputs_normal = model.generate(input_ids) + self.assertEqual(outputs_normal.shape, (1, 20)) + + # Should be different with foo + outputs_foo = model.generate(input_ids, foo=True) + with self.assertRaises(AssertionError): + self.assertListEqual(outputs_foo.tolist(), outputs_normal.tolist()) + + # Assistant model + assistant = BartForCausalLM.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( + torch_device + ) + + # If assisted generation passes model_kwargs correctly, should be same as previous + outputs_assisted = model.generate( + input_ids, + foo=True, + assistant_model=assistant, + ) + self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) + + # Check that passing encoder_outputs directly also works as expected + encoder_outputs = model.get_encoder()(input_ids) + + outputs_assisted = model.generate( + foo=True, + assistant_model=assistant, + encoder_outputs=encoder_outputs, + ) + self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 9bb835360887..bc1a7bd218c4 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -51,6 +51,7 @@ from transformers import ( WhisperFeatureExtractor, WhisperForAudioClassification, + WhisperForCausalLM, WhisperForConditionalGeneration, WhisperModel, WhisperProcessor, @@ -1990,3 +1991,246 @@ def test_equivalence_flax_to_pt(self): self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) + + +class WhisperStandaloneDecoderModelTester: + def __init__( + self, + parent, + batch_size=2, + is_training=True, + use_labels=False, + vocab_size=200, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + input_channels=1, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=20, + max_source_positions=30, + max_target_positions=40, + bos_token_id=98, + eos_token_id=98, + pad_token_id=0, + num_mel_bins=80, + decoder_start_token_id=85, + num_conv_layers=1, + suppress_tokens=None, + begin_suppress_tokens=None, + ): + self.parent = parent + self.batch_size = batch_size + self.is_training = is_training + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.input_channels = input_channels + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + 
self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.num_mel_bins = num_mel_bins + self.max_position_embeddings = max_position_embeddings + self.max_source_positions = max_source_positions + self.max_target_positions = max_target_positions + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.bos_token_id = bos_token_id + self.decoder_start_token_id = decoder_start_token_id + self.num_conv_layers = num_conv_layers + self.suppress_tokens = suppress_tokens + self.begin_suppress_tokens = begin_suppress_tokens + + def prepare_config_and_inputs(self): + input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) + + decoder_input_ids = torch.tensor( + self.batch_size * [[self.decoder_start_token_id, 3, 3, 7, 2]], device=torch_device + ) + + config = self.get_config() + config.is_encoder_decoder = False + inputs_dict = prepare_whisper_inputs_dict( + config, + attention_mask=None, + input_features=input_features, + decoder_input_ids=decoder_input_ids, + ) + + inputs_dict.pop("input_features") + inputs_dict.pop("head_mask") + inputs_dict.pop("decoder_head_mask") + inputs_dict.pop("cross_attn_head_mask") + + inputs_dict["attention_mask"] = inputs_dict.pop("decoder_attention_mask") + inputs_dict["input_ids"] = inputs_dict.pop("decoder_input_ids") + return config, inputs_dict + + @property + def encoder_seq_length(self): + return 5 + + @property + def seq_length(self): + return 5 + + def get_config(self): + return WhisperConfig( + vocab_size=self.vocab_size, + d_model=self.hidden_size, + encoder_layers=self.num_hidden_layers, + decoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + decoder_attention_heads=self.num_attention_heads, + input_channels=self.input_channels, + dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + max_source_positions=self.max_source_positions, + max_target_positions=self.max_target_positions, + eos_token_id=self.eos_token_id, + bos_token_id=self.bos_token_id, + pad_token_id=self.pad_token_id, + decoder_ffn_dim=self.hidden_size, + encoder_ffn_dim=self.hidden_size, + decoder_start_token_id=self.decoder_start_token_id, + suppress_tokens=self.suppress_tokens, + begin_suppress_tokens=self.begin_suppress_tokens, + ) + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + + inputs_dict["input_ids"][:, -1] = self.pad_token_id + + return config, inputs_dict + + def prepare_config_and_inputs_for_decoder(self): + config, input_features = self.prepare_config_and_inputs() + input_ids = input_features["input_ids"] + encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size]) + + return (config, input_ids, encoder_hidden_states) + + def create_and_check_decoder_model_past(self, config, input_ids): + config.use_cache = True + model = WhisperDecoder(config=config).to(torch_device).eval() + # first forward pass + outputs = model(input_ids, use_cache=True) + outputs_use_cache_conf = model(input_ids) + outputs_no_past = model(input_ids, use_cache=False) + + self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) + self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) + + past_key_values = outputs["past_key_values"] + + # create hypothetical next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) + + # append to 
next input_ids and + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + + output_from_no_past = model(next_input_ids)["last_hidden_state"] + output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() + + # test that outputs are equal for slice + assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) + + def create_and_check_decoder_model_attention_mask_past(self, config, input_ids): + model = WhisperDecoder(config=config).to(torch_device).eval() + + # create attention mask + attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) + + half_seq_length = input_ids.shape[-1] // 2 + attn_mask[:, half_seq_length:] = 0 + + # first forward pass + past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] + + # create hypothetical next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) + + # change a random masked slice from input_ids + random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 + random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) + input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens + + # append to next input_ids and attn_mask + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + attn_mask = torch.cat( + [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], + dim=1, + ) + + # get two different outputs + output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] + output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[ + "last_hidden_state" + ] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() + + # test that outputs are equal for slice + assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) + + +@require_torch +class WhisperStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = (WhisperDecoder, WhisperForCausalLM) if is_torch_available() else () + all_generative_model_classes = (WhisperForCausalLM,) if is_torch_available() else () + fx_comptatible = False + test_pruning = False + is_encoder_decoder = False + test_missing_keys = False + + def setUp(self): + self.model_tester = WhisperStandaloneDecoderModelTester(self, is_training=False) + self.config_tester = ConfigTester(self, config_class=WhisperConfig) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_decoder_model_past(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + config, inputs_dict = config_and_inputs + + self.model_tester.create_and_check_decoder_model_past(config=config, input_ids=inputs_dict["input_ids"]) + + def test_decoder_model_attn_mask_past(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + config, inputs_dict = config_and_inputs + + 
self.model_tester.create_and_check_decoder_model_attention_mask_past( + config=config, input_ids=inputs_dict["input_ids"] + ) + + @unittest.skip("Generate needs input ids") + def test_generate_without_input_ids(self): + # generate only works with input ids for whisper + pass + + @unittest.skip("Decoder can't keep attention grads") + def test_retain_grad_hidden_states_attentions(self): + # decoder cannot keep gradients + return + + @unittest.skip("The model doesn't support fast init from base") + def test_save_load_fast_init_from_base(self): + pass + + @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) + def test_left_padding_compatibility(self): + pass From f8afb2b2ec5b2fe33667d0a9b7655501d5d2e19a Mon Sep 17 00:00:00 2001 From: Andi Powers Holmes Date: Thu, 2 Nov 2023 02:09:55 +1100 Subject: [PATCH 048/268] Add TensorFlow implementation of ConvNeXTv2 (#25558) * Add type annotations to TFConvNextDropPath * Use tf.debugging.assert_equal for TFConvNextEmbeddings shape check * Add TensorFlow implementation of ConvNeXTV2 * check_docstrings: add TFConvNextV2Model to exclusions TFConvNextV2Model and TFConvNextV2ForImageClassification have docstrings which are equivalent to their PyTorch cousins, but a parsing issue prevents them from passing the test. Adding exclusions for these two classes as discussed in #25558. --- docs/source/en/index.md | 2 +- docs/source/en/model_doc/convnextv2.md | 13 +- src/transformers/__init__.py | 12 + .../models/auto/modeling_tf_auto.py | 2 + .../models/convnext/modeling_tf_convnext.py | 49 +- .../models/convnextv2/__init__.py | 24 + .../convnextv2/modeling_tf_convnextv2.py | 595 ++++++++++++++++++ .../modeling_tf_efficientformer.py | 4 +- .../models/segformer/modeling_tf_segformer.py | 4 +- src/transformers/utils/dummy_tf_objects.py | 21 + .../convnextv2/test_modeling_tf_convnextv2.py | 308 +++++++++ utils/check_docstrings.py | 2 + 12 files changed, 1012 insertions(+), 24 deletions(-) create mode 100644 src/transformers/models/convnextv2/modeling_tf_convnextv2.py create mode 100644 tests/models/convnextv2/test_modeling_tf_convnextv2.py diff --git a/docs/source/en/index.md b/docs/source/en/index.md index 5a76935b71c7..8aa372391d19 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -97,7 +97,7 @@ Flax), PyTorch, and/or TensorFlow. 
| [Conditional DETR](model_doc/conditional_detr) | ✅ | ❌ | ❌ | | [ConvBERT](model_doc/convbert) | ✅ | ✅ | ❌ | | [ConvNeXT](model_doc/convnext) | ✅ | ✅ | ❌ | -| [ConvNeXTV2](model_doc/convnextv2) | ✅ | ❌ | ❌ | +| [ConvNeXTV2](model_doc/convnextv2) | ✅ | ✅ | ❌ | | [CPM](model_doc/cpm) | ✅ | ✅ | ✅ | | [CPM-Ant](model_doc/cpmant) | ✅ | ❌ | ❌ | | [CTRL](model_doc/ctrl) | ✅ | ✅ | ❌ | diff --git a/docs/source/en/model_doc/convnextv2.md b/docs/source/en/model_doc/convnextv2.md index 9479cdd56fa1..af08128c45ef 100644 --- a/docs/source/en/model_doc/convnextv2.md +++ b/docs/source/en/model_doc/convnextv2.md @@ -58,4 +58,15 @@ If you're interested in submitting a resource to be included here, please feel f ## ConvNextV2ForImageClassification [[autodoc]] ConvNextV2ForImageClassification - - forward \ No newline at end of file + - forward + +## TFConvNextV2Model + +[[autodoc]] TFConvNextV2Model + - call + + +## TFConvNextV2ForImageClassification + +[[autodoc]] TFConvNextV2ForImageClassification + - call diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 01127a5651d3..cb3e7b0353e3 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -3415,6 +3415,13 @@ "TFConvNextPreTrainedModel", ] ) + _import_structure["models.convnextv2"].extend( + [ + "TFConvNextV2ForImageClassification", + "TFConvNextV2Model", + "TFConvNextV2PreTrainedModel", + ] + ) _import_structure["models.ctrl"].extend( [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -7127,6 +7134,11 @@ TFConvBertPreTrainedModel, ) from .models.convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel + from .models.convnextv2 import ( + TFConvNextV2ForImageClassification, + TFConvNextV2Model, + TFConvNextV2PreTrainedModel, + ) from .models.ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index b334dd30917f..e79922f92822 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -39,6 +39,7 @@ ("clip", "TFCLIPModel"), ("convbert", "TFConvBertModel"), ("convnext", "TFConvNextModel"), + ("convnextv2", "TFConvNextV2Model"), ("ctrl", "TFCTRLModel"), ("cvt", "TFCvtModel"), ("data2vec-vision", "TFData2VecVisionModel"), @@ -200,6 +201,7 @@ [ # Model for Image-classsification ("convnext", "TFConvNextForImageClassification"), + ("convnextv2", "TFConvNextV2ForImageClassification"), ("cvt", "TFCvtForImageClassification"), ("data2vec-vision", "TFData2VecVisionForImageClassification"), ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")), diff --git a/src/transformers/models/convnext/modeling_tf_convnext.py b/src/transformers/models/convnext/modeling_tf_convnext.py index 1629988900aa..59a36b398376 100644 --- a/src/transformers/models/convnext/modeling_tf_convnext.py +++ b/src/transformers/models/convnext/modeling_tf_convnext.py @@ -17,7 +17,7 @@ from __future__ import annotations -from typing import Optional, Tuple, Union +from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf @@ -50,11 +50,11 @@ class TFConvNextDropPath(tf.keras.layers.Layer): (1) github.com:rwightman/pytorch-image-models """ - def __init__(self, drop_path, **kwargs): + def __init__(self, drop_path: float, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path - def call(self, x, training=None): + def call(self, x: tf.Tensor, 
training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) @@ -69,7 +69,7 @@ class TFConvNextEmbeddings(tf.keras.layers.Layer): found in src/transformers/models/swin/modeling_swin.py. """ - def __init__(self, config, **kwargs): + def __init__(self, config: ConvNextConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = tf.keras.layers.Conv2D( filters=config.hidden_sizes[0], @@ -77,7 +77,7 @@ def __init__(self, config, **kwargs): strides=config.patch_size, name="patch_embeddings", kernel_initializer=get_initializer(config.initializer_range), - bias_initializer="zeros", + bias_initializer=tf.keras.initializers.Zeros(), ) self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm") self.num_channels = config.num_channels @@ -86,15 +86,15 @@ def call(self, pixel_values): if isinstance(pixel_values, dict): pixel_values = pixel_values["pixel_values"] - num_channels = shape_list(pixel_values)[1] - if tf.executing_eagerly() and num_channels != self.num_channels: - raise ValueError( - "Make sure that the channel dimension of the pixel values match with the one set in the configuration." - ) + tf.debugging.assert_equal( + shape_list(pixel_values)[1], + self.num_channels, + message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.", + ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. - # shape = (batch_size, in_height, in_width, in_channels=num_channels) + # shape = (batch_size, in_height, in_width, in_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) embeddings = self.patch_embeddings(pixel_values) @@ -188,15 +188,28 @@ class TFConvNextStage(tf.keras.layers.Layer): """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks. Args: - config ([`ConvNextConfig`]): Model configuration class. - in_channels (`int`): Number of input channels. - out_channels (`int`): Number of output channels. - depth (`int`): Number of residual blocks. - drop_path_rates(`List[float]`): Stochastic depth rates for each layer. + config (`ConvNextV2Config`): + Model configuration class. + in_channels (`int`): + Number of input channels. + out_channels (`int`): + Number of output channels. + depth (`int`): + Number of residual blocks. + drop_path_rates(`List[float]`): + Stochastic depth rates for each layer. 
""" def __init__( - self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None, **kwargs + self, + config: ConvNextConfig, + in_channels: int, + out_channels: int, + kernel_size: int = 2, + stride: int = 2, + depth: int = 2, + drop_path_rates: Optional[List[float]] = None, + **kwargs, ): super().__init__(**kwargs) if in_channels != out_channels or stride > 1: @@ -215,7 +228,7 @@ def __init__( kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), - bias_initializer="zeros", + bias_initializer=tf.keras.initializers.Zeros(), name="downsampling_layer.1", ), ] diff --git a/src/transformers/models/convnextv2/__init__.py b/src/transformers/models/convnextv2/__init__.py index 9bfd6b26e05c..d2a484b9b828 100644 --- a/src/transformers/models/convnextv2/__init__.py +++ b/src/transformers/models/convnextv2/__init__.py @@ -22,6 +22,7 @@ OptionalDependencyNotAvailable, _LazyModule, is_torch_available, + is_tf_available, ) @@ -46,6 +47,17 @@ "ConvNextV2Backbone", ] +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_convnextv2"] = [ + "TFConvNextV2ForImageClassification", + "TFConvNextV2Model", + "TFConvNextV2PreTrainedModel", + ] if TYPE_CHECKING: from .configuration_convnextv2 import ( @@ -67,6 +79,18 @@ ConvNextV2PreTrainedModel, ) + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_convnextv2 import ( + TFConvNextV2ForImageClassification, + TFConvNextV2Model, + TFConvNextV2PreTrainedModel, + ) + else: import sys diff --git a/src/transformers/models/convnextv2/modeling_tf_convnextv2.py b/src/transformers/models/convnextv2/modeling_tf_convnextv2.py new file mode 100644 index 000000000000..863e59406f1c --- /dev/null +++ b/src/transformers/models/convnextv2/modeling_tf_convnextv2.py @@ -0,0 +1,595 @@ +# coding=utf-8 +# Copyright 2023 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" TF 2.0 ConvNextV2 model.""" + + +from __future__ import annotations + +from typing import List, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutputWithNoAttention, + TFBaseModelOutputWithPooling, + TFBaseModelOutputWithPoolingAndNoAttention, + TFImageClassifierOutputWithNoAttention, +) +from ...modeling_tf_utils import ( + TFModelInputType, + TFPreTrainedModel, + TFSequenceClassificationLoss, + get_initializer, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import shape_list +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from .configuration_convnextv2 import ConvNextV2Config + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "ConvNextV2Config" + +# Base docstring +_CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224" +_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + +CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "facebook/convnextv2-tiny-1k-224", + # See all ConvNextV2 models at https://huggingface.co/models?filter=convnextv2 +] + + +# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->ConvNextV2 +class TFConvNextV2DropPath(tf.keras.layers.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + References: + (1) github.com:rwightman/pytorch-image-models + """ + + def __init__(self, drop_path: float, **kwargs): + super().__init__(**kwargs) + self.drop_path = drop_path + + def call(self, x: tf.Tensor, training=None): + if training: + keep_prob = 1 - self.drop_path + shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) + random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) + random_tensor = tf.floor(random_tensor) + return (x / keep_prob) * random_tensor + return x + + +class TFConvNextV2GRN(tf.keras.layers.Layer): + """GRN (Global Response Normalization) layer""" + + def __init__(self, config: ConvNextV2Config, dim: int, **kwargs): + super().__init__(**kwargs) + self.dim = dim + + def build(self, input_shape: tf.TensorShape = None): + # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa) + self.weight = self.add_weight( + name="weight", + shape=(1, 1, 1, self.dim), + initializer=tf.keras.initializers.Zeros(), + ) + self.bias = self.add_weight( + name="bias", + shape=(1, 1, 1, self.dim), + initializer=tf.keras.initializers.Zeros(), + ) + return super().build(input_shape) + + def call(self, hidden_states: tf.Tensor): + global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True) + norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6) + hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states + return hidden_states + + +# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextEmbeddings with ConvNext->ConvNextV2 +class TFConvNextV2Embeddings(tf.keras.layers.Layer): + """This class is comparable to (and inspired by) the SwinEmbeddings class + found in src/transformers/models/swin/modeling_swin.py. 
+ """ + + def __init__(self, config: ConvNextV2Config, **kwargs): + super().__init__(**kwargs) + self.patch_embeddings = tf.keras.layers.Conv2D( + filters=config.hidden_sizes[0], + kernel_size=config.patch_size, + strides=config.patch_size, + name="patch_embeddings", + kernel_initializer=get_initializer(config.initializer_range), + bias_initializer=tf.keras.initializers.Zeros(), + ) + self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm") + self.num_channels = config.num_channels + + def call(self, pixel_values): + if isinstance(pixel_values, dict): + pixel_values = pixel_values["pixel_values"] + + tf.debugging.assert_equal( + shape_list(pixel_values)[1], + self.num_channels, + message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.", + ) + + # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. + # So change the input format from `NCHW` to `NHWC`. + # shape = (batch_size, in_height, in_width, in_channels) + pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) + + embeddings = self.patch_embeddings(pixel_values) + embeddings = self.layernorm(embeddings) + return embeddings + + +class TFConvNextV2Layer(tf.keras.layers.Layer): + """This corresponds to the `Block` class in the original implementation. + + There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C, + H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back + + The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow + NHWC ordering, we can just apply the operations straight-away without the permutation. + + Args: + config (`ConvNextV2Config`): + Model configuration class. + dim (`int`): + Number of input channels. + drop_path (`float`, defaults to 0.0): + Stochastic depth rate. + """ + + def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float = 0.0, **kwargs): + super().__init__(**kwargs) + self.dim = dim + self.config = config + self.dwconv = tf.keras.layers.Conv2D( + filters=dim, + kernel_size=7, + padding="same", + groups=dim, + kernel_initializer=get_initializer(config.initializer_range), + bias_initializer=tf.keras.initializers.Zeros(), + name="dwconv", + ) # depthwise conv + self.layernorm = tf.keras.layers.LayerNormalization( + epsilon=1e-6, + name="layernorm", + ) + self.pwconv1 = tf.keras.layers.Dense( + units=4 * dim, + kernel_initializer=get_initializer(config.initializer_range), + bias_initializer=tf.keras.initializers.Zeros(), + name="pwconv1", + ) # pointwise/1x1 convs, implemented with linear layers + self.act = get_tf_activation(config.hidden_act) + self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name="grn") + self.pwconv2 = tf.keras.layers.Dense( + units=dim, + kernel_initializer=get_initializer(config.initializer_range), + bias_initializer=tf.keras.initializers.Zeros(), + name="pwconv2", + ) + # Using `layers.Activation` instead of `tf.identity` to better control `training` + # behaviour. 
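+        # Stochastic depth is only applied in training mode; at inference the branch is an identity op.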
+ self.drop_path = ( + TFConvNextV2DropPath(drop_path, name="drop_path") + if drop_path > 0.0 + else tf.keras.layers.Activation("linear", name="drop_path") + ) + + def call(self, hidden_states, training=False): + input = hidden_states + x = self.dwconv(hidden_states) + x = self.layernorm(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.grn(x) + x = self.pwconv2(x) + x = self.drop_path(x, training=training) + x = input + x + return x + + +# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextStage with ConvNext->ConvNextV2 +class TFConvNextV2Stage(tf.keras.layers.Layer): + """ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks. + + Args: + config (`ConvNextV2V2Config`): + Model configuration class. + in_channels (`int`): + Number of input channels. + out_channels (`int`): + Number of output channels. + depth (`int`): + Number of residual blocks. + drop_path_rates(`List[float]`): + Stochastic depth rates for each layer. + """ + + def __init__( + self, + config: ConvNextV2Config, + in_channels: int, + out_channels: int, + kernel_size: int = 2, + stride: int = 2, + depth: int = 2, + drop_path_rates: Optional[List[float]] = None, + **kwargs, + ): + super().__init__(**kwargs) + if in_channels != out_channels or stride > 1: + self.downsampling_layer = [ + tf.keras.layers.LayerNormalization( + epsilon=1e-6, + name="downsampling_layer.0", + ), + # Inputs to this layer will follow NHWC format since we + # transposed the inputs from NCHW to NHWC in the `TFConvNextV2Embeddings` + # layer. All the outputs throughout the model will be in NHWC + # from this point on until the output where we again change to + # NCHW. + tf.keras.layers.Conv2D( + filters=out_channels, + kernel_size=kernel_size, + strides=stride, + kernel_initializer=get_initializer(config.initializer_range), + bias_initializer=tf.keras.initializers.Zeros(), + name="downsampling_layer.1", + ), + ] + else: + self.downsampling_layer = [tf.identity] + + drop_path_rates = drop_path_rates or [0.0] * depth + self.layers = [ + TFConvNextV2Layer( + config, + dim=out_channels, + drop_path=drop_path_rates[j], + name=f"layers.{j}", + ) + for j in range(depth) + ] + + def call(self, hidden_states): + for layer in self.downsampling_layer: + hidden_states = layer(hidden_states) + for layer in self.layers: + hidden_states = layer(hidden_states) + return hidden_states + + +class TFConvNextV2Encoder(tf.keras.layers.Layer): + def __init__(self, config: ConvNextV2Config, **kwargs): + super().__init__(**kwargs) + self.stages = [] + drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths)) + drop_path_rates = tf.split(drop_path_rates, config.depths) + drop_path_rates = [x.numpy().tolist() for x in drop_path_rates] + prev_chs = config.hidden_sizes[0] + for i in range(config.num_stages): + out_chs = config.hidden_sizes[i] + stage = TFConvNextV2Stage( + config, + in_channels=prev_chs, + out_channels=out_chs, + stride=2 if i > 0 else 1, + depth=config.depths[i], + drop_path_rates=drop_path_rates[i], + name=f"stages.{i}", + ) + self.stages.append(stage) + prev_chs = out_chs + + def call( + self, + hidden_states: tf.Tensor, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple, TFBaseModelOutputWithNoAttention]: + all_hidden_states = () if output_hidden_states else None + + for i, layer_module in enumerate(self.stages): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + hidden_states = 
layer_module(hidden_states) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) + + return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) + + +@keras_serializable +class TFConvNextV2MainLayer(tf.keras.layers.Layer): + config_class = ConvNextV2Config + + def __init__(self, config: ConvNextV2Config, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.embeddings = TFConvNextV2Embeddings(config, name="embeddings") + self.encoder = TFConvNextV2Encoder(config, name="encoder") + self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") + # We are setting the `data_format` like so because from here on we will revert to the + # NCHW output format + self.pooler = tf.keras.layers.GlobalAvgPool2D(data_format="channels_last") + + @unpack_inputs + def call( + self, + pixel_values: TFModelInputType | None = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + embedding_output = self.embeddings(pixel_values, training=training) + + encoder_outputs = self.encoder( + embedding_output, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + last_hidden_state = encoder_outputs[0] + + # Change to NCHW output format have uniformity in the modules + pooled_output = self.pooler(last_hidden_state) + last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2)) + pooled_output = self.layernorm(pooled_output) + + # Change the other hidden state outputs to NCHW as well + if output_hidden_states: + hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) + + if not return_dict: + hidden_states = hidden_states if output_hidden_states else () + return (last_hidden_state, pooled_output) + hidden_states + + return TFBaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, + ) + + +class TFConvNextV2PreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ConvNextV2Config + base_model_prefix = "convnextv2" + main_input_name = "pixel_values" + + +CONVNEXTV2_START_DOCSTRING = r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. 
+ + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Parameters: + config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CONVNEXTV2_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`ConvNextImageProcessor.__call__`] for details. + + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to `True`. 
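+        training (`bool`, *optional*, defaults to `False`):
+            Whether or not to run the model in training mode (some modules, such as stochastic depth, behave
+            differently between training and evaluation).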
+""" + + +@add_start_docstrings( + "The bare ConvNextV2 model outputting raw features without any specific head on top.", + CONVNEXTV2_START_DOCSTRING, +) +class TFConvNextV2Model(TFConvNextV2PreTrainedModel): + def __init__(self, config: ConvNextV2Config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2") + + @unpack_inputs + @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPoolingAndNoAttention, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def call( + self, + pixel_values: TFModelInputType | None = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + outputs = self.convnextv2( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + if not return_dict: + return outputs[:] + + return TFBaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=outputs.last_hidden_state, + pooler_output=outputs.pooler_output, + hidden_states=outputs.hidden_states, + ) + + +@add_start_docstrings( + """ + ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for + ImageNet. + """, + CONVNEXTV2_START_DOCSTRING, +) +class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss): + def __init__(self, config: ConvNextV2Config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.num_labels = config.num_labels + self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2") + + # Classifier head + self.classifier = tf.keras.layers.Dense( + units=config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + bias_initializer=tf.keras.initializers.Zeros(), + name="classifier", + ) + + @unpack_inputs + @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=TFImageClassifierOutputWithNoAttention, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def call( + self, + pixel_values: TFModelInputType | None = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + outputs = self.convnextv2( + pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + + logits = self.classifier(pooled_output) + loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFImageClassifierOutputWithNoAttention( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + ) diff --git a/src/transformers/models/efficientformer/modeling_tf_efficientformer.py b/src/transformers/models/efficientformer/modeling_tf_efficientformer.py index 1907af388f92..c44a15342874 100644 --- a/src/transformers/models/efficientformer/modeling_tf_efficientformer.py +++ b/src/transformers/models/efficientformer/modeling_tf_efficientformer.py @@ -337,11 +337,11 @@ class TFEfficientFormerDropPath(tf.keras.layers.Layer): (1) github.com:rwightman/pytorch-image-models """ - def __init__(self, drop_path, **kwargs): + def __init__(self, drop_path: float, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path - def call(self, x, training=None): + def call(self, x: tf.Tensor, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) diff --git a/src/transformers/models/segformer/modeling_tf_segformer.py b/src/transformers/models/segformer/modeling_tf_segformer.py index 9d6132928a61..b7fd4d2258a7 100644 --- a/src/transformers/models/segformer/modeling_tf_segformer.py +++ b/src/transformers/models/segformer/modeling_tf_segformer.py @@ -62,11 +62,11 @@ class TFSegformerDropPath(tf.keras.layers.Layer): (1) github.com:rwightman/pytorch-image-models """ - def __init__(self, drop_path, **kwargs): + def __init__(self, drop_path: float, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path - def call(self, x, training=None): + def call(self, x: tf.Tensor, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 972ab49c0f5b..5bc238f54278 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -836,6 +836,27 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +class TFConvNextV2ForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/convnextv2/test_modeling_tf_convnextv2.py b/tests/models/convnextv2/test_modeling_tf_convnextv2.py new file mode 100644 index 000000000000..5f0bd2f29de8 --- /dev/null +++ 
b/tests/models/convnextv2/test_modeling_tf_convnextv2.py @@ -0,0 +1,308 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the TensorFlow ConvNext model. """ + +from __future__ import annotations + +import inspect +import unittest +from typing import List, Tuple + +import numpy as np + +from transformers import ConvNextV2Config +from transformers.testing_utils import require_tf, require_vision, slow +from transformers.utils import cached_property, is_tf_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin + + +if is_tf_available(): + import tensorflow as tf + + from transformers import TFConvNextV2ForImageClassification, TFConvNextV2Model + + +if is_vision_available(): + from PIL import Image + + from transformers import ConvNextImageProcessor + + +class TFConvNextV2ModelTester: + def __init__( + self, + parent, + batch_size=13, + image_size=32, + num_channels=3, + num_stages=4, + hidden_sizes=[10, 20, 30, 40], + depths=[2, 2, 3, 2], + is_training=True, + use_labels=True, + intermediate_size=37, + hidden_act="gelu", + type_sequence_label_size=10, + initializer_range=0.02, + num_labels=3, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.num_channels = num_channels + self.num_stages = num_stages + self.hidden_sizes = hidden_sizes + self.depths = depths + self.is_training = is_training + self.use_labels = use_labels + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.scope = scope + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + labels = None + if self.use_labels: + labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + + config = self.get_config() + + return config, pixel_values, labels + + def get_config(self): + return ConvNextV2Config( + num_channels=self.num_channels, + hidden_sizes=self.hidden_sizes, + depths=self.depths, + num_stages=self.num_stages, + hidden_act=self.hidden_act, + is_decoder=False, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, pixel_values, labels): + model = TFConvNextV2Model(config=config) + result = model(pixel_values, training=False) + # expected last hidden states: batch_size, channels, height // 32, width // 32 + self.parent.assertEqual( + result.last_hidden_state.shape, + (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), + ) + + def create_and_check_for_image_classification(self, config, pixel_values, labels): + config.num_labels = self.type_sequence_label_size + model = 
TFConvNextV2ForImageClassification(config) + result = model(pixel_values, labels=labels, training=False) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values, labels = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_tf +class TFConvNextV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids, inputs_embeds, + attention_mask and seq_length. + """ + + all_model_classes = (TFConvNextV2Model, TFConvNextV2ForImageClassification) if is_tf_available() else () + pipeline_model_mapping = ( + {"feature-extraction": TFConvNextV2Model, "image-classification": TFConvNextV2ForImageClassification} + if is_tf_available() + else {} + ) + + test_pruning = False + test_onnx = False + test_resize_embeddings = False + test_head_masking = False + has_attentions = False + + def setUp(self): + self.model_tester = TFConvNextV2ModelTester(self) + self.config_tester = ConfigTester( + self, + config_class=ConvNextV2Config, + has_text_modality=False, + hidden_size=37, + ) + + @unittest.skip(reason="ConvNext does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skipIf( + not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, + reason="TF does not support backprop for grouped convolutions on CPU.", + ) + @slow + def test_keras_fit(self): + super().test_keras_fit() + + @unittest.skip(reason="ConvNext does not support input and output embeddings") + def test_model_common_attributes(self): + pass + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.call) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + @unittest.skipIf( + not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, + reason="TF does not support backprop for grouped convolutions on CPU.", + ) + def test_dataset_conversion(self): + super().test_dataset_conversion() + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + + expected_num_stages = self.model_tester.num_stages + self.assertEqual(len(hidden_states), expected_num_stages + 1) + + # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [self.model_tester.image_size // 4, self.model_tester.image_size // 4], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, 
model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + # Since ConvNext does not have any attention we need to rewrite this test. + def test_model_outputs_equivalence(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): + tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) + dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + all(tf.equal(tuple_object, dict_object)), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" + ), + ) + + recursive_check(tuple_output, dict_output) + + for model_class in self.all_model_classes: + model = model_class(config) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class) + dict_inputs = self._prepare_for_class(inputs_dict, model_class) + check_equivalence(model, tuple_inputs, dict_inputs) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + check_equivalence(model, tuple_inputs, dict_inputs) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class) + dict_inputs = self._prepare_for_class(inputs_dict, model_class) + check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) + + def test_for_image_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_image_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + model = TFConvNextV2Model.from_pretrained("facebook/convnextv2-tiny-1k-224") + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_tf +@require_vision +class TFConvNextV2ModelIntegrationTest(unittest.TestCase): + @cached_property + def default_image_processor(self): + return ( + ConvNextImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") + if is_vision_available() + else None + ) + + @slow + def test_inference_image_classification_head(self): + model = TFConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224") + + image_processor = self.default_image_processor + image = prepare_img() + inputs = image_processor(images=image, return_tensors="tf") + + # forward pass + outputs = model(**inputs) + + # verify the logits + expected_shape = tf.TensorShape((1, 1000)) + self.assertEqual(outputs.logits.shape, expected_shape) + + expected_slice = np.array([0.9996, 0.1966, -0.4386]) + + 
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4)) diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py index 3d8a2881bf20..cddd04ac6516 100644 --- a/utils/check_docstrings.py +++ b/utils/check_docstrings.py @@ -533,6 +533,8 @@ "TFConvBertModel", "TFConvNextForImageClassification", "TFConvNextModel", + "TFConvNextV2Model", # Parsing issue. Equivalent to PT ConvNextV2Model, see PR #25558 + "TFConvNextV2ForImageClassification", "TFCvtForImageClassification", "TFCvtModel", "TFDPRReader", From 21a2fbaf48ad7841567472b23dd8c0e553edd495 Mon Sep 17 00:00:00 2001 From: Wesley L Passos <33898112+wesleylp@users.noreply.github.com> Date: Wed, 1 Nov 2023 12:31:13 -0300 Subject: [PATCH 049/268] Fix docstring in get_oneformer_resize_output_image_size func (#27207) --- .../models/oneformer/image_processing_oneformer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/oneformer/image_processing_oneformer.py b/src/transformers/models/oneformer/image_processing_oneformer.py index 16f5013f154a..da060fa0514f 100644 --- a/src/transformers/models/oneformer/image_processing_oneformer.py +++ b/src/transformers/models/oneformer/image_processing_oneformer.py @@ -307,14 +307,16 @@ def get_oneformer_resize_output_image_size( Computes the output size given the desired size. Args: - input_image (`np.ndarray`): + image (`np.ndarray`): The input image. - size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): + size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`): The size of the output image. max_size (`int`, *optional*): The maximum size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `Tuple[int, int]`: The output size. From 1e32b05e066922e1bb3abdaa99e3b0fb3c0ee6e1 Mon Sep 17 00:00:00 2001 From: Rafael Padilla <31217453+rafaelpadilla@users.noreply.github.com> Date: Wed, 1 Nov 2023 12:58:35 -0300 Subject: [PATCH 050/268] improving TimmBackbone to support FrozenBatchNorm2d (#27160) * supporting freeze_batch_norm_2d * supporting freeze_batch_norm_2d * including unfreeze + separate into methods * fix typo * calling unfreeze * lint * Update src/transformers/models/timm_backbone/modeling_timm_backbone.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: Rafael Padilla Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../timm_backbone/configuration_timm_backbone.py | 4 ++++ .../models/timm_backbone/modeling_timm_backbone.py | 11 +++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/transformers/models/timm_backbone/configuration_timm_backbone.py b/src/transformers/models/timm_backbone/configuration_timm_backbone.py index 19bfcbebf62b..23d2aa223704 100644 --- a/src/transformers/models/timm_backbone/configuration_timm_backbone.py +++ b/src/transformers/models/timm_backbone/configuration_timm_backbone.py @@ -43,6 +43,8 @@ class TimmBackboneConfig(PretrainedConfig): out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). Will default to the last stage if unset. 
+ freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`): + Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. Example: ```python @@ -67,6 +69,7 @@ def __init__( features_only=True, use_pretrained_backbone=True, out_indices=None, + freeze_batch_norm_2d=False, **kwargs, ): super().__init__(**kwargs) @@ -76,3 +79,4 @@ def __init__( self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else (-1,) + self.freeze_batch_norm_2d = freeze_batch_norm_2d diff --git a/src/transformers/models/timm_backbone/modeling_timm_backbone.py b/src/transformers/models/timm_backbone/modeling_timm_backbone.py index dc117f743642..0c6fe67b7573 100644 --- a/src/transformers/models/timm_backbone/modeling_timm_backbone.py +++ b/src/transformers/models/timm_backbone/modeling_timm_backbone.py @@ -72,6 +72,11 @@ def __init__(self, config, **kwargs): out_indices=out_indices, **kwargs, ) + + # Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively + if getattr(config, "freeze_batch_norm_2d", False): + self.freeze_batch_norm_2d() + # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. self._return_layers = self._backbone.return_layers @@ -102,6 +107,12 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): ) return super()._from_config(config, **kwargs) + def freeze_batch_norm_2d(self): + timm.layers.freeze_batch_norm_2d(self._backbone) + + def unfreeze_batch_norm_2d(self): + timm.layers.unfreeze_batch_norm_2d(self._backbone) + def _init_weights(self, module): """ Empty init weights function to ensure compatibility of the class in the library. From 239cd0eaa2d8d6cd3147f1e1457d4c1b0f0ad48a Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Wed, 1 Nov 2023 11:28:34 -0500 Subject: [PATCH 051/268] Translate task summary to chinese (#27180) * translate task_summary.md to chinese * update translation * update translation * fix _toctree.yml --- docs/source/zh/_toctree.yml | 4 + docs/source/zh/task_summary.md | 347 +++++++++++++++++++++++++++++++++ 2 files changed, 351 insertions(+) create mode 100644 docs/source/zh/task_summary.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 659c81d7582b..d77fe9adc411 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -32,3 +32,7 @@ - local: tflite title: 导出为 TFLite title: 开发者指南 +- sections: + - local: task_summary + title: 🤗Transformers能做什么 + title: 概念指南 \ No newline at end of file diff --git a/docs/source/zh/task_summary.md b/docs/source/zh/task_summary.md new file mode 100644 index 000000000000..da60f4a080a2 --- /dev/null +++ b/docs/source/zh/task_summary.md @@ -0,0 +1,347 @@ + + +# 🤗 Transformers 能做什么 + +🤗 Transformers是一个用于自然语言处理(NLP)、计算机视觉和音频和语音处理任务的预训练模型库。该库不仅包含Transformer模型,还包括用于计算机视觉任务的现代卷积网络等非Transformer模型。如果您看看今天最受欢迎的一些消费产品,比如智能手机、应用程序和电视,很可能背后都有某种深度学习技术的支持。想要从您智能手机拍摄的照片中删除背景对象吗?这里是一个全景分割任务的例子(如果您还不了解这是什么意思,我们将在以下部分进行描述!)。 + +本页面提供了使用🤗 Transformers库仅用三行代码解决不同的语音和音频、计算机视觉和NLP任务的概述! 
+ + +## 音频 +音频和语音处理任务与其他模态略有不同,主要是因为音频作为输入是一个连续的信号。与文本不同,原始音频波形不能像句子可以被划分为单词那样被整齐地分割成离散的块。为了解决这个问题,通常在固定的时间间隔内对原始音频信号进行采样。如果在每个时间间隔内采样更多样本,采样率就会更高,音频更接近原始音频源。 + +以前的方法是预处理音频以从中提取有用的特征。现在更常见的做法是直接将原始音频波形输入到特征编码器中,以提取音频表示。这样可以简化预处理步骤,并允许模型学习最重要的特征。 + +### 音频分类 + +音频分类是一项将音频数据从预定义的类别集合中进行标记的任务。这是一个广泛的类别,具有许多具体的应用,其中一些包括: + +* 声学场景分类:使用场景标签("办公室"、"海滩"、"体育场")对音频进行标记。 +* 声学事件检测:使用声音事件标签("汽车喇叭声"、"鲸鱼叫声"、"玻璃破碎声")对音频进行标记。 +* 标记:对包含多种声音的音频进行标记(鸟鸣、会议中的说话人识别)。 +* 音乐分类:使用流派标签("金属"、"嘻哈"、"乡村")对音乐进行标记。 + +```py +>>> from transformers import pipeline + +>>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er") +>>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") +>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] +>>> preds +[{'score': 0.4532, 'label': 'hap'}, + {'score': 0.3622, 'label': 'sad'}, + {'score': 0.0943, 'label': 'neu'}, + {'score': 0.0903, 'label': 'ang'}] +``` + +### 自动语音识别 + +自动语音识别(ASR)将语音转录为文本。这是最常见的音频任务之一,部分原因是因为语音是人类交流的自然形式。如今,ASR系统嵌入在智能技术产品中,如扬声器、电话和汽车。我们可以要求虚拟助手播放音乐、设置提醒和告诉我们天气。 + +但是,Transformer架构帮助解决的一个关键挑战是低资源语言。通过在大量语音数据上进行预训练,仅在一个低资源语言的一小时标记语音数据上进行微调,仍然可以产生与以前在100倍更多标记数据上训练的ASR系统相比高质量的结果。 + +```py +>>> from transformers import pipeline + +>>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small") +>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") +{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} +``` + +## 计算机视觉 + +计算机视觉任务中最早成功之一是使用卷积神经网络([CNN](glossary#convolution))识别邮政编码数字图像。图像由像素组成,每个像素都有一个数值。这使得将图像表示为像素值矩阵变得容易。每个像素值组合描述了图像的颜色。 + +计算机视觉任务可以通过以下两种通用方式解决: + +1. 使用卷积来学习图像的层次特征,从低级特征到高级抽象特征。 +2. 将图像分成块,并使用Transformer逐步学习每个图像块如何相互关联以形成图像。与CNN偏好的自底向上方法不同,这种方法有点像从一个模糊的图像开始,然后逐渐将其聚焦清晰。 + +### 图像分类 + +图像分类将整个图像从预定义的类别集合中进行标记。像大多数分类任务一样,图像分类有许多实际用例,其中一些包括: + +* 医疗保健:标记医学图像以检测疾病或监测患者健康状况 +* 环境:标记卫星图像以监测森林砍伐、提供野外管理信息或检测野火 +* 农业:标记农作物图像以监测植物健康或用于土地使用监测的卫星图像 +* 生态学:标记动物或植物物种的图像以监测野生动物种群或跟踪濒危物种 + +```py +>>> from transformers import pipeline + +>>> classifier = pipeline(task="image-classification") +>>> preds = classifier( +... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" +... ) +>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] +>>> print(*preds, sep="\n") +{'score': 0.4335, 'label': 'lynx, catamount'} +{'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'} +{'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'} +{'score': 0.0239, 'label': 'Egyptian cat'} +{'score': 0.0229, 'label': 'tiger cat'} +``` + +### 目标检测 + +与图像分类不同,目标检测在图像中识别多个对象以及这些对象在图像中的位置(由边界框定义)。目标检测的一些示例应用包括: + +* 自动驾驶车辆:检测日常交通对象,如其他车辆、行人和红绿灯 +* 遥感:灾害监测、城市规划和天气预报 +* 缺陷检测:检测建筑物中的裂缝或结构损坏,以及制造业产品缺陷 + + +```py +>>> from transformers import pipeline + +>>> detector = pipeline(task="object-detection") +>>> preds = detector( +... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" +... 
) +>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds] +>>> preds +[{'score': 0.9865, + 'label': 'cat', + 'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}] +``` + +### 图像分割 + +图像分割是一项像素级任务,将图像中的每个像素分配给一个类别。它与使用边界框标记和预测图像中的对象的目标检测不同,因为分割更加精细。分割可以在像素级别检测对象。有几种类型的图像分割: + +* 实例分割:除了标记对象的类别外,还标记每个对象的不同实例(“dog-1”,“dog-2”) +* 全景分割:语义分割和实例分割的组合; 它使用语义类为每个像素标记并标记每个对象的不同实例 + +分割任务对于自动驾驶车辆很有帮助,可以创建周围世界的像素级地图,以便它们可以在行人和其他车辆周围安全导航。它还适用于医学成像,其中任务的更精细粒度可以帮助识别异常细胞或器官特征。图像分割也可以用于电子商务,通过您的相机在现实世界中覆盖物体来虚拟试穿衣服或创建增强现实体验。 + +```py +>>> from transformers import pipeline + +>>> segmenter = pipeline(task="image-segmentation") +>>> preds = segmenter( +... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" +... ) +>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] +>>> print(*preds, sep="\n") +{'score': 0.9879, 'label': 'LABEL_184'} +{'score': 0.9973, 'label': 'snow'} +{'score': 0.9972, 'label': 'cat'} +``` + +### 深度估计 + +深度估计预测图像中每个像素到相机的距离。这个计算机视觉任务对于场景理解和重建尤为重要。例如,在自动驾驶汽车中,车辆需要了解行人、交通标志和其他车辆等物体的距离,以避免障碍物和碰撞。深度信息还有助于从2D图像构建3D表示,并可用于创建生物结构或建筑物的高质量3D表示。 + +有两种方法可以进行深度估计: + +* stereo(立体):通过比较同一图像的两个略微不同角度的图像来估计深度 +* monocular(单目):从单个图像中估计深度 + + +```py +>>> from transformers import pipeline + +>>> depth_estimator = pipeline(task="depth-estimation") +>>> preds = depth_estimator( +... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" +... ) +``` + +## 自然语言处理 + +NLP任务是最常见的类型之一,因为文本是我们进行交流的自然方式。为了让文本变成模型识别的格式,需要对其进行分词。这意味着将一段文本分成单独的单词或子词(`tokens`),然后将这些`tokens`转换为数字。因此,可以将一段文本表示为一系列数字,一旦有了一系列的数字,就可以将其输入到模型中以解决各种NLP任务! + +### 文本分类 + +像任何模态的分类任务一样,文本分类将一段文本(可以是句子级别、段落或文档)从预定义的类别集合中进行标记。文本分类有许多实际应用,其中一些包括: + +* 情感分析:根据某些极性(如`积极`或`消极`)对文本进行标记,可以支持政治、金融和营销等领域的决策制定 +* 内容分类:根据某些主题对文本进行标记,有助于组织和过滤新闻和社交媒体提要中的信息(`天气`、`体育`、`金融`等) + + +```py +>>> from transformers import pipeline + +>>> classifier = pipeline(task="sentiment-analysis") +>>> preds = classifier("Hugging Face is the best thing since sliced bread!") +>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] +>>> preds +[{'score': 0.9991, 'label': 'POSITIVE'}] +``` + +### Token分类 + +在任何NLP任务中,文本都经过预处理,将文本序列分成单个单词或子词。这些被称为[tokens](/glossary#token)。Token分类将每个`token`分配一个来自预定义类别集的标签。 + +两种常见的Token分类是: + +* 命名实体识别(NER):根据实体类别(如组织、人员、位置或日期)对`token`进行标记。NER在生物医学设置中特别受欢迎,可以标记基因、蛋白质和药物名称。 +* 词性标注(POS):根据其词性(如名词、动词或形容词)对标记进行标记。POS对于帮助翻译系统了解两个相同的单词如何在语法上不同很有用(作为名词的银行与作为动词的银行)。 + +```py +>>> from transformers import pipeline + +>>> classifier = pipeline(task="ner") +>>> preds = classifier("Hugging Face is a French company based in New York City.") +>>> preds = [ +... { +... "entity": pred["entity"], +... "score": round(pred["score"], 4), +... "index": pred["index"], +... "word": pred["word"], +... "start": pred["start"], +... "end": pred["end"], +... } +... for pred in preds +... 
] +>>> print(*preds, sep="\n") +{'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2} +{'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7} +{'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12} +{'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24} +{'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45} +{'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50} +{'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55} +``` + +### 问答 + +问答是另一个`token-level`的任务,返回一个问题的答案,有时带有上下文(开放领域),有时不带上下文(封闭领域)。每当我们向虚拟助手提出问题时,例如询问一家餐厅是否营业,就会发生这种情况。它还可以提供客户或技术支持,并帮助搜索引擎检索您要求的相关信息。 + +有两种常见的问答类型: + +* 提取式:给定一个问题和一些上下文,答案是从模型必须提取的上下文中的一段文本跨度。 +* 抽象式:给定一个问题和一些上下文,答案从上下文中生成;这种方法由[`Text2TextGenerationPipeline`]处理,而不是下面显示的[`QuestionAnsweringPipeline`]。 + + +```py +>>> from transformers import pipeline + +>>> question_answerer = pipeline(task="question-answering") +>>> preds = question_answerer( +... question="What is the name of the repository?", +... context="The name of the repository is huggingface/transformers", +... ) +>>> print( +... f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}" +... ) +score: 0.9327, start: 30, end: 54, answer: huggingface/transformers +``` + +### 摘要 + +摘要从较长的文本中创建一个较短的版本,同时尽可能保留原始文档的大部分含义。摘要是一个序列到序列的任务;它输出比输入更短的文本序列。有许多长篇文档可以进行摘要,以帮助读者快速了解主要要点。法案、法律和财务文件、专利和科学论文等文档可以摘要,以节省读者的时间并作为阅读辅助工具。 + +像问答一样,摘要有两种类型: + +* 提取式:从原始文本中识别和提取最重要的句子 +* 抽象式:从原始文本生成目标摘要(可能包括不在输入文档中的新单词);[`SummarizationPipeline`]使用抽象方法。 + + +```py +>>> from transformers import pipeline + +>>> summarizer = pipeline(task="summarization") +>>> summarizer( +... "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles." +... ) +[{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}] +``` + +### 翻译 + +翻译将一种语言的文本序列转换为另一种语言。它对于帮助来自不同背景的人们相互交流、帮助翻译内容以吸引更广泛的受众,甚至成为学习工具以帮助人们学习一门新语言都非常重要。除了摘要之外,翻译也是一个序列到序列的任务,意味着模型接收输入序列并返回目标输出序列。 + +在早期,翻译模型大多是单语的,但最近,越来越多的人对可以在多种语言之间进行翻译的多语言模型感兴趣。 + +```py +>>> from transformers import pipeline + +>>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning." 
+>>> translator = pipeline(task="translation", model="t5-small") +>>> translator(text) +[{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}] +``` + +### 语言模型 + +语言模型是一种预测文本序列中单词的任务。它已成为一种非常流行的NLP任务,因为预训练的语言模型可以微调用于许多其他下游任务。最近,人们对大型语言模型(LLMs)表现出了极大的兴趣,这些模型展示了`zero learning`或`few-shot learning`的能力。这意味着模型可以解决它未被明确训练过的任务!语言模型可用于生成流畅和令人信服的文本,但需要小心,因为文本可能并不总是准确的。 + +有两种类型的话语模型: + +* causal:模型的目标是预测序列中的下一个`token`,而未来的`tokens`被遮盖。 + + + ```py + >>> from transformers import pipeline + + >>> prompt = "Hugging Face is a community-based open-source platform for machine learning." + >>> generator = pipeline(task="text-generation") + >>> generator(prompt) # doctest: +SKIP + ``` + +* masked:模型的目标是预测序列中被遮蔽的`token`,同时具有对序列中所有`tokens`的完全访问权限。 + + + ```py + >>> text = "Hugging Face is a community-based open-source for machine learning." + >>> fill_mask = pipeline(task="fill-mask") + >>> preds = fill_mask(text, top_k=1) + >>> preds = [ + ... { + ... "score": round(pred["score"], 4), + ... "token": pred["token"], + ... "token_str": pred["token_str"], + ... "sequence": pred["sequence"], + ... } + ... for pred in preds + ... ] + >>> preds + [{'score': 0.2236, + 'token': 1761, + 'token_str': ' platform', + 'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}] + ``` + +## 多模态 + +多模态任务要求模型处理多种数据模态(文本、图像、音频、视频)以解决特定问题。图像描述是一个多模态任务的例子,其中模型将图像作为输入并输出描述图像或图像某些属性的文本序列。 + +虽然多模态模型处理不同的数据类型或模态,但内部预处理步骤帮助模型将所有数据类型转换为`embeddings`(向量或数字列表,包含有关数据的有意义信息)。对于像图像描述这样的任务,模型学习图像嵌入和文本嵌入之间的关系。 + +### 文档问答 + +文档问答是从文档中回答自然语言问题的任务。与`token-level`问答任务不同,文档问答将包含问题的文档的图像作为输入,并返回答案。文档问答可用于解析结构化文档并从中提取关键信息。在下面的例子中,可以从收据中提取总金额和找零金额。 + +```py +>>> from transformers import pipeline +>>> from PIL import Image +>>> import requests + +>>> url = "https://datasets-server.huggingface.co/assets/hf-internal-testing/example-documents/--/hf-internal-testing--example-documents/test/2/image/image.jpg" +>>> image = Image.open(requests.get(url, stream=True).raw) + +>>> doc_question_answerer = pipeline("document-question-answering", model="magorshunov/layoutlm-invoices") +>>> preds = doc_question_answerer( +... question="What is the total amount?", +... image=image, +... 
) +>>> preds +[{'score': 0.8531, 'answer': '17,000', 'start': 4, 'end': 4}] +``` + +希望这个页面为您提供了一些有关每种模态中所有类型任务的背景信息以及每个任务的实际重要性。在[下一节](tasks_explained)中,您将了解Transformers如何解决这些任务。 From c9e72f55b2dc4b9be4edb986dce0552582b328f2 Mon Sep 17 00:00:00 2001 From: Marc Sun <57196510+SunMarc@users.noreply.github.com> Date: Wed, 1 Nov 2023 18:09:21 +0100 Subject: [PATCH 052/268] Add exllamav2 better (#27111) * add_ xllamav2 arg * add test * style * add check * add doc * replace by use_exllama_v2 * fix tests * fix doc * style * better condition * fix logic * add deprecate msg * deprecate exllama * remove disable_exllama from the linter * remove * fix warning * Revert the commits deprecating exllama * deprecate disable_exllama for use_exllama * fix * fix loading attribute * better handling of args * remove disable_exllama from init and linter * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * better arg * fix warning * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * switch to dict * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * style * nits * style * better tests * style --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docs/source/en/main_classes/quantization.md | 13 ++- src/transformers/modeling_utils.py | 7 +- src/transformers/utils/quantization_config.py | 88 ++++++++++++++- tests/quantization/gptq/test_gptq.py | 101 +++++++++++++++--- 4 files changed, 181 insertions(+), 28 deletions(-) diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md index 1ab20f1fa110..4de6a50d0913 100644 --- a/docs/source/en/main_classes/quantization.md +++ b/docs/source/en/main_classes/quantization.md @@ -223,16 +223,25 @@ model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", de ### Exllama kernels for faster inference -For 4-bit model, you can use the exllama kernels in order to a faster inference speed. It is activated by default. You can change that behavior by passing `disable_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the config. Note that you will only be able to overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on gpus if you want to use exllama kernels. Also, you can perform CPU inference using Auto-GPTQ for Auto-GPTQ version > 0.4.2 by passing `device_map` = "cpu". For CPU inference, you have to pass `disable_exallama = True` in the `GPTQConfig.` +For 4-bit model, you can use the exllama kernels in order to a faster inference speed. It is activated by default. You can change that behavior by passing `use_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the config. Note that you will only be able to overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on gpus if you want to use exllama kernels. Also, you can perform CPU inference using Auto-GPTQ for Auto-GPTQ version > 0.4.2 by passing `device_map` = "cpu". 
For CPU inference, you have to pass `use_exllama = False` in the `GPTQConfig.` ```py import torch -gptq_config = GPTQConfig(bits=4, disable_exllama=False) +gptq_config = GPTQConfig(bits=4) +model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config=gptq_config) +``` + +With the release of the exllamav2 kernels, you can get faster inference speed compared to the exllama kernels. You just need to pass `exllama_config={"version": 2}` in [`GPTQConfig`]: + +```py +import torch +gptq_config = GPTQConfig(bits=4, exllama_config={"version":2}) model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config) ``` Note that only 4-bit models are supported for now. Furthermore, it is recommended to deactivate the exllama kernels if you are finetuning a quantized model with peft. +You can find the benchmark of these kernels [here](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark) #### Fine-tune a quantized model With the official support of adapters in the Hugging Face ecosystem, you can fine-tune models that have been quantized with GPTQ. diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index be88dfd204d7..31ed52b47481 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -2784,7 +2784,7 @@ def from_pretrained( logger.warning( "You passed `quantization_config` to `from_pretrained` but the model you're loading already has a " "`quantization_config` attribute and has already quantized weights. However, loading attributes" - " (e.g. disable_exllama, use_cuda_fp16, max_input_length) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." + " (e.g. use_exllama, exllama_config, use_cuda_fp16, max_input_length) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." 
) if ( quantization_method_from_args == QuantizationMethod.GPTQ @@ -2811,8 +2811,7 @@ def from_pretrained( torch_dtype = torch.float16 else: logger.info("We suggest you to set `torch_dtype=torch.float16` for better efficiency with GPTQ.") - - quantizer = GPTQQuantizer.from_dict(quantization_config.to_dict()) + quantizer = GPTQQuantizer.from_dict(quantization_config.to_dict_optimum()) elif quantization_method_from_config == QuantizationMethod.AWQ: if not torch.cuda.is_available(): raise RuntimeError("GPU is required to run AWQ quantized model.") @@ -3539,7 +3538,7 @@ def from_pretrained( if cls.main_input_name != "input_ids": raise RuntimeError("We can only quantize pure text model.") quantizer.quantize_model(model, quantization_config.tokenizer) - config.quantization_config = GPTQConfig.from_dict(quantizer.to_dict()) + config.quantization_config = GPTQConfig.from_dict_optimum(quantizer.to_dict()) model._is_quantized_training_enabled = True if quantization_method_from_config == QuantizationMethod.GPTQ: model = quantizer.post_init_model(model) diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index 35542d3f78fd..ae8909352c19 100644 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -310,6 +310,11 @@ def to_diff_dict(self) -> Dict[str, Any]: return serializable_config_dict +class ExllamaVersion(int, Enum): + ONE = 1 + TWO = 2 + + @dataclass class GPTQConfig(QuantizationConfigMixin): """ @@ -355,11 +360,14 @@ class GPTQConfig(QuantizationConfigMixin): The batch size used when processing the dataset pad_token_id (`int`, *optional*): The pad token id. Needed to prepare the dataset when `batch_size` > 1. - disable_exllama (`bool`, *optional*, defaults to `False`): - Whether to use exllama backend. Only works with `bits` = 4. + use_exllama (`bool`, *optional*): + Whether to use exllama backend. Defaults to `True` if unset. Only works with `bits` = 4. max_input_length (`int`, *optional*): The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input length. It is specific to the exllama backend with act-order. + exllama_config (`Dict[str, Any]`, *optional*): + The exllama config. You can specify the version of the exllama kernel through the `version` key. Defaults + to `{"version": 1}` if unset. cache_block_outputs (`bool`, *optional*, defaults to `True`): Whether to cache block outputs to reuse as inputs for the succeeding block. 
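    Example (a minimal sketch mirroring the quantization documentation above; `"{your_username}/opt-125m-gptq"`
    is a placeholder for an already-quantized checkpoint, not a real repository):

    ```python
    from transformers import AutoModelForCausalLM, GPTQConfig

    gptq_config = GPTQConfig(bits=4, exllama_config={"version": 2})
    model = AutoModelForCausalLM.from_pretrained(
        "{your_username}/opt-125m-gptq", device_map="auto", quantization_config=gptq_config
    )
    ```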
""" @@ -380,8 +388,9 @@ def __init__( module_name_preceding_first_block: Optional[List[str]] = None, batch_size: int = 1, pad_token_id: Optional[int] = None, - disable_exllama: bool = False, + use_exllama: Optional[bool] = None, max_input_length: Optional[int] = None, + exllama_config: Optional[Dict[str, Any]] = None, cache_block_outputs: bool = True, **kwargs, ): @@ -400,14 +409,16 @@ def __init__( self.module_name_preceding_first_block = module_name_preceding_first_block self.batch_size = batch_size self.pad_token_id = pad_token_id - self.disable_exllama = disable_exllama + self.use_exllama = use_exllama self.max_input_length = max_input_length + self.exllama_config = exllama_config + self.disable_exllama = kwargs.pop("disable_exllama", None) self.cache_block_outputs = cache_block_outputs self.post_init() def get_loading_attributes(self): attibutes_dict = copy.deepcopy(self.__dict__) - loading_attibutes = ["disable_exllama", "use_cuda_fp16", "max_input_length"] + loading_attibutes = ["disable_exllama", "use_exllama", "exllama_config", "use_cuda_fp16", "max_input_length"] loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} return loading_attibutes_dict @@ -434,6 +445,73 @@ def post_init(self): ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" ) + if self.disable_exllama is None and self.use_exllama is None: + # New default behaviour + self.use_exllama = True + elif self.disable_exllama is not None and self.use_exllama is None: + # Follow pattern of old config + logger.warning( + "Using `disable_exllama` is deprecated and will be removed in version 4.37. Use `use_exllama` instead and specify the version with `exllama_config`." + "The value of `use_exllama` will be overwritten by `disable_exllama` passed in `GPTQConfig` or stored in your config file." + ) + self.use_exllama = not self.disable_exllama + elif self.disable_exllama is not None and self.use_exllama is not None: + # Only happens if user explicitly passes in both arguments + raise ValueError("Cannot specify both `disable_exllama` and `use_exllama`. Please use just `use_exllama`") + + if self.exllama_config is None: + self.exllama_config = {"version": ExllamaVersion.ONE} + else: + if "version" not in self.exllama_config: + raise ValueError("`exllama_config` needs to have a `version` key.") + elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: + exllama_version = self.exllama_config["version"] + raise ValueError( + f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}" + ) + + if self.bits == 4 and self.use_exllama: + if self.exllama_config["version"] == ExllamaVersion.ONE: + logger.info( + "You have activated exllama backend. Note that you can get better inference " + "speed using exllamav2 kernel by setting `exllama_config`." + ) + elif self.exllama_config["version"] == ExllamaVersion.TWO: + optimum_version = version.parse(importlib.metadata.version("optimum")) + autogptq_version = version.parse(importlib.metadata.version("auto_gptq")) + if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"): + raise ValueError( + f"You need optimum > 1.13.2 and auto-gptq > 0.4.2 . 
Make sure to have that version installed - detected version : optimum {optimum_version} and autogptq {autogptq_version}" + ) + + def to_dict(self): + config_dict = super().to_dict() + config_dict.pop("disable_exllama", None) + return config_dict + + def to_dict_optimum(self): + """ + Get compatible dict for optimum gptq config + """ + quant_dict = self.to_dict() + # make it compatible with optimum config + quant_dict["disable_exllama"] = not self.use_exllama + return quant_dict + + @classmethod + def from_dict_optimum(cls, config_dict): + """ + Get compatible class with optimum gptq config dict + """ + + if "disable_exllama" in config_dict: + config_dict["use_exllama"] = not config_dict["disable_exllama"] + # switch to None to not trigger the warning + config_dict["disable_exllama"] = None + + config = cls(**config_dict) + return config + @dataclass class AwqConfig(QuantizationConfigMixin): diff --git a/tests/quantization/gptq/test_gptq.py b/tests/quantization/gptq/test_gptq.py index 4c7587f063a5..30eae22ccdab 100644 --- a/tests/quantization/gptq/test_gptq.py +++ b/tests/quantization/gptq/test_gptq.py @@ -69,9 +69,9 @@ def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) - optimum_config = GPTQQuantizer.from_dict(config.to_dict()) + optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) - new_config = GPTQConfig.from_dict(optimum_config.to_dict()) + new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @@ -98,7 +98,7 @@ class GPTQTest(unittest.TestCase): bits = 4 group_size = 128 desc_act = False - disable_exllama = True + use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." @@ -125,7 +125,7 @@ def setUpClass(cls): tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, - disable_exllama=cls.disable_exllama, + use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( @@ -147,11 +147,12 @@ def test_memory_footprint(self): def test_device_and_dtype_assignment(self): r""" - Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error. + Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Checks also if other models are casted correctly. 
""" # This should work - _ = self.quantized_model.to(0) + if self.device_map is None: + _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype`` @@ -177,7 +178,8 @@ def test_quantized_layers_class(self): desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, - disable_exllama=self.disable_exllama, + disable_exllama=not self.use_exllama, + disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) @@ -196,6 +198,9 @@ def check_inference_correctness(self, model): # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) + def check_quantized_layers_type(self, model, value): + self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) + def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens @@ -211,11 +216,13 @@ def test_serialization(self): """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) - if self.disable_exllama: + if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname).to(0) + self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) + self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @@ -234,14 +241,15 @@ def test_change_loading_attributes(self): """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) - if self.disable_exllama: - self.assertEqual(self.quantized_model.config.quantization_config.disable_exllama, True) + if not self.use_exllama: + self.assertEqual(self.quantized_model.config.quantization_config.use_exllama, False) # we need to put it directly to the gpu. 
Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( - tmpdirname, quantization_config=GPTQConfig(disable_exllama=False, bits=4), device_map={"": 0} + tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) - self.assertEqual(quantized_model_from_saved.config.quantization_config.disable_exllama, False) + self.assertEqual(quantized_model_from_saved.config.quantization_config.use_exllama, True) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) + self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @@ -255,7 +263,7 @@ class GPTQTestDeviceMap(GPTQTest): @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" - disable_exllama = False + use_exllama = True @slow @@ -281,8 +289,7 @@ def setUpClass(cls): """ Setup quantized model """ - - cls.quantization_config = GPTQConfig(bits=4, disable_exllama=False, max_input_length=4028) + cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, @@ -308,14 +315,15 @@ def check_inference_correctness(self, model): # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) + def test_quantized_layers_type(self): + self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") + def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) - # this test will fail until the next release of optimum - @pytest.mark.skip def test_max_input_length(self): """ Test if the max_input_length works. It modifies the maximum input length that of the model that runs with exllama backend. @@ -334,6 +342,65 @@ def test_max_input_length(self): self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) +@slow +@require_optimum +@require_auto_gptq +@require_torch_gpu +@require_accelerate +class GPTQTestExllamaV2(unittest.TestCase): + """ + Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). + More information on those arguments here: + https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig + """ + + EXPECTED_OUTPUTS = set() + EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") + model_name = "hf-internal-testing/Llama-2-7B-GPTQ" + revision = "gptq-4bit-128g-actorder_True" + input_text = "Hello my name is" + + @classmethod + def setUpClass(cls): + """ + Setup quantized model + """ + cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) + cls.quantized_model = AutoModelForCausalLM.from_pretrained( + cls.model_name, + revision=cls.revision, + torch_dtype=torch.float16, + device_map={"": 0}, + quantization_config=cls.quantization_config, + ) + cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) + + def test_quantized_layers_type(self): + self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") + + def check_inference_correctness(self, model): + """ + Test the generation quality of the quantized model and see that we are matching the expected output. 
+ Given that we are operating on small numbers + the testing model is relatively small, we might not get + the same output across GPUs. So we'll generate few tokens (5-10) and check their output. + """ + + # Check that inference pass works on the model + encoded_input = self.tokenizer(self.input_text, return_tensors="pt") + + # Check the exactness of the results + output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) + + # Get the generation + self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) + + def test_generate_quality(self): + """ + Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens + """ + self.check_inference_correctness(self.quantized_model) + + # fail when run all together @pytest.mark.skip @require_accelerate From 95020f208ed7c30895685af60ef3a791fb2d45ff Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Wed, 1 Nov 2023 19:25:23 +0100 Subject: [PATCH 053/268] Fix CPU offload + disk offload tests (#27204) Fix disk offload tests + weight sharing issues --- src/transformers/modeling_utils.py | 4 ++- src/transformers/models/bart/modeling_bart.py | 5 +++ .../modeling_bigbird_pegasus.py | 5 +++ .../models/longt5/modeling_longt5.py | 14 ++++++++ .../models/m2m_100/modeling_m2m_100.py | 5 +++ .../models/nllb_moe/modeling_nllb_moe.py | 5 +++ .../models/plbart/modeling_plbart.py | 5 +++ .../seamless_m4t/modeling_seamless_m4t.py | 5 +++ .../modeling_switch_transformers.py | 14 ++++++++ src/transformers/models/t5/modeling_t5.py | 19 ++++++++++ src/transformers/models/umt5/modeling_umt5.py | 23 ++++++++++++ tests/models/vitdet/test_modeling_vitdet.py | 6 +++- tests/models/whisper/test_modeling_whisper.py | 6 +++- tests/test_modeling_common.py | 36 +++++++++++++++++-- 14 files changed, 147 insertions(+), 5 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 31ed52b47481..e48c98c791be 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -4576,7 +4576,9 @@ def expand_device_map(device_map, param_names): """ new_device_map = {} for module, device in device_map.items(): - new_device_map.update({p: device for p in param_names if p == module or p.startswith(f"{module}.")}) + new_device_map.update( + {p: device for p in param_names if p == module or p.startswith(f"{module}.") or module == ""} + ) return new_device_map diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index c271aabcb4d1..60ec557eba7f 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -1125,6 +1125,11 @@ def __init__(self, config: BartConfig): # Initialize weights and apply final processing self.post_init() + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_input_embeddings(self): return self.shared diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index cc69e31e5a7f..b9a84a869dac 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -2312,6 +2312,11 @@ def set_input_embeddings(self, value): 
self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index 91e584d80d3f..0ae7cedea00b 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -1783,6 +1783,11 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder @@ -1937,6 +1942,11 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @@ -2170,6 +2180,10 @@ def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index ed17796d27dc..4e5004fc98ff 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -1103,6 +1103,11 @@ def set_input_embeddings(self, value): self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder diff --git a/src/transformers/models/nllb_moe/modeling_nllb_moe.py b/src/transformers/models/nllb_moe/modeling_nllb_moe.py index 1c08a70875e1..22708f112522 100644 --- a/src/transformers/models/nllb_moe/modeling_nllb_moe.py +++ b/src/transformers/models/nllb_moe/modeling_nllb_moe.py @@ -1471,6 +1471,11 @@ def set_input_embeddings(self, value): self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index 79b5c09cba2f..4d8fe161f806 100644 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -1084,6 +1084,11 @@ def set_input_embeddings(self, value): self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared + def 
_tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index 3fe519d2d259..62d1f3e21f9a 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -4125,6 +4125,11 @@ def set_input_embeddings(self, value): self.text_decoder.embed_tokens = value self.shared = value + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) + @add_start_docstrings_to_model_forward(M4T_MODEL_INPUTS_DOCSTRING) def forward( self, diff --git a/src/transformers/models/switch_transformers/modeling_switch_transformers.py b/src/transformers/models/switch_transformers/modeling_switch_transformers.py index e00a0147e420..07c96a5aa828 100644 --- a/src/transformers/models/switch_transformers/modeling_switch_transformers.py +++ b/src/transformers/models/switch_transformers/modeling_switch_transformers.py @@ -1329,6 +1329,11 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder @@ -1505,6 +1510,11 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @@ -1807,6 +1817,10 @@ def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index 3748e5af778f..ff8e6609b94d 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -1414,6 +1414,11 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder @@ -1620,6 +1625,11 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + 
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @@ -1920,6 +1930,10 @@ def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder @@ -2152,6 +2166,11 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + def get_encoder(self): return self.encoder diff --git a/src/transformers/models/umt5/modeling_umt5.py b/src/transformers/models/umt5/modeling_umt5.py index bfcbfb13eb45..220aff273bc6 100644 --- a/src/transformers/models/umt5/modeling_umt5.py +++ b/src/transformers/models/umt5/modeling_umt5.py @@ -973,6 +973,12 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + # Copied from transformers.models.t5.modeling_t5.T5Model._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + # Copied from transformers.models.t5.modeling_t5.T5Model.get_encoder def get_encoder(self): return self.encoder @@ -1142,6 +1148,12 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_output_embeddings def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @@ -1380,6 +1392,11 @@ def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_encoder def get_encoder(self): return self.encoder @@ -1615,6 +1632,12 @@ def set_input_embeddings(self, new_embeddings): self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) + # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_encoder def get_encoder(self): return self.encoder diff --git a/tests/models/vitdet/test_modeling_vitdet.py b/tests/models/vitdet/test_modeling_vitdet.py index d6ffd03cbd7f..361e563d58d4 
100644 --- a/tests/models/vitdet/test_modeling_vitdet.py +++ b/tests/models/vitdet/test_modeling_vitdet.py @@ -182,7 +182,11 @@ def test_cpu_offload(self): # TODO: Fix me (once this model gets more usage) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") - def test_disk_offload(self): + def test_disk_offload_bin(self): + super().test_disk_offload() + + @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") + def test_disk_offload_safetensors(self): super().test_disk_offload() # TODO: Fix me (once this model gets more usage) diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index bc1a7bd218c4..60a2d3b93ea6 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -1788,7 +1788,11 @@ def test_cpu_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") - def test_disk_offload(self): + def test_disk_offload_bin(self): + pass + + @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") + def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 3c4810074728..595c72cda6fd 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2578,7 +2578,7 @@ def check_device_map_is_respected(self, model, device_map): @require_accelerate @mark.accelerate_tests @require_torch_gpu - def test_disk_offload(self): + def test_disk_offload_bin(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: @@ -2593,7 +2593,7 @@ def test_disk_offload(self): model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: - model.cpu().save_pretrained(tmp_dir) + model.cpu().save_pretrained(tmp_dir, safe_serialization=False) with self.assertRaises(ValueError): max_size = int(self.model_split_percents[0] * model_size) @@ -2613,6 +2613,38 @@ def test_disk_offload(self): self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + @require_accelerate + @mark.accelerate_tests + @require_torch_gpu + def test_disk_offload_safetensors(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + if model_class._no_split_modules is None: + continue + + inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) + model = model_class(config).eval() + model = model.to(torch_device) + torch.manual_seed(0) + base_output = model(**inputs_dict_class) + + model_size = compute_module_sizes(model)[""] + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir) + + max_size = int(self.model_split_percents[1] * model_size) + max_memory = {0: max_size, "cpu": max_size} + + # This doesn't error out as it's in safetensors and doesn't need an offload folder + new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) + + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + torch.manual_seed(0) + new_output = new_model(**inputs_dict_class) + + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + @require_accelerate @mark.accelerate_tests @require_torch_gpu From 
3520e37e86913715959ff14fef76340010c8de57 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Wed, 1 Nov 2023 14:42:38 -0400 Subject: [PATCH 054/268] Enable split_batches through TrainingArguments (#26798) * Enable split_batches through TrainingArguments * Extra dispatch_batches * Keep as default false * Add to docstring * Add to docstring * Remove the capturewarnings change * Comma --- src/transformers/trainer.py | 1 + src/transformers/training_args.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index aa5e372bdc24..5def3ca89049 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -3906,6 +3906,7 @@ def create_accelerator_and_postprocess(self): # create accelerator object self.accelerator = Accelerator( dispatch_batches=self.args.dispatch_batches, + split_batches=self.args.split_batches, deepspeed_plugin=self.args.deepspeed_plugin, gradient_accumulation_plugin=gradient_accumulation_plugin, ) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 147d1e6b1c63..aaedc83528a9 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -621,6 +621,14 @@ class TrainingArguments: Refer to the PyTorch doc for possible values and note that they may change across PyTorch versions. This flag is experimental and subject to change in future releases. + split_batches (`bool`, *optional*): + Whether or not the accelerator should split the batches yielded by the dataloaders across the devices + during distributed training. If + + set to `True`, the actual batch size used will be the same on any kind of distributed processes, but it + must be a + + round multiple of the number of processes you are using (such as GPUs). include_tokens_per_second (`bool`, *optional*): Whether or not to compute the number of tokens per second per device for training speed metrics. @@ -1226,6 +1234,15 @@ class TrainingArguments: }, ) + split_batches: Optional[bool] = field( + default=False, + metadata={ + "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices during distributed training. If" + "set to `True`, the actual batch size used will be the same on any kind of distributed processes, but it must be a" + "round multiple of the number of processes you are using (such as GPUs)." 
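# A minimal sketch of how the new argument is meant to be used (`output_dir` and the batch size are
# placeholder values): `split_batches` is forwarded to the underlying `Accelerator`, so the batch size
# yielded by the dataloaders must be a round multiple of the number of processes.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="tmp_trainer",
    per_device_train_batch_size=32,
    split_batches=True,
)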
+ }, + ) + include_tokens_per_second: Optional[bool] = field( default=False, metadata={"help": "If set to `True`, the speed metrics will include `tgs` (tokens per second per device)."}, From af3de8d87c717c4bb090f037d0d89413c195a42f Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 1 Nov 2023 21:03:01 +0100 Subject: [PATCH 055/268] [Whisper, Bart, MBart] Add Flash Attention 2 (#27203) * add whisper fa2 * correct * change all * correct * correct * fix more * fix more * fix more * fix more * fix more * fix more * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * fix more * fix more * fix more * fix more * fix more --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/models/bart/modeling_bart.py | 275 +++++++++++++++++- .../modeling_bigbird_pegasus.py | 6 +- .../models/biogpt/modeling_biogpt.py | 4 + .../models/blenderbot/modeling_blenderbot.py | 24 +- .../modeling_blenderbot_small.py | 24 +- .../data2vec/modeling_data2vec_audio.py | 4 + .../modeling_gptsan_japanese.py | 4 + .../models/hubert/modeling_hubert.py | 4 + .../models/informer/modeling_informer.py | 4 + .../models/m2m_100/modeling_m2m_100.py | 24 +- .../models/marian/modeling_marian.py | 24 +- .../models/mbart/modeling_mbart.py | 275 +++++++++++++++++- .../models/musicgen/modeling_musicgen.py | 6 +- .../models/nllb_moe/modeling_nllb_moe.py | 4 + .../models/pegasus/modeling_pegasus.py | 24 +- .../models/pegasus_x/modeling_pegasus_x.py | 4 + .../models/plbart/modeling_plbart.py | 53 +++- .../seamless_m4t/modeling_seamless_m4t.py | 4 + src/transformers/models/sew/modeling_sew.py | 4 + .../speech_to_text/modeling_speech_to_text.py | 24 +- .../modeling_speech_to_text_2.py | 4 + .../modeling_time_series_transformer.py | 24 +- .../models/unispeech/modeling_unispeech.py | 4 + .../unispeech_sat/modeling_unispeech_sat.py | 4 + .../models/wav2vec2/modeling_wav2vec2.py | 4 + .../models/whisper/modeling_whisper.py | 262 ++++++++++++++++- tests/models/whisper/test_modeling_whisper.py | 104 +++++++ tests/test_modeling_common.py | 222 +++++++++++--- 28 files changed, 1300 insertions(+), 123 deletions(-) diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index 60ec557eba7f..efca985f6784 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -19,6 +19,7 @@ from typing import List, Optional, Tuple, Union import torch +import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -40,12 +41,18 @@ add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, + is_flash_attn_2_available, logging, replace_return_docstrings, ) from .configuration_bart import BartConfig +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/bart-base" @@ -71,6 +78,19 @@ ] +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, 
dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. @@ -119,12 +139,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[BartConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -133,6 +156,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -263,14 +287,225 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value +class BartFlashAttention2(BartAttention): + """ + Bart flash attention module. This module inherits from `BartAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # BartFlashAttention2 attention does not support output_attentions + if output_attentions: + raise ValueError("BartFlashAttention2 attention does not support output_attentions") + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, q_len, _ = hidden_states.size() + + # get query proj + query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0].transpose(1, 2) + value_states = past_key_value[1].transpose(1, 2) + elif is_cross_attention: + # cross_attentions + key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) + value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) + value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) + value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) + else: + # self_attention + key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) + value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) + + if 
self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout + ) + + attn_output = attn_output.reshape(bsz, q_len, -1) + attn_output = self.out_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. 
Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
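To make the unpadding step above concrete, here is a small self-contained sketch (plain `torch` only, toy mask values) of how `_get_unpad_data` turns a 2D padding mask into the `indices`/`cu_seqlens` that `flash_attn_varlen_func` consumes:

```python
# Illustrative only: mirrors _get_unpad_data with made-up mask values, no flash-attn needed.
import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 1, 0],   # sequence of length 3 + 1 pad token
                               [1, 1, 0, 0]],  # sequence of length 2 + 2 pad tokens
                              dtype=torch.int32)

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)              # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()   # tensor([0, 1, 2, 4, 5])
max_seqlen_in_batch = seqlens_in_batch.max().item()                           # 3
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))

print(indices)              # positions of the non-padding tokens in the flattened batch
print(cu_seqlens)           # tensor([0, 3, 5]) - cumulative sequence lengths per sample
print(max_seqlen_in_batch)  # longest unpadded sequence in the batch
```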
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +BART_ATTENTION_CLASSES = { + "default": BartAttention, + "flash_attention_2": BartFlashAttention2, +} + + class BartEncoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = BartAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = BART_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -336,22 +571,26 @@ def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = BartAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + self.self_attn = BART_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = BartAttention( + self.encoder_attn = BART_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) @@ -479,6 +718,7 @@ class BartPreTrainedModel(PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["encoder.version", "decoder.version"] _no_split_modules = [r"BartEncoderLayer", r"BartDecoderLayer"] _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True def _init_weights(self, module): std = self.config.init_std @@ -792,8 +1032,11 @@ def forward( # expand attention_mask if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) + if getattr(self.config, "_flash_attn_2_enabled", False): + attention_mask = attention_mask if 0 in attention_mask else None + else: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None @@ -995,16 +1238,24 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) * self.embed_scale - attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, input_shape, inputs_embeds, past_key_values_length - ) + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) # expand encoder attention mask if 
encoder_hidden_states is not None and encoder_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - encoder_attention_mask = _prepare_4d_attention_mask( - encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] - ) + if getattr(self.config, "_flash_attn_2_enabled", False): + encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None + else: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _prepare_4d_attention_mask( + encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) # embed positions positions = self.embed_positions(input, past_key_values_length) diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index b9a84a869dac..222873ac852b 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -1174,7 +1174,7 @@ def forward( return outputs -# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BigBirdPegasusDecoder +# Copied from transformers.models.bart.modeling_bart.BartAttention with BartConfig->BigBirdPegasusConfig, Bart->BigBirdPegasusDecoder class BigBirdPegasusDecoderAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" @@ -1185,12 +1185,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[BigBirdPegasusConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -1199,6 +1202,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/biogpt/modeling_biogpt.py b/src/transformers/models/biogpt/modeling_biogpt.py index 2cbfa4ef0f82..221c97c86885 100755 --- a/src/transformers/models/biogpt/modeling_biogpt.py +++ b/src/transformers/models/biogpt/modeling_biogpt.py @@ -90,12 +90,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[BioGptConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -104,6 +107,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 985c660fa0b8..f49f90f794fc 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -104,12 +104,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[BlenderbotConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads 
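As a usage-side note for the `_flash_attn_2_enabled` gate introduced above, a hedged sketch of how a BART checkpoint would opt into the new attention class. The CUDA device, half-precision dtype, and the `use_flash_attention_2` loading flag are assumptions based on how other Flash Attention 2 models are loaded at this point, not part of this diff:

```python
# Sketch only: the loading flag and dtype/device requirements are assumptions, not part of this patch.
import torch
from transformers import BartForConditionalGeneration

model = BartForConditionalGeneration.from_pretrained(
    "facebook/bart-base",
    torch_dtype=torch.float16,
    use_flash_attention_2=True,  # sets config._flash_attn_2_enabled so BART_ATTENTION_CLASSES selects BartFlashAttention2
).to("cuda")
```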
self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -118,6 +121,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -248,15 +252,21 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Blenderbot +BLENDERBOT_ATTENTION_CLASSES = {"default": BlenderbotAttention} + + +# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Blenderbot, MBART->BLENDERBOT class BlenderbotEncoderLayer(nn.Module): def __init__(self, config: BlenderbotConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = BlenderbotAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = BLENDERBOT_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -317,28 +327,32 @@ def forward( return outputs -# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Blenderbot +# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Blenderbot, MBART->BLENDERBOT class BlenderbotDecoderLayer(nn.Module): def __init__(self, config: BlenderbotConfig): super().__init__() self.embed_dim = config.d_model + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" - self.self_attn = BlenderbotAttention( + self.self_attn = BLENDERBOT_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = BlenderbotAttention( + self.encoder_attn = BLENDERBOT_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index 3d51ee91284e..292b5a8c6e8b 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -101,12 +101,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[BlenderbotSmallConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -115,6 +118,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, 
bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -245,15 +249,18 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall +# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL class BlenderbotSmallEncoderLayer(nn.Module): def __init__(self, config: BlenderbotSmallConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = BlenderbotSmallAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -314,28 +321,35 @@ def forward( return outputs -# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall +BLENDERBOT_SMALL_ATTENTION_CLASSES = {"default": BlenderbotSmallAttention} + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL class BlenderbotSmallDecoderLayer(nn.Module): def __init__(self, config: BlenderbotSmallConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = BlenderbotSmallAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = BlenderbotSmallAttention( + self.encoder_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) diff --git a/src/transformers/models/data2vec/modeling_data2vec_audio.py b/src/transformers/models/data2vec/modeling_data2vec_audio.py index 47cf2d6245ef..e393db64d045 100755 --- a/src/transformers/models/data2vec/modeling_data2vec_audio.py +++ b/src/transformers/models/data2vec/modeling_data2vec_audio.py @@ -330,12 +330,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[Data2VecAudioConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -344,6 +347,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py index 1232d24730c2..7591ecb0b82a 100644 --- 
a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py @@ -370,12 +370,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[GPTSanJapaneseConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -384,6 +387,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py index ddb80f56723e..a45dcb2d11fe 100755 --- a/src/transformers/models/hubert/modeling_hubert.py +++ b/src/transformers/models/hubert/modeling_hubert.py @@ -396,12 +396,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[HubertConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -410,6 +413,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py index ebb4b2821ced..c0a5a2059502 100644 --- a/src/transformers/models/informer/modeling_informer.py +++ b/src/transformers/models/informer/modeling_informer.py @@ -287,12 +287,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[InformerConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -301,6 +304,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 4e5004fc98ff..c05948540f78 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -172,12 +172,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[M2M100Config] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -186,6 +189,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ 
-316,15 +320,18 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->M2M100 +# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->M2M100, MBART->M2M100 class M2M100EncoderLayer(nn.Module): def __init__(self, config: M2M100Config): super().__init__() self.embed_dim = config.d_model - self.self_attn = M2M100Attention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = M2M100_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -385,28 +392,35 @@ def forward( return outputs -# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->M2M100 +M2M100_ATTENTION_CLASSES = {"default": M2M100Attention} + + +# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->M2M100, MBART->M2M100 class M2M100DecoderLayer(nn.Module): def __init__(self, config: M2M100Config): super().__init__() self.embed_dim = config.d_model + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" - self.self_attn = M2M100Attention( + self.self_attn = M2M100_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = M2M100Attention( + self.encoder_attn = M2M100_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index 10a6d5f342f1..cabf0c68f8b6 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -119,12 +119,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[MarianConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -133,6 +136,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -263,15 +267,18 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->Marian +# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->Marian, BART->MARIAN class MarianEncoderLayer(nn.Module): def __init__(self, config: MarianConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = MarianAttention( + attn_type = "flash_attention_2" if 
getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = MARIAN_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -332,28 +339,35 @@ def forward( return outputs -# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->Marian +MARIAN_ATTENTION_CLASSES = {"default": MarianAttention} + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->Marian, BART->MARIAN class MarianDecoderLayer(nn.Module): def __init__(self, config: MarianConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = MarianAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + self.self_attn = MARIAN_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = MarianAttention( + self.encoder_attn = MARIAN_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 96044ac1c276..97fdf9ed8799 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -18,6 +18,7 @@ from typing import List, Optional, Tuple, Union import torch +import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -39,12 +40,18 @@ add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, + is_flash_attn_2_available, logging, replace_return_docstrings, ) from .configuration_mbart import MBartConfig +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25" @@ -59,6 +66,19 @@ ] +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int): """ Shift input ids one token to the right, and wrap the last non pad token (the token) Note that MBart does not @@ -113,12 +133,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[MBartConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = 
dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -127,6 +150,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -257,14 +281,226 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value +# Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->MBart +class MBartFlashAttention2(MBartAttention): + """ + MBart flash attention module. This module inherits from `MBartAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # MBartFlashAttention2 attention does not support output_attentions + if output_attentions: + raise ValueError("MBartFlashAttention2 attention does not support output_attentions") + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, q_len, _ = hidden_states.size() + + # get query proj + query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0].transpose(1, 2) + value_states = past_key_value[1].transpose(1, 2) + elif is_cross_attention: + # cross_attentions + key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) + value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) + value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) + value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) + else: + # self_attention + key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) + value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout + ) + + attn_output = attn_output.reshape(bsz, q_len, -1) + attn_output = self.out_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. 
Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
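A toy illustration of the left-padding assumption flagged in the comment above (values are made up): during incremental decoding only the last `query_length` mask columns correspond to the newly added query tokens, which only lines up when padding sits on the left.

```python
# Toy illustration of the `attention_mask[:, -query_length:]` slice; values are made up.
import torch

query_length = 2                                 # two freshly added decoder positions

left_padded = torch.tensor([[0, 0, 1, 1, 1]])    # padding on the left, real tokens last
right_padded = torch.tensor([[1, 1, 1, 0, 0]])   # padding on the right (not supported here)

print(left_padded[:, -query_length:])            # tensor([[1, 1]]) - new tokens kept
print(right_padded[:, -query_length:])           # tensor([[0, 0]]) - new tokens would be masked out
```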
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +MBART_ATTENTION_CLASSES = { + "default": MBartAttention, + "flash_attention_2": MBartFlashAttention2, +} + + class MBartEncoderLayer(nn.Module): def __init__(self, config: MBartConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = MBartAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = MBART_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -329,23 +565,27 @@ class MBartDecoderLayer(nn.Module): def __init__(self, config: MBartConfig): super().__init__() self.embed_dim = config.d_model + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" - self.self_attn = MBartAttention( + self.self_attn = MBART_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = MBartAttention( + self.encoder_attn = MBART_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) @@ -472,6 +712,7 @@ class MBartPreTrainedModel(PreTrainedModel): base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["MBartDecoderLayer", "MBartAttention"] + _supports_flash_attn_2 = True def _init_weights(self, module): std = self.config.init_std @@ -766,7 +1007,11 @@ def forward( # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) + if getattr(self.config, "_flash_attn_2_enabled", False): + attention_mask = attention_mask if 0 in attention_mask else None + else: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None @@ -970,16 +1215,24 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, input_shape, inputs_embeds, past_key_values_length - ) + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) # expand encoder attention mask if 
encoder_hidden_states is not None and encoder_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - encoder_attention_mask = _prepare_4d_attention_mask( - encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] - ) + if getattr(self.config, "_flash_attn_2_enabled", False): + encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None + else: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _prepare_4d_attention_mask( + encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) # embed positions positions = self.embed_positions(input, past_key_values_length) diff --git a/src/transformers/models/musicgen/modeling_musicgen.py b/src/transformers/models/musicgen/modeling_musicgen.py index 5d96359999b4..2a015fc0321f 100644 --- a/src/transformers/models/musicgen/modeling_musicgen.py +++ b/src/transformers/models/musicgen/modeling_musicgen.py @@ -145,7 +145,7 @@ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): return self.weights.index_select(0, position_ids.view(-1)).detach() -# Copied from transformers.models.bart.modeling_bart.BartAttention +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Musicgen class MusicgenAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" @@ -156,12 +156,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[MusicgenConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -170,6 +173,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/nllb_moe/modeling_nllb_moe.py b/src/transformers/models/nllb_moe/modeling_nllb_moe.py index 22708f112522..3dde07da66a8 100644 --- a/src/transformers/models/nllb_moe/modeling_nllb_moe.py +++ b/src/transformers/models/nllb_moe/modeling_nllb_moe.py @@ -467,12 +467,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[NllbMoeConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -481,6 +484,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index 35eb1ffc1b58..18af4d518a89 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -119,12 +119,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[PegasusConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + 
self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -133,6 +136,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -263,15 +267,21 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus +PEGASUS_ATTENTION_CLASSES = {"default": PegasusAttention} + + +# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus, MBART->PEGASUS class PegasusEncoderLayer(nn.Module): def __init__(self, config: PegasusConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = PegasusAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = PEGASUS_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -332,28 +342,32 @@ def forward( return outputs -# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus +# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus, MBART->PEGASUS class PegasusDecoderLayer(nn.Module): def __init__(self, config: PegasusConfig): super().__init__() self.embed_dim = config.d_model + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" - self.self_attn = PegasusAttention( + self.self_attn = PEGASUS_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = PegasusAttention( + self.encoder_attn = PEGASUS_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) diff --git a/src/transformers/models/pegasus_x/modeling_pegasus_x.py b/src/transformers/models/pegasus_x/modeling_pegasus_x.py index 89c4f29cc026..5af397be97b3 100755 --- a/src/transformers/models/pegasus_x/modeling_pegasus_x.py +++ b/src/transformers/models/pegasus_x/modeling_pegasus_x.py @@ -128,12 +128,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[PegasusXConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -142,6 +145,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index 
4d8fe161f806..ad298c6d3890 100644 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -112,12 +112,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[PLBartConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -126,6 +129,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -256,15 +260,18 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->PLBart +# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->PLBart, BART->PLBART class PLBartEncoderLayer(nn.Module): def __init__(self, config: PLBartConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = PLBartAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = PLBART_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -325,28 +332,35 @@ def forward( return outputs -# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->PLBart +PLBART_ATTENTION_CLASSES = {"default": PLBartAttention} + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->PLBart, BART->PLBART class PLBartDecoderLayer(nn.Module): def __init__(self, config: PLBartConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = PLBartAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + self.self_attn = PLBART_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = PLBartAttention( + self.encoder_attn = PLBART_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) @@ -743,8 +757,11 @@ def forward( # expand attention_mask if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) + if getattr(self.config, "_flash_attn_2_enabled", False): + attention_mask = attention_mask if 0 in attention_mask else None + else: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None @@ -947,16 +964,24 @@ def 
forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) * self.embed_scale - attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, input_shape, inputs_embeds, past_key_values_length - ) + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - encoder_attention_mask = _prepare_4d_attention_mask( - encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] - ) + if getattr(self.config, "_flash_attn_2_enabled", False): + encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None + else: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _prepare_4d_attention_mask( + encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) # embed positions positions = self.embed_positions(input, past_key_values_length) diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index 62d1f3e21f9a..0745663bc0fd 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -1101,12 +1101,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[SeamlessM4TConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -1115,6 +1118,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py index b98e093f8cc3..a5ebb9b2bb42 100644 --- a/src/transformers/models/sew/modeling_sew.py +++ b/src/transformers/models/sew/modeling_sew.py @@ -392,12 +392,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[SEWConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -406,6 +409,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index d3c48e6c91ee..57c74c8c42e2 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -178,12 +178,15 @@ def __init__( dropout: float = 
0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[Speech2TextConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -192,6 +195,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -322,15 +326,21 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech2Text +SPEECH_TO_TEXT_ATTENTION_CLASSES = {"default": Speech2TextAttention} + + +# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT class Speech2TextEncoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = Speech2TextAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -391,28 +401,32 @@ def forward( return outputs -# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text +# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT class Speech2TextDecoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" - self.self_attn = Speech2TextAttention( + self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = Speech2TextAttention( + self.encoder_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) diff --git a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py index eebc9e57cfa2..9a1bd94dd7f5 100755 --- a/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py @@ -125,12 +125,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[Speech2Text2Config] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim 
* num_heads) != self.embed_dim: raise ValueError( @@ -139,6 +142,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py index 5f961bc8e1ed..904c02b4f043 100644 --- a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -281,12 +281,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[TimeSeriesTransformerConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -295,6 +298,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -425,15 +429,18 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->TimeSeriesTransformer +# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->TimeSeriesTransformer, BART->TIME_SERIES_TRANSFORMER class TimeSeriesTransformerEncoderLayer(nn.Module): def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = TimeSeriesTransformerAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -494,28 +501,35 @@ def forward( return outputs -# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->TimeSeriesTransformer +TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES = {"default": TimeSeriesTransformerAttention} + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->TimeSeriesTransformer, with BART->TIME_SERIES_TRANSFORMER class TimeSeriesTransformerDecoderLayer(nn.Module): def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = TimeSeriesTransformerAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + self.self_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = TimeSeriesTransformerAttention( + self.encoder_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[attn_type]( self.embed_dim, 
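# (note) unlike the decoder self-attention above, this cross-attention block is built without is_causal=True,
# since it attends over the full encoder output rather than a causal prefix of the decoder sequence.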
config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index 057a9579c12b..11965bdb50e9 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -432,12 +432,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[UniSpeechConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -446,6 +449,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index c2889299574f..10a05edc72b0 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -446,12 +446,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[UniSpeechSatConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -460,6 +463,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index 9a2235cb2fdd..3d97e7c73d35 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -498,12 +498,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[Wav2Vec2Config] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -512,6 +515,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index 48f47fe12df7..a107adf74e16 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -19,6 +19,7 @@ import numpy as np import torch +import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss @@ -38,6 +39,7 @@ from ...utils import ( 
add_start_docstrings, add_start_docstrings_to_model_forward, + is_flash_attn_2_available, logging, replace_return_docstrings, ) @@ -45,6 +47,11 @@ from .tokenization_whisper import TASK_IDS, TO_LANGUAGE_CODE +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "WhisperConfig" @@ -57,6 +64,19 @@ ] +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + def sinusoids(length: int, channels: int, max_timescale: float = 10000) -> torch.Tensor: """Returns sinusoids for positional embedding""" if channels % 2 != 0: @@ -299,12 +319,15 @@ def __init__( dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, + is_causal: bool = False, + config: Optional[WhisperConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads + self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( @@ -313,6 +336,7 @@ def __init__( ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder + self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @@ -445,15 +469,227 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value -# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Whisper +# Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->Whisper +class WhisperFlashAttention2(WhisperAttention): + """ + Whisper flash attention module. This module inherits from `WhisperAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. 
+ """ + + def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # WhisperFlashAttention2 attention does not support output_attentions + if output_attentions: + raise ValueError("WhisperFlashAttention2 attention does not support output_attentions") + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, q_len, _ = hidden_states.size() + + # get query proj + query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0].transpose(1, 2) + value_states = past_key_value[1].transpose(1, 2) + elif is_cross_attention: + # cross_attentions + key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) + value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) + value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) + value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) + else: + # self_attention + key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) + value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. 
(LlamaRMSNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout + ) + + attn_output = attn_output.reshape(bsz, q_len, -1) + attn_output = self.out_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. 
Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
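# (the attention mask spans the full key/value length, so keeping only its last query_length columns lines the
# queries up with the most recent positions, which holds only when the padding sits on the left)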
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +WHISPER_ATTENTION_CLASSES = { + "default": WhisperAttention, + "flash_attention_2": WhisperFlashAttention2, +} + + +# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Whisper, MBART->WHISPER class WhisperEncoderLayer(nn.Module): def __init__(self, config: WhisperConfig): super().__init__() self.embed_dim = config.d_model - self.self_attn = WhisperAttention( + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + + self.self_attn = WHISPER_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, + config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout @@ -514,28 +750,32 @@ def forward( return outputs -# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Whisper +# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Whisper, MBART->WHISPER class WhisperDecoderLayer(nn.Module): def __init__(self, config: WhisperConfig): super().__init__() self.embed_dim = config.d_model + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" - self.self_attn = WhisperAttention( + self.self_attn = WHISPER_ATTENTION_CLASSES[attn_type]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + is_causal=True, + config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) - self.encoder_attn = WhisperAttention( + self.encoder_attn = WHISPER_ATTENTION_CLASSES[attn_type]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, + config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) @@ -638,6 +878,7 @@ class WhisperPreTrainedModel(PreTrainedModel): main_input_name = "input_features" supports_gradient_checkpointing = True _no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer"] + _supports_flash_attn_2 = True def _init_weights(self, module): std = self.config.init_std @@ -1070,9 +1311,14 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, input_shape, inputs_embeds, past_key_values_length - ) + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) # embed positions if input_ids is not None: diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 60a2d3b93ea6..05d48786148e 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py 
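With the pieces above in place (the WHISPER_ATTENTION_CLASSES dispatch, `_supports_flash_attn_2 = True`, and the 2d-mask pass-through in the decoder), Flash Attention 2 for Whisper is opted into at load time. A minimal usage sketch, assuming `flash-attn` is installed and a CUDA device with fp16 support; the checkpoint name and the silent dummy clip are illustrative placeholders:

import numpy as np
import torch
from transformers import AutoProcessor, WhisperForConditionalGeneration

processor = AutoProcessor.from_pretrained("openai/whisper-tiny")
# use_flash_attention_2=True sets config._flash_attn_2_enabled, so the layers pick WhisperFlashAttention2
model = WhisperForConditionalGeneration.from_pretrained(
    "openai/whisper-tiny", torch_dtype=torch.float16, use_flash_attention_2=True
).to("cuda")

audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz, stands in for real audio
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
generated_ids = model.generate(inputs.input_features.to("cuda", torch.float16))
print(processor.batch_decode(generated_ids, skip_special_tokens=True))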
@@ -21,13 +21,16 @@ import unittest import numpy as np +from pytest import mark import transformers from transformers import WhisperConfig from transformers.testing_utils import ( is_pt_flax_cross_test, + require_flash_attn, require_torch, require_torch_fp16, + require_torch_gpu, require_torchaudio, slow, torch_device, @@ -795,6 +798,107 @@ def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_ use_cache=use_cache, ) + @require_flash_attn + @require_torch_gpu + @mark.flash_attn_test + @slow + def test_flash_attn_2_inference(self): + import torch + + for model_class in self.all_model_classes: + if not model_class._supports_flash_attn_2: + return + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model_fa = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True + ) + model_fa.to(torch_device) + + model = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False + ) + model.to(torch_device) + + dummy_input = inputs_dict[model.main_input_name][:1] + if dummy_input.dtype in [torch.float32, torch.float16]: + dummy_input = dummy_input.to(torch.bfloat16) + + decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] + + outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + + logits = outputs.decoder_hidden_states[-1] + logits_fa = outputs_fa.decoder_hidden_states[-1] + + # whisper FA2 needs very high tolerance + assert torch.allclose(logits_fa, logits, atol=4e-1) + + # check with inference + dropout + model.train() + _ = model_fa(dummy_input, decoder_input_ids=decoder_input_ids) + + @require_flash_attn + @require_torch_gpu + @mark.flash_attn_test + @slow + def test_flash_attn_2_inference_padding_right(self): + import torch + + for model_class in self.all_model_classes: + if not model_class._supports_flash_attn_2: + return + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model_fa = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True + ) + model_fa.to(torch_device) + + model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False) + model.to(torch_device) + + dummy_input = inputs_dict[model.main_input_name][:1] + dummy_input = dummy_input.to(torch.float16) + + decoder_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=dummy_input.device, dtype=torch.long) + decoder_attention_mask = torch.tensor( + [[0, 0, 0, 1, 1, 1]], device=dummy_input.device, dtype=torch.long + ) + + outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + + logits = outputs.decoder_hidden_states[-1] + logits_fa = outputs_fa.decoder_hidden_states[-1] + + # whisper FA2 needs very high tolerance + assert torch.allclose(logits_fa, logits, atol=4e-1) + + other_inputs = { + "decoder_input_ids": decoder_input_ids, + "decoder_attention_mask": decoder_attention_mask, + "output_hidden_states": True, + } + + outputs = model(dummy_input, 
**other_inputs) + outputs_fa = model_fa(dummy_input, **other_inputs) + + logits = outputs.decoder_hidden_states[-1] + logits_fa = outputs_fa.decoder_hidden_states[-1] + + # whisper FA2 needs very high tolerance + assert torch.allclose(logits_fa[:, -2:], logits[:, -2:], atol=4e-1) + def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 595c72cda6fd..f96812c36da8 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2856,7 +2856,7 @@ def test_flash_attn_2_inference(self): if not model_class._supports_flash_attn_2: return - config, _ = self.model_tester.prepare_config_and_inputs_for_common() + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: @@ -2871,25 +2871,76 @@ def test_flash_attn_2_inference(self): ) model.to(torch_device) - dummy_input = torch.LongTensor([[1, 2, 3, 4, 5]]).to(torch_device) - dummy_attention_mask = torch.LongTensor([[0, 1, 1, 1, 1]]).to(torch_device) + dummy_input = inputs_dict[model.main_input_name][:1] + if dummy_input.dtype in [torch.float32, torch.float16]: + dummy_input = dummy_input.to(torch.bfloat16) + + dummy_attention_mask = inputs_dict.get("attention_mask", None) + + if dummy_attention_mask is not None: + dummy_attention_mask = dummy_attention_mask[:1] + dummy_attention_mask[:, 1:] = 1 + dummy_attention_mask[:, :1] = 0 - logits = model(dummy_input, output_hidden_states=True).hidden_states[-1] - logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1] + if model.config.is_encoder_decoder: + decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] + + outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + else: + outputs = model(dummy_input, output_hidden_states=True) + outputs_fa = model_fa(dummy_input, output_hidden_states=True) - self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)) + logits = ( + outputs.hidden_states[-1] + if not model.config.is_encoder_decoder + else outputs.decoder_hidden_states[-1] + ) + logits_fa = ( + outputs_fa.hidden_states[-1] + if not model.config.is_encoder_decoder + else outputs_fa.decoder_hidden_states[-1] + ) - output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) - logits_fa = output_fa.hidden_states[-1] + assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) - output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) - logits = output.hidden_states[-1] + if model.config.is_encoder_decoder: + other_inputs = { + "decoder_input_ids": decoder_input_ids, + "decoder_attention_mask": dummy_attention_mask, + "output_hidden_states": True, + } + if dummy_attention_mask is not None: + other_inputs["attention_mask"] = dummy_attention_mask + + outputs = model(dummy_input, **other_inputs) + outputs_fa = model_fa(dummy_input, **other_inputs) + else: + other_inputs = { + "output_hidden_states": True, + } + if dummy_attention_mask is not None: + other_inputs["attention_mask"] = dummy_attention_mask + + outputs = model(dummy_input, **other_inputs) + outputs_fa = model_fa(dummy_input, **other_inputs) + + logits = ( + outputs.hidden_states[-1] + if not model.config.is_encoder_decoder + else 
outputs.decoder_hidden_states[-1] + ) + logits_fa = ( + outputs_fa.hidden_states[-1] + if not model.config.is_encoder_decoder + else outputs_fa.decoder_hidden_states[-1] + ) - self.assertTrue(torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2)) + assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() - _ = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) + _ = model_fa(dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @@ -2902,7 +2953,7 @@ def test_flash_attn_2_inference_padding_right(self): if not model_class._supports_flash_attn_2: return - config, _ = self.model_tester.prepare_config_and_inputs_for_common() + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: @@ -2917,21 +2968,72 @@ def test_flash_attn_2_inference_padding_right(self): ) model.to(torch_device) - dummy_input = torch.LongTensor([[1, 2, 3, 4, 5]]).to(torch_device) - dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1, 0]]).to(torch_device) + dummy_input = inputs_dict[model.main_input_name][:1] + if dummy_input.dtype in [torch.float32, torch.float16]: + dummy_input = dummy_input.to(torch.bfloat16) + + dummy_attention_mask = inputs_dict.get("attention_mask", None) - logits = model(dummy_input, output_hidden_states=True).hidden_states[-1] - logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1] + if dummy_attention_mask is not None: + dummy_attention_mask = dummy_attention_mask[:1] + dummy_attention_mask[:, :-1] = 1 + dummy_attention_mask[:, -1:] = 0 - self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)) + if model.config.is_encoder_decoder: + decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] - output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) - logits_fa = output_fa.hidden_states[-1] + outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) + else: + outputs = model(dummy_input, output_hidden_states=True) + outputs_fa = model_fa(dummy_input, output_hidden_states=True) - output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) - logits = output.hidden_states[-1] + logits = ( + outputs.hidden_states[-1] + if not model.config.is_encoder_decoder + else outputs.decoder_hidden_states[-1] + ) + logits_fa = ( + outputs_fa.hidden_states[-1] + if not model.config.is_encoder_decoder + else outputs_fa.decoder_hidden_states[-1] + ) - self.assertTrue(torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2)) + assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) + + if model.config.is_encoder_decoder: + other_inputs = { + "decoder_input_ids": decoder_input_ids, + "decoder_attention_mask": dummy_attention_mask, + "output_hidden_states": True, + } + if dummy_attention_mask is not None: + other_inputs["attention_mask"] = dummy_attention_mask + + outputs = model(dummy_input, **other_inputs) + outputs_fa = model_fa(dummy_input, **other_inputs) + else: + other_inputs = { + "output_hidden_states": True, + } + if dummy_attention_mask is not None: + other_inputs["attention_mask"] = dummy_attention_mask + + outputs = model(dummy_input, **other_inputs) + outputs_fa = model_fa(dummy_input, **other_inputs) + + logits = 
( + outputs.hidden_states[-1] + if not model.config.is_encoder_decoder + else outputs.decoder_hidden_states[-1] + ) + logits_fa = ( + outputs_fa.hidden_states[-1] + if not model.config.is_encoder_decoder + else outputs_fa.decoder_hidden_states[-1] + ) + + assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_flash_attn @require_torch_gpu @@ -2944,7 +3046,7 @@ def test_flash_attn_2_generate_left_padding(self): if not model_class._supports_flash_attn_2: return - config, _ = self.model_tester.prepare_config_and_inputs_for_common() + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: @@ -2953,8 +3055,14 @@ def test_flash_attn_2_generate_left_padding(self): tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False, low_cpu_mem_usage=True ).to(torch_device) - dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) - dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device) + dummy_input = inputs_dict[model.main_input_name] + if dummy_input.dtype in [torch.float32, torch.bfloat16]: + dummy_input = dummy_input.to(torch.float16) + + dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) + # make sure we do left padding + dummy_attention_mask[:, :-1] = 0 + dummy_attention_mask[:, -1:] = 1 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False @@ -2981,7 +3089,7 @@ def test_flash_attn_2_generate_padding_right(self): if not model_class._supports_flash_attn_2: return - config, _ = self.model_tester.prepare_config_and_inputs_for_common() + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: @@ -2990,8 +3098,14 @@ def test_flash_attn_2_generate_padding_right(self): tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False, low_cpu_mem_usage=True ).to(torch_device) - dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) - dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [1, 1, 1, 0]]).to(torch_device) + dummy_input = inputs_dict[model.main_input_name] + if dummy_input.dtype in [torch.float32, torch.bfloat16]: + dummy_input = dummy_input.to(torch.float16) + + dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) + # make sure we do left padding + dummy_attention_mask[:, :-1] = 1 + dummy_attention_mask[:, -1:] = 0 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False @@ -3014,26 +3128,39 @@ def test_flash_attn_2_generate_padding_right(self): def test_flash_attn_2_generate_use_cache(self): import torch + max_new_tokens = 30 + for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: return - config, _ = self.model_tester.prepare_config_and_inputs_for_common() + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + dummy_input = inputs_dict[model_class.main_input_name] + if dummy_input.dtype in [torch.float32, torch.bfloat16]: + dummy_input = dummy_input.to(torch.float16) + + # make sure that all models have enough positions for generation + if hasattr(config, "max_position_embeddings"): + config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 + model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: 
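# (note) the temporary save below is reloaded a few lines later with use_flash_attention_2=True in fp16,
# so generation with a large cache runs through the Flash Attention 2 path.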
model.save_pretrained(tmpdirname) - dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) - dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device) + dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model = model_class.from_pretrained( - tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True + tmpdirname, + torch_dtype=torch.float16, + use_flash_attention_2=True, + low_cpu_mem_usage=True, ).to(torch_device) # Just test that a large cache works as expected _ = model.generate( - dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=30, do_sample=False + dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) @require_flash_attn @@ -3048,14 +3175,18 @@ def test_flash_attn_2_fp32_ln(self): if not model_class._supports_flash_attn_2: return - config, _ = self.model_tester.prepare_config_and_inputs_for_common() + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) - dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) - dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device) + dummy_input = inputs_dict[model.main_input_name] + dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) + + if model.config.is_encoder_decoder: + dummy_decoder_input_ids = inputs_dict["decoder_input_ids"] + dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"] model = model_class.from_pretrained( tmpdirname, @@ -3070,10 +3201,19 @@ def test_flash_attn_2_fp32_ln(self): if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) - _ = model(input_ids=dummy_input) - - # with attention mask - _ = model(input_ids=dummy_input, attention_mask=dummy_attention_mask) + if model.config.is_encoder_decoder: + _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids) + # with attention mask + _ = model( + dummy_input, + attention_mask=dummy_attention_mask, + decoder_input_ids=dummy_decoder_input_ids, + decoder_attention_mask=dummy_decoder_attention_mask, + ) + else: + _ = model(dummy_input) + # with attention mask + _ = model(dummy_input, attention_mask=dummy_attention_mask) global_rng = random.Random() From 7adaefe2bc8a2040c0089d04e269c25b044904a5 Mon Sep 17 00:00:00 2001 From: Roohollah Etemadi <90654451+etemadiamd@users.noreply.github.com> Date: Thu, 2 Nov 2023 06:05:20 -0400 Subject: [PATCH 056/268] support bf16 (#25879) * added bf16 support * added cuda availability check * applied make style, quality --- src/transformers/utils/import_utils.py | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 9ad802a48982..3c05cac7dbe2 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -305,26 +305,7 @@ def is_torch_bf16_gpu_available(): import torch - # since currently no utility function is available we build our own. - # some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51 - # with additional check for torch version - # to succeed: (torch is required to be >= 1.10 anyway) - # 1. 
the hardware needs to support bf16 (GPU arch >= Ampere, or CPU) - # 2. if using gpu, CUDA >= 11 - # 3. torch.autocast exists - # XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's - # really only correct for the 0th gpu (or currently set default device if different from 0) - if torch.cuda.is_available() and torch.version.cuda is not None: - if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: - return False - if int(torch.version.cuda.split(".")[0]) < 11: - return False - if not hasattr(torch.cuda.amp, "autocast"): - return False - else: - return False - - return True + return torch.cuda.is_available() and torch.cuda.is_bf16_supported() def is_torch_bf16_cpu_available(): From c52e429b1cb98ab2e6fa88f64f1a3255bd8ce26d Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Thu, 2 Nov 2023 18:27:13 +0800 Subject: [PATCH 057/268] Reproducible checkpoint for npu (#27208) * save NPU's RNG states when saving a checkpoint and set after all the data skip phase when resuming training. * re-trigger ci * re-trigger ci --- src/transformers/trainer.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 5def3ca89049..ad3b840385db 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -144,6 +144,7 @@ is_sagemaker_mp_enabled, is_torch_compile_available, is_torch_neuroncore_available, + is_torch_npu_available, is_torch_tpu_available, logging, strtobool, @@ -2321,6 +2322,17 @@ def _load_rng_state(self, checkpoint): ) if is_torch_tpu_available(): xm.set_rng_state(checkpoint_rng_state["xla"]) + if is_torch_npu_available(): + if self.args.parallel_mode == ParallelMode.DISTRIBUTED: + torch.npu.random.set_rng_state_all(checkpoint_rng_state["npu"]) + else: + try: + torch.npu.random.set_rng_state(checkpoint_rng_state["npu"]) + except Exception as e: + logger.info( + f"Didn't manage to set back the RNG states of the NPU because of the following error:\n {e}" + "\nThis won't yield the same results as if the training had not been interrupted." + ) def _save_checkpoint(self, model, trial, metrics=None): # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we @@ -2423,6 +2435,12 @@ def _save_checkpoint(self, model, trial, metrics=None): if is_torch_tpu_available(): rng_states["xla"] = xm.get_rng_state() + if is_torch_npu_available(): + if self.args.parallel_mode == ParallelMode.DISTRIBUTED: + rng_states["npu"] = torch.npu.random.get_rng_state_all() + else: + rng_states["npu"] = torch.npu.random.get_rng_state() + # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may # not yet exist. os.makedirs(output_dir, exist_ok=True) From 9b25c164bdb2754002e118065bc5045436b72773 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Thu, 2 Nov 2023 12:03:51 +0100 Subject: [PATCH 058/268] [`core` / `Quantization`] Fix for 8bit serialization tests (#27234) * fix for 8bit serialization * added regression tests. 
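For the reproducible-checkpoint change in PATCH 057 above, the Trainer now round-trips the NPU generator state alongside the existing Python/NumPy/CPU/CUDA ones. A generic sketch of that save-and-restore pattern, shown with the CPU and CUDA generators only since `torch.npu` requires the separate `torch_npu` package; the helper names are illustrative:

import random
import numpy as np
import torch

def collect_rng_states():
    states = {
        "python": random.getstate(),
        "numpy": np.random.get_state(),
        "cpu": torch.random.get_rng_state(),
    }
    if torch.cuda.is_available():
        states["cuda"] = torch.cuda.random.get_rng_state_all()  # one state per visible device
    return states

def restore_rng_states(states):
    random.setstate(states["python"])
    np.random.set_state(states["numpy"])
    torch.random.set_rng_state(states["cpu"])
    if "cuda" in states and torch.cuda.is_available():
        torch.cuda.random.set_rng_state_all(states["cuda"])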
* fixup --- src/transformers/modeling_utils.py | 8 ++++++- tests/quantization/bnb/test_mixed_int8.py | 27 +++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index e48c98c791be..fcb51e6a56be 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -2110,7 +2110,13 @@ def save_pretrained( # We're going to remove aliases before saving ptrs = collections.defaultdict(list) for name, tensor in state_dict.items(): - ptrs[id_tensor_storage(tensor)].append(name) + # Sometimes in the state_dict we have non-tensor objects. + # e.g. in bitsandbytes we have some `str` objects in the state_dict + if isinstance(tensor, torch.Tensor): + ptrs[id_tensor_storage(tensor)].append(name) + else: + # In the non-tensor case, fall back to the pointer of the object itself + ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors. shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} diff --git a/tests/quantization/bnb/test_mixed_int8.py b/tests/quantization/bnb/test_mixed_int8.py index 3be1e5582ac4..da2ce55d3105 100644 --- a/tests/quantization/bnb/test_mixed_int8.py +++ b/tests/quantization/bnb/test_mixed_int8.py @@ -369,6 +369,33 @@ def test_int8_serialization(self): self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) + def test_int8_serialization_regression(self): + r""" + Test whether it is possible to serialize a model in 8-bit - using not safetensors + """ + from bitsandbytes.nn import Int8Params + + with tempfile.TemporaryDirectory() as tmpdirname: + self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) + + # check that the file `quantization_config` is present + config = AutoConfig.from_pretrained(tmpdirname) + self.assertTrue(hasattr(config, "quantization_config")) + + model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") + + linear = get_some_linear_layer(model_from_saved) + self.assertTrue(linear.weight.__class__ == Int8Params) + self.assertTrue(hasattr(linear.weight, "SCB")) + + # generate + encoded_input = self.tokenizer(self.input_text, return_tensors="pt") + output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) + + self.assertEqual( + self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT + ) + def test_int8_serialization_sharded(self): r""" Test whether it is possible to serialize a model in 8-bit - sharded version. From 8a312956fd49efd69adb98c40996719d4c276a01 Mon Sep 17 00:00:00 2001 From: Pablo Montalvo <39954772+molbap@users.noreply.github.com> Date: Thu, 2 Nov 2023 12:25:41 +0100 Subject: [PATCH 059/268] Fuyu: improve image processing (#27007) * Fix Fuyu image scaling bug It could produce negative padding and hence inference errors for certain image sizes. 
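The modeling_utils fix in PATCH 058 above is what allows the following round trip when the 8-bit state dict carries non-tensor entries (the `str` metadata bitsandbytes adds). A minimal sketch, assuming bitsandbytes and a CUDA GPU are available; the checkpoint name and output folder are illustrative placeholders:

from transformers import AutoModelForCausalLM

model_8bit = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", load_in_8bit=True, device_map="auto")
# non-safetensors save: the state dict mixes Int8Params tensors with plain str entries
model_8bit.save_pretrained("./bloom-560m-8bit", safe_serialization=False)
reloaded = AutoModelForCausalLM.from_pretrained("./bloom-560m-8bit", load_in_8bit=True, device_map="auto")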
* initial rework commit * add batching capabilities, refactor image processing * add functional batching for a list of images and texts * make args explicit * Fuyu processing update (#27133) * Add file headers * Add file headers * First pass - preprocess method with standard args * First pass image processor rework * Small tweaks * More args and docstrings * Tidying iterating over batch * Tidying up * Modify to have quick tests (for now) * Fix up * BatchFeature * Passing tests * Add tests for processor * Sense check when patchifying * Add some tests * FuyuBatchFeature * Post-process box coordinates * Update to `size` in processor * Remove unused and duplicate constants * Store unpadded dims after resize * Fix up * Return FuyuBatchFeature * Get unpadded sizes after resize * Update exception * Fix return * Convert input `` coordinates to model format. * Post-process point coords, support multiple boxes/points in a single sequence * Replace constants * Update src/transformers/models/fuyu/image_processing_fuyu.py Co-authored-by: Pedro Cuenca * Preprocess List[List[image]] * Update src/transformers/models/fuyu/image_processing_fuyu.py Co-authored-by: Pedro Cuenca * Update to Amy's latest state. * post-processing returns a list of tensors * Fix error when target_sizes is None Co-authored-by: Pablo Montalvo * Update src/transformers/models/fuyu/image_processing_fuyu.py Co-authored-by: Pedro Cuenca * Update src/transformers/models/fuyu/image_processing_fuyu.py Co-authored-by: Pedro Cuenca * Update src/transformers/models/fuyu/image_processing_fuyu.py Co-authored-by: Pedro Cuenca * Update src/transformers/models/fuyu/image_processing_fuyu.py Co-authored-by: Pedro Cuenca * Review comments * Update src/transformers/models/fuyu/image_processing_fuyu.py Co-authored-by: Pedro Cuenca * Fix up * Fix up --------- Co-authored-by: Ubuntu Co-authored-by: Pedro Cuenca Co-authored-by: Pablo Montalvo * Fix conflicts in fuyu_follow_up_image_processing (#27228) fixing conflicts and updating on main * Revert "Fix conflicts in fuyu_follow_up_image_processing" (#27232) Revert "Fix conflicts in fuyu_follow_up_image_processing (#27228)" This reverts commit acce10b6c653dc7041fb9d18cfed55775afd6207. --------- Co-authored-by: Pedro Cuenca Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: Ubuntu --- src/transformers/feature_extraction_utils.py | 27 +- .../models/fuyu/image_processing_fuyu.py | 756 ++++++++++++++---- src/transformers/models/fuyu/modeling_fuyu.py | 6 +- .../models/fuyu/processing_fuyu.py | 628 +++++++++------ .../models/fuyu/test_image_processing_fuyu.py | 25 +- tests/models/fuyu/test_modeling_fuyu.py | 20 +- tests/models/fuyu/test_processing_fuyu.py | 137 +++- 7 files changed, 1149 insertions(+), 450 deletions(-) diff --git a/src/transformers/feature_extraction_utils.py b/src/transformers/feature_extraction_utils.py index 18218534c0cc..b626ff3dd717 100644 --- a/src/transformers/feature_extraction_utils.py +++ b/src/transformers/feature_extraction_utils.py @@ -112,17 +112,9 @@ def values(self): def items(self): return self.data.items() - def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): - """ - Convert the inner content to tensors. - - Args: - tensor_type (`str` or [`~utils.TensorType`], *optional*): - The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If - `None`, no modification is done. 
- """ + def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]] = None): if tensor_type is None: - return self + return None, None # Convert to TensorType if not isinstance(tensor_type, TensorType): @@ -167,6 +159,21 @@ def as_tensor(value, dtype=None): return np.asarray(value, dtype=dtype) is_tensor = is_numpy_array + return is_tensor, as_tensor + + def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): + """ + Convert the inner content to tensors. + + Args: + tensor_type (`str` or [`~utils.TensorType`], *optional*): + The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If + `None`, no modification is done. + """ + if tensor_type is None: + return self + + is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type) # Do the tensor conversion in batch for key, value in self.items(): diff --git a/src/transformers/models/fuyu/image_processing_fuyu.py b/src/transformers/models/fuyu/image_processing_fuyu.py index 2d83e18af407..0e415980c97f 100644 --- a/src/transformers/models/fuyu/image_processing_fuyu.py +++ b/src/transformers/models/fuyu/image_processing_fuyu.py @@ -1,27 +1,182 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for Fuyu.""" + import math -from typing import List, Union +from typing import Dict, List, Optional, Union import numpy as np -from ...image_processing_utils import BaseImageProcessor +from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import ( - normalize, pad, resize, + to_channel_dimension_format, +) +from ...image_utils import ( + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + is_valid_image, + make_list_of_images, + to_numpy_array, +) +from ...utils import ( + TensorType, + is_torch_available, + is_torch_device, + is_torch_dtype, + logging, + requires_backends, ) -from ...image_utils import to_numpy_array -from ...utils import is_torch_available, is_vision_available, logging, requires_backends - -if is_vision_available(): - import PIL if is_torch_available(): import torch + logger = logging.get_logger(__name__) +def make_list_of_list_of_images( + images: Union[List[List[ImageInput]], List[ImageInput], ImageInput] +) -> List[List[ImageInput]]: + if is_valid_image(images): + return [[images]] + + if isinstance(images, list) and all(isinstance(image, list) for image in images): + return images + + if isinstance(images, list): + return [make_list_of_images(image) for image in images] + + raise ValueError("images must be a list of list of images or a list of images or an image.") + + +class FuyuBatchFeature(BatchFeature): + """ + BatchFeature class for Fuyu image processor and processor. + + The outputs dictionary from the processors contains a mix of tensors and lists of tensors. 
+ """ + + def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): + """ + Convert the inner content to tensors. + + Args: + tensor_type (`str` or [`~utils.TensorType`], *optional*): + The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If + `None`, no modification is done. + """ + if tensor_type is None: + return self + + is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type) + + def _convert_tensor(elem): + if is_tensor(elem): + return elem + return as_tensor(elem) + + def _safe_convert_tensor(elem): + try: + return _convert_tensor(elem) + except: # noqa E722 + if key == "overflowing_values": + raise ValueError("Unable to create tensor returning overflowing values of different lengths. ") + raise ValueError( + "Unable to create tensor, you should probably activate padding " + "with 'padding=True' to have batched tensors with the same length." + ) + + # Do the tensor conversion in batch + for key, value in self.items(): + if isinstance(value, list) and isinstance(value[0], list): + # List[List[Any]] -> List[List[Tensor]] + self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value] + elif isinstance(value, list): + # List[Any] -> List[Tensor] + self[key] = [_safe_convert_tensor(elem) for elem in value] + else: + # Any -> Tensor + self[key] = _safe_convert_tensor(value) + return self + + def to(self, *args, **kwargs) -> "BatchFeature": + """ + Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in + different `dtypes` and sending the `BatchFeature` to a different `device`. + + Args: + args (`Tuple`): + Will be passed to the `to(...)` function of the tensors. + kwargs (`Dict`, *optional*): + Will be passed to the `to(...)` function of the tensors. + + Returns: + [`BatchFeature`]: The same instance after modification. + """ + requires_backends(self, ["torch"]) + import torch # noqa + + new_data = {} + device = kwargs.get("device") + # Check if the args are a device or a dtype + if device is None and len(args) > 0: + # device should be always the first argument + arg = args[0] + if is_torch_dtype(arg): + # The first argument is a dtype + pass + elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): + device = arg + else: + # it's something else + raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.") + + def _to(elem): + # check if v is a floating point + if torch.is_floating_point(elem): + # cast and send to device + return elem.to(*args, **kwargs) + if device is not None: + return elem.to(device=device) + + return elem + + # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor` + for k, v in self.items(): + if isinstance(v, list) and isinstance(v[0], list): + # Data structure is a list of lists + new_v = [] + for elems in v: + new_v.append([_to(elem) for elem in elems]) + new_data[k] = new_v + elif isinstance(v, list): + # Data structure is a list + new_data[k] = [_to(elem) for elem in v] + else: + new_data[k] = _to(v) + self.data = new_data + return self + + class FuyuImageProcessor(BaseImageProcessor): """ This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should @@ -29,9 +184,9 @@ class FuyuImageProcessor(BaseImageProcessor): - Processing Images: Taking a batch of images as input. 
If the images are variable-sized, it resizes them based on the desired patch - dimensions. The image output is always img_h ........................................... 1080 img_w - ........................................... 1920 Then, it patches up these images using the patchify_image - function. + dimensions. The image output is always img_h, img_w of (1080, 1920) + + Then, it patches up these images using the patchify_image function. - Creating Image Input IDs: For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For @@ -40,6 +195,32 @@ class FuyuImageProcessor(BaseImageProcessor): - Image Patch Indices: For each image patch, the code maintains an index where these patches should be inserted in a token stream. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image to `size`. + size (`Dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. + do_pad (`bool`, *optional*, defaults to `True`): + Whether to pad the image to `size`. + padding_value (`float`, *optional*, defaults to 1.0): + The value to pad the image with. + padding_mode (`str`, *optional*, defaults to `"constant"`): + The padding mode to use when padding the image. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. + image_mean (`float`, *optional*, defaults to 0.5): + The mean to use when normalizing the image. + image_std (`float`, *optional*, defaults to 0.5): + The standard deviation to use when normalizing the image. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `1 / 255`): + The factor to use when rescaling the image. + patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. 
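# A small arithmetic sketch (ours, not part of the patch) of what the defaults above,
# size={"height": 1080, "width": 1920} and patch_size={"height": 30, "width": 30},
# imply for the patch count and flattened patch length used further down.
image_height, image_width = 1080, 1920   # default padded model input size
patch_height, patch_width = 30, 30       # default patch size
channels = 3

assert image_height % patch_height == 0 and image_width % patch_width == 0
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_length = channels * patch_height * patch_width
print(num_patches)   # 36 * 64 = 2304 patches for a full-size image
print(patch_length)  # 3 * 30 * 30 = 2700 values per flattened patch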
""" model_input_names = [ @@ -51,204 +232,483 @@ class FuyuImageProcessor(BaseImageProcessor): ] def __init__( - self, target_height=1080, target_width=1920, padding_value=1.0, padding_mode: str = "constant", **kwargs + self, + do_resize: bool = True, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_pad: bool = True, + padding_value: float = 1.0, + padding_mode: str = "constant", + do_normalize: bool = True, + image_mean: Union[float, List[float]] = 0.5, + image_std: Union[float, List[float]] = 0.5, + do_rescale: bool = True, + rescale_factor: float = 1 / 255, + patch_size: Optional[Dict[str, int]] = None, + **kwargs, ): super().__init__(**kwargs) - self.target_width = target_width - self.target_height = target_height + self.do_resize = do_resize + self.size = size if size is not None else {"height": 1080, "width": 1920} + self.resample = resample + self.do_pad = do_pad self.padding_value = padding_value self.padding_mode = padding_mode + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30} + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])`. - def get_num_patches(self, img_h: int, img_w: int, patch_dim_h: int, patch_dim_w: int) -> int: - """Calculate number of patches required to encode an image.""" - if img_h % patch_dim_h != 0: - raise ValueError(f"{img_h=} must be divisible by {patch_dim_h=}") - if img_w % patch_dim_w != 0: - raise ValueError(f"{img_w=} must be divisible by {patch_dim_w=}") + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + Returns: + `np.ndarray`: The resized image. 
+ """ + image_height, image_width = get_image_size(image, input_data_format) + target_height, target_width = size["height"], size["width"] - num_patches_per_dim_h = img_h // patch_dim_h - num_patches_per_dim_w = img_w // patch_dim_w - num_patches = num_patches_per_dim_h * num_patches_per_dim_w + if image_width <= target_width and image_height <= target_height: + return image + + height_scale_factor = target_height / image_height + width_scale_factor = target_width / image_width + optimal_scale_factor = min(height_scale_factor, width_scale_factor) + new_height = int(image_height * optimal_scale_factor) + new_width = int(image_width * optimal_scale_factor) + + scaled_image = resize( + image=image, + size=(new_height, new_width), + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + return scaled_image + + def pad_image( + self, + image: np.ndarray, + size: Dict[str, int], + mode: str = "constant", + constant_values: float = 1.0, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Pad an image to `(size["height"], size["width"])`. + + Args: + image (`np.ndarray`): + Image to pad. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + data_format (`ChannelDimension` or `str`, *optional*): + The data format of the output image. If unset, the same format as the input image is used. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + image_height, image_width = get_image_size(image, input_data_format) + target_height, target_width = size["height"], size["width"] + padding_top = 0 + padding_left = 0 + padding_bottom = target_height - image_height + padding_right = target_width - image_width + padded_image = pad( + image, + padding=((padding_top, padding_bottom), (padding_left, padding_right)), + mode=mode, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + return padded_image + + def preprocess( + self, + images, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample: Optional[PILImageResampling] = None, + do_pad: Optional[bool] = None, + padding_value: Optional[float] = None, + padding_mode: Optional[str] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[float] = None, + image_std: Optional[float] = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + patch_size: Optional[Dict[str, int]] = None, + data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + return_tensors: Optional[TensorType] = None, + ): + """ + + Utility function to preprocess the images and extract necessary information about original formats. + + Args: + images (`ImageInput`): + Images to preprocess. Expects a single image, a list or images or a list of lists of images. Pixel + values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image to `size`. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. 
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. + do_pad (`bool`, *optional*, defaults to `self.do_pad`): + Whether to pad the image to `size`. + padding_value (`float`, *optional*, defaults to `self.padding_value`): + The value to pad the image with. + padding_mode (`str`, *optional*, defaults to `self.padding_mode`): + The padding mode to use when padding the image. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float`, *optional*, defaults to `self.image_mean`): + The mean to use when normalizing the image. + image_std (`float`, *optional*, defaults to `self.image_std`): + The standard deviation to use when normalizing the image. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + The factor to use when rescaling the image. + patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format of the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
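# A hedged usage sketch (ours, not part of the patch), assuming torch and the vision
# extras are installed; the direct module import is used because that is the file this
# hunk creates. The printed shapes follow from the defaults documented above.
import numpy as np
from transformers.models.fuyu.image_processing_fuyu import FuyuImageProcessor

image_processor = FuyuImageProcessor()  # defaults: 1080x1920 output, 30x30 patches
image = np.random.randint(0, 256, (450, 800, 3), dtype=np.uint8)  # one HWC uint8 image

batch = image_processor.preprocess(image, return_tensors="pt")
print(batch["images"][0][0].shape)      # torch.Size([3, 1080, 1920]): resized (a no-op here) then padded
print(batch["image_unpadded_heights"])  # height after resizing, before padding (450 here)
print(batch["image_unpadded_widths"])   # width after resizing, before padding (800 here)
print(batch["image_scale_factors"])     # resize factor relative to the original image (1.0 here)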
+ """ + + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + resample = resample if resample is not None else self.resample + do_pad = do_pad if do_pad is not None else self.do_pad + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + padding_value = padding_value if padding_value is not None else self.padding_value + padding_mode = padding_mode if padding_mode is not None else self.padding_mode + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + patch_size = patch_size if patch_size is not None else self.patch_size + + if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images): + raise ValueError("Multiple images for a single sample are not yet supported.") + + batch_images = make_list_of_list_of_images(images) + + if do_resize and size is None: + raise ValueError("Size must be specified if do_resize is True.") + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_normalize and image_mean is None or image_std is None: + raise ValueError("image_mean and image_std must be specified if do_normalize is True.") + + # All transformations expect numpy arrays. + batch_images = [[to_numpy_array(image) for image in images] for images in batch_images] + + if is_scaled_image(batch_images[0][0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
+ input_data_format = infer_channel_dimension_format(batch_images[0][0]) + + original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] + + if do_resize: + batch_images = [ + [self.resize(image, size=size, input_data_format=input_data_format) for image in images] + for images in batch_images + ] + + image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] + image_unpadded_heights = [[image_size[0]] for image_size in image_sizes] + image_unpadded_widths = [[image_size[1]] for image_size in image_sizes] + + # scale_h is the same as scale_w + image_scale_factors = [ + [resized_size[0] / original_size[0]] + for original_size, resized_size in zip(original_image_sizes, image_sizes) + ] + + if do_pad: + batch_images = [ + [ + self.pad_image( + image, + size=size, + mode=padding_mode, + constant_values=padding_value, + input_data_format=input_data_format, + ) + for image in images + ] + for images in batch_images + ] + + if do_rescale: + batch_images = [ + [self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images] + for images in batch_images + ] + + if do_normalize: + batch_images = [ + [ + self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + for images in batch_images + ] + + if data_format is not None: + batch_images = [ + [to_channel_dimension_format(image, data_format, input_data_format) for image in images] + for images in batch_images + ] + + data = { + "images": batch_images, + "image_unpadded_heights": image_unpadded_heights, + "image_unpadded_widths": image_unpadded_widths, + "image_scale_factors": image_scale_factors, + } + return FuyuBatchFeature(data=data, tensor_type=return_tensors) + + def get_num_patches(self, image_height: int, image_width: int, patch_size: Dict[str, int] = None) -> int: + """ + Calculate number of patches required to encode an image. + + Args: + image_height (`int`): + Height of the image. + image_width (`int`): + Width of the image. + patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. + """ + patch_size = patch_size if patch_size is not None else self.patch_size + patch_height, patch_width = self.patch_size["height"], self.patch_size["width"] + + if image_height % patch_height != 0: + raise ValueError(f"{image_height=} must be divisible by {patch_height}") + if image_width % patch_width != 0: + raise ValueError(f"{image_width=} must be divisible by {patch_width}") + + num_patches_per_dim_h = image_height // patch_height + num_patches_per_dim_w = image_width // patch_width + num_patches = num_patches_per_dim_h * num_patches_per_dim_w return num_patches - def patchify_image(self, image: "torch.Tensor", patch_dim_h: int, patch_dim_w: int) -> "torch.Tensor": + def patchify_image(self, image: "torch.Tensor", patch_size: Optional[Dict[str, int]] = None) -> "torch.Tensor": """ Convert an image into a tensor of patches. Args: - image: Image to convert. Shape: [batch, channels, height, width] - patch_dim_h: Height of each patch. - patch_dim_w: Width of each patch. + image (`torch.Tensor`): + Image to convert. Shape: [batch, channels, height, width] + patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. 
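# A standalone sketch (ours, not part of the patch) of the unfold-based patching used
# in patchify_image below, on a small tensor so the intermediate shapes are easy to follow.
import torch

batch_size, channels, height, width = 1, 3, 60, 90
patch_height, patch_width = 30, 30
image = torch.arange(batch_size * channels * height * width, dtype=torch.float32).reshape(
    batch_size, channels, height, width
)

patches = image.unfold(2, patch_height, patch_height).unfold(3, patch_width, patch_width)
patches = patches.contiguous().view(batch_size, channels, -1, patch_height, patch_width)
patches = patches.permute(0, 2, 3, 4, 1).reshape(batch_size, -1, channels * patch_height * patch_width)
print(patches.shape)  # torch.Size([1, 6, 2700]): (60 // 30) * (90 // 30) patches of 3 * 30 * 30 values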
""" requires_backends(self, ["torch"]) + patch_size = patch_size if patch_size is not None else self.patch_size + patch_height, patch_width = patch_size["height"], patch_size["width"] # TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871 # torch implementation is faster but does not handle non-squares - batch_size, channels, height, width = image.shape - unfolded_along_height = image.unfold(2, patch_dim_h, patch_dim_h) - patches = unfolded_along_height.unfold(3, patch_dim_w, patch_dim_w) - - patches_reshaped = patches.contiguous().view(batch_size, channels, -1, patch_dim_h, patch_dim_w) - - patches_final = patches_reshaped.permute(0, 2, 3, 4, 1).reshape( - batch_size, -1, channels * patch_dim_h * patch_dim_w - ) - - return patches_final + batch_size, channels, _, _ = image.shape + unfolded_along_height = image.unfold(2, patch_height, patch_height) + patches = unfolded_along_height.unfold(3, patch_width, patch_width) + patches = patches.contiguous() + patches = patches.view(batch_size, channels, -1, patch_height, patch_width) + patches = patches.permute(0, 2, 3, 4, 1) + patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width) + return patches - def process_images_for_model_input( + def preprocess_with_tokenizer_info( self, image_input: "torch.Tensor", image_present: "torch.Tensor", image_unpadded_h: "torch.Tensor", image_unpadded_w: "torch.Tensor", - image_patch_dim_h: int, - image_patch_dim_w: int, image_placeholder_id: int, image_newline_id: int, variable_sized: bool, - ) -> dict: + patch_size: Optional[Dict[str, int]] = None, + ) -> FuyuBatchFeature: """Process images for model input. In particular, variable-sized images are handled here. Args: - image_input: [batch_size, 1, c, h, w] tensor of images padded to model input size. - image_present: [batch_size, 1] tensor of 1s and 0s indicating whether an image is present. - image_unpadded_h: [batch_size, 1] tensor of unpadded image heights. - image_unpadded_w: [batch_size, 1] tensor of unpadded image widths. - image_patch_dim_h: The height of the image patches. - image_patch_dim_w: The width of the image patches. - image_placeholder_id: The id of the image placeholder token. - image_newline_id: The id of the image newline token. - variable_sized: Whether to process images as variable-sized. + image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]): + Tensor of images padded to model input size. + image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]): + Tensor of 1s and 0s indicating whether an image is present. + image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]): + Tensor of unpadded image heights. + image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]): + Tensor of unpadded image widths. + image_placeholder_id (int): + The id of the image placeholder token. Comes from an associated tokenizer. + image_newline_id (int): + The id of the image newline token. Comes from an associated tokenizer. + variable_sized (bool): + Whether to process images as variable-sized. + patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): + Size of the patches. """ requires_backends(self, ["torch"]) + + patch_size = patch_size if patch_size is not None else self.patch_size + patch_height, patch_width = patch_size["height"], patch_size["width"] + # Only images that are present. 
images: List[List[torch.Tensor]] = [] - image_patches: List[List[torch.Tensor]] = [] + batch_image_patches: List[List[torch.Tensor]] = [] # Image input ids for every subsequence, including ones with no image present. - image_input_ids: List[List[torch.Tensor]] = [] - for bi in range(image_input.shape[0]): - images.append([]) - image_input_ids.append([]) - image_patches.append([]) - for si in range(image_input.shape[1]): - if image_present[bi, si]: - image = image_input[bi, si] + batch_image_input_ids: List[List[torch.Tensor]] = [] + for batch_index in range(image_input.shape[0]): + image_input_ids = [] + image_patches = [] + for subseq_index in range(image_input.shape[1]): + if image_present[batch_index, subseq_index]: + image = image_input[batch_index, subseq_index] + image_height, image_width = image.shape[1], image.shape[2] if variable_sized: # The min() is required here due to floating point issues: # math.ceil(torch.tensor(300).cuda() / 30) == 11 new_h = min( - image.shape[1], math.ceil(image_unpadded_h[bi, si] / image_patch_dim_h) * image_patch_dim_h + image_height, + math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height, ) new_w = min( - image.shape[2], math.ceil(image_unpadded_w[bi, si] / image_patch_dim_w) * image_patch_dim_w + image_width, + math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width, ) image = image[:, :new_h, :new_w] - images[bi].append(image) - num_patches = self.get_num_patches( - img_h=image.shape[1], - img_w=image.shape[2], - patch_dim_h=image_patch_dim_h, - patch_dim_w=image_patch_dim_w, + image_height, image_width = new_h, new_w + + num_patches = self.get_num_patches(image_height=image_height, image_width=image_width) + tensor_of_image_ids = torch.full( + [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device ) - ids = torch.full([num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device) - patches = self.patchify_image( - image=image.unsqueeze(0), patch_dim_h=image_patch_dim_h, patch_dim_w=image_patch_dim_w - ).squeeze(0) + patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0) + assert num_patches == patches.shape[0] + if variable_sized: # Now terminate each line with |NEWLINE|. - ids = ids.reshape(-1, new_w // image_patch_dim_w) - ids = torch.cat( - [ - ids, - torch.full( - [ids.shape[0], 1], image_newline_id, dtype=torch.int32, device=image_input.device - ), - ], - dim=1, + tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width) + newline_ids = torch.full( + [tensor_of_image_ids.shape[0], 1], + image_newline_id, + dtype=torch.int32, + device=image_input.device, ) - ids = ids.reshape(-1) - image_input_ids[bi].append(ids) - image_patches[bi].append(patches) + tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1) + tensor_of_image_ids = tensor_of_image_ids.reshape(-1) + + images.append([image]) + image_input_ids.append(tensor_of_image_ids) + image_patches.append(patches) else: - image_input_ids[bi].append(torch.tensor([], dtype=torch.int32, device=image_input.device)) + image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device)) + + batch_image_input_ids.append(image_input_ids) + batch_image_patches.append(image_patches) # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in # the stream. 
image_patch_indices_per_batch: List[List[torch.Tensor]] = [] image_patch_indices_per_subsequence: List[List[torch.Tensor]] = [] - for bi in range(len(image_input_ids)): - image_patch_indices_per_batch.append([]) - image_patch_indices_per_subsequence.append([]) + + for sample_image_input_ids in batch_image_input_ids: index_offset = 0 - for si in range(len(image_input_ids[bi])): + per_batch_indices = [] + per_subsequence_indices = [] + for subseq_image_input_ids in sample_image_input_ids: # Indices of image patches. - num_patches = torch.count_nonzero(image_input_ids[bi][si] == image_placeholder_id) + patches_mask = subseq_image_input_ids == image_placeholder_id + num_patches = torch.count_nonzero(patches_mask) indices = torch.arange( - num_patches, - dtype=image_input_ids[bi][si].dtype, - device=image_input_ids[bi][si].device, + num_patches, dtype=subseq_image_input_ids.dtype, device=subseq_image_input_ids.device ) # Place those indices in the image input ids token stream, with -1 representing non-index tokens. - indices_in_stream_per_batch = torch.full_like(image_input_ids[bi][si], -1) - indices_in_stream_per_subsequence = torch.full_like(image_input_ids[bi][si], -1) - indices_in_stream_per_batch[ - torch.nonzero(image_input_ids[bi][si] == image_placeholder_id, as_tuple=True)[0] - ] = (indices + index_offset) - indices_in_stream_per_subsequence[ - torch.nonzero(image_input_ids[bi][si] == image_placeholder_id, as_tuple=True)[0] - ] = indices - - image_patch_indices_per_batch[bi].append(indices_in_stream_per_batch) - image_patch_indices_per_subsequence[bi].append(indices_in_stream_per_subsequence) - index_offset += num_patches - - return { - "images": images, - "image_input_ids": image_input_ids, - "image_patches": image_patches, - "image_patch_indices_per_batch": image_patch_indices_per_batch, - "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence, - } + indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1) + indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1) + patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0] - def _scale_to_target_aspect_ratio(self, image: np.ndarray) -> np.ndarray: - image_height, image_width, _ = image.shape - if image_width <= self.target_width and image_height <= self.target_height: - return image - - height_scale_factor = self.target_height / image_height - width_scale_factor = self.target_width / image_width - optimal_scale_factor = min(height_scale_factor, width_scale_factor) + indices_in_stream_per_batch[patches_inds] = indices + index_offset + indices_in_stream_per_subsequence[patches_inds] = indices - new_height = int(image_height * optimal_scale_factor) - new_width = int(image_width * optimal_scale_factor) - - scaled_image = resize(image=image, size=(new_height, new_width)) - return np.array(scaled_image) - - def _pad_to_target_size(self, image: np.ndarray) -> np.ndarray: - image_height, image_width, _ = image.shape - - padding_top = 0 - padding_left = 0 - padding_bottom = self.target_height - image_height - padding_right = self.target_width - image_width + per_batch_indices.append(indices_in_stream_per_batch) + per_subsequence_indices.append(indices_in_stream_per_subsequence) + index_offset += num_patches - padded_image = pad( - image, - ((padding_top, padding_bottom), (padding_left, padding_right)), - mode=self.padding_mode, - constant_values=self.padding_value, + image_patch_indices_per_batch.append(per_batch_indices) + 
image_patch_indices_per_subsequence.append(per_subsequence_indices) + + return FuyuBatchFeature( + data={ + "images": images, + "image_input_ids": batch_image_input_ids, + "image_patches": batch_image_patches, + "image_patch_indices_per_batch": image_patch_indices_per_batch, + "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence, + } ) - return padded_image - - def apply_transformation(self, image: Union[np.ndarray, PIL.Image.Image]) -> np.ndarray: - if isinstance(image, PIL.Image.Image): - image = to_numpy_array(image) - scaled_image = self._scale_to_target_aspect_ratio(image) - padded_image = self._pad_to_target_size(scaled_image) - normalized_padded_image = normalize(padded_image, 0.5, 0.5) - return normalized_padded_image diff --git a/src/transformers/models/fuyu/modeling_fuyu.py b/src/transformers/models/fuyu/modeling_fuyu.py index 89127843befe..345d0a0e92a5 100644 --- a/src/transformers/models/fuyu/modeling_fuyu.py +++ b/src/transformers/models/fuyu/modeling_fuyu.py @@ -257,8 +257,10 @@ def forward( if inputs_embeds is None: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if image_patches is not None and past_key_values is None: - patch_embeddings = self.vision_embed_tokens(image_patches.to(self.vision_embed_tokens.weight.dtype)) - patch_embeddings = patch_embeddings.to(inputs_embeds.device) + patch_embeddings = [ + self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)).squeeze(0) + for patch in image_patches + ] inputs_embeds = self.gather_continuous_embeddings( word_embeddings=inputs_embeds, continuous_embeddings=patch_embeddings, diff --git a/src/transformers/models/fuyu/processing_fuyu.py b/src/transformers/models/fuyu/processing_fuyu.py index ea660b072d72..e0f362a6c876 100644 --- a/src/transformers/models/fuyu/processing_fuyu.py +++ b/src/transformers/models/fuyu/processing_fuyu.py @@ -1,45 +1,50 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
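# A rough standalone sketch (ours, not part of the patch) of the scatter that the
# modeling_fuyu.py hunk above feeds into gather_continuous_embeddings: each per-sample
# patch embedding is written into the word-embedding sequence at the positions whose
# image_patches_indices entry is non-negative, while -1 marks ordinary text positions.
# All tensor values below are illustrative.
import torch

hidden_size = 4
word_embeddings = torch.zeros(1, 6, hidden_size)               # [batch, seq_len, hidden]
patch_embeddings = torch.ones(3, hidden_size)                  # 3 projected image patches for this sample
image_patches_indices = torch.tensor([[0, 1, 2, -1, -1, -1]])  # -1 = text position, >= 0 = patch index

output = word_embeddings.clone()
dst_positions = torch.nonzero(image_patches_indices[0] >= 0, as_tuple=True)[0]
src_indices = image_patches_indices[0][image_patches_indices[0] >= 0]
output[0, dst_positions] = patch_embeddings[src_indices]
print(output[0, :, 0])  # tensor([1., 1., 1., 0., 0., 0.]): patches placed, text slots untouched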
+""" +Image/Text processor class for GIT +""" import re -from typing import Any, Iterable, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union import numpy as np -from ...image_utils import ( - ChannelDimension, - get_image_size, - infer_channel_dimension_format, - is_scaled_image, - to_numpy_array, -) from ...processing_utils import ProcessorMixin -from ...utils import is_torch_available, is_vision_available, logging +from ...tokenization_utils_base import PaddingStrategy, TruncationStrategy +from ...utils import TensorType, is_torch_available, logging, requires_backends -if is_torch_available() and is_vision_available(): - from .image_processing_fuyu import FuyuImageProcessor +if is_torch_available(): + from .image_processing_fuyu import FuyuBatchFeature logger = logging.get_logger(__name__) -if is_vision_available(): - import PIL if is_torch_available(): import torch -BBOX_OPEN_STRING = "<0x00>" # -BBOX_CLOSE_STRING = "<0x01>" # -POINT_OPEN_STRING = "<0x02>" # -POINT_CLOSE_STRING = "<0x03>" # TEXT_REPR_BBOX_OPEN = "" TEXT_REPR_BBOX_CLOSE = "" TEXT_REPR_POINT_OPEN = "" TEXT_REPR_POINT_CLOSE = "" -TOKEN_BBOX_OPEN_STRING = BBOX_OPEN_STRING = "<0x00>" # -BBOX_CLOSE_STRING = "<0x01>" # -TOKEN_BBOX_CLOSE_STRING = TOKEN_POINT_OPEN_STRING = POINT_OPEN_STRING = "<0x02>" # -TOKEN_POINT_CLOSE_STRING = POINT_CLOSE_STRING = "<0x03>" # +TOKEN_BBOX_OPEN_STRING = "<0x00>" # +TOKEN_BBOX_CLOSE_STRING = "<0x01>" # +TOKEN_POINT_OPEN_STRING = "<0x02>" # +TOKEN_POINT_CLOSE_STRING = "<0x03>" # BEGINNING_OF_ANSWER_STRING = "<0x04>" # @@ -87,18 +92,16 @@ def construct_full_unpacked_stream( all_bi_stream = [] - for bi in range(batch_size): + for batch_index in range(batch_size): all_si_stream = [] # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence # and append to lists. We use lists rather than tensors because each subsequence is variable-sized. - for si in range(num_sub_sequences): - image_adjustment = image_tokens[bi][si] - si_stream = torch.cat([image_adjustment, input_stream[bi, si]], dim=0) - num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[bi][si] - - all_si_stream.append(si_stream[:num_real_tokens]) - # Combine all subsequences for this batch entry. Still using a list because each batch entry is variable-sized. + # TODO Remove this logic in a subsequent release since subsequences are not supported. 
+ image_adjustment = image_tokens[batch_index][0] + subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0) + num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0] + all_si_stream.append(subsequence_stream[:num_real_tokens]) all_bi_stream.append(torch.cat(all_si_stream, dim=0)) return all_bi_stream @@ -137,7 +140,7 @@ def _segment_prompt_into_text_token_conversions(prompt: str) -> List: return prompt_text_list -def _transform_coordinates_and_tokenize(prompt: str, transformed_image, tokenizer) -> List[int]: +def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]: """ This function transforms the prompt in the following fashion: - and to their respective token mappings @@ -161,7 +164,7 @@ def _transform_coordinates_and_tokenize(prompt: str, transformed_image, tokenize for elem in prompt_text_list: if elem[1]: # This is a location, we need to tokenize it - within_tag_tokenized = _transform_within_tags(elem[0], transformed_image, tokenizer) + within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer) # Surround the text with the open and close tags transformed_prompt_tokens.extend(within_tag_tokenized) else: @@ -169,7 +172,7 @@ def _transform_coordinates_and_tokenize(prompt: str, transformed_image, tokenize return transformed_prompt_tokens -def _transform_within_tags(text: str, transformed_image, tokenizer) -> List[int]: +def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]: """ Given a bounding box of the fashion 1, 2, 3, 4 | 1, 2 This function is responsible for converting 1, 2, 3, 4 into tokens of 1 2 3 4 without any commas. @@ -188,16 +191,14 @@ def _transform_within_tags(text: str, transformed_image, tokenizer) -> List[int] num_ints = [float(num.strip()) for num in num_int_strs] # scale to transformed image siz if len(num_ints) == 2: - num_ints_translated = scale_point_to_transformed_image( - x=num_ints[0], y=num_ints[1], transformed_image=transformed_image - ) + num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor) elif len(num_ints) == 4: num_ints_translated = scale_bbox_to_transformed_image( top=num_ints[0], left=num_ints[1], bottom=num_ints[2], right=num_ints[3], - transformed_image=transformed_image, + scale_factor=scale_factor, ) else: raise ValueError(f"Invalid number of ints: {len(num_ints)}") @@ -209,7 +210,7 @@ def _transform_within_tags(text: str, transformed_image, tokenizer) -> List[int] def _tokenize_prompts_with_image_and_batch( tokenizer, prompts: List[List[str]], - transformed_images: Optional[List[List["torch.Tensor"]]], + scale_factors: Optional[List[List["torch.Tensor"]]], max_tokens_to_generate: int, max_position_embeddings: int, add_BOS: bool, # Same issue with types as above @@ -223,13 +224,13 @@ def _tokenize_prompts_with_image_and_batch( """ # If not tool use, tranform the coordinates while tokenizing - if transformed_images is not None: + if scale_factors is not None: transformed_prompt_tokens = [] - for prompt_seq, transformed_image_seq in zip(prompts, transformed_images): + for prompt_seq, scale_factor_seq in zip(prompts, scale_factors): transformed_prompt_tokens.append( [ - _transform_coordinates_and_tokenize(prompt, transformed_image, tokenizer) - for prompt, transformed_image in zip(prompt_seq, transformed_image_seq) + _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer) + for prompt, scale_factor in zip(prompt_seq, 
scale_factor_seq) ] ) else: @@ -260,7 +261,7 @@ def _tokenize_prompts_with_image_and_batch( # Number of tokens in the each sample of the batch. samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings) if max_prompt_len + max_tokens_to_generate > max_position_embeddings: - print( + logger.warning( f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate}", f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible.", ) @@ -279,86 +280,30 @@ def _tokenize_prompts_with_image_and_batch( return prompts_tokens_tensor, prompts_length_tensor -def original_to_transformed_h_coords(self, original_coords): - # apply crop - cropped_coords = ( - self._clamp_coords(original_coords, min_value=self.crop_top, max_value=self.crop_bottom) - self.crop_top - ) - # apply scale - scaled_coords = self._scale_coords(cropped_coords, scale=self.scaled_h / self.original_h) - # apply pad - return scaled_coords + self.padding_top +# Simplified assuming self.crop_top = self.padding_top = 0 +def original_to_transformed_h_coords(original_coords, scale_h): + return np.round(original_coords * scale_h).astype(np.int32) -def original_to_transformed_w_coords(self, original_coords): - # apply crop - cropped_coords = ( - self._clamp_coords(original_coords, min_value=self.crop_left, max_value=self.crop_right) - self.crop_left - ) - # apply scale - scaled_coords = self._scale_coords(cropped_coords, scale=self.scaled_w / self.original_w) - # apply pad - return scaled_coords + self.padding_left +# Simplified assuming self.crop_left = self.padding_left = 0 +def original_to_transformed_w_coords(original_coords, scale_w): + return np.round(original_coords * scale_w).astype(np.int32) -def scale_point_to_transformed_image(x: float, y: float) -> List[int]: - x_scaled = original_to_transformed_w_coords(np.array([x / 2]))[0] - y_scaled = original_to_transformed_h_coords(np.array([y / 2]))[0] +def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]: + x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0] + y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0] return [x_scaled, y_scaled] -def scale_bbox_to_transformed_image(top: float, left: float, bottom: float, right: float) -> List[int]: - top_scaled = original_to_transformed_w_coords(np.array([top / 2]))[0] - left_scaled = original_to_transformed_h_coords(np.array([left / 2]))[0] - bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]))[0] - right_scaled = original_to_transformed_h_coords(np.array([right / 2]))[0] - return [top_scaled, left_scaled, bottom_scaled, right_scaled] - - -# Copied from transformers.models.detr.image_processing_detr.max_across_indices -def max_across_indices(values: Iterable[Any]) -> List[Any]: - """ - Return the maximum value across all indices of an iterable of values. - """ - return [max(values_i) for values_i in zip(*values)] - - -# Copied from transformers.models.detr.image_processing_detr.get_max_height_width -def get_max_height_width( - images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None +def scale_bbox_to_transformed_image( + top: float, left: float, bottom: float, right: float, scale_factor: float ) -> List[int]: - """ - Get the maximum height and width across all images in a batch. 
- """ - if input_data_format is None: - input_data_format = infer_channel_dimension_format(images[0]) - - if input_data_format == ChannelDimension.FIRST: - _, max_height, max_width = max_across_indices([img.shape for img in images]) - elif input_data_format == ChannelDimension.LAST: - max_height, max_width, _ = max_across_indices([img.shape for img in images]) - else: - raise ValueError(f"Invalid channel dimension format: {input_data_format}") - return (max_height, max_width) - - -# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask -def make_pixel_mask( - image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None -) -> np.ndarray: - """ - Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. - - Args: - image (`np.ndarray`): - Image to make the pixel mask for. - output_size (`Tuple[int, int]`): - Output size of the mask. - """ - input_height, input_width = get_image_size(image, channel_dim=input_data_format) - mask = np.zeros(output_size, dtype=np.int64) - mask[:input_height, :input_width] = 1 - return mask + top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0] + left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0] + bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0] + right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0] + return [top_scaled, left_scaled, bottom_scaled, right_scaled] class FuyuProcessor(ProcessorMixin): @@ -384,42 +329,148 @@ def __init__(self, image_processor, tokenizer): self.tokenizer = tokenizer self.max_tokens_to_generate = 10 self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it? 
- self.image_processor = FuyuImageProcessor() - - def _process_images(self, images): - """Utility function to preprocess the images and extract necessary information about original formats.""" - batch_images = [] - image_unpadded_heights = [] - image_unpadded_widths = [] - - for image in images: - image = to_numpy_array(image) - if not is_scaled_image(image): - image = image / 255.0 - channel_dimension = infer_channel_dimension_format(image, 3) - if channel_dimension == ChannelDimension.FIRST: - width_index = 2 - height_index = 1 - elif channel_dimension == ChannelDimension.LAST: - width_index = 1 - height_index = 0 - - image_unpadded_widths.append([image.shape[width_index]]) - image_unpadded_heights.append([image.shape[height_index]]) - - # Reproduct adept padding sampler - padded_image = self.image_processor.apply_transformation(image) - - tensor_img = torch.Tensor(padded_image).permute(2, 0, 1) - batch_images.append([tensor_img]) - - return batch_images, torch.Tensor(image_unpadded_heights), torch.Tensor(image_unpadded_widths) - - def __call__(self, text=None, images=None, return_tensors=None, **kwargs): + self.pad_token_id = 0 + self.dummy_image_index = -1 + + def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool): + max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs) + max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs) + + batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []} + + for entry in model_inputs: + for key, tensor in entry.items(): + if key == "input_ids": + num_padding_tokens = max_length_input_ids - tensor.shape[1] + padded_input_ids = torch.cat( + [ + torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long), + tensor, + ], + dim=1, + ) + batched_inputs[key].append(padded_input_ids) + + attention_mask = torch.cat( + [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)], + dim=1, + ) + batched_inputs["attention_mask"].append(attention_mask) + + elif key == "image_patches": + # For image_patches, we don't pad but just append them to the list. + batched_inputs[key].append(tensor) + + else: # for image_patches_indices + num_padding_indices = max_length_image_patch_indices - tensor.shape[1] + padded_indices = torch.cat( + [ + torch.full( + (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long + ), + tensor, + ], + dim=1, + ) + batched_inputs[key].append(padded_indices) + batched_keys = ["input_ids", "image_patches_indices"] + if return_attention_mask: + batched_keys.append("attention_mask") + for key in batched_keys: + batched_inputs[key] = torch.cat(batched_inputs[key], dim=0) + + return batched_inputs + + def get_sample_encoding( + self, + prompts, + scale_factors, + image_unpadded_heights, + image_unpadded_widths, + image_placeholder_id, + image_newline_id, + tensor_batch_images, + ): + image_present = torch.ones(1, 1, 1) + model_image_input = self.image_processor.preprocess_with_tokenizer_info( + image_input=tensor_batch_images, + image_present=image_present, + image_unpadded_h=image_unpadded_heights, + image_unpadded_w=image_unpadded_widths, + image_placeholder_id=image_placeholder_id, + image_newline_id=image_newline_id, + variable_sized=True, + ) + # FIXME max_tokens_to_generate is embedded into this processor's call. 
+ prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch( + tokenizer=self.tokenizer, + prompts=prompts, + scale_factors=scale_factors, + max_tokens_to_generate=self.max_tokens_to_generate, + max_position_embeddings=self.max_position_embeddings, + add_BOS=True, + add_beginning_of_answer_token=True, + ) + image_padded_unpacked_tokens = construct_full_unpacked_stream( + num_real_text_tokens=prompts_length, + input_stream=prompt_tokens, + image_tokens=model_image_input["image_input_ids"], + batch_size=1, + num_sub_sequences=self.subsequence_length, + ) + # Construct inputs for image patch indices. + unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream( + num_real_text_tokens=prompts_length, + input_stream=torch.full_like(prompt_tokens, -1), + image_tokens=model_image_input["image_patch_indices_per_batch"], + batch_size=1, + num_sub_sequences=self.subsequence_length, + ) + max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens) + max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings) + tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0])) + + # Use same packing logic for the image patch indices. + image_patch_input_indices = full_unpacked_stream_to_tensor( + all_bi_tokens_to_place=[tokens_to_place], + full_unpacked_stream=unpacked_image_patch_indices_per_batch, + fill_value=-1, + batch_size=1, + new_seq_len=max_seq_len_batch, + offset=0, + ) + image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]]) + batch_encoding = { + "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0), + "image_patches": image_patches_tensor, + "image_patches_indices": image_patch_input_indices, + } + return batch_encoding + + def __call__( + self, + text=None, + images=None, + add_special_tokens: bool = True, + return_attention_mask: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_token_type_ids: bool = False, + return_length: bool = False, + verbose: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> "FuyuBatchFeature": """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to - encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to + encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. @@ -433,130 +484,211 @@ def __call__(self, text=None, images=None, return_tensors=None, **kwargs): tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. - return_tensors (`str` or [`~utils.TensorType`], *optional*): - If set, will return tensors of a particular framework. Acceptable values are: - - - `'tf'`: Return TensorFlow `tf.constant` objects. - - `'pt'`: Return PyTorch `torch.Tensor` objects. 
- - `'np'`: Return NumPy `np.ndarray` objects. - - `'jax'`: Return JAX `jnp.ndarray` objects. - Returns: - [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + [`FuyuBatchEncoding`]: A [`FuyuBatchEncoding`] with the following fields: - - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when - `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not - `None`). - - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`. + - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`. + - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when + `return_attention_mask=True`. """ + requires_backends(self, ["torch"]) + + # --- Check input validity --- + if not return_attention_mask: + raise ValueError("`return_attention_mask=False` is not supported for this model.") if text is None and images is None: - raise ValueError("You have to specify either text or images. Both cannot be none.") + raise ValueError("You have to specify either text or images. Both cannot be None.") + if text is not None and images is None: + logger.warning("You are processing a text with no associated image. Make sure it is intended.") + self.current_processor = self.tokenizer + text_encoding = self.tokenizer( + text=text, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_token_type_ids=return_token_type_ids, + return_length=return_length, + verbose=verbose, + return_tensors=return_tensors, + **kwargs, + ) + return text_encoding + + if text is None and images is not None: + logger.warning("You are processing an image with no associated text. Make sure it is intended.") + prompts = [[""]] if text is not None and images is not None: if isinstance(text, str): prompts = [[text]] elif isinstance(text, list): prompts = [[text_seq] for text_seq in text] - batch_images = [] - if isinstance(images, PIL.Image.Image): - images = [images] - if isinstance(images, list): - batch_images, image_unpadded_heights, image_unpadded_widths = self._process_images(images) - # image_unpadded_heights = image_unpadded_heights.unsqueeze(0) - # image_unpadded_widths = image_unpadded_widths.unsqueeze(0) - else: - raise ValueError("images must be a list of ndarrays or PIL Images to be processed.") - - # Note: the original adept code has a handling of image_unpadded_h and w, but it doesn't seem to hold - # when there are several different size subsequences per batch. The current implementation reflects - # that limitation and should be documented. - # - self.subsequence_length = 1 # Each batch contains only one sequence. - self.batch_size = len(batch_images) - # FIXME max_tokens_to_generate is embedded into this processor's call. 
- prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch( - tokenizer=self.tokenizer, - prompts=prompts, - transformed_images=batch_images, - max_tokens_to_generate=self.max_tokens_to_generate, - max_position_embeddings=self.max_position_embeddings, - add_BOS=True, - add_beginning_of_answer_token=True, - ) - # same so far - - # This is 1 if there is an image per subsequence, else 0. [batch, 1, presence] - # the remainder of current image processing logic assumes subsequence_size = 1. - # Here it is OK as the model cannot handle > 1 subsequences - # the image could be absent however and image presence should be inferred from user batch input - # hence this code assumes the images are present. Use an assert? - - image_present = torch.ones(self.batch_size, 1, 1) - - image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1] - image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1] - tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1) - model_image_input = self.image_processor.process_images_for_model_input( - image_input=tensor_batch_images, - image_present=image_present, - image_unpadded_h=image_unpadded_heights, - image_unpadded_w=image_unpadded_widths, - image_patch_dim_h=30, - image_patch_dim_w=30, + + # --- Preprocess images using self.image_processor --- + + # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors + image_encoding = self.image_processor.preprocess(images, return_tensors="pt") + batch_images = image_encoding["images"] + image_unpadded_heights = image_encoding["image_unpadded_heights"] + image_unpadded_widths = image_encoding["image_unpadded_widths"] + scale_factors = image_encoding["image_scale_factors"] + self.subsequence_length = 1 # Each batch contains only one sequence. + self.batch_size = len(batch_images) + + # --- Use self.tokenizer to get the ids of special tokens to insert into image ids --- + + image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1] + image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1] + tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1) + + # --- Use self.image_processor again to obtain the full token ids and batch inputs --- + all_encodings = [] + + for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip( + prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images + ): + sample_encoding = self.get_sample_encoding( + prompts=[prompt], + scale_factors=[scale_factor], + image_unpadded_heights=torch.tensor([image_unpadded_height]), + image_unpadded_widths=torch.tensor([image_unpadded_width]), image_placeholder_id=image_placeholder_id, image_newline_id=image_newline_id, - variable_sized=True, + tensor_batch_images=tensor_batch_image.unsqueeze(0), ) + all_encodings.append(sample_encoding) + batch_encoding = self._left_pad_inputs_with_attention_mask( + model_inputs=all_encodings, return_attention_mask=return_attention_mask + ) + return FuyuBatchFeature(data=batch_encoding) - image_padded_unpacked_tokens = construct_full_unpacked_stream( - num_real_text_tokens=prompts_length, - input_stream=prompt_tokens, - image_tokens=model_image_input["image_input_ids"], - batch_size=self.batch_size, - num_sub_sequences=self.subsequence_length, - ) - # Construct inputs for image patch indices. 
- unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream( - num_real_text_tokens=prompts_length, - input_stream=torch.full_like(prompt_tokens, -1), - image_tokens=model_image_input["image_patch_indices_per_batch"], - batch_size=self.batch_size, - num_sub_sequences=self.subsequence_length, - ) - max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens) - max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings) - all_bi_tokens_to_place = [] - for bi in range(self.batch_size): - tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[bi].shape[0])) - all_bi_tokens_to_place.append(tokens_to_place) - - # Use same packing logic for the image patch indices. - image_patch_input_indices = full_unpacked_stream_to_tensor( - all_bi_tokens_to_place=all_bi_tokens_to_place, - full_unpacked_stream=unpacked_image_patch_indices_per_batch, - fill_value=-1, - batch_size=self.batch_size, - new_seq_len=max_seq_len_batch, - offset=0, - ) + def post_process_box_coordinates(self, outputs, target_sizes=None): + """ + Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space. + Coordinates will be returned in "box" format, with the following pattern: + `top, left, bottom, right` + + Point coordinates are not supported yet. - image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]]).unsqueeze(1) - return { - "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0), - "image_patches": image_patches_tensor[0][0].unsqueeze(0), - "image_patches_indices": image_patch_input_indices, - } + Args: + outputs ([`GenerateOutput`]): + Raw outputs from `generate`. + target_sizes (`torch.Tensor`, *optional*): + Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in + the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left + to None, coordinates will not be rescaled. + + Returns: + `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with + boxed and possible rescaled coordinates. 
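# A standalone arithmetic sketch (ours, not part of the patch) of the coordinate round
# trip implemented by _transform_within_tags on the prompt side and tokens_to_boxes /
# tokens_to_points below on the output side, for a single coordinate.
original_size = (2160, 3840)           # (height, width) of the user's image
scale = min(1080 / 2160, 1920 / 3840)  # factor applied when resizing to the model input, 0.5 here

x_original = 1000                              # pixel coordinate in the original image
x_in_prompt = round((x_original / 2) * scale)  # value tokenized into the prompt: 250
x_recovered = 2 * int(x_in_prompt / scale)     # value reported back after post-processing
print(x_recovered)                             # 1000, i.e. back in original-image pixels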
+ """ + + def scale_factor_to_fit(original_size, target_size=None): + height, width = original_size + if target_size is None: + max_height = self.image_processor.size["height"] + max_width = self.image_processor.size["width"] + else: + max_height, max_width = target_size + if width <= max_width and height <= max_height: + return 1.0 + return min(max_height / height, max_width / width) + + def find_delimiters_pair(tokens, start_token, end_token): + start_id = self.tokenizer.convert_tokens_to_ids(start_token) + end_id = self.tokenizer.convert_tokens_to_ids(end_token) + + starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0] + ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0] + + if torch.any(starting_positions) and torch.any(ending_positions): + return (starting_positions[0], ending_positions[0]) + return (None, None) + + def tokens_to_boxes(tokens, original_size): + while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != ( + None, + None, + ): + start, end = pair + if end != start + 5: + continue + + # Retrieve transformed coordinates from tokens + coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) + + # Scale back to original image size and multiply by 2 + scale = scale_factor_to_fit(original_size) + top, left, bottom, right = [2 * int(float(c) / scale) for c in coords] + + # Replace the IDs so they get detokenized right + replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}" + replacement = self.tokenizer.tokenize(replacement)[1:] + replacement = self.tokenizer.convert_tokens_to_ids(replacement) + replacement = torch.tensor(replacement).to(tokens) + + tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) + return tokens + + def tokens_to_points(tokens, original_size): + while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != ( + None, + None, + ): + start, end = pair + if end != start + 3: + continue + + # Retrieve transformed coordinates from tokens + coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) + + # Scale back to original image size and multiply by 2 + scale = scale_factor_to_fit(original_size) + x, y = [2 * int(float(c) / scale) for c in coords] + + # Replace the IDs so they get detokenized right + replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}" + replacement = self.tokenizer.tokenize(replacement)[1:] + replacement = self.tokenizer.convert_tokens_to_ids(replacement) + replacement = torch.tensor(replacement).to(tokens) + + tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) + return tokens + + if target_sizes is None: + target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs) + elif target_sizes.shape[1] != 2: + raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") + + if len(outputs) != len(target_sizes): + raise ValueError("Make sure that you pass in as many target sizes as output sequences") + + results = [] + for seq, size in zip(outputs, target_sizes): + seq = tokens_to_boxes(seq, size) + seq = tokens_to_points(seq, size) + results.append(seq) + + return results def batch_decode(self, *args, **kwargs): """ - This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please + This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. 
Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ - This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) diff --git a/tests/models/fuyu/test_image_processing_fuyu.py b/tests/models/fuyu/test_image_processing_fuyu.py index 73f0936aacf1..a9930e2fb812 100644 --- a/tests/models/fuyu/test_image_processing_fuyu.py +++ b/tests/models/fuyu/test_image_processing_fuyu.py @@ -24,7 +24,8 @@ @require_torchvision class TestFuyuImageProcessor(unittest.TestCase): def setUp(self): - self.processor = FuyuImageProcessor(target_height=160, target_width=320, padding_value=1.0) + self.size = {"height": 160, "width": 320} + self.processor = FuyuImageProcessor(size=self.size, padding_value=1.0) self.batch_size = 3 self.channels = 3 self.height = 300 @@ -38,29 +39,25 @@ def setUp(self): self.sample_image_pil = Image.fromarray(self.sample_image) def test_patches(self): - expected_num_patches = self.processor.get_num_patches( - img_h=self.height, img_w=self.width, patch_dim_h=self.image_patch_dim_h, patch_dim_w=self.image_patch_dim_w - ) + expected_num_patches = self.processor.get_num_patches(image_height=self.height, image_width=self.width) - patches_final = self.processor.patchify_image( - image=self.image_input, patch_dim_h=self.image_patch_dim_h, patch_dim_w=self.image_patch_dim_w - ) + patches_final = self.processor.patchify_image(image=self.image_input) assert ( patches_final.shape[1] == expected_num_patches ), f"Expected {expected_num_patches} patches, got {patches_final.shape[1]}." 
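A hedged sketch of the renamed `FuyuImageProcessor` API exercised by the updated tests above (illustrative only, not part of the patch; the shapes mirror the test fixtures, and the 30x30 patch size matches the values the old processor call hard-coded earlier in this series).

import numpy as np
import torch
from transformers import FuyuImageProcessor

processor = FuyuImageProcessor(size={"height": 160, "width": 320}, padding_value=1.0)

# Patch helpers now read the patch size from the processor config instead of
# taking patch_dim_h / patch_dim_w arguments.
image_batch = torch.rand(3, 3, 300, 300)  # [batch, channels, height, width]
num_patches = processor.get_num_patches(image_height=300, image_width=300)
patches = processor.patchify_image(image=image_batch)
assert patches.shape[1] == num_patches

# preprocess resizes to fit the target size, pads, and also reports the
# unpadded sizes and scale factors that FuyuProcessor consumes downstream.
sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
encoding = processor.preprocess(sample_image)
transformed = encoding.images[0][0]  # channels-first, padded to 160x320
unpadded_h, unpadded_w = encoding["image_unpadded_heights"], encoding["image_unpadded_widths"]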
def test_scale_to_target_aspect_ratio(self): # (h:450, w:210) fitting (160, 320) -> (160, 210*160/450) - scaled_image = self.processor._scale_to_target_aspect_ratio(self.sample_image) + scaled_image = self.processor.resize(self.sample_image, size=self.size) self.assertEqual(scaled_image.shape[0], 160) self.assertEqual(scaled_image.shape[1], 74) def test_apply_transformation_numpy(self): - transformed_image = self.processor.apply_transformation(self.sample_image) - self.assertEqual(transformed_image.shape[0], 160) - self.assertEqual(transformed_image.shape[1], 320) + transformed_image = self.processor.preprocess(self.sample_image).images[0][0] + self.assertEqual(transformed_image.shape[1], 160) + self.assertEqual(transformed_image.shape[2], 320) def test_apply_transformation_pil(self): - transformed_image = self.processor.apply_transformation(self.sample_image_pil) - self.assertEqual(transformed_image.shape[0], 160) - self.assertEqual(transformed_image.shape[1], 320) + transformed_image = self.processor.preprocess(self.sample_image_pil).images[0][0] + self.assertEqual(transformed_image.shape[1], 160) + self.assertEqual(transformed_image.shape[2], 320) diff --git a/tests/models/fuyu/test_modeling_fuyu.py b/tests/models/fuyu/test_modeling_fuyu.py index b9c061e7a004..9fb6820e45ff 100644 --- a/tests/models/fuyu/test_modeling_fuyu.py +++ b/tests/models/fuyu/test_modeling_fuyu.py @@ -3,7 +3,7 @@ import requests -from transformers import AutoTokenizer, FuyuConfig, is_torch_available, is_vision_available +from transformers import FuyuConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_modeling_common import ids_tensor, random_attention_mask @@ -14,7 +14,7 @@ if is_torch_available() and is_vision_available(): - from transformers import FuyuImageProcessor, FuyuProcessor + from transformers import FuyuProcessor if is_torch_available(): @@ -267,11 +267,8 @@ class FuyuIntegrationTest(unittest.TestCase): # , ModelTesterMixin) all_model_classes = ("FuyuForCausalLM") if is_torch_available() else () def setUp(self): - self.pretrained_model_name = "huggingface/new_model_release_weights" - tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model_name) - image_processor = FuyuImageProcessor() - - self.processor = FuyuProcessor(image_processor=image_processor, tokenizer=tokenizer) + self.pretrained_model_name = "adept/fuyu-8b" + self.processor = FuyuProcessor.from_pretrained(self.pretrained_model_name) self.model = FuyuForCausalLM.from_pretrained(self.pretrained_model_name) self.bus_image_url = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" @@ -280,9 +277,8 @@ def setUp(self): @slow def test_model_8b_chat_greedy_generation_bus_captioning(self): - EXPECTED_TEXT_COMPLETION = """A bus parked on the side of a road.|ENDOFTEXT|""" + EXPECTED_TEXT_COMPLETION = """A blue bus parked on the side of a road.|ENDOFTEXT|""" text_prompt_coco_captioning = "Generate a coco-style caption.\n" - model_inputs_bus_captioning = self.processor(text=text_prompt_coco_captioning, images=self.bus_image_pil) generated_tokens = self.model.generate(**model_inputs_bus_captioning, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) @@ -297,7 +293,7 @@ def test_model_8b_chat_greedy_generation_bus_captioning(self): """ @slow - @require_torch_gpu + @require_torch_accelerator def test_model_8b_chat_greedy_generation_bus_color(self): 
EXPECTED_TEXT_COMPLETION = "The bus is blue.\n|ENDOFTEXT|" text_prompt_bus_color = "What color is the bus?\n" @@ -314,7 +310,7 @@ def test_model_8b_chat_greedy_generation_bus_color(self): self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence) @slow - @require_torch_gpu + @require_torch_accelerator def test_model_8b_chat_greedy_generation_chart_vqa(self): # fmt: off EXPECTED_TEXT_TOKENS = ["The","life expectancy","at","birth","of male","s in","","20","18","is","","80",".","7",".","\n","|ENDOFTEXT|",] @@ -340,7 +336,7 @@ def test_model_8b_chat_greedy_generation_chart_vqa(self): self.assertEqual(expected_text_completion, clean_sequence) @slow - @require_torch_gpu + @require_torch_accelerator def test_model_8b_chat_greedy_generation_bounding_box(self): EXPECTED_TEXT_COMPLETION = "\x00194213202244\x01|ENDOFTEXT|" text_prompt_bbox = "When presented with a box, perform OCR to extract text contained within it. If provided with text, generate the corresponding bounding box.\\nWilliams" # noqa: E231 diff --git a/tests/models/fuyu/test_processing_fuyu.py b/tests/models/fuyu/test_processing_fuyu.py index 1c75b2b0ae31..459386952c3e 100644 --- a/tests/models/fuyu/test_processing_fuyu.py +++ b/tests/models/fuyu/test_processing_fuyu.py @@ -26,16 +26,14 @@ class FuyuProcessingTest(unittest.TestCase): # TODO Which mixins do we add here """ """ def setUp(self): - pretrained_model_name = "huggingface/pre_release_model" - tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name) - image_processor = FuyuImageProcessor() + pretrained_model_name = "adept/fuyu-8b" + self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name) + self.image_processor = FuyuImageProcessor() - processor = FuyuProcessor(image_processor=image_processor, tokenizer=tokenizer) - text_prompt = "Generate a coco-style caption.\\n" + self.processor = FuyuProcessor(image_processor=self.image_processor, tokenizer=self.tokenizer) + self.text_prompt = "Generate a coco-style caption.\\n" bus_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" - bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content)) - - self.one_image_bus_model_inputs = processor(text=text_prompt, images=bus_image_pil) + self.bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content)) def test_fuyu_processing(self): """ @@ -44,11 +42,119 @@ def test_fuyu_processing(self): # fmt: off EXPECTED_IMAGE_PATCH_INPUTS = torch.Tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, -1, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, -1, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, -1, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, -1, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, -1, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, -1, 220, 221, 222, 223, 224, 225, 226, 
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, -1, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, -1, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, -1, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,]]).to(torch.int64) EXPECTED_PADDED_UNPACKED_TOKEN_INPUTS = torch.Tensor([[71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 1, 128340, 71374, 71389, 120412, 71377, 71835, 71374, 73615, 71375, 71399, 71435, 71122,]]).to(torch.int64) + + one_image_bus_model_inputs = self.processor(text=self.text_prompt, images=self.bus_image_pil) + + # fmt: on + torch.testing.assert_close(one_image_bus_model_inputs["image_patches_indices"], EXPECTED_IMAGE_PATCH_INPUTS) + torch.testing.assert_close(one_image_bus_model_inputs["input_ids"], EXPECTED_PADDED_UNPACKED_TOKEN_INPUTS) + + def test_fuyu_processing_no_image(self): + """ + Test to check processor works with just text input + """ + processor_outputs = self.processor(text=self.text_prompt) + tokenizer_outputs = self.tokenizer(self.text_prompt) + 
self.assertEqual(processor_outputs["input_ids"], tokenizer_outputs["input_ids"]) + + def test_fuyu_processing_no_text(self): + """ + Test to check processor works with just image input + """ + # fmt: off + EXPECTED_IMAGE_PATCH_INPUTS = torch.Tensor([ + [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, -1, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, -1, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, -1, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, -1, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, -1, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, -1, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + -1, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, -1, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, -1, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1] + ]).to(torch.int64) + # fmt: on + + processor_outputs = self.processor(images=self.bus_image_pil) + self.assertTrue((processor_outputs["image_patches_indices"] == EXPECTED_IMAGE_PATCH_INPUTS).all()) + + def test_fuyu_processing_multiple_image_sample(self): + """ + Test to check processor works with multiple image inputs for a single text input + """ + # fmt: off + SINGLE_IMAGE_PATCH_INPUTS = torch.Tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, -1, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, -1, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, -1, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, -1, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, -1, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, -1, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, -1, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, -1, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 
274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, -1, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,]]).to(torch.int64) + SINGLE_PADDED_UNPACKED_TOKEN_INPUTS = torch.Tensor([[71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 1, 128340, 71374, 71389, 120412, 71377, 71835, 71374, 73615, 71375, 71399, 71435, 71122,]]).to(torch.int64) + + SINGLE_RESIZED_IMAGE_PATCH_INPUTS = torch.Tensor([[ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1, 12, 13, 14, -1, 15, 16, 17, -1, 18, 19, 20, -1, 21, 22, 23, -1, 24, 25, 26, -1, 27, 28, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]) + SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS = torch.Tensor([[ 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 1, 128340, 71374, 71389, 120412, 71377, 71835, 71374, 73615, 71375, 71399, 71435, 71122]]) # fmt: on - torch.testing.assert_close( - self.one_image_bus_model_inputs["image_patches_indices"], 
EXPECTED_IMAGE_PATCH_INPUTS + + # Batch of two images - equally sized + images = [self.bus_image_pil, self.bus_image_pil] + processor_outputs = self.processor(text=[self.text_prompt, self.text_prompt], images=images) + + self.assertTrue( + ( + processor_outputs["image_patches_indices"] + == torch.cat([SINGLE_IMAGE_PATCH_INPUTS, SINGLE_IMAGE_PATCH_INPUTS], dim=0) + ).all() + ) + self.assertTrue( + ( + processor_outputs["input_ids"] + == torch.cat([SINGLE_PADDED_UNPACKED_TOKEN_INPUTS, SINGLE_PADDED_UNPACKED_TOKEN_INPUTS], dim=0) + ).all() ) - torch.testing.assert_close(self.one_image_bus_model_inputs["input_ids"], EXPECTED_PADDED_UNPACKED_TOKEN_INPUTS) + + # Processes single images with different sizes as expected + images = [self.bus_image_pil] + processor_outputs = self.processor(text=self.text_prompt, images=images) + self.assertTrue((processor_outputs["image_patches_indices"] == SINGLE_IMAGE_PATCH_INPUTS).all()) + self.assertTrue((processor_outputs["input_ids"] == SINGLE_PADDED_UNPACKED_TOKEN_INPUTS).all()) + + images = [self.bus_image_pil.resize((64, 300))] + processor_outputs = self.processor(text=self.text_prompt, images=images) + self.assertTrue((processor_outputs["image_patches_indices"] == SINGLE_RESIZED_IMAGE_PATCH_INPUTS).all()) + self.assertTrue((processor_outputs["input_ids"] == SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS).all()) + + # Batch of two images - different sizes. Left-pads the smaller image inputs + images = [self.bus_image_pil, self.bus_image_pil.resize((64, 300))] + processor_outputs = self.processor(text=[self.text_prompt, self.text_prompt], images=images) + + padding_len_patch = SINGLE_IMAGE_PATCH_INPUTS.shape[1] - SINGLE_RESIZED_IMAGE_PATCH_INPUTS.shape[1] + padded_single_resized_image_patch = torch.cat( + [torch.ones([1, padding_len_patch]) * -1, SINGLE_RESIZED_IMAGE_PATCH_INPUTS], dim=1 + ) + expected_image_patch_inputs = torch.cat([SINGLE_IMAGE_PATCH_INPUTS, padded_single_resized_image_patch], dim=0) + + padding_len_token = ( + SINGLE_PADDED_UNPACKED_TOKEN_INPUTS.shape[1] - SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS.shape[1] + ) + padded_single_resized_padded_unpacked_token_inputs = torch.cat( + [torch.zeros([1, padding_len_token]), SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS], dim=1 + ) + expected_padded_unpacked_token_inputs = torch.cat( + [SINGLE_PADDED_UNPACKED_TOKEN_INPUTS, padded_single_resized_padded_unpacked_token_inputs], dim=0 + ) + + self.assertTrue((processor_outputs["image_patches_indices"] == expected_image_patch_inputs).all()) + self.assertTrue((processor_outputs["input_ids"] == expected_padded_unpacked_token_inputs).all()) @require_torch @@ -97,7 +203,6 @@ def setUp(self): """ Adding a mix of present and absent images. 
""" - self.image_processor = FuyuImageProcessor() self.image_input = torch.randn([1, 1, 3, 64, 64]) self.image_present = torch.tensor([[1]]) @@ -108,19 +213,19 @@ def setUp(self): self.image_placeholder_id = 999 self.image_newline_id = 888 self.variable_sized = True + self.image_processor = FuyuImageProcessor( + patch_size={"height": self.image_patch_dim_h, "width": self.image_patch_dim_w} + ) def test_process_images_for_model_input_fixed_sized(self): self.variable_sized = False - result = self.image_processor.process_images_for_model_input( + result = self.image_processor.preprocess_with_tokenizer_info( image_input=self.image_input, image_present=self.image_present, image_unpadded_h=self.image_unpadded_h, image_unpadded_w=self.image_unpadded_w, - image_patch_dim_h=self.image_patch_dim_h, - image_patch_dim_w=self.image_patch_dim_w, image_placeholder_id=self.image_placeholder_id, image_newline_id=self.image_newline_id, variable_sized=self.variable_sized, ) - print(result["images"][0][0]) self.assertEqual(result["images"][0][0].shape, torch.Size([3, 64, 64])) From 4557a0dede92ce985576fac478b754d76bba3c18 Mon Sep 17 00:00:00 2001 From: Michael Benayoun Date: Thu, 2 Nov 2023 13:03:30 +0100 Subject: [PATCH 060/268] Wrap `_prepare_4d_causal_attention_mask` as a leaf function (#27236) Wrap _prepare_4d_causal_attention_mask as a leaf function --- src/transformers/models/llama/modeling_llama.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index d9e3907fb971..703ebf0d9560 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -40,6 +40,7 @@ logging, replace_return_docstrings, ) +from ...utils.import_utils import is_torch_fx_available from .configuration_llama import LlamaConfig @@ -48,6 +49,12 @@ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa +# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. +# It means that the function will not be traced through and simply appear as a node in the graph. 
+if is_torch_fx_available(): + _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) + + logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlamaConfig" From 443bf5e9e25fdd6dfa31e35879485edacd848b4f Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Thu, 2 Nov 2023 15:03:09 +0100 Subject: [PATCH 061/268] Fix safetensors failing tests (#27231) * Fix Kosmos2 * Fix ProphetNet * Fix MarianMT * Fix M4T * XLM ProphetNet * ProphetNet fix * XLM ProphetNet * Final M4T fixes * Tied weights keys * Revert M4T changes * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../models/prophetnet/modeling_prophetnet.py | 29 +++++++++++++++++-- .../xlm_prophetnet/modeling_xlm_prophetnet.py | 29 +++++++++++++++++-- tests/models/kosmos2/test_modeling_kosmos2.py | 19 ++++++++++++ tests/test_modeling_common.py | 18 +++++++----- 4 files changed, 84 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index 9c84a85f1cf7..eb1576197e5e 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -1755,6 +1755,11 @@ def set_input_embeddings(self, value): self.encoder.word_embeddings = self.word_embeddings self.decoder.word_embeddings = self.word_embeddings + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings) + self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings) + def get_encoder(self): return self.encoder @@ -1876,6 +1881,10 @@ def get_output_embeddings(self): def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head) + def get_input_embeddings(self): return self.prophetnet.word_embeddings @@ -2070,7 +2079,11 @@ def get_decoder(self): PROPHETNET_START_DOCSTRING, ) class ProphetNetForCausalLM(ProphetNetPreTrainedModel): - _tied_weights_keys = ["lm_head.weight"] + _tied_weights_keys = [ + "prophetnet.word_embeddings.weight", + "prophetnet.decoder.word_embeddings.weight", + "lm_head.weight", + ] def __init__(self, config: ProphetNetConfig): # set config for CLM @@ -2100,6 +2113,10 @@ def get_output_embeddings(self): def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head) + def set_decoder(self, decoder): self.prophetnet.decoder = decoder @@ -2311,7 +2328,15 @@ class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel): def __init__(self, config: ProphetNetConfig): super().__init__(config) - self.decoder = ProphetNetDecoder(config) + + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.decoder = ProphetNetDecoder(config, word_embeddings=self.word_embeddings) + + # Initialize weights and apply final processing + self.post_init() + + def _tie_weights(self): + self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings()) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) diff --git 
a/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py index faa5080b2d93..f99cd4549a49 100644 --- a/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py @@ -1779,6 +1779,11 @@ def set_input_embeddings(self, value): self.encoder.word_embeddings = self.word_embeddings self.decoder.word_embeddings = self.word_embeddings + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings) + self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings) + def get_encoder(self): return self.encoder @@ -1901,6 +1906,10 @@ def get_output_embeddings(self): def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head) + def get_input_embeddings(self): return self.prophetnet.word_embeddings @@ -2098,7 +2107,11 @@ def get_decoder(self): ) # Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetForCausalLM with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET class XLMProphetNetForCausalLM(XLMProphetNetPreTrainedModel): - _tied_weights_keys = ["lm_head.weight"] + _tied_weights_keys = [ + "prophetnet.word_embeddings.weight", + "prophetnet.decoder.word_embeddings.weight", + "lm_head.weight", + ] def __init__(self, config: XLMProphetNetConfig): # set config for CLM @@ -2128,6 +2141,10 @@ def get_output_embeddings(self): def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head) + def set_decoder(self, decoder): self.prophetnet.decoder = decoder @@ -2340,7 +2357,15 @@ class XLMProphetNetDecoderWrapper(XLMProphetNetPreTrainedModel): def __init__(self, config: XLMProphetNetConfig): super().__init__(config) - self.decoder = XLMProphetNetDecoder(config) + + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.decoder = XLMProphetNetDecoder(config, word_embeddings=self.word_embeddings) + + # Initialize weights and apply final processing + self.post_init() + + def _tie_weights(self): + self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings()) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) diff --git a/tests/models/kosmos2/test_modeling_kosmos2.py b/tests/models/kosmos2/test_modeling_kosmos2.py index 2649b8f41d66..3f55ad9759dd 100644 --- a/tests/models/kosmos2/test_modeling_kosmos2.py +++ b/tests/models/kosmos2/test_modeling_kosmos2.py @@ -304,6 +304,25 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) + def test_load_save_without_tied_weights(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + config.text_config.tie_word_embeddings = False + for model_class in self.all_model_classes: + model = model_class(config) + with tempfile.TemporaryDirectory() as d: + model.save_pretrained(d) + + model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) + # Checking the state dicts are correct 
+ reloaded_state = model_reloaded.state_dict() + for k, v in model.state_dict().items(): + self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") + torch.testing.assert_close( + v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" + ) + # Checking there was no complain of missing weights + self.assertEqual(infos["missing_keys"], []) + # overwrite from common in order to use `self.model_tester.text_model_tester.num_hidden_layers` def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index f96812c36da8..fdd48de2fd7f 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -76,7 +76,7 @@ from transformers.utils import ( CONFIG_NAME, GENERATION_CONFIG_NAME, - WEIGHTS_NAME, + SAFE_WEIGHTS_NAME, is_accelerate_available, is_flax_available, is_tf_available, @@ -91,6 +91,7 @@ if is_torch_available(): import torch + from safetensors.torch import load_file as safe_load_file from safetensors.torch import save_file as safe_save_file from torch import nn @@ -311,17 +312,20 @@ def test_save_load_keys_to_ignore_on_save(self): # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) - output_model_file = os.path.join(tmpdirname, WEIGHTS_NAME) - state_dict_saved = torch.load(output_model_file) + output_model_file = os.path.join(tmpdirname, SAFE_WEIGHTS_NAME) + state_dict_saved = safe_load_file(output_model_file) + for k in _keys_to_ignore_on_save: self.assertNotIn(k, state_dict_saved.keys(), "\n".join(state_dict_saved.keys())) # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer. load_result = model.load_state_dict(state_dict_saved, strict=False) - self.assertTrue( - len(load_result.missing_keys) == 0 - or set(load_result.missing_keys) == set(model._keys_to_ignore_on_save) - ) + keys_to_ignore = set(model._keys_to_ignore_on_save) + + if hasattr(model, "_tied_weights_keys"): + keys_to_ignore.update(set(model._tied_weights_keys)) + + self.assertTrue(len(load_result.missing_keys) == 0 or set(load_result.missing_keys) == keys_to_ignore) self.assertTrue(len(load_result.unexpected_keys) == 0) def test_gradient_checkpointing_backward_compatibility(self): From 8801861d2de1568e8ca8f81d96a7ddf3964f6373 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 2 Nov 2023 15:32:17 +0100 Subject: [PATCH 062/268] Fixing m4t. (#27240) * Fixing m4t. * Trying to remove comparison ? Odd test failure. * Adding shared. But why on earth does it hang ???? * Putting back the model weights checks the test is silently failing on cuda. * Fix style + unremoved comment. 
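Before the M4T diff below, a short aside on what the tied-weight and safetensors fixes above are guarding: once `_tie_weights` runs, a save/reload round trip should reproduce every tensor, including those listed in `_tied_weights_keys`. A hedged sketch only, not library code; the tiny checkpoint name is just a convenient placeholder.

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")  # placeholder tiny model

# With safe serialization (the default these tests now assume), this writes
# model.safetensors, i.e. SAFE_WEIGHTS_NAME.
model.save_pretrained("/tmp/roundtrip")
reloaded = AutoModelForCausalLM.from_pretrained("/tmp/roundtrip")

reloaded_state = reloaded.state_dict()
for name, tensor in model.state_dict().items():
    assert name in reloaded_state, f"Key {name} is missing from reloaded"
    torch.testing.assert_close(tensor, reloaded_state[name])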
--- .../seamless_m4t/modeling_seamless_m4t.py | 6 +- .../test_modeling_seamless_m4t.py | 64 +++++++++++++------ 2 files changed, 48 insertions(+), 22 deletions(-) diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index 0745663bc0fd..ddfce18fb1dc 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -3051,8 +3051,9 @@ class SeamlessM4TForSpeechToText(SeamlessM4TPreTrainedModel): def __init__(self, config: SeamlessM4TConfig): super().__init__(config) + self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.speech_encoder = SeamlessM4TSpeechEncoder(config) - self.text_decoder = SeamlessM4TDecoder(config) + self.text_decoder = SeamlessM4TDecoder(config, self.shared) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing @@ -3710,8 +3711,9 @@ class SeamlessM4TForSpeechToSpeech(SeamlessM4TPreTrainedModel): def __init__(self, config): super().__init__(config) + self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.speech_encoder = SeamlessM4TSpeechEncoder(config) - self.text_decoder = SeamlessM4TDecoder(config) + self.text_decoder = SeamlessM4TDecoder(config, self.shared) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing diff --git a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py index 2abedb6dd708..6963433e01b3 100644 --- a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py +++ b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py @@ -863,17 +863,23 @@ def test_speech_generation(self): output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) - model = SeamlessM4TForTextToSpeech.from_pretrained(self.tmpdirname) - self.update_generation(model) - model.to(torch_device) - model.eval() + state_dict = model.state_dict() + + text_model = SeamlessM4TForTextToSpeech.from_pretrained(self.tmpdirname) + self.update_generation(text_model) + text_model.to(torch_device) + text_model.eval() output_text = self.factory_generation_speech_test(model, input_text) - model = SeamlessM4TForSpeechToSpeech.from_pretrained(self.tmpdirname) - self.update_generation(model) - model.to(torch_device) - model.eval() + speech_model = SeamlessM4TForSpeechToSpeech.from_pretrained(self.tmpdirname) + self.update_generation(speech_model) + speech_model.to(torch_device) + speech_model.eval() + + for name, tensor in speech_model.state_dict().items(): + right_tensor = state_dict.get(name) + self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") output_speech = self.factory_generation_speech_test(model, input_speech) @@ -882,8 +888,15 @@ def test_speech_generation(self): self.assertListEqual(output_original_text[1].ravel().tolist(), output_text[1].ravel().tolist()) # test same speech output from input text - self.assertListEqual(output_original_speech[0].ravel().tolist(), output_speech[0].ravel().tolist()) - self.assertListEqual(output_original_speech[1].ravel().tolist(), output_speech[1].ravel().tolist()) + # assertTrue because super long list makes this hang in case of failure + self.assertTrue( + output_original_speech[0].ravel().tolist() == 
output_speech[0].ravel().tolist(), + "Speech generated was different", + ) + self.assertTrue( + output_original_speech[1].ravel().tolist() == output_speech[1].ravel().tolist(), + "Speech generated was different", + ) def test_text_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() @@ -905,19 +918,30 @@ def test_text_generation(self): input_speech.pop("generate_speech") input_text.pop("generate_speech") - model = SeamlessM4TForTextToText.from_pretrained(self.tmpdirname) - self.update_generation(model) - model.to(torch_device) - model.eval() + state_dict = model.state_dict() - output_text = self.factory_generation_speech_test(model, input_text) + text_model = SeamlessM4TForTextToText.from_pretrained(self.tmpdirname) + self.update_generation(text_model) + text_model.to(torch_device) + text_model.eval() - model = SeamlessM4TForSpeechToText.from_pretrained(self.tmpdirname) - self.update_generation(model) - model.to(torch_device) - model.eval() + for name, tensor in text_model.state_dict().items(): + right_tensor = state_dict.get(name) + self.assertEqual(tensor.tolist(), right_tensor.tolist()) - output_speech = self.factory_generation_speech_test(model, input_speech) + output_text = self.factory_generation_speech_test(text_model, input_text) + + speech_model = SeamlessM4TForSpeechToText.from_pretrained(self.tmpdirname) + + for name, tensor in speech_model.state_dict().items(): + right_tensor = state_dict.get(name) + self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") + + self.update_generation(speech_model) + speech_model.to(torch_device) + speech_model.eval() + + output_speech = self.factory_generation_speech_test(speech_model, input_speech) # test same text output from input text self.assertListEqual(output_original_text[0].ravel().tolist(), output_text.ravel().tolist()) From 441c3e0dd28194df9366773376ae5878b2db465e Mon Sep 17 00:00:00 2001 From: Marc Sun <57196510+SunMarc@users.noreply.github.com> Date: Thu, 2 Nov 2023 16:23:31 +0100 Subject: [PATCH 063/268] fix-deprecated-exllama-arg (#27243) fix-exllama --- src/transformers/utils/quantization_config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index ae8909352c19..34f7cb799a72 100644 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -455,6 +455,7 @@ def post_init(self): "The value of `use_exllama` will be overwritten by `disable_exllama` passed in `GPTQConfig` or stored in your config file." ) self.use_exllama = not self.disable_exllama + self.disable_exllama = None elif self.disable_exllama is not None and self.use_exllama is not None: # Only happens if user explicitly passes in both arguments raise ValueError("Cannot specify both `disable_exllama` and `use_exllama`. 
Please use just `use_exllama`") From a6c82d45670918acf1a9331abf89be210c72ca4f Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Thu, 2 Nov 2023 15:39:21 +0000 Subject: [PATCH 064/268] Generate: return `past_key_values` (#25086) --- src/transformers/generation/utils.py | 117 ++++++++++++++++++++++++-- tests/generation/test_utils.py | 121 +++++++++++++++++++++++++++ 2 files changed, 233 insertions(+), 5 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index df4239d05421..69cbc373e5f7 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -104,12 +104,20 @@ class GreedySearchDecoderOnlyOutput(ModelOutput): hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -140,6 +148,13 @@ class ContrastiveSearchEncoderDecoderOutput(ModelOutput): decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. 
""" sequences: torch.LongTensor = None @@ -149,6 +164,7 @@ class ContrastiveSearchEncoderDecoderOutput(ModelOutput): decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -169,15 +185,23 @@ class ContrastiveSearchDecoderOnlyOutput(ModelOutput): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is - passed or when `config.output_hidden_states=True`): - Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of - `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples + (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, + hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -211,6 +235,13 @@ class GreedySearchEncoderDecoderOutput(ModelOutput): decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. 
""" sequences: torch.LongTensor = None @@ -220,6 +251,7 @@ class GreedySearchEncoderDecoderOutput(ModelOutput): decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -243,12 +275,20 @@ class SampleDecoderOnlyOutput(ModelOutput): hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -283,6 +323,13 @@ class SampleEncoderDecoderOutput(ModelOutput): decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. 
""" sequences: torch.LongTensor = None @@ -292,6 +339,7 @@ class SampleEncoderDecoderOutput(ModelOutput): decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -319,6 +367,13 @@ class BeamSearchDecoderOnlyOutput(ModelOutput): hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. """ sequences: torch.LongTensor = None @@ -327,6 +382,7 @@ class BeamSearchDecoderOnlyOutput(ModelOutput): beam_indices: Optional[torch.LongTensor] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -366,6 +422,13 @@ class BeamSearchEncoderDecoderOutput(ModelOutput): decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. 
""" sequences: torch.LongTensor = None @@ -377,6 +440,7 @@ class BeamSearchEncoderDecoderOutput(ModelOutput): decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -404,6 +468,13 @@ class BeamSampleDecoderOnlyOutput(ModelOutput): hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. """ sequences: torch.LongTensor = None @@ -412,6 +483,7 @@ class BeamSampleDecoderOnlyOutput(ModelOutput): beam_indices: Optional[torch.LongTensor] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None @dataclass @@ -450,6 +522,13 @@ class BeamSampleEncoderDecoderOutput(ModelOutput): decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. 
""" sequences: torch.LongTensor = None @@ -461,6 +540,7 @@ class BeamSampleEncoderDecoderOutput(ModelOutput): decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput] @@ -2148,8 +2228,8 @@ def contrastive_search( items.append(item.repeat_interleave(1, dim=0)) else: items.append(item.repeat_interleave(top_k, dim=0)) - new_key_values.append(items) - model_kwargs["past_key_values"] = new_key_values + new_key_values.append(tuple(items)) + model_kwargs["past_key_values"] = tuple(new_key_values) if sequential: all_outputs = {key: [] for key in outputs} # defined in first loop iteration @@ -2330,6 +2410,17 @@ def contrastive_search( streamer.end() if return_dict_in_generate: + # Contrastive search works by forward looking at the next token, so we need to exclude it from + # `past_key_values` to be consistent with the other decoding methods + if model_kwargs.get("past_key_values") is not None: + past_key_values = [] + for layer in model_kwargs["past_key_values"]: + layer_past_key_values = [] + for item in layer: + layer_past_key_values.append(item[..., :-1, :]) + past_key_values.append(tuple(layer_past_key_values)) + model_kwargs["past_key_values"] = tuple(past_key_values) + if self.config.is_encoder_decoder: return ContrastiveSearchEncoderDecoderOutput( sequences=input_ids, @@ -2339,6 +2430,7 @@ def contrastive_search( decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return ContrastiveSearchDecoderOnlyOutput( @@ -2346,6 +2438,7 @@ def contrastive_search( scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return input_ids @@ -2598,6 +2691,7 @@ def greedy_search( decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return GreedySearchDecoderOnlyOutput( @@ -2605,6 +2699,7 @@ def greedy_search( scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return input_ids @@ -2880,6 +2975,7 @@ def sample( decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return SampleDecoderOnlyOutput( @@ -2887,6 +2983,7 @@ def sample( scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return input_ids @@ -3201,6 +3298,7 @@ def beam_search( decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return BeamSearchDecoderOnlyOutput( @@ -3210,6 +3308,7 @@ def beam_search( beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return sequence_outputs["sequences"] @@ -3530,6 +3629,7 @@ def beam_sample( 
decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return BeamSampleDecoderOnlyOutput( @@ -3539,6 +3639,7 @@ def beam_sample( beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return sequence_outputs["sequences"] @@ -3909,6 +4010,7 @@ def group_beam_search( decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return BeamSearchDecoderOnlyOutput( @@ -3918,6 +4020,7 @@ def group_beam_search( beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return sequence_outputs["sequences"] @@ -4244,6 +4347,7 @@ def constrained_beam_search( decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return BeamSearchDecoderOnlyOutput( @@ -4253,6 +4357,7 @@ def constrained_beam_search( beam_indices=sequence_outputs["beam_indices"], attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return sequence_outputs["sequences"] @@ -4672,6 +4777,7 @@ def assisted_decoding( decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return GreedySearchDecoderOnlyOutput( @@ -4679,6 +4785,7 @@ def assisted_decoding( scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), ) else: return input_ids diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 6468973d6758..7e2f242c6fd6 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1829,6 +1829,85 @@ def test_generate_from_inputs_embeds_decoder_only(self): outputs_from_embeds_wo_ids[:, 1:].tolist(), ) + def test_generate_continue_from_past_key_values(self): + # Tests that we can continue generating from past key values, returned from a previous `generate` call + for model_class in self.all_generative_model_classes: + # won't fix: old models with unique inputs/caches/others + if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]): + return + # may fix in the future: needs modeling or test input preparation fixes for compatibility + if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): + return + + config, inputs = self.model_tester.prepare_config_and_inputs_for_common() + + # If it doesn't support cache, pass the test + if not hasattr(config, "use_cache"): + return + + # Let's make it always: + # 1. use cache (for obvious reasons) + # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which + # would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the + # continuation would force it to generate beyond an EOS token) + # 3. ignore `token_type_ids` for simplicity + # 4. 
ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is + # active by default on some models + config.use_cache = True + if "token_type_ids" in inputs: + del inputs["token_type_ids"] + + model = model_class(config).to(torch_device) + model.eval() + model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 + model.generation_config.forced_eos_token_id = None + + # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) + outputs = model(**inputs) + if "past_key_values" not in outputs: + return + + # Traditional way of generating text, with `return_dict_in_generate` to return the past key values + outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4, return_dict_in_generate=True) + + # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the + # inputs may need to be tweaked across `generate` calls (like the attention mask). + outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=3, return_dict_in_generate=True) + + # Continue from the tokens generated above, preparing the inputs accordingly + inputs["past_key_values"] = outputs_cached.past_key_values + new_attention_len = outputs_cached.sequences.shape[-1] + if config.is_encoder_decoder: + inputs["decoder_input_ids"] = outputs_cached.sequences + if "decoder_attention_mask" in inputs: + inputs["decoder_attention_mask"] = torch.nn.functional.pad( + inputs["decoder_attention_mask"], + (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]), + mode="constant", + value=1, + ) + else: + inputs["input_ids"] = outputs_cached.sequences + if "attention_mask" in inputs: + inputs["attention_mask"] = torch.nn.functional.pad( + inputs["attention_mask"], + (0, new_attention_len - inputs["attention_mask"].shape[1]), + mode="constant", + value=1, + ) + outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=1, return_dict_in_generate=True) + + # The two sets of generated text and past kv should be equal to each other + self.assertListEqual(outputs.sequences.tolist(), outputs_cached.sequences.tolist()) + for layer_idx in range(len(outputs_cached.past_key_values)): + for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])): + self.assertTrue( + torch.allclose( + outputs.past_key_values[layer_idx][kv_idx], + outputs_cached.past_key_values[layer_idx][kv_idx], + ) + ) + def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences @@ -1894,6 +1973,24 @@ def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_ use_cache=use_cache, ) + # Past Key Value States -- two notes here: + # 1. Its inner sequence length is with respect to the inputs of the latest forward pass, hence the "-1" + # 2. Some old models still return `output.past_key_values` even without `use_cache=True` + # 3. 
TODO (joao): A few models have different formats, skipping those until the cache refactor is complete + models_without_standard_cache = ("bloom", "ctrl", "fsmt", "gptbigcode", "mega", "reformer") + has_standard_cache = not any( + model_name in config.__class__.__name__.lower() for model_name in models_without_standard_cache + ) + if use_cache and has_standard_cache: + past_key_values = output.past_key_values + past_sequence_length = output.sequences.shape[-1] - 1 + self._check_past_key_values_for_generate( + num_sequences_in_output, + past_key_values, + seq_length=past_sequence_length, + config=config, + ) + def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) @@ -1959,6 +2056,30 @@ def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, c [encoder_expected_shape] * len(hidden_states), ) + def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config, num_beam_groups=1): + self.assertIsInstance(past_key_values, tuple) + self.assertListEqual( + [isinstance(iter_past_key_values, tuple) for iter_past_key_values in past_key_values], + [True] * len(past_key_values), + ) + + # (batch, head, seq_length, head_features) + expected_shape = ( + batch_size * num_beam_groups, + config.num_key_value_heads if hasattr(config, "num_key_value_heads") else config.num_attention_heads, + seq_length, + config.hidden_size // config.num_attention_heads, + ) + # check shape key, value + self.assertListEqual( + [layer_past_key_values[0].shape for layer_past_key_values in past_key_values], + [expected_shape] * len(past_key_values), + ) + self.assertListEqual( + [layer_past_key_values[1].shape for layer_past_key_values in past_key_values], + [expected_shape] * len(past_key_values), + ) + def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. 
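Taken together, the hunks in this patch mean every `generate` output class can now carry the model cache, and the new `test_generate_continue_from_past_key_values` test demonstrates the intended "generate, then continue" pattern. Below is a minimal sketch of that pattern outside the test harness; the `gpt2` checkpoint and the prompt are illustrative choices rather than part of the patch, and it assumes a transformers build that already includes this change.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")

# First call: greedy-decode a few tokens and keep the cache, which is now part
# of the returned output object.
first = model.generate(**inputs, do_sample=False, max_new_tokens=3, return_dict_in_generate=True)
# One (key, value) pair per layer, each of shape (batch, num_heads, sequences_len - 1, head_dim).
print(first.past_key_values[0][0].shape)

# Second call: continue from the cache by feeding back the full generated
# sequence, the cache itself, and an attention mask grown to the new length.
continued = model.generate(
    input_ids=first.sequences,
    attention_mask=torch.ones_like(first.sequences),
    past_key_values=first.past_key_values,
    do_sample=False,
    max_new_tokens=1,
    return_dict_in_generate=True,
)
print(tokenizer.decode(continued.sequences[0], skip_special_tokens=True))
```

As the comments added to `_check_outputs` note, the returned cache covers one token fewer than `sequences`, so the second call only has to run a forward pass over the last fed-back token before producing the next one.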
From 147e8ce4ae23f6f360ac0bc6e162cdf9995516b7 Mon Sep 17 00:00:00 2001 From: Pietro Lesci <61748653+pietrolesci@users.noreply.github.com> Date: Thu, 2 Nov 2023 16:01:41 +0000 Subject: [PATCH 065/268] Remove redundant code from T5 encoder mask creation (#27216) * remove redundant code * update * add typecasting * make `attention_mask` float again --- src/transformers/models/mt5/modeling_mt5.py | 15 ++++++--------- src/transformers/models/t5/modeling_t5.py | 15 ++++++--------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/transformers/models/mt5/modeling_mt5.py b/src/transformers/models/mt5/modeling_mt5.py index ba977ad6ae66..799b4c54bffb 100644 --- a/src/transformers/models/mt5/modeling_mt5.py +++ b/src/transformers/models/mt5/modeling_mt5.py @@ -997,18 +997,13 @@ def forward( if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") - if attention_mask is None: - attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) - if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: - encoder_seq_length = encoder_hidden_states.shape[1] - encoder_attention_mask = torch.ones( - batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long - ) - # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) + if attention_mask is None: + attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) @@ -1019,7 +1014,9 @@ def forward( encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long + ) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index ff8e6609b94d..e23a687f7996 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -1024,18 +1024,13 @@ def forward( if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") - if attention_mask is None: - attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) - if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: - encoder_seq_length = encoder_hidden_states.shape[1] - encoder_attention_mask = torch.ones( - batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long - ) - # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) + if attention_mask is None: + attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to 
make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) @@ -1046,7 +1041,9 @@ def forward( encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long + ) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None From 0ed6729bb130cb1d43fb2ede60b0c50f9ee14d68 Mon Sep 17 00:00:00 2001 From: Yoach Lacombe <52246514+ylacombe@users.noreply.github.com> Date: Thu, 2 Nov 2023 17:06:56 +0000 Subject: [PATCH 066/268] Enrich TTS pipeline parameters naming (#26473) * enrich TTS pipeline docstring for clearer forward_params use * change token leghts * update Pipeline parameters * correct docstring and make style * fix tests * make style * change music prompt Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> * raise errors if generate_kwargs with forward-only models * make style --------- Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- src/transformers/pipelines/text_to_audio.py | 62 ++++++++++++++++--- .../pipelines/test_pipelines_text_to_audio.py | 55 ++++++++++++++++ 2 files changed, 110 insertions(+), 7 deletions(-) diff --git a/src/transformers/pipelines/text_to_audio.py b/src/transformers/pipelines/text_to_audio.py index 299fa7ac014b..58c21cc12168 100644 --- a/src/transformers/pipelines/text_to_audio.py +++ b/src/transformers/pipelines/text_to_audio.py @@ -43,6 +43,29 @@ class TextToAudioPipeline(Pipeline): Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + + You can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or + [`TextToAudioPipeline.__call__.generate_kwargs`]. + + Example: + + ```python + >>> from transformers import pipeline + + >>> music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") + + >>> # diversify the music generation by adding randomness with a high temperature and set a maximum music length + >>> generate_kwargs = { + ... "do_sample": True, + ... "temperature": 0.7, + ... "max_new_tokens": 35, + ... } + + >>> outputs = music_generator("Techno music with high melodic riffs", generate_kwargs=generate_kwargs) + ``` + + This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or `"text-to-audio"`. 
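The docstring example added above covers the generative path (MusicGen). For a forward-only model, which the reworked `_forward` below now handles separately, a complementary sketch looks like the following; it is not part of the patch, and the checkpoint and `speaker_id` value are borrowed from the new `test_forward_model_kwargs` test added later in this patch.

```python
from transformers import pipeline

# VITS is a forward-only TTS model, so model-specific arguments such as
# `speaker_id` go through `forward_params`; with this change, passing a
# non-empty `generate_kwargs` to such a model raises a ValueError.
speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt")
output = speech_generator("This is a test", forward_params={"speaker_id": 5})
audio, sampling_rate = output["audio"], output["sampling_rate"]
```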
@@ -107,11 +130,26 @@ def preprocess(self, text, **kwargs): def _forward(self, model_inputs, **kwargs): # we expect some kwargs to be additional tensors which need to be on the right device kwargs = self._ensure_tensor_on_device(kwargs, device=self.device) + forward_params = kwargs["forward_params"] + generate_kwargs = kwargs["generate_kwargs"] if self.model.can_generate(): - output = self.model.generate(**model_inputs, **kwargs) + # we expect some kwargs to be additional tensors which need to be on the right device + generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device) + + # generate_kwargs get priority over forward_params + forward_params.update(generate_kwargs) + + output = self.model.generate(**model_inputs, **forward_params) else: - output = self.model(**model_inputs, **kwargs)[0] + if len(generate_kwargs): + raise ValueError( + f"""You're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non empty. + For forward-only TTA models, please use `forward_params` instead of of + `generate_kwargs`. For reference, here are the `generate_kwargs` used here: + {generate_kwargs.keys()}""" + ) + output = self.model(**model_inputs, **forward_params)[0] if self.vocoder is not None: # in that case, the output is a spectrogram that needs to be converted into a waveform @@ -126,8 +164,14 @@ def __call__(self, text_inputs: Union[str, List[str]], **forward_params): Args: text_inputs (`str` or `List[str]`): The text(s) to generate. - forward_params (*optional*): - Parameters passed to the model generation/forward method. + forward_params (`dict`, *optional*): + Parameters passed to the model generation/forward method. `forward_params` are always passed to the + underlying model. + generate_kwargs (`dict`, *optional*): + The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a + complete overview of generate, check the [following + guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are + only passed to the underlying model if the latter is a generative model. 
Return: A `dict` or a list of `dict`: The dictionaries have two keys: @@ -141,14 +185,18 @@ def _sanitize_parameters( self, preprocess_params=None, forward_params=None, + generate_kwargs=None, ): + params = { + "forward_params": forward_params if forward_params else {}, + "generate_kwargs": generate_kwargs if generate_kwargs else {}, + } + if preprocess_params is None: preprocess_params = {} - if forward_params is None: - forward_params = {} postprocess_params = {} - return preprocess_params, forward_params, postprocess_params + return preprocess_params, params, postprocess_params def postprocess(self, waveform): output_dict = {} diff --git a/tests/pipelines/test_pipelines_text_to_audio.py b/tests/pipelines/test_pipelines_text_to_audio.py index 6aca34ed98a0..a9f1eccae508 100644 --- a/tests/pipelines/test_pipelines_text_to_audio.py +++ b/tests/pipelines/test_pipelines_text_to_audio.py @@ -30,6 +30,7 @@ slow, torch_device, ) +from transformers.trainer_utils import set_seed from .test_pipelines_common import ANY @@ -174,6 +175,60 @@ def test_vits_model_pt(self): outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2) self.assertEqual(ANY(np.ndarray), outputs[0]["audio"]) + @slow + @require_torch + def test_forward_model_kwargs(self): + # use vits - a forward model + speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt") + + # for reproducibility + set_seed(555) + outputs = speech_generator("This is a test", forward_params={"speaker_id": 5}) + audio = outputs["audio"] + + with self.assertRaises(TypeError): + # assert error if generate parameter + outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True}) + + forward_params = {"speaker_id": 5} + generate_kwargs = {"do_sample": True} + + with self.assertRaises(ValueError): + # assert error if generate_kwargs with forward-only models + outputs = speech_generator( + "This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs + ) + self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5) + + @slow + @require_torch + def test_generative_model_kwargs(self): + # use musicgen - a generative model + music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") + + forward_params = { + "do_sample": True, + "max_new_tokens": 250, + } + + # for reproducibility + set_seed(555) + outputs = music_generator("This is a test", forward_params=forward_params) + audio = outputs["audio"] + self.assertEqual(ANY(np.ndarray), audio) + + # make sure generate kwargs get priority over forward params + forward_params = { + "do_sample": False, + "max_new_tokens": 250, + } + generate_kwargs = {"do_sample": True} + + # for reproducibility + set_seed(555) + outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs) + self.assertListEqual(outputs["audio"].tolist(), audio.tolist()) + def get_test_pipeline(self, model, tokenizer, processor): speech_generator = TextToAudioPipeline(model=model, tokenizer=tokenizer) return speech_generator, ["This is a test", "Another test"] From bc78fd12748a18dbc71faeae000f036378b065d5 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Thu, 2 Nov 2023 16:38:59 +0100 Subject: [PATCH 067/268] Dev version --- .github/conda/meta.yaml | 4 ++++ README.md | 6 +++--- README_es.md | 6 +++--- README_hd.md | 6 +++--- README_ja.md | 6 +++--- README_ko.md | 6 +++--- README_zh-hans.md | 6 +++--- README_zh-hant.md | 6 +++--- 
examples/flax/question-answering/run_qa.py | 2 +- .../run_flax_speech_recognition_seq2seq.py | 2 +- examples/flax/text-classification/run_flax_glue.py | 2 +- examples/flax/token-classification/run_flax_ner.py | 2 +- .../audio-classification/run_audio_classification.py | 2 +- examples/pytorch/contrastive-image-text/run_clip.py | 2 +- .../image-classification/run_image_classification.py | 2 +- .../run_image_classification_no_trainer.py | 2 +- examples/pytorch/image-pretraining/run_mae.py | 2 +- examples/pytorch/image-pretraining/run_mim.py | 2 +- examples/pytorch/image-pretraining/run_mim_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_clm.py | 2 +- examples/pytorch/language-modeling/run_clm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- examples/pytorch/language-modeling/run_mlm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- examples/pytorch/multiple-choice/run_swag_no_trainer.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- examples/pytorch/question-answering/run_qa_beam_search.py | 2 +- .../question-answering/run_qa_beam_search_no_trainer.py | 2 +- examples/pytorch/question-answering/run_qa_no_trainer.py | 2 +- examples/pytorch/question-answering/run_seq2seq_qa.py | 2 +- .../semantic-segmentation/run_semantic_segmentation.py | 2 +- .../run_semantic_segmentation_no_trainer.py | 2 +- .../speech-recognition/run_speech_recognition_ctc.py | 2 +- .../run_speech_recognition_ctc_adapter.py | 2 +- .../speech-recognition/run_speech_recognition_seq2seq.py | 2 +- examples/pytorch/summarization/run_summarization.py | 2 +- .../pytorch/summarization/run_summarization_no_trainer.py | 2 +- examples/pytorch/text-classification/run_classification.py | 2 +- examples/pytorch/text-classification/run_glue.py | 2 +- examples/pytorch/text-classification/run_glue_no_trainer.py | 2 +- examples/pytorch/text-classification/run_xnli.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- examples/pytorch/token-classification/run_ner_no_trainer.py | 2 +- examples/pytorch/translation/run_translation.py | 2 +- examples/pytorch/translation/run_translation_no_trainer.py | 2 +- examples/tensorflow/contrastive-image-text/run_clip.py | 2 +- .../image-classification/run_image_classification.py | 2 +- examples/tensorflow/multiple-choice/run_swag.py | 2 +- examples/tensorflow/question-answering/run_qa.py | 2 +- examples/tensorflow/summarization/run_summarization.py | 2 +- examples/tensorflow/text-classification/run_glue.py | 2 +- examples/tensorflow/translation/run_translation.py | 2 +- setup.py | 2 +- src/transformers/__init__.py | 2 +- 55 files changed, 72 insertions(+), 68 deletions(-) diff --git a/.github/conda/meta.yaml b/.github/conda/meta.yaml index 6bf33f842fbf..89dc353b1277 100644 --- a/.github/conda/meta.yaml +++ b/.github/conda/meta.yaml @@ -26,6 +26,8 @@ requirements: - protobuf - tokenizers >=0.11.1,!=0.11.3,<0.13 - pyyaml >=5.1 + - safetensors + - fsspec run: - python - numpy >=1.17 @@ -40,6 +42,8 @@ requirements: - protobuf - tokenizers >=0.11.1,!=0.11.3,<0.13 - pyyaml >=5.1 + - safetensors + - fsspec test: imports: diff --git a/README.md b/README.md index f761e0d149f7..307d1bf9cb8e 100644 --- a/README.md +++ b/README.md @@ -386,7 +386,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. 
**[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. -1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. @@ -437,7 +437,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. 
**[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. -1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. +1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. @@ -461,7 +461,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. 1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng), released on [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng. -1. **[SeamlessM4T](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. +1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. 
**[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. diff --git a/README_es.md b/README_es.md index 55a1fcf24abe..48e9ed3db0ed 100644 --- a/README_es.md +++ b/README_es.md @@ -361,7 +361,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. -1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. 
**[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. @@ -412,7 +412,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. -1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. +1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. @@ -436,7 +436,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. 
**[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. 1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released with the paper [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng. -1. **[SeamlessM4T](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. +1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. diff --git a/README_hd.md b/README_hd.md index 6862966d7b07..920aff460e0a 100644 --- a/README_hd.md +++ b/README_hd.md @@ -335,7 +335,7 @@ conda install -c huggingface transformers 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (Salesforce से) Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. द्वाराअनुसंधान पत्र [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) के साथ जारी किया गया 1. 
**[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. -1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (माइक्रोसॉफ्ट रिसर्च एशिया से) साथ देने वाला पेपर [लेआउटएलएमवी3: यूनिफाइड टेक्स्ट और इमेज मास्किंग के साथ दस्तावेज़ एआई के लिए पूर्व-प्रशिक्षण](https://arxiv.org/abs/2204.08387) युपन हुआंग, टेंगचाओ लव, लेई कुई, युटोंग लू, फुरु वेई द्वारा पोस्ट किया गया। @@ -386,7 +386,7 @@ conda install -c huggingface transformers 1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI से) साथ में कागज [विज़न ट्रांसफॉर्मर्स के साथ सिंपल ओपन-वोकैबुलरी ऑब्जेक्ट डिटेक्शन](https:/ /arxiv.org/abs/2205.06230) मैथियास मिंडरर, एलेक्सी ग्रिट्सेंको, ऑस्टिन स्टोन, मैक्सिम न्यूमैन, डिर्क वीसेनबोर्न, एलेक्सी डोसोवित्स्की, अरविंद महेंद्रन, अनुराग अर्नब, मुस्तफा देहघानी, ज़ुओरन शेन, जिओ वांग, ज़ियाओहुआ झाई, थॉमस किफ़, और नील हॉल्सबी द्वारा पोस्ट किया गया। -1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (Google AI से) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. द्वाराअनुसंधान पत्र [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) के साथ जारी किया गया +1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI से) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. द्वाराअनुसंधान पत्र [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) के साथ जारी किया गया 1. 
**[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google की ओर से) साथ में दिया गया पेपर [लंबे इनपुट सारांश के लिए ट्रांसफ़ॉर्मरों को बेहतर तरीके से एक्सटेंड करना](https://arxiv .org/abs/2208.04347) जेसन फांग, याओ झाओ, पीटर जे लियू द्वारा। 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (दीपमाइंड से) साथ में पेपर [पर्सीवर आईओ: संरचित इनपुट और आउटपुट के लिए एक सामान्य वास्तुकला] (https://arxiv.org/abs/2107.14795) एंड्रयू जेगल, सेबेस्टियन बोरग्यूड, जीन-बैप्टिस्ट अलायराक, कार्ल डोर्श, कैटलिन इओनेस्कु, डेविड द्वारा डिंग, स्कंद कोप्पुला, डैनियल ज़ोरान, एंड्रयू ब्रॉक, इवान शेलहैमर, ओलिवियर हेनाफ, मैथ्यू एम। बोट्विनिक, एंड्रयू ज़िसरमैन, ओरिओल विनियल्स, जोआओ कैरेरा द्वारा पोस्ट किया गया। @@ -410,7 +410,7 @@ conda install -c huggingface transformers 1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (झुईई टेक्नोलॉजी से), साथ में पेपर [रोफॉर्मर: रोटरी पोजिशन एंबेडिंग के साथ एन्हांस्ड ट्रांसफॉर्मर] (https://arxiv.org/pdf/2104.09864v1.pdf) जियानलिन सु और यू लू और शेंगफेंग पैन और बो वेन और युनफेंग लियू द्वारा प्रकाशित। 1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (Bo Peng से) Bo Peng. द्वाराअनुसंधान पत्र [this repo](https://github.com/BlinkDL/RWKV-LM) के साथ जारी किया गया -1. **[SeamlessM4T](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. +1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI से) Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. द्वाराअनुसंधान पत्र [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) के साथ जारी किया गया 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP से) साथ देने वाला पेपर [भाषण पहचान के लिए अनसुपरवाइज्ड प्री-ट्रेनिंग में परफॉर्मेंस-एफिशिएंसी ट्रेड-ऑफ्स](https ://arxiv.org/abs/2109.06870) फेलिक्स वू, क्वांगयुन किम, जिंग पैन, क्यू हान, किलियन क्यू. 
वेनबर्गर, योव आर्टज़ी द्वारा। diff --git a/README_ja.md b/README_ja.md index 83f6126dea49..6079d40a2429 100644 --- a/README_ja.md +++ b/README_ja.md @@ -395,7 +395,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (Salesforce から) Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. から公開された研究論文 [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (OpenAI から) Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever から公開された研究論文: [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) -1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (Microsoft Research Asia から) Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou から公開された研究論文: [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (Microsoft Research Asia から) Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou から公開された研究論文: [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (Microsoft Research Asia から) Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei から公開された研究論文: [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) @@ -446,7 +446,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI から) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al から公開された研究論文: [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 1. 
**[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby から公開された研究論文: [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) -1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. から公開された研究論文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) +1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. から公開された研究論文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (Google から) Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu から公開された研究論文: [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google から) Jason Phang, Yao Zhao, and Peter J. Liu から公開された研究論文: [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind から) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira から公開された研究論文: [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) @@ -470,7 +470,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (WeChatAI から) HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou から公開された研究論文: [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (ZhuiyiTechnology から), Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu から公開された研究論文: [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) 1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (Bo Peng から) Bo Peng. から公開された研究論文 [this repo](https://github.com/BlinkDL/RWKV-LM) -1. **[SeamlessM4T](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. +1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (NVIDIA から) Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. 
Alvarez, Ping Luo から公開された研究論文: [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI から) Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. から公開された研究論文 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP から) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi から公開された研究論文: [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) diff --git a/README_ko.md b/README_ko.md index 3de95b9cfc8f..b5c0031b178a 100644 --- a/README_ko.md +++ b/README_ko.md @@ -310,7 +310,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (Salesforce 에서 제공)은 Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.의 [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500)논문과 함께 발표했습니다. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (OpenAI 에서) Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever 의 [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) 논문과 함께 발표했습니다. -1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (Microsoft Research Asia 에서) Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou 의 [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 논문과 함께 발표했습니다. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (Microsoft Research Asia 에서) Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou 의 [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 논문과 함께 발표했습니다. 1. 
**[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (Microsoft Research Asia 에서) Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei 의 [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) 논문과 함께 발표했습니다. @@ -361,7 +361,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI 에서) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 의 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 논문과 함께 발표했습니다. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI 에서) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 의 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 논문과 함께 발표했습니다. -1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (Google AI 에서 제공)은 Matthias Minderer, Alexey Gritsenko, Neil Houlsby.의 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683)논문과 함께 발표했습니다. +1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI 에서 제공)은 Matthias Minderer, Alexey Gritsenko, Neil Houlsby.의 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683)논문과 함께 발표했습니다. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (Google 에서) Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 의 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 논문과 함께 발표했습니다. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google 에서) Jason Phang, Yao Zhao, Peter J. Liu 의 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 논문과 함께 발표했습니다. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind 에서) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 의 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 논문과 함께 발표했습니다. @@ -385,7 +385,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (WeChatAI 에서) HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou 의 [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) 논문과 함께 발표했습니다. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (ZhuiyiTechnology 에서) Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu 의 a [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) 논문과 함께 발표했습니다. 1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (Bo Peng 에서 제공)은 Bo Peng.의 [this repo](https://github.com/BlinkDL/RWKV-LM)논문과 함께 발표했습니다. -1. 
**[SeamlessM4T](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. +1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (NVIDIA 에서) Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 의 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 논문과 함께 발표했습니다. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI 에서 제공)은 Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.의 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf)논문과 함께 발표했습니다. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP 에서) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 의 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 논문과 함께 발표했습니다. diff --git a/README_zh-hans.md b/README_zh-hans.md index 4a6d50da3c39..3fc5fd3b83f7 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -334,7 +334,7 @@ conda install -c huggingface transformers 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (来自 Salesforce) 伴随论文 [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) 由 Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi 发布。 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. -1. **[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. 
**[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 由 Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou 发布。 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 由 Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou 发布。 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) 由 Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei 发布。 @@ -385,7 +385,7 @@ conda install -c huggingface transformers 1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (来自 [s-JoL](https://huggingface.co/s-JoL)) 由 GitHub (现已删除). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。 -1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (来自 Google AI) 伴随论文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) 由 Matthias Minderer, Alexey Gritsenko, Neil Houlsby 发布。 +1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (来自 Google AI) 伴随论文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) 由 Matthias Minderer, Alexey Gritsenko, Neil Houlsby 发布。 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。 @@ -409,7 +409,7 @@ conda install -c huggingface transformers 1. 
**[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (来自 WeChatAI), 伴随论文 [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) 由 HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou 发布。 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (来自 ZhuiyiTechnology), 伴随论文 [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) 由 Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu 发布。 1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (来自 Bo Peng) 伴随论文 [this repo](https://github.com/BlinkDL/RWKV-LM) 由 Bo Peng 发布。 -1. **[SeamlessM4T](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. +1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (来自 NVIDIA) 伴随论文 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 由 Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 发布。 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (来自 Meta AI) 伴随论文 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) 由 Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick 发布。 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (来自 ASAPP) 伴随论文 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 由 Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index ff6f14df4ad3..8b4ebed36386 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -346,7 +346,7 @@ conda install -c huggingface transformers 1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. 1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi. 1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever. -1. 
**[KOSMOS-2](https://huggingface.co/docs/transformers/main/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. +1. **[KOSMOS-2](https://huggingface.co/docs/transformers/model_doc/kosmos-2)** (from Microsoft Research Asia) released with the paper [Kosmos-2: Grounding Multimodal Large Language Models to the World](https://arxiv.org/abs/2306.14824) by Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Furu Wei. 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. @@ -397,7 +397,7 @@ conda install -c huggingface transformers 1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released on GitHub (now removed). 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. -1. **[OWLv2](https://huggingface.co/docs/transformers/main/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. +1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. 
**[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. @@ -421,7 +421,7 @@ conda install -c huggingface transformers 1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper a [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. 1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released with the paper [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng. -1. **[SeamlessM4T](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. +1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. diff --git a/examples/flax/question-answering/run_qa.py b/examples/flax/question-answering/run_qa.py index 2fd167f49bfa..c0bf8b63250f 100644 --- a/examples/flax/question-answering/run_qa.py +++ b/examples/flax/question-answering/run_qa.py @@ -62,7 +62,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. 
Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset diff --git a/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py b/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py index cdfeff42f812..67de00c00e19 100644 --- a/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py +++ b/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py @@ -60,7 +60,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risk. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=2.14.0", "To fix: pip install -r examples/flax/speech-recogintion/requirements.txt") diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py index 190a94886cde..37f16459d3b9 100755 --- a/examples/flax/text-classification/run_flax_glue.py +++ b/examples/flax/text-classification/run_flax_glue.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset diff --git a/examples/flax/token-classification/run_flax_ner.py b/examples/flax/token-classification/run_flax_ner.py index ead61f076efa..2060508079df 100644 --- a/examples/flax/token-classification/run_flax_ner.py +++ b/examples/flax/token-classification/run_flax_ner.py @@ -56,7 +56,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py index a3c24fd8ad63..61b7e40c13d3 100644 --- a/examples/pytorch/audio-classification/run_audio_classification.py +++ b/examples/pytorch/audio-classification/run_audio_classification.py @@ -45,7 +45,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 76e36964ba09..e72db1f7f1d4 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt") diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 65e71a2635c4..7cd54f8aabd8 100755 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -57,7 +57,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py index 52b5fabd8947..c8ce4e012bb1 100644 --- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py +++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index ade4db6f43ee..b3f3c3a847f0 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -44,7 +44,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index 06b3590bec38..e440d11d11df 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -49,7 +49,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") diff --git a/examples/pytorch/image-pretraining/run_mim_no_trainer.py b/examples/pytorch/image-pretraining/run_mim_no_trainer.py index 49a41e4ef54a..4f5dfd9f4785 100644 --- a/examples/pytorch/image-pretraining/run_mim_no_trainer.py +++ b/examples/pytorch/image-pretraining/run_mim_no_trainer.py @@ -54,7 +54,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 65cf2330d4b5..4e153e72fe48 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index 292b2e8db191..b14649483d5b 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index a91b6c577afe..3effeb16fc1e 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -54,7 +54,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index e87c3859e317..6427af1f4089 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 169622a4cca7..34c75149caeb 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index e5632d1677b0..35a2ecd5e794 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index 02544bc41c60..91c9337f4b8a 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) # You should update this to your particular problem to have better documentation of `model_type` diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 4b00ba80518a..ff007292bb19 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index d410a515885a..187afe569388 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index c28c93e8fb39..ba813c321311 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index 77582378004e..97a72bf40cb5 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index c41184ce6323..cc5ccc97be55 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -47,7 +47,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index d3c033b625e1..19a62ee5518c 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -52,7 +52,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index 5eb054cd1f16..247e86fc6e76 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index cab1945db90c..b6b07261ce3e 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -51,7 +51,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py index 9a1f37fc888c..923c0378b43e 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py @@ -53,7 +53,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 7db11d27e7c2..907fb4f4c2cf 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 582f78b77fd9..d7f8b9f1c559 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -53,7 +53,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index c693a8d7db8b..42232787dfa4 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py index 6075a3403045..7e14c3deb69e 100755 --- a/examples/pytorch/text-classification/run_classification.py +++ b/examples/pytorch/text-classification/run_classification.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 7d41788c1187..ff2644f86507 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") diff --git a/examples/pytorch/text-classification/run_glue_no_trainer.py b/examples/pytorch/text-classification/run_glue_no_trainer.py index 9bf5806a9c9c..e4332966becd 100644 --- a/examples/pytorch/text-classification/run_glue_no_trainer.py +++ b/examples/pytorch/text-classification/run_glue_no_trainer.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index 33dea5e25ff8..d65be6d68efb 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index bc91df51af00..ec77e8ea6a82 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index 91cbc7a72d28..1f83c65fcb54 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index 6270f737350e..92af72ccd209 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -53,7 +53,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index 541608e42ee8..35bc9a59da34 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") diff --git a/examples/tensorflow/contrastive-image-text/run_clip.py b/examples/tensorflow/contrastive-image-text/run_clip.py index 7cc1289546f0..e56d66ce196c 100644 --- a/examples/tensorflow/contrastive-image-text/run_clip.py +++ b/examples/tensorflow/contrastive-image-text/run_clip.py @@ -52,7 +52,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version( "datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/contrastive-image-text/requirements.txt" diff --git a/examples/tensorflow/image-classification/run_image_classification.py b/examples/tensorflow/image-classification/run_image_classification.py index 58ec0bf5d20d..53c95b972a75 100644 --- a/examples/tensorflow/image-classification/run_image_classification.py +++ b/examples/tensorflow/image-classification/run_image_classification.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") diff --git a/examples/tensorflow/multiple-choice/run_swag.py b/examples/tensorflow/multiple-choice/run_swag.py index 1dbd56cb1683..e78becda89d5 100644 --- a/examples/tensorflow/multiple-choice/run_swag.py +++ b/examples/tensorflow/multiple-choice/run_swag.py @@ -51,7 +51,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/tensorflow/question-answering/run_qa.py b/examples/tensorflow/question-answering/run_qa.py index f1a03ac8d44a..4d458c1190dd 100755 --- a/examples/tensorflow/question-answering/run_qa.py +++ b/examples/tensorflow/question-answering/run_qa.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py index 98f05a5e740f..3ca57b033ccf 100644 --- a/examples/tensorflow/summarization/run_summarization.py +++ b/examples/tensorflow/summarization/run_summarization.py @@ -54,7 +54,7 @@ # region Checking dependencies # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/tensorflow/text-classification/run_glue.py b/examples/tensorflow/text-classification/run_glue.py index d164e483ad65..618296aa71a8 100644 --- a/examples/tensorflow/text-classification/run_glue.py +++ b/examples/tensorflow/text-classification/run_glue.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") task_to_keys = { "cola": ("sentence", None), diff --git a/examples/tensorflow/translation/run_translation.py b/examples/tensorflow/translation/run_translation.py index 29963ef078b4..e7fc47b3388b 100644 --- a/examples/tensorflow/translation/run_translation.py +++ b/examples/tensorflow/translation/run_translation.py @@ -57,7 +57,7 @@ # region Dependencies and constants # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.35.0.dev0") +check_min_version("4.36.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/setup.py b/setup.py index 22407701f3c1..78763b23242b 100644 --- a/setup.py +++ b/setup.py @@ -428,7 +428,7 @@ def run(self): setup( name="transformers", - version="4.35.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) + version="4.36.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)", author_email="transformers@huggingface.co", description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow", diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index cb3e7b0353e3..62a070a986db 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -18,7 +18,7 @@ # to defer the actual importing for when the objects are requested. This way `import transformers` provides the names # in the namespace without actually importing anything (and especially none of the backends). -__version__ = "4.35.0.dev0" +__version__ = "4.36.0.dev0" from typing import TYPE_CHECKING From 00d8502b7ade5aa3da43b13f23bb447faa6d459e Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Thu, 2 Nov 2023 12:42:29 -0500 Subject: [PATCH 068/268] translate peft.md to chinese (#27215) * tranlsate peft.md to chinese * translate peft.md to chinese * fix missing link --- docs/source/zh/_toctree.yml | 2 + docs/source/zh/peft.md | 215 ++++++++++++++++++++++++++++++++++++ 2 files changed, 217 insertions(+) create mode 100644 docs/source/zh/peft.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index d77fe9adc411..6ba316d7a425 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -15,6 +15,8 @@ title: 微调预训练模型 - local: accelerate title: 使用🤗Accelerate进行分布式训练 + - local: peft + title: 使用🤗 PEFT加载和训练adapters - local: transformers_agents title: agents教程 title: 教程 diff --git a/docs/source/zh/peft.md b/docs/source/zh/peft.md new file mode 100644 index 000000000000..4241a15c00ea --- /dev/null +++ b/docs/source/zh/peft.md @@ -0,0 +1,215 @@ + + +# 使用 🤗 PEFT 加载adapters + +[[open-in-colab]] + +[参数高效微调(PEFT)方法](https://huggingface.co/blog/peft)在微调过程中冻结预训练模型的参数,并在其顶部添加少量可训练参数(adapters)。adapters被训练以学习特定任务的信息。这种方法已被证明非常节省内存,同时具有较低的计算使用量,同时产生与完全微调模型相当的结果。 + +使用PEFT训练的adapters通常比完整模型小一个数量级,使其方便共享、存储和加载。 + +
+ +
与完整尺寸的模型权重(约为700MB)相比,存储在Hub上的OPTForCausalLM模型的adapter权重仅为~6MB。
+
+ +如果您对学习更多关于🤗 PEFT库感兴趣,请查看[文档](https://huggingface.co/docs/peft/index)。 + + +## 设置 + +首先安装 🤗 PEFT: + +```bash +pip install peft +``` + +如果你想尝试全新的特性,你可能会有兴趣从源代码安装这个库: + +```bash +pip install git+https://github.com/huggingface/peft.git +``` +## 支持的 PEFT 模型 + +Transformers原生支持一些PEFT方法,这意味着你可以加载本地存储或在Hub上的adapter权重,并使用几行代码轻松运行或训练它们。以下是受支持的方法: + +- [Low Rank Adapters](https://huggingface.co/docs/peft/conceptual_guides/lora) +- [IA3](https://huggingface.co/docs/peft/conceptual_guides/ia3) +- [AdaLoRA](https://arxiv.org/abs/2303.10512) + +如果你想使用其他PEFT方法,例如提示学习或提示微调,或者关于通用的 🤗 PEFT库,请参阅[文档](https://huggingface.co/docs/peft/index)。 + +## 加载 PEFT adapter + +要从huggingface的Transformers库中加载并使用PEFTadapter模型,请确保Hub仓库或本地目录包含一个`adapter_config.json`文件和adapter权重,如上例所示。然后,您可以使用`AutoModelFor`类加载PEFT adapter模型。例如,要为因果语言建模加载一个PEFT adapter模型: + +1. 指定PEFT模型id +2. 将其传递给[`AutoModelForCausalLM`]类 + +```py +from transformers import AutoModelForCausalLM, AutoTokenizer + +peft_model_id = "ybelkada/opt-350m-lora" +model = AutoModelForCausalLM.from_pretrained(peft_model_id) +``` + + + +你可以使用`AutoModelFor`类或基础模型类(如`OPTForCausalLM`或`LlamaForCausalLM`)来加载一个PEFT adapter。 + + + + +您也可以通过`load_adapter`方法来加载 PEFT adapter。 + +```py +from transformers import AutoModelForCausalLM, AutoTokenizer + +model_id = "facebook/opt-350m" +peft_model_id = "ybelkada/opt-350m-lora" + +model = AutoModelForCausalLM.from_pretrained(model_id) +model.load_adapter(peft_model_id) +``` + +## 基于8bit或4bit进行加载 + +`bitsandbytes`集成支持8bit和4bit精度数据类型,这对于加载大模型非常有用,因为它可以节省内存(请参阅`bitsandbytes`[指南](./quantization#bitsandbytes-integration)以了解更多信息)。要有效地将模型分配到您的硬件,请在[`~PreTrainedModel.from_pretrained`]中添加`load_in_8bit`或`load_in_4bit`参数,并将`device_map="auto"`设置为: + +```py +from transformers import AutoModelForCausalLM, AutoTokenizer + +peft_model_id = "ybelkada/opt-350m-lora" +model = AutoModelForCausalLM.from_pretrained(peft_model_id, device_map="auto", load_in_8bit=True) +``` + +## 添加新的adapter + +你可以使用[`~peft.PeftModel.add_adapter`]方法为一个已有adapter的模型添加一个新的adapter,只要新adapter的类型与当前adapter相同即可。例如,如果你有一个附加到模型上的LoRA adapter: + +```py +from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer +from peft import PeftConfig + +model_id = "facebook/opt-350m" +model = AutoModelForCausalLM.from_pretrained(model_id) + +lora_config = LoraConfig( + target_modules=["q_proj", "k_proj"], + init_lora_weights=False +) + +model.add_adapter(lora_config, adapter_name="adapter_1") +``` + + +添加一个新的adapter: + +```py +# attach new adapter with same config +model.add_adapter(lora_config, adapter_name="adapter_2") +``` +现在您可以使用[`~peft.PeftModel.set_adapter`]来设置要使用的adapter。 + +```py +# use adapter_1 +model.set_adapter("adapter_1") +output = model.generate(**inputs) +print(tokenizer.decode(output_disabled[0], skip_special_tokens=True)) + +# use adapter_2 +model.set_adapter("adapter_2") +output_enabled = model.generate(**inputs) +print(tokenizer.decode(output_enabled[0], skip_special_tokens=True)) +``` + +## 启用和禁用adapters +一旦您将adapter添加到模型中,您可以启用或禁用adapter模块。要启用adapter模块: + + +```py +from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer +from peft import PeftConfig + +model_id = "facebook/opt-350m" +adapter_model_id = "ybelkada/opt-350m-lora" +tokenizer = AutoTokenizer.from_pretrained(model_id) +text = "Hello" +inputs = tokenizer(text, return_tensors="pt") + +model = AutoModelForCausalLM.from_pretrained(model_id) +peft_config = PeftConfig.from_pretrained(adapter_model_id) + +# to initiate with random weights +peft_config.init_lora_weights = False + 
+model.add_adapter(peft_config) +model.enable_adapters() +output = model.generate(**inputs) +``` +要禁用adapter模块: + +```py +model.disable_adapters() +output = model.generate(**inputs) +``` +## 训练一个 PEFT adapter + +PEFT适配器受[`Trainer`]类支持,因此您可以为您的特定用例训练适配器。它只需要添加几行代码即可。例如,要训练一个LoRA adapter: + + + + +如果你不熟悉如何使用[`Trainer`]微调模型,请查看[微调预训练模型](training)教程。 + + + +1. 使用任务类型和超参数定义adapter配置(参见[`~peft.LoraConfig`]以了解超参数的详细信息)。 + +```py +from peft import LoraConfig + +peft_config = LoraConfig( + lora_alpha=16, + lora_dropout=0.1, + r=64, + bias="none", + task_type="CAUSAL_LM", +) +``` + +2. 将adapter添加到模型中。 + +```py +model.add_adapter(peft_config) +``` + +3. 现在可以将模型传递给[`Trainer`]了! + +```py +trainer = Trainer(model=model, ...) +trainer.train() +``` + +要保存训练好的adapter并重新加载它: + +```py +model.save_pretrained(save_dir) +model = AutoModelForCausalLM.from_pretrained(save_dir) +``` + + From 4991216841968e11b78e16379c42e9fc1011db88 Mon Sep 17 00:00:00 2001 From: Chi Date: Thu, 2 Nov 2023 23:43:57 +0530 Subject: [PATCH 069/268] Removed the redundant SiLUActivation class. (#27136) * Removed the redundant SiLUActivation class and now use nn.functional.silu directly. * I apologize for adding torch.functional.silu. I have replaced it with nn.SiLU. --- src/transformers/activations.py | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/src/transformers/activations.py b/src/transformers/activations.py index 587dc2e59964..be26825f4bad 100644 --- a/src/transformers/activations.py +++ b/src/transformers/activations.py @@ -137,19 +137,6 @@ def forward(self, input: Tensor) -> Tensor: return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3)))) -class SiLUActivation(nn.Module): - """ - See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear - Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function - Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated - Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with - later. - """ - - def forward(self, input: Tensor) -> Tensor: - return nn.functional.silu(input) - - class MishActivation(nn.Module): """ See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). 
Also @@ -226,8 +213,8 @@ def __getitem__(self, key): "relu2": ReLUSquaredActivation, "relu6": nn.ReLU6, "sigmoid": nn.Sigmoid, - "silu": SiLUActivation, - "swish": SiLUActivation, + "silu": nn.SiLU, + "swish": nn.SiLU, "tanh": nn.Tanh, } ACT2FN = ClassInstantier(ACT2CLS) From 552ff24488d4027590deded3b2b0d1716df341c3 Mon Sep 17 00:00:00 2001 From: Komal Kumar Date: Thu, 2 Nov 2023 16:08:03 -0400 Subject: [PATCH 070/268] Fixed base model class name extraction from PeftModels (#27162) * Fixed base model class name extraction from PeftModels * Changes to first unwrap the model then extract the base model name * Changed base_model to base_model.model to stay consistent with peft model abstractions --- src/transformers/trainer.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index ad3b840385db..7af453249269 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -646,7 +646,7 @@ def _activate_neftune(self, model): unwrapped_model = unwrap_model(model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): - embeddings = unwrapped_model.base_model.get_input_embeddings() + embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() @@ -667,7 +667,7 @@ def _deactivate_neftune(self, model): unwrapped_model = unwrap_model(model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): - embeddings = unwrapped_model.base_model.get_input_embeddings() + embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() @@ -2752,10 +2752,11 @@ def compute_loss(self, model, inputs, return_outputs=False): self._past = outputs[self.args.past_index] if labels is not None: - if is_peft_available() and isinstance(model, PeftModel): - model_name = unwrap_model(model.base_model)._get_name() + unwrapped_model = unwrap_model(model) + if is_peft_available() and isinstance(unwrapped_model, PeftModel): + model_name = unwrapped_model.base_model.model._get_name() else: - model_name = unwrap_model(model)._get_name() + model_name = unwrapped_model._get_name() if model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: From e9a6c72b5edfb9561a981959b0e7c62d8ab9ef6c Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Fri, 3 Nov 2023 08:45:05 +0100 Subject: [PATCH 071/268] Fuyu protection (#27248) --- src/transformers/__init__.py | 8 ++++---- src/transformers/utils/dummy_vision_objects.py | 7 +++++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 62a070a986db..97cc4e578c74 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -343,7 +343,7 @@ "models.focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"], "models.fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig", "FSMTTokenizer"], "models.funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig", "FunnelTokenizer"], - "models.fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig", "FuyuProcessor"], + "models.fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"], "models.git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitProcessor", "GitVisionConfig"], "models.glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"], "models.gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", 
"GPT2Tokenizer"], @@ -987,7 +987,7 @@ _import_structure["models.efficientformer"].append("EfficientFormerImageProcessor") _import_structure["models.efficientnet"].append("EfficientNetImageProcessor") _import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaImageProcessor", "FlavaProcessor"]) - _import_structure["models.fuyu"].append("FuyuImageProcessor") + _import_structure["models.fuyu"].extend(["FuyuImageProcessor", "FuyuProcessor"]) _import_structure["models.glpn"].extend(["GLPNFeatureExtractor", "GLPNImageProcessor"]) _import_structure["models.idefics"].extend(["IdeficsImageProcessor"]) _import_structure["models.imagegpt"].extend(["ImageGPTFeatureExtractor", "ImageGPTImageProcessor"]) @@ -4538,7 +4538,7 @@ from .models.focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer - from .models.fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig, FuyuProcessor + from .models.fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig from .models.git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitProcessor, GitVisionConfig from .models.glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer @@ -5117,7 +5117,7 @@ from .models.efficientformer import EfficientFormerImageProcessor from .models.efficientnet import EfficientNetImageProcessor from .models.flava import FlavaFeatureExtractor, FlavaImageProcessor, FlavaProcessor - from .models.fuyu import FuyuImageProcessor + from .models.fuyu import FuyuImageProcessor, FuyuProcessor from .models.glpn import GLPNFeatureExtractor, GLPNImageProcessor from .models.idefics import IdeficsImageProcessor from .models.imagegpt import ImageGPTFeatureExtractor, ImageGPTImageProcessor diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index 0af6ef347d64..c4a3c9312ac8 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -226,6 +226,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class FuyuProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class GLPNFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] From 05ea7b79e6903623e4d8e697c9be88462a8d8071 Mon Sep 17 00:00:00 2001 From: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com> Date: Fri, 3 Nov 2023 12:05:55 +0100 Subject: [PATCH 072/268] Refactor: Use Llama RoPE implementation for Falcon (#26933) * Use Llama RoPE implementation for Falcon + Add copy functionalities * Use standard cache format for Falcon * Simplify apply_rotary_pos_emb, copy from Llama * Remove unnecessary cache conversion test We don't need to convert any caches anymore! 
* Resolve copy complaint --- .../models/falcon/modeling_falcon.py | 327 +++++++----------- tests/models/falcon/test_modeling_falcon.py | 18 - 2 files changed, 128 insertions(+), 217 deletions(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index 511c55a84882..d4c647c846fa 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -71,12 +71,43 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: return hidden_states + self.bias -# rotary pos emb helpers (torch.jit.script does not seem to support staticmethod...) +# Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): - x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :] + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) +# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) @@ -90,138 +121,88 @@ def _get_unpad_data(attention_mask): ) -# TODO (joao): Is this the same implementation as in Llama? If so, let's make them the same and add the copy facilities +# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Falcon class FalconRotaryEmbedding(nn.Module): - """Implementation of RotaryEmbedding from GPT-NeoX. - This implementation is designed to operate on queries and keys that are compatible with `[batch_size, - n_heads_per_partition, seq_len, head_dim]` (e.g. MinGPTAttention format). 
- """ - - def __init__(self, head_dim: int, base=10000, max_position_embeddings=2048): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() - self.base = base + + self.dim = dim self.max_position_embeddings = max_position_embeddings - inv_freq = 1.0 / (self.base ** (torch.arange(0, head_dim, 2).float() / head_dim)) + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) - self.head_dim = head_dim - self.seq_len_cached = -1 - self.cos_cached: torch.Tensor | None = None - self.sin_cached: torch.Tensor | None = None + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) def _set_cos_sin_cache(self, seq_len, device, dtype): - self.seq_len_cached = seq_len - t = torch.arange(seq_len, device=device).to(dtype) + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) - emb = torch.cat((freqs, freqs), dim=-1).to(device) - - if dtype in [torch.float16, torch.bfloat16]: - emb = emb.float() - - self.cos_cached = emb.cos() - self.sin_cached = emb.sin() - - self.cos_cached = self.cos_cached.type(dtype) - self.sin_cached = self.sin_cached.type(dtype) - - def cos_sin( - self, seq_len: int, past_key_values_length: int, position_ids: torch.Tensor, device="cpu", dtype=torch.bfloat16 - ) -> torch.Tensor: - total_length = seq_len + past_key_values_length - if total_length > self.seq_len_cached: - self._set_cos_sin_cache(total_length, device, dtype) - - # the cached tensors need to update their devices (for example, after we change the model's device) - self.cos_cached = self.cos_cached.to(device) - self.sin_cached = self.sin_cached.to(device) - - # Gather cos, sin at the designated position ids - cos = self.cos_cached[position_ids] # [bs, seq_len, dim] - sin = self.sin_cached[position_ids] # [bs, seq_len, dim] - return cos, sin - - def forward(self, query, key, past_key_values_length, position_ids): - _, seq_len, _ = query.shape - cos, sin = self.cos_sin(seq_len, past_key_values_length, position_ids, query.device, query.dtype) - # Query and key's shapes are [bs * num_heads, seq_len, dim], might need manual expansion. Ifs and elses used to - # avoid unnecessary repeat_interleave operations. 
- query_expansion_factor = int(query.shape[0] / cos.shape[0]) - if query_expansion_factor > 1: - query_cos = torch.repeat_interleave(cos, query_expansion_factor, dim=0) - query_sin = torch.repeat_interleave(sin, query_expansion_factor, dim=0) - else: - query_cos, query_sin = cos, sin + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) - key_expansion_factor = int(key.shape[0] / cos.shape[0]) - if key_expansion_factor > 1: - if key_expansion_factor != query_expansion_factor: - key_cos = torch.repeat_interleave(cos, key_expansion_factor, dim=0) - key_sin = torch.repeat_interleave(sin, key_expansion_factor, dim=0) - else: - key_cos, key_sin = query_cos, query_sin - else: - key_cos, key_sin = cos, sin + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) - return (query * query_cos) + (rotate_half(query) * query_sin), (key * key_cos) + (rotate_half(key) * key_sin) + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) +# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->Falcon class FalconLinearScalingRotaryEmbedding(FalconRotaryEmbedding): """FalconRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" - def __init__(self, head_dim: int, base=10000, max_position_embeddings=2048, scaling_factor=1.0): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor - super().__init__(head_dim, base, max_position_embeddings) + super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): - self.seq_len_cached = seq_len - t = torch.arange(seq_len, device=device).to(dtype) - # This line is the only difference from FalconRotaryEmbedding._set_cos_sin_cache + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) t = t / self.scaling_factor freqs = torch.einsum("i,j->ij", t, self.inv_freq) - emb = torch.cat((freqs, freqs), dim=-1).to(device) - - if dtype in [torch.float16, torch.bfloat16]: - emb = emb.float() - - self.cos_cached = emb.cos() - self.sin_cached = emb.sin() - - self.cos_cached = self.cos_cached.type(dtype) - self.sin_cached = self.sin_cached.type(dtype) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) +# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->Falcon class FalconDynamicNTKScalingRotaryEmbedding(FalconRotaryEmbedding): - """ - FalconRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla - """ + """FalconRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" - def __init__(self, head_dim: int, base=10000, max_position_embeddings=2048, scaling_factor=1.0): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor - super().__init__(head_dim, base, max_position_embeddings) + super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): - self.seq_len_cached = seq_len + self.max_seq_len_cached = seq_len - # This if block is the only difference from FalconRotaryEmbedding._set_cos_sin_cache if seq_len > self.max_position_embeddings: base = self.base * ( (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) - ) ** (self.head_dim / (self.head_dim - 2)) - inv_freq = 1.0 / (base ** (torch.arange(0, self.head_dim, 2).float().to(device) / self.head_dim)) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) - t = torch.arange(seq_len, device=device).to(dtype) - freqs = torch.einsum("i,j->ij", t, self.inv_freq) - emb = torch.cat((freqs, freqs), dim=-1).to(device) - - if dtype in [torch.float16, torch.bfloat16]: - emb = emb.float() + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) - self.cos_cached = emb.cos() - self.sin_cached = emb.sin() - - self.cos_cached = self.cos_cached.type(dtype) - self.sin_cached = self.sin_cached.type(dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def _prepare_4d_attention_mask(mask: torch.Tensor, past_key_values_length: int) -> torch.BoolTensor: @@ -293,6 +274,8 @@ def __init__(self, config: FalconConfig): self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta self.is_causal = True if self.head_dim * self.num_heads != self.hidden_size: @@ -301,7 +284,8 @@ def __init__(self, config: FalconConfig): f" {self.num_heads})." 
) - self.maybe_rotary = self._init_rope() if config.rotary else lambda q, k, t, p: (q, k) + if config.rotary: + self._init_rope() # Layer-wise attention scaling self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) @@ -319,33 +303,33 @@ def __init__(self, config: FalconConfig): self.attention_dropout = nn.Dropout(config.attention_dropout) self.num_kv_heads = config.num_kv_heads if (self.new_decoder_architecture or not self.multi_query) else 1 + # Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->Falcon def _init_rope(self): if self.config.rope_scaling is None: - rotary_emb = FalconRotaryEmbedding( + self.rotary_emb = FalconRotaryEmbedding( self.head_dim, - base=self.config.rope_theta, - max_position_embeddings=self.config.max_position_embeddings, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, ) else: scaling_type = self.config.rope_scaling["type"] scaling_factor = self.config.rope_scaling["factor"] if scaling_type == "linear": - rotary_emb = FalconLinearScalingRotaryEmbedding( + self.rotary_emb = FalconLinearScalingRotaryEmbedding( self.head_dim, - base=self.config.rope_theta, - max_position_embeddings=self.config.max_position_embeddings, + max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, + base=self.rope_theta, ) elif scaling_type == "dynamic": - rotary_emb = FalconDynamicNTKScalingRotaryEmbedding( + self.rotary_emb = FalconDynamicNTKScalingRotaryEmbedding( self.head_dim, - base=self.config.rope_theta, - max_position_embeddings=self.config.max_position_embeddings, + max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, + base=self.rope_theta, ) else: raise ValueError(f"Unknown RoPE scaling type {scaling_type}") - return rotary_emb def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ @@ -428,35 +412,31 @@ def forward( batch_size, query_length, _, _ = query_layer.shape - query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, query_length, self.head_dim) - key_layer = key_layer.transpose(1, 2).reshape( - batch_size * num_kv_heads, - query_length, - self.head_dim, - ) - value_layer = value_layer.transpose(1, 2).reshape(batch_size * num_kv_heads, query_length, self.head_dim) + query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) + key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) + value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) - past_kv_length = 0 if layer_past is None else layer_past[0].shape[1] - query_layer, key_layer = self.maybe_rotary(query_layer, key_layer, past_kv_length, position_ids) + kv_seq_len = key_layer.shape[-2] + if layer_past is not None: + kv_seq_len += layer_past[0].shape[-2] + if alibi is None: + cos, sin = self.rotary_emb(value_layer, seq_len=kv_seq_len) + query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin, position_ids) if layer_past is not None: past_key, past_value = layer_past # concatenate along seq_length dimension: - # - key: [batch_size * self.num_heads, kv_length, head_dim] - # - value: [batch_size * self.num_heads, kv_length, head_dim] - key_layer = torch.cat((past_key, key_layer), dim=1) - value_layer = torch.cat((past_value, value_layer), dim=1) + # - key: [batch_size, self.num_heads, kv_length, head_dim] + # - value: [batch_size, self.num_heads, kv_length, 
head_dim] + key_layer = torch.cat((past_key, key_layer), dim=-2) + value_layer = torch.cat((past_value, value_layer), dim=-2) - _, kv_length, _ = key_layer.shape + kv_length = key_layer.shape[-2] if use_cache: present = (key_layer, value_layer) else: present = None - query_layer_ = query_layer.reshape(batch_size, self.num_heads, -1, self.head_dim) - key_layer_ = key_layer.reshape(batch_size, num_kv_heads, -1, self.head_dim) - value_layer_ = value_layer.reshape(batch_size, num_kv_heads, -1, self.head_dim) - if alibi is None: if hasattr(F, "scaled_dot_product_attention") and not output_attentions: # TODO: deprecate this once we add FA2 support in Falcon @@ -467,15 +447,15 @@ def forward( ) attn_output = F.scaled_dot_product_attention( - query_layer_, key_layer_, value_layer_, attention_mask, 0.0, is_causal=False + query_layer, key_layer, value_layer, attention_mask, 0.0, is_causal=False ) attention_scores = None else: - attention_scores = query_layer_ @ key_layer_.transpose(-1, -2) + attention_scores = query_layer @ key_layer.transpose(-1, -2) attention_scores /= math.sqrt(self.head_dim) attention_scores = F.softmax(attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype) - attn_output = attention_scores @ value_layer_ + attn_output = attention_scores @ value_layer attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim) attn_output = attn_output.permute(0, 2, 1, 3) @@ -489,7 +469,7 @@ def forward( return output_tensor, present else: - matmul_result = query_layer_ @ key_layer_.transpose(-1, -2) + matmul_result = query_layer @ key_layer.transpose(-1, -2) # change view to [batch_size, num_heads, q_length, kv_length] attention_scores = matmul_result.view(batch_size, self.num_heads, query_length, kv_length) @@ -516,7 +496,7 @@ def forward( attention_probs_reshaped = attention_probs.view(batch_size, self.num_heads, query_length, kv_length) # matmul: [batch_size * num_heads, q_length, head_dim] - context_layer = (attention_probs_reshaped @ value_layer_).flatten(0, 1) + context_layer = (attention_probs_reshaped @ value_layer).flatten(0, 1) # change view [batch_size, q_length, num_heads * head_dim] context_layer = self._merge_heads(context_layer) @@ -563,37 +543,27 @@ def forward( batch_size, query_length, _, _ = query_layer.shape - query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, query_length, self.head_dim) - key_layer = key_layer.transpose(1, 2).reshape( - batch_size * num_kv_heads, - query_length, - self.head_dim, - ) - value_layer = value_layer.transpose(1, 2).reshape(batch_size * num_kv_heads, query_length, self.head_dim) + query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) + key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) + value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) - past_kv_length = 0 if layer_past is None else layer_past[0].shape[1] - query_layer, key_layer = self.maybe_rotary(query_layer, key_layer, past_kv_length, position_ids) + kv_seq_len = key_layer.shape[-2] + if layer_past is not None: + kv_seq_len += layer_past[0].shape[-2] + if alibi is None: + cos, sin = self.rotary_emb(value_layer, seq_len=kv_seq_len) + query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin, position_ids) if layer_past is not None and use_cache: past_key, past_value = layer_past # concatenate along seq_length dimension: - # - key: [batch_size * 
self.num_heads, kv_length, head_dim] - # - value: [batch_size * self.num_heads, kv_length, head_dim] - key_layer = torch.cat((past_key, key_layer), dim=1) - value_layer = torch.cat((past_value, value_layer), dim=1) - - _, kv_seq_length, _ = key_layer.shape - - torch_dtype = query_layer.dtype + # - key: [batch_size, self.num_heads, kv_length, head_dim] + # - value: [batch_size, self.num_heads, kv_length, head_dim] + key_layer = torch.cat((past_key, key_layer), dim=-2) + value_layer = torch.cat((past_value, value_layer), dim=-2) past_key_value = (key_layer, value_layer) if use_cache else None - query_layer = ( - query_layer.reshape(batch_size, self.num_heads, -1, self.head_dim).transpose(1, 2).to(torch_dtype) - ) - key_layer = key_layer.reshape(batch_size, num_kv_heads, -1, self.head_dim).transpose(1, 2).to(torch_dtype) - value_layer = value_layer.reshape(batch_size, num_kv_heads, -1, self.head_dim).transpose(1, 2).to(torch_dtype) - if alibi is not None: raise ValueError("`alibi` is not supported when `use_flash_attn` is True") @@ -940,42 +910,6 @@ def _init_weights(self, module: nn.Module): module.bias.data.zero_() module.weight.data.fill_(1.0) - @staticmethod - def _convert_cache_to_standard_format( - past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: - """ - Standardizes the format of the cache so as to match most implementations, i.e. to tuple(tuple([batch_size, - num_heads, ...])) - """ - batch_size_times_num_heads, kv_length, head_dim = past_key_value[0][0].shape - # [batch_size * self.num_heads, kv_length, head_dim] -> [batch_size, num_heads, kv_length, head_dim] - # Note that don't want to use self.num_attention_heads because the number of heads may vary depending - # on whether we use multi_query attention. 
- num_heads = batch_size_times_num_heads // batch_size - return tuple( - ( - layer_past[0].view(batch_size, num_heads, kv_length, head_dim), - layer_past[1].view(batch_size, num_heads, kv_length, head_dim), - ) - for layer_past in past_key_value - ) - - @staticmethod - def _convert_to_rw_cache( - past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]] - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: - batch_size, num_heads, kv_length, head_dim = past_key_value[0][0].shape - batch_size_times_num_heads = batch_size * num_heads - # [batch_size, num_heads, kv_length, head_dim] -> [batch_size * num_heads, kv_length, head_dim] - return tuple( - ( - layer_past[0].view(batch_size_times_num_heads, kv_length, head_dim), - layer_past[1].view(batch_size_times_num_heads, kv_length, head_dim), - ) - for layer_past in past_key_value - ) - @add_start_docstrings( "The bare Falcon Model transformer outputting raw hidden-states without any specific head on top.", @@ -1046,8 +980,6 @@ def forward( if past_key_values is None: past_key_values = tuple([None] * len(self.h)) - else: - past_key_values = self._convert_to_rw_cache(past_key_values) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head @@ -1073,7 +1005,7 @@ def forward( # Compute alibi tensor: check build_alibi_tensor documentation past_key_values_length = 0 if past_key_values[0] is not None: - past_key_values_length = past_key_values[0][0].shape[1] # 1 because RW-cache, not standard format + past_key_values_length = past_key_values[0][0].shape[-2] if self.use_alibi: mask = ( @@ -1143,9 +1075,6 @@ def forward( if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) - if presents is not None: - presents = self._convert_cache_to_standard_format(presents, batch_size) - if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) diff --git a/tests/models/falcon/test_modeling_falcon.py b/tests/models/falcon/test_modeling_falcon.py index 75b1e3e46acb..5956a9ed6bf0 100644 --- a/tests/models/falcon/test_modeling_falcon.py +++ b/tests/models/falcon/test_modeling_falcon.py @@ -340,24 +340,6 @@ def test_falcon_sequence_classification_model_for_single_label(self): result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) - def test_cache_conversions(self): - config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() - input_ids = input_dict["input_ids"] - model = FalconForCausalLM(config) - model.to(torch_device) - model.eval() - result = model(input_ids, use_cache=True) - batch_size = input_ids.shape[0] - rw_cache = model._convert_to_rw_cache(result.past_key_values) - standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size) - for layer in range(len(rw_cache)): - for tensor_idx in range(2): - self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3) - self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4) - self.assertTrue( - torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]) - ) - def test_falcon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 From 8f1a43cd91cb22b65f1f840f6bca0e156e5e8495 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Fri, 3 Nov 2023 12:23:02 +0100 Subject: [PATCH 
073/268] [`PEFT` / `Tests` ] Fix peft integration failing tests (#27258) fix peft integration issues --- .../peft_integration/test_peft_integration.py | 50 +++++++++++++++++-- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/tests/peft_integration/test_peft_integration.py b/tests/peft_integration/test_peft_integration.py index 809282c7704b..95f5f844779d 100644 --- a/tests/peft_integration/test_peft_integration.py +++ b/tests/peft_integration/test_peft_integration.py @@ -98,17 +98,18 @@ def test_peft_save_pretrained(self): with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) - self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) + self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("config.json" not in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) + self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) - peft_model.save_pretrained(tmpdirname, safe_serialization=True) - self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) + peft_model.save_pretrained(tmpdirname, safe_serialization=False) + self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) @@ -328,9 +329,10 @@ def test_peft_save_quantized(self): with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) - self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) + self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) + self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: @@ -344,9 +346,49 @@ def test_peft_save_quantized(self): with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) + self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) + self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) + self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) + self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) + + @require_torch_gpu + def test_peft_save_quantized_regression(self): + """ + Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models + Regression test to make sure everything works as expected before the safetensors integration. 
+ """ + # 4bit + for model_id in self.peft_test_model_ids: + for transformers_class in self.transformers_test_model_classes: + peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") + + module = peft_model.model.decoder.layers[0].self_attn.v_proj + self.assertTrue(module.__class__.__name__ == "Linear4bit") + self.assertTrue(peft_model.hf_device_map is not None) + + with tempfile.TemporaryDirectory() as tmpdirname: + peft_model.save_pretrained(tmpdirname, safe_serialization=False) + self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) + self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) + self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) + self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) + + # 8-bit + for model_id in self.peft_test_model_ids: + for transformers_class in self.transformers_test_model_classes: + peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") + + module = peft_model.model.decoder.layers[0].self_attn.v_proj + self.assertTrue(module.__class__.__name__ == "Linear8bitLt") + self.assertTrue(peft_model.hf_device_map is not None) + + with tempfile.TemporaryDirectory() as tmpdirname: + peft_model.save_pretrained(tmpdirname, safe_serialization=False) + self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) + self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) def test_peft_pipeline(self): """ From af8d1dc3093e2735a3aa82cd9195f04900364bee Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 3 Nov 2023 12:47:07 +0100 Subject: [PATCH 074/268] Avoid many failing tests in doctesting (#27262) * fix * update * update * fix --------- Co-authored-by: ydshieh --- utils/check_doctest_list.py | 2 +- utils/not_doctested.txt | 1982 ++++++++++++++++++----------------- utils/tests_fetcher.py | 6 +- 3 files changed, 997 insertions(+), 993 deletions(-) diff --git a/utils/check_doctest_list.py b/utils/check_doctest_list.py index 71f2b84654e4..f39895ff5218 100644 --- a/utils/check_doctest_list.py +++ b/utils/check_doctest_list.py @@ -54,7 +54,7 @@ def clean_doctest_list(doctest_file: str, overwrite: bool = False): all_paths = [] with open(doctest_file, "r", encoding="utf-8") as f: for line in f: - line = line.strip() + line = line.strip().split(" ")[0] path = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index 31cda5fd76c6..79297cb17dc1 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -1,990 +1,992 @@ -docs/source/en/_config.py -docs/source/en/accelerate.md -docs/source/en/add_new_model.md -docs/source/en/add_new_pipeline.md -docs/source/en/add_tensorflow_model.md -docs/source/en/attention.md -docs/source/en/benchmarks.md -docs/source/en/bertology.md -docs/source/en/big_models.md -docs/source/en/community.md -docs/source/en/contributing.md -docs/source/en/create_a_model.md -docs/source/en/custom_models.md -docs/source/en/custom_tools.md -docs/source/en/debugging.md -docs/source/en/fast_tokenizers.md -docs/source/en/glossary.md -docs/source/en/hpo_train.md -docs/source/en/index.md -docs/source/en/installation.md -docs/source/en/internal/audio_utils.md -docs/source/en/internal/file_utils.md 
-docs/source/en/internal/image_processing_utils.md -docs/source/en/internal/modeling_utils.md -docs/source/en/internal/pipelines_utils.md -docs/source/en/internal/time_series_utils.md -docs/source/en/internal/tokenization_utils.md -docs/source/en/internal/trainer_utils.md -docs/source/en/llm_tutorial.md -docs/source/en/main_classes/agent.md -docs/source/en/main_classes/callback.md -docs/source/en/main_classes/configuration.md -docs/source/en/main_classes/data_collator.md -docs/source/en/main_classes/deepspeed.md -docs/source/en/main_classes/feature_extractor.md -docs/source/en/main_classes/image_processor.md -docs/source/en/main_classes/keras_callbacks.md -docs/source/en/main_classes/logging.md -docs/source/en/main_classes/model.md -docs/source/en/main_classes/onnx.md -docs/source/en/main_classes/optimizer_schedules.md -docs/source/en/main_classes/output.md -docs/source/en/main_classes/pipelines.md -docs/source/en/main_classes/processors.md -docs/source/en/main_classes/quantization.md -docs/source/en/main_classes/tokenizer.md -docs/source/en/main_classes/trainer.md -docs/source/en/model_doc/albert.md -docs/source/en/model_doc/align.md -docs/source/en/model_doc/altclip.md -docs/source/en/model_doc/audio-spectrogram-transformer.md -docs/source/en/model_doc/auto.md -docs/source/en/model_doc/autoformer.md -docs/source/en/model_doc/bark.md -docs/source/en/model_doc/bart.md -docs/source/en/model_doc/barthez.md -docs/source/en/model_doc/bartpho.md -docs/source/en/model_doc/beit.md -docs/source/en/model_doc/bert-generation.md -docs/source/en/model_doc/bert-japanese.md -docs/source/en/model_doc/bert.md -docs/source/en/model_doc/bertweet.md -docs/source/en/model_doc/big_bird.md -docs/source/en/model_doc/bigbird_pegasus.md -docs/source/en/model_doc/biogpt.md -docs/source/en/model_doc/bit.md -docs/source/en/model_doc/blenderbot-small.md -docs/source/en/model_doc/blenderbot.md -docs/source/en/model_doc/blip-2.md -docs/source/en/model_doc/blip.md -docs/source/en/model_doc/bloom.md -docs/source/en/model_doc/bort.md -docs/source/en/model_doc/bridgetower.md -docs/source/en/model_doc/camembert.md -docs/source/en/model_doc/canine.md -docs/source/en/model_doc/chinese_clip.md -docs/source/en/model_doc/clap.md -docs/source/en/model_doc/clip.md -docs/source/en/model_doc/clipseg.md -docs/source/en/model_doc/codegen.md -docs/source/en/model_doc/conditional_detr.md -docs/source/en/model_doc/convbert.md -docs/source/en/model_doc/convnext.md -docs/source/en/model_doc/convnextv2.md -docs/source/en/model_doc/cpm.md -docs/source/en/model_doc/cpmant.md -docs/source/en/model_doc/ctrl.md -docs/source/en/model_doc/cvt.md -docs/source/en/model_doc/data2vec.md -docs/source/en/model_doc/deberta-v2.md -docs/source/en/model_doc/deberta.md -docs/source/en/model_doc/decision_transformer.md -docs/source/en/model_doc/deformable_detr.md -docs/source/en/model_doc/deit.md -docs/source/en/model_doc/deplot.md -docs/source/en/model_doc/deta.md -docs/source/en/model_doc/detr.md -docs/source/en/model_doc/dialogpt.md -docs/source/en/model_doc/dinat.md -docs/source/en/model_doc/dinov2.md -docs/source/en/model_doc/distilbert.md -docs/source/en/model_doc/dit.md -docs/source/en/model_doc/dpr.md -docs/source/en/model_doc/dpt.md -docs/source/en/model_doc/efficientformer.md -docs/source/en/model_doc/efficientnet.md -docs/source/en/model_doc/electra.md -docs/source/en/model_doc/encodec.md -docs/source/en/model_doc/ernie.md -docs/source/en/model_doc/ernie_m.md -docs/source/en/model_doc/esm.md -docs/source/en/model_doc/flan-t5.md 
-docs/source/en/model_doc/flan-ul2.md -docs/source/en/model_doc/flaubert.md -docs/source/en/model_doc/flava.md -docs/source/en/model_doc/fnet.md -docs/source/en/model_doc/focalnet.md -docs/source/en/model_doc/fsmt.md -docs/source/en/model_doc/funnel.md -docs/source/en/model_doc/git.md -docs/source/en/model_doc/glpn.md -docs/source/en/model_doc/gpt-sw3.md -docs/source/en/model_doc/gpt2.md -docs/source/en/model_doc/gpt_bigcode.md -docs/source/en/model_doc/gpt_neo.md -docs/source/en/model_doc/gpt_neox.md -docs/source/en/model_doc/gpt_neox_japanese.md -docs/source/en/model_doc/gptj.md -docs/source/en/model_doc/gptsan-japanese.md -docs/source/en/model_doc/graphormer.md -docs/source/en/model_doc/groupvit.md -docs/source/en/model_doc/herbert.md -docs/source/en/model_doc/hubert.md -docs/source/en/model_doc/ibert.md -docs/source/en/model_doc/idefics.md -docs/source/en/model_doc/imagegpt.md -docs/source/en/model_doc/informer.md -docs/source/en/model_doc/instructblip.md -docs/source/en/model_doc/jukebox.md -docs/source/en/model_doc/layoutlm.md -docs/source/en/model_doc/layoutlmv2.md -docs/source/en/model_doc/layoutlmv3.md -docs/source/en/model_doc/layoutxlm.md -docs/source/en/model_doc/led.md -docs/source/en/model_doc/levit.md -docs/source/en/model_doc/lilt.md -docs/source/en/model_doc/llama.md -docs/source/en/model_doc/llama2.md -docs/source/en/model_doc/longformer.md -docs/source/en/model_doc/longt5.md -docs/source/en/model_doc/luke.md -docs/source/en/model_doc/lxmert.md -docs/source/en/model_doc/m2m_100.md -docs/source/en/model_doc/marian.md -docs/source/en/model_doc/mask2former.md -docs/source/en/model_doc/maskformer.md -docs/source/en/model_doc/matcha.md -docs/source/en/model_doc/mbart.md -docs/source/en/model_doc/mctct.md -docs/source/en/model_doc/mega.md -docs/source/en/model_doc/megatron-bert.md -docs/source/en/model_doc/megatron_gpt2.md -docs/source/en/model_doc/mgp-str.md -docs/source/en/model_doc/mistral.md -docs/source/en/model_doc/mluke.md -docs/source/en/model_doc/mms.md -docs/source/en/model_doc/mobilebert.md -docs/source/en/model_doc/mobilenet_v1.md -docs/source/en/model_doc/mobilenet_v2.md -docs/source/en/model_doc/mobilevit.md -docs/source/en/model_doc/mobilevitv2.md -docs/source/en/model_doc/mpnet.md -docs/source/en/model_doc/mpt.md -docs/source/en/model_doc/mra.md -docs/source/en/model_doc/mt5.md -docs/source/en/model_doc/musicgen.md -docs/source/en/model_doc/mvp.md -docs/source/en/model_doc/nat.md -docs/source/en/model_doc/nezha.md -docs/source/en/model_doc/nllb-moe.md -docs/source/en/model_doc/nllb.md -docs/source/en/model_doc/nystromformer.md -docs/source/en/model_doc/oneformer.md -docs/source/en/model_doc/open-llama.md -docs/source/en/model_doc/openai-gpt.md -docs/source/en/model_doc/opt.md -docs/source/en/model_doc/owlvit.md -docs/source/en/model_doc/pegasus.md -docs/source/en/model_doc/pegasus_x.md -docs/source/en/model_doc/perceiver.md -docs/source/en/model_doc/phobert.md -docs/source/en/model_doc/pix2struct.md -docs/source/en/model_doc/plbart.md -docs/source/en/model_doc/poolformer.md -docs/source/en/model_doc/pop2piano.md -docs/source/en/model_doc/prophetnet.md -docs/source/en/model_doc/pvt.md -docs/source/en/model_doc/qdqbert.md -docs/source/en/model_doc/rag.md -docs/source/en/model_doc/realm.md -docs/source/en/model_doc/reformer.md -docs/source/en/model_doc/regnet.md -docs/source/en/model_doc/rembert.md -docs/source/en/model_doc/resnet.md -docs/source/en/model_doc/retribert.md -docs/source/en/model_doc/roberta-prelayernorm.md -docs/source/en/model_doc/roberta.md 
-docs/source/en/model_doc/roc_bert.md -docs/source/en/model_doc/roformer.md -docs/source/en/model_doc/rwkv.md -docs/source/en/model_doc/sam.md -docs/source/en/model_doc/segformer.md -docs/source/en/model_doc/sew-d.md -docs/source/en/model_doc/sew.md -docs/source/en/model_doc/speech-encoder-decoder.md -docs/source/en/model_doc/speech_to_text_2.md -docs/source/en/model_doc/speecht5.md -docs/source/en/model_doc/splinter.md -docs/source/en/model_doc/squeezebert.md -docs/source/en/model_doc/swiftformer.md -docs/source/en/model_doc/swin.md -docs/source/en/model_doc/swin2sr.md -docs/source/en/model_doc/swinv2.md -docs/source/en/model_doc/table-transformer.md -docs/source/en/model_doc/tapas.md -docs/source/en/model_doc/time_series_transformer.md -docs/source/en/model_doc/timesformer.md -docs/source/en/model_doc/trajectory_transformer.md -docs/source/en/model_doc/transfo-xl.md -docs/source/en/model_doc/trocr.md -docs/source/en/model_doc/tvlt.md -docs/source/en/model_doc/ul2.md -docs/source/en/model_doc/umt5.md -docs/source/en/model_doc/unispeech-sat.md -docs/source/en/model_doc/unispeech.md -docs/source/en/model_doc/upernet.md -docs/source/en/model_doc/van.md -docs/source/en/model_doc/videomae.md -docs/source/en/model_doc/vilt.md -docs/source/en/model_doc/vision-encoder-decoder.md -docs/source/en/model_doc/vision-text-dual-encoder.md -docs/source/en/model_doc/visual_bert.md -docs/source/en/model_doc/vit.md -docs/source/en/model_doc/vit_hybrid.md -docs/source/en/model_doc/vit_mae.md -docs/source/en/model_doc/vit_msn.md -docs/source/en/model_doc/vivit.md -docs/source/en/model_doc/wav2vec2-conformer.md -docs/source/en/model_doc/wav2vec2.md -docs/source/en/model_doc/wav2vec2_phoneme.md -docs/source/en/model_doc/wavlm.md -docs/source/en/model_doc/whisper.md -docs/source/en/model_doc/xclip.md -docs/source/en/model_doc/xglm.md -docs/source/en/model_doc/xlm-prophetnet.md -docs/source/en/model_doc/xlm-roberta-xl.md -docs/source/en/model_doc/xlm-roberta.md -docs/source/en/model_doc/xlm-v.md -docs/source/en/model_doc/xlm.md -docs/source/en/model_doc/xlnet.md -docs/source/en/model_doc/xls_r.md -docs/source/en/model_doc/xlsr_wav2vec2.md -docs/source/en/model_doc/xmod.md -docs/source/en/model_doc/yolos.md -docs/source/en/model_doc/yoso.md -docs/source/en/model_memory_anatomy.md -docs/source/en/model_sharing.md -docs/source/en/model_summary.md -docs/source/en/multilingual.md -docs/source/en/notebooks.md -docs/source/en/pad_truncation.md -docs/source/en/peft.md -docs/source/en/perf_hardware.md -docs/source/en/perf_infer_cpu.md -docs/source/en/perf_infer_gpu_one.md -docs/source/en/perf_torch_compile.md -docs/source/en/perf_train_cpu.md -docs/source/en/perf_train_cpu_many.md -docs/source/en/perf_train_gpu_many.md -docs/source/en/perf_train_gpu_one.md -docs/source/en/perf_train_special.md -docs/source/en/perf_train_tpu.md -docs/source/en/perf_train_tpu_tf.md -docs/source/en/performance.md -docs/source/en/perplexity.md -docs/source/en/philosophy.md -docs/source/en/pipeline_webserver.md -docs/source/en/pr_checks.md -docs/source/en/preprocessing.md -docs/source/en/run_scripts.md -docs/source/en/sagemaker.md -docs/source/en/serialization.md -docs/source/en/tasks/asr.md -docs/source/en/tasks/audio_classification.md -docs/source/en/tasks/document_question_answering.md -docs/source/en/tasks/image_captioning.md -docs/source/en/tasks/image_classification.md -docs/source/en/tasks/language_modeling.md -docs/source/en/tasks/masked_language_modeling.md -docs/source/en/tasks/monocular_depth_estimation.md 
-docs/source/en/tasks/multiple_choice.md -docs/source/en/tasks/object_detection.md -docs/source/en/tasks/question_answering.md -docs/source/en/tasks/semantic_segmentation.md -docs/source/en/tasks/sequence_classification.md -docs/source/en/tasks/summarization.md -docs/source/en/tasks/text-to-speech.md -docs/source/en/tasks/token_classification.md -docs/source/en/tasks/translation.md -docs/source/en/tasks/video_classification.md -docs/source/en/tasks/visual_question_answering.md -docs/source/en/tasks/zero_shot_image_classification.md -docs/source/en/tasks/zero_shot_object_detection.md -docs/source/en/tasks_explained.md -docs/source/en/tf_xla.md -docs/source/en/tflite.md -docs/source/en/tokenizer_summary.md -docs/source/en/torchscript.md -docs/source/en/training.md -docs/source/en/transformers_agents.md -docs/source/en/troubleshooting.md -src/transformers/activations.py -src/transformers/activations_tf.py -src/transformers/audio_utils.py -src/transformers/benchmark/benchmark.py -src/transformers/benchmark/benchmark_args.py -src/transformers/benchmark/benchmark_args_tf.py -src/transformers/benchmark/benchmark_args_utils.py -src/transformers/benchmark/benchmark_tf.py -src/transformers/benchmark/benchmark_utils.py -src/transformers/commands/add_new_model.py -src/transformers/commands/add_new_model_like.py -src/transformers/commands/convert.py -src/transformers/commands/download.py -src/transformers/commands/env.py -src/transformers/commands/lfs.py -src/transformers/commands/pt_to_tf.py -src/transformers/commands/run.py -src/transformers/commands/serving.py -src/transformers/commands/train.py -src/transformers/commands/transformers_cli.py -src/transformers/commands/user.py -src/transformers/configuration_utils.py -src/transformers/convert_graph_to_onnx.py -src/transformers/convert_pytorch_checkpoint_to_tf2.py -src/transformers/convert_slow_tokenizer.py -src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py -src/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py -src/transformers/data/data_collator.py -src/transformers/data/datasets/glue.py -src/transformers/data/datasets/language_modeling.py -src/transformers/data/datasets/squad.py -src/transformers/data/metrics/squad_metrics.py -src/transformers/data/processors/glue.py -src/transformers/data/processors/squad.py -src/transformers/data/processors/utils.py -src/transformers/data/processors/xnli.py -src/transformers/debug_utils.py -src/transformers/deepspeed.py -src/transformers/dependency_versions_check.py -src/transformers/dependency_versions_table.py -src/transformers/dynamic_module_utils.py -src/transformers/feature_extraction_sequence_utils.py -src/transformers/feature_extraction_utils.py -src/transformers/file_utils.py -src/transformers/hf_argparser.py -src/transformers/hyperparameter_search.py -src/transformers/image_processing_utils.py -src/transformers/image_transforms.py -src/transformers/image_utils.py -src/transformers/integrations/bitsandbytes.py -src/transformers/integrations/deepspeed.py -src/transformers/integrations/integration_utils.py -src/transformers/integrations/peft.py -src/transformers/keras_callbacks.py -src/transformers/modelcard.py -src/transformers/modeling_flax_outputs.py -src/transformers/modeling_flax_pytorch_utils.py -src/transformers/modeling_flax_utils.py -src/transformers/modeling_outputs.py -src/transformers/modeling_tf_outputs.py -src/transformers/modeling_tf_pytorch_utils.py -src/transformers/modeling_tf_utils.py -src/transformers/modeling_utils.py 
-src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/albert/modeling_flax_albert.py -src/transformers/models/align/configuration_align.py -src/transformers/models/align/convert_align_tf_to_hf.py -src/transformers/models/align/modeling_align.py -src/transformers/models/altclip/configuration_altclip.py -src/transformers/models/altclip/modeling_altclip.py -src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py -src/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py -src/transformers/models/auto/auto_factory.py -src/transformers/models/auto/configuration_auto.py -src/transformers/models/auto/modeling_auto.py -src/transformers/models/auto/modeling_flax_auto.py -src/transformers/models/auto/modeling_tf_auto.py -src/transformers/models/autoformer/configuration_autoformer.py -src/transformers/models/autoformer/modeling_autoformer.py -src/transformers/models/bark/convert_suno_to_hf.py -src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/bart/modeling_flax_bart.py -src/transformers/models/bart/modeling_tf_bart.py -src/transformers/models/beit/convert_beit_unilm_to_pytorch.py -src/transformers/models/beit/modeling_flax_beit.py -src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py -src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py -src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py -src/transformers/models/bert/modeling_flax_bert.py -src/transformers/models/bert_generation/modeling_bert_generation.py -src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py -src/transformers/models/big_bird/modeling_flax_big_bird.py -src/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py -src/transformers/models/biogpt/configuration_biogpt.py -src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/biogpt/modeling_biogpt.py -src/transformers/models/bit/configuration_bit.py -src/transformers/models/bit/convert_bit_to_pytorch.py -src/transformers/models/bit/modeling_bit.py -src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/blenderbot/modeling_flax_blenderbot.py -src/transformers/models/blenderbot/modeling_tf_blenderbot.py -src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py -src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py -src/transformers/models/blip/configuration_blip.py -src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py -src/transformers/models/blip/modeling_blip_text.py -src/transformers/models/blip/modeling_tf_blip_text.py -src/transformers/models/blip_2/configuration_blip_2.py -src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py -src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py -src/transformers/models/bloom/modeling_bloom.py -src/transformers/models/bloom/modeling_flax_bloom.py -src/transformers/models/bridgetower/configuration_bridgetower.py -src/transformers/models/bridgetower/modeling_bridgetower.py -src/transformers/models/bros/convert_bros_to_pytorch.py 
-src/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py -src/transformers/models/camembert/modeling_camembert.py -src/transformers/models/camembert/modeling_tf_camembert.py -src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py -src/transformers/models/chinese_clip/configuration_chinese_clip.py -src/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py -src/transformers/models/chinese_clip/modeling_chinese_clip.py -src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py -src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py -src/transformers/models/clip/modeling_clip.py -src/transformers/models/clip/modeling_flax_clip.py -src/transformers/models/clip/modeling_tf_clip.py -src/transformers/models/clipseg/configuration_clipseg.py -src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py -src/transformers/models/codegen/modeling_codegen.py -src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py -src/transformers/models/convbert/modeling_convbert.py -src/transformers/models/convbert/modeling_tf_convbert.py -src/transformers/models/convnext/convert_convnext_to_pytorch.py -src/transformers/models/convnext/modeling_tf_convnext.py -src/transformers/models/convnextv2/configuration_convnextv2.py -src/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py -src/transformers/models/convnextv2/modeling_convnextv2.py -src/transformers/models/cpmant/configuration_cpmant.py -src/transformers/models/cpmant/modeling_cpmant.py -src/transformers/models/cpmant/tokenization_cpmant.py -src/transformers/models/ctrl/modeling_tf_ctrl.py -src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/cvt/modeling_tf_cvt.py -src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/data2vec/modeling_data2vec_text.py -src/transformers/models/data2vec/modeling_tf_data2vec_vision.py -src/transformers/models/deberta/modeling_tf_deberta.py -src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py -src/transformers/models/decision_transformer/modeling_decision_transformer.py -src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py -src/transformers/models/deformable_detr/load_custom.py -src/transformers/models/deit/convert_deit_timm_to_pytorch.py -src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py -src/transformers/models/deprecated/mctct/configuration_mctct.py -src/transformers/models/deprecated/mctct/feature_extraction_mctct.py -src/transformers/models/deprecated/mctct/modeling_mctct.py -src/transformers/models/deprecated/mctct/processing_mctct.py -src/transformers/models/deprecated/mmbt/configuration_mmbt.py -src/transformers/models/deprecated/mmbt/modeling_mmbt.py -src/transformers/models/deprecated/open_llama/configuration_open_llama.py -src/transformers/models/deprecated/open_llama/modeling_open_llama.py -src/transformers/models/deprecated/retribert/configuration_retribert.py -src/transformers/models/deprecated/retribert/modeling_retribert.py 
-src/transformers/models/deprecated/retribert/tokenization_retribert.py -src/transformers/models/deprecated/retribert/tokenization_retribert_fast.py -src/transformers/models/deprecated/tapex/tokenization_tapex.py -src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py -src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py -src/transformers/models/deprecated/van/configuration_van.py -src/transformers/models/deprecated/van/convert_van_to_pytorch.py -src/transformers/models/deprecated/van/modeling_van.py -src/transformers/models/deta/convert_deta_resnet_to_pytorch.py -src/transformers/models/deta/convert_deta_swin_to_pytorch.py -src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/detr/convert_detr_to_pytorch.py -src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/dinov2/configuration_dinov2.py -src/transformers/models/dinov2/convert_dinov2_to_hf.py -src/transformers/models/dinov2/modeling_dinov2.py -src/transformers/models/distilbert/modeling_distilbert.py -src/transformers/models/distilbert/modeling_flax_distilbert.py -src/transformers/models/distilbert/modeling_tf_distilbert.py -src/transformers/models/dit/convert_dit_unilm_to_pytorch.py -src/transformers/models/donut/configuration_donut_swin.py -src/transformers/models/donut/convert_donut_to_pytorch.py -src/transformers/models/donut/modeling_donut_swin.py -src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py -src/transformers/models/dpr/modeling_dpr.py -src/transformers/models/dpr/modeling_tf_dpr.py -src/transformers/models/dpt/configuration_dpt.py -src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py -src/transformers/models/dpt/convert_dpt_to_pytorch.py -src/transformers/models/efficientformer/configuration_efficientformer.py -src/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/efficientformer/modeling_efficientformer.py -src/transformers/models/efficientnet/configuration_efficientnet.py -src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py -src/transformers/models/efficientnet/modeling_efficientnet.py -src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py -src/transformers/models/electra/modeling_flax_electra.py -src/transformers/models/encodec/configuration_encodec.py -src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py -src/transformers/models/encoder_decoder/modeling_encoder_decoder.py -src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py -src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py -src/transformers/models/ernie/modeling_ernie.py -src/transformers/models/esm/configuration_esm.py -src/transformers/models/esm/convert_esm.py -src/transformers/models/esm/modeling_esm.py -src/transformers/models/esm/modeling_esmfold.py -src/transformers/models/esm/modeling_tf_esm.py -src/transformers/models/esm/openfold_utils/chunk_utils.py -src/transformers/models/esm/openfold_utils/data_transforms.py -src/transformers/models/esm/openfold_utils/feats.py -src/transformers/models/esm/openfold_utils/loss.py -src/transformers/models/esm/openfold_utils/protein.py 
-src/transformers/models/esm/openfold_utils/residue_constants.py -src/transformers/models/esm/openfold_utils/rigid_utils.py -src/transformers/models/esm/openfold_utils/tensor_utils.py -src/transformers/models/falcon/configuration_falcon.py -src/transformers/models/falcon/modeling_falcon.py -src/transformers/models/flaubert/configuration_flaubert.py -src/transformers/models/flaubert/modeling_flaubert.py -src/transformers/models/flaubert/modeling_tf_flaubert.py -src/transformers/models/flava/convert_dalle_to_flava_codebook.py -src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py -src/transformers/models/flava/modeling_flava.py -src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py -src/transformers/models/fnet/modeling_fnet.py -src/transformers/models/focalnet/configuration_focalnet.py -src/transformers/models/focalnet/convert_focalnet_to_hf_format.py -src/transformers/models/focalnet/modeling_focalnet.py -src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/fsmt/modeling_fsmt.py -src/transformers/models/funnel/configuration_funnel.py -src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py -src/transformers/models/funnel/modeling_funnel.py -src/transformers/models/funnel/modeling_tf_funnel.py -src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py -src/transformers/models/git/configuration_git.py -src/transformers/models/git/convert_git_to_pytorch.py -src/transformers/models/glpn/configuration_glpn.py -src/transformers/models/glpn/convert_glpn_to_pytorch.py -src/transformers/models/gpt2/CONVERSION.md -src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py -src/transformers/models/gpt2/modeling_flax_gpt2.py -src/transformers/models/gpt2/modeling_tf_gpt2.py -src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py -src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py -src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py -src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py -src/transformers/models/gpt_neo/modeling_gpt_neo.py -src/transformers/models/gpt_neox/modeling_gpt_neox.py -src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py -src/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py -src/transformers/models/gptj/configuration_gptj.py -src/transformers/models/gptj/modeling_flax_gptj.py -src/transformers/models/gptj/modeling_tf_gptj.py -src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py -src/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py -src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py -src/transformers/models/graphormer/collating_graphormer.py -src/transformers/models/graphormer/configuration_graphormer.py -src/transformers/models/graphormer/modeling_graphormer.py -src/transformers/models/groupvit/configuration_groupvit.py -src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py -src/transformers/models/hubert/configuration_hubert.py -src/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/hubert/modeling_tf_hubert.py -src/transformers/models/ibert/configuration_ibert.py -src/transformers/models/ibert/modeling_ibert.py 
-src/transformers/models/ibert/quant_modules.py -src/transformers/models/idefics/configuration_idefics.py -src/transformers/models/idefics/image_processing_idefics.py -src/transformers/models/idefics/modeling_idefics.py -src/transformers/models/idefics/perceiver.py -src/transformers/models/idefics/processing_idefics.py -src/transformers/models/idefics/vision.py -src/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py -src/transformers/models/informer/configuration_informer.py -src/transformers/models/informer/modeling_informer.py -src/transformers/models/instructblip/configuration_instructblip.py -src/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py -src/transformers/models/instructblip/modeling_instructblip.py -src/transformers/models/instructblip/processing_instructblip.py -src/transformers/models/jukebox/configuration_jukebox.py -src/transformers/models/jukebox/convert_jukebox.py -src/transformers/models/jukebox/modeling_jukebox.py -src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/led/configuration_led.py -src/transformers/models/led/modeling_led.py -src/transformers/models/led/modeling_tf_led.py -src/transformers/models/levit/convert_levit_timm_to_pytorch.py -src/transformers/models/levit/modeling_levit.py -src/transformers/models/lilt/configuration_lilt.py -src/transformers/models/llama/configuration_llama.py -src/transformers/models/llama/convert_llama_weights_to_hf.py -src/transformers/models/llama/modeling_llama.py -src/transformers/models/longformer/configuration_longformer.py -src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py -src/transformers/models/longt5/configuration_longt5.py -src/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py -src/transformers/models/longt5/modeling_flax_longt5.py -src/transformers/models/luke/configuration_luke.py -src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/luke/modeling_luke.py -src/transformers/models/lxmert/configuration_lxmert.py -src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/lxmert/modeling_lxmert.py -src/transformers/models/lxmert/modeling_tf_lxmert.py -src/transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py -src/transformers/models/m2m_100/modeling_m2m_100.py -src/transformers/models/marian/configuration_marian.py -src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py -src/transformers/models/marian/convert_marian_to_pytorch.py -src/transformers/models/marian/modeling_flax_marian.py -src/transformers/models/marian/modeling_tf_marian.py -src/transformers/models/markuplm/configuration_markuplm.py -src/transformers/models/markuplm/feature_extraction_markuplm.py -src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/maskformer/configuration_maskformer_swin.py -src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/maskformer/convert_maskformer_resnet_to_pytorch.py -src/transformers/models/maskformer/convert_maskformer_swin_to_pytorch.py -src/transformers/models/maskformer/modeling_maskformer_swin.py -src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py -src/transformers/models/mbart/modeling_flax_mbart.py -src/transformers/models/mega/configuration_mega.py 
-src/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/mega/modeling_mega.py -src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py -src/transformers/models/megatron_bert/modeling_megatron_bert.py -src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py -src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py -src/transformers/models/mgp_str/configuration_mgp_str.py -src/transformers/models/mgp_str/modeling_mgp_str.py -src/transformers/models/mistral/configuration_mistral.py -src/transformers/models/mistral/modeling_mistral.py -src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py -src/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py -src/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/mobilevit/configuration_mobilevit.py -src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py -src/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py -src/transformers/models/mpnet/configuration_mpnet.py -src/transformers/models/mpnet/modeling_mpnet.py -src/transformers/models/mpnet/modeling_tf_mpnet.py -src/transformers/models/mpt/configuration_mpt.py -src/transformers/models/mpt/modeling_mpt.py -src/transformers/models/mra/configuration_mra.py -src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py -src/transformers/models/mra/modeling_mra.py -src/transformers/models/mt5/configuration_mt5.py -src/transformers/models/mt5/modeling_flax_mt5.py -src/transformers/models/mt5/modeling_mt5.py -src/transformers/models/mt5/modeling_tf_mt5.py -src/transformers/models/musicgen/convert_musicgen_transformers.py -src/transformers/models/mvp/modeling_mvp.py -src/transformers/models/nezha/modeling_nezha.py -src/transformers/models/nllb_moe/configuration_nllb_moe.py -src/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py -src/transformers/models/nllb_moe/modeling_nllb_moe.py -src/transformers/models/nougat/convert_nougat_to_hf.py -src/transformers/models/nystromformer/configuration_nystromformer.py -src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/nystromformer/modeling_nystromformer.py -src/transformers/models/oneformer/convert_to_hf_oneformer.py -src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py -src/transformers/models/openai/modeling_openai.py -src/transformers/models/openai/modeling_tf_openai.py -src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/opt/modeling_flax_opt.py -src/transformers/models/owlvit/configuration_owlvit.py -src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py -src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py -src/transformers/models/pegasus/modeling_flax_pegasus.py -src/transformers/models/pegasus/modeling_tf_pegasus.py -src/transformers/models/pegasus_x/modeling_pegasus_x.py -src/transformers/models/perceiver/configuration_perceiver.py -src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py -src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py 
-src/transformers/models/persimmon/modeling_persimmon.py -src/transformers/models/pix2struct/configuration_pix2struct.py -src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py -src/transformers/models/pix2struct/image_processing_pix2struct.py -src/transformers/models/pix2struct/processing_pix2struct.py -src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py -src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py -src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py -src/transformers/models/pop2piano/feature_extraction_pop2piano.py -src/transformers/models/pop2piano/processing_pop2piano.py -src/transformers/models/pop2piano/tokenization_pop2piano.py -src/transformers/models/prophetnet/configuration_prophetnet.py -src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/prophetnet/modeling_prophetnet.py -src/transformers/models/pvt/configuration_pvt.py -src/transformers/models/pvt/convert_pvt_to_pytorch.py -src/transformers/models/pvt/image_processing_pvt.py -src/transformers/models/pvt/modeling_pvt.py -src/transformers/models/qdqbert/configuration_qdqbert.py -src/transformers/models/qdqbert/modeling_qdqbert.py -src/transformers/models/rag/configuration_rag.py -src/transformers/models/rag/modeling_rag.py -src/transformers/models/rag/modeling_tf_rag.py -src/transformers/models/rag/retrieval_rag.py -src/transformers/models/realm/modeling_realm.py -src/transformers/models/realm/retrieval_realm.py -src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py -src/transformers/models/regnet/configuration_regnet.py -src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py -src/transformers/models/regnet/convert_regnet_to_pytorch.py -src/transformers/models/regnet/modeling_flax_regnet.py -src/transformers/models/rembert/configuration_rembert.py -src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py -src/transformers/models/rembert/modeling_rembert.py -src/transformers/models/rembert/modeling_tf_rembert.py -src/transformers/models/resnet/convert_resnet_to_pytorch.py -src/transformers/models/resnet/modeling_flax_resnet.py -src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/roberta/modeling_flax_roberta.py -src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py -src/transformers/models/roc_bert/configuration_roc_bert.py -src/transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py -src/transformers/models/roformer/modeling_flax_roformer.py -src/transformers/models/roformer/modeling_roformer.py -src/transformers/models/roformer/modeling_tf_roformer.py -src/transformers/models/rwkv/configuration_rwkv.py -src/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py -src/transformers/models/rwkv/modeling_rwkv.py -src/transformers/models/sam/configuration_sam.py -src/transformers/models/sam/convert_sam_original_to_hf_format.py -src/transformers/models/sam/image_processing_sam.py -src/transformers/models/sam/modeling_sam.py -src/transformers/models/sam/modeling_tf_sam.py -src/transformers/models/sam/processing_sam.py -src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py -src/transformers/models/segformer/configuration_segformer.py 
-src/transformers/models/segformer/convert_segformer_original_to_pytorch.py -src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/sew_d/convert_sew_d_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py -src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py -src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py -src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py -src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py -src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py -src/transformers/models/speecht5/configuration_speecht5.py -src/transformers/models/speecht5/convert_hifigan.py -src/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/speecht5/number_normalizer.py -src/transformers/models/splinter/configuration_splinter.py -src/transformers/models/splinter/modeling_splinter.py -src/transformers/models/squeezebert/modeling_squeezebert.py -src/transformers/models/swiftformer/configuration_swiftformer.py -src/transformers/models/swiftformer/convert_swiftformer_original_to_hf.py -src/transformers/models/swiftformer/modeling_swiftformer.py -src/transformers/models/swin/convert_swin_simmim_to_pytorch.py -src/transformers/models/swin/convert_swin_timm_to_pytorch.py -src/transformers/models/swin/modeling_tf_swin.py -src/transformers/models/swin2sr/configuration_swin2sr.py -src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py -src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py -src/transformers/models/swinv2/modeling_swinv2.py -src/transformers/models/switch_transformers/configuration_switch_transformers.py -src/transformers/models/switch_transformers/convert_big_switch.py -src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py -src/transformers/models/switch_transformers/modeling_switch_transformers.py -src/transformers/models/t5/configuration_t5.py -src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py -src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py -src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py -src/transformers/models/t5/modeling_flax_t5.py -src/transformers/models/t5/modeling_t5.py -src/transformers/models/t5/modeling_tf_t5.py -src/transformers/models/table_transformer/configuration_table_transformer.py -src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/tapas/configuration_tapas.py -src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py -src/transformers/models/tapas/modeling_tapas.py -src/transformers/models/tapas/modeling_tf_tapas.py -src/transformers/models/timesformer/convert_timesformer_to_pytorch.py -src/transformers/models/timm_backbone/configuration_timm_backbone.py -src/transformers/models/timm_backbone/modeling_timm_backbone.py -src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py -src/transformers/models/transfo_xl/modeling_transfo_xl.py -src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py 
-src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py -src/transformers/models/tvlt/configuration_tvlt.py -src/transformers/models/tvlt/modeling_tvlt.py -src/transformers/models/umt5/configuration_umt5.py -src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py -src/transformers/models/umt5/modeling_umt5.py -src/transformers/models/unispeech/convert_unispeech_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/unispeech_sat/configuration_unispeech_sat.py -src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/unispeech_sat/convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/upernet/configuration_upernet.py -src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py -src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py -src/transformers/models/videomae/configuration_videomae.py -src/transformers/models/videomae/convert_videomae_to_pytorch.py -src/transformers/models/vilt/configuration_vilt.py -src/transformers/models/vilt/convert_vilt_original_to_pytorch.py -src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py -src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py -src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py -src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py -src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/visual_bert/modeling_visual_bert.py -src/transformers/models/vit/convert_dino_to_pytorch.py -src/transformers/models/vit/convert_vit_timm_to_pytorch.py -src/transformers/models/vit/modeling_flax_vit.py -src/transformers/models/vit_hybrid/configuration_vit_hybrid.py -src/transformers/models/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py -src/transformers/models/vit_hybrid/modeling_vit_hybrid.py -src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py -src/transformers/models/vit_mae/modeling_tf_vit_mae.py -src/transformers/models/vit_msn/configuration_vit_msn.py -src/transformers/models/vit_msn/convert_msn_to_pytorch.py -src/transformers/models/vivit/configuration_vivit.py -src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py -src/transformers/models/vivit/image_processing_vivit.py -src/transformers/models/vivit/modeling_vivit.py -src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py -src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py -src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/wavlm/convert_wavlm_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/wavlm/convert_wavlm_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/whisper/convert_openai_to_hf.py -src/transformers/models/whisper/english_normalizer.py -src/transformers/models/whisper/modeling_flax_whisper.py -src/transformers/models/x_clip/configuration_x_clip.py -src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py -src/transformers/models/xglm/configuration_xglm.py -src/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py -src/transformers/models/xglm/modeling_flax_xglm.py 
-src/transformers/models/xglm/modeling_tf_xglm.py -src/transformers/models/xglm/modeling_xglm.py -src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/xlm/modeling_tf_xlm.py -src/transformers/models/xlm/modeling_xlm.py -src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py -src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py -src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py -src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py -src/transformers/models/xlm_roberta/modeling_xlm_roberta.py -src/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py -src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py -src/transformers/models/xlnet/modeling_tf_xlnet.py -src/transformers/models/xlnet/modeling_xlnet.py -src/transformers/models/xmod/convert_xmod_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/yolos/convert_yolos_to_pytorch.py -src/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py -src/transformers/models/yoso/modeling_yoso.py -src/transformers/onnx/__main__.py -src/transformers/onnx/config.py -src/transformers/onnx/convert.py -src/transformers/onnx/features.py -src/transformers/onnx/utils.py -src/transformers/optimization.py -src/transformers/optimization_tf.py -src/transformers/pipelines/audio_classification.py -src/transformers/pipelines/audio_utils.py -src/transformers/pipelines/automatic_speech_recognition.py -src/transformers/pipelines/base.py -src/transformers/pipelines/conversational.py -src/transformers/pipelines/depth_estimation.py -src/transformers/pipelines/document_question_answering.py -src/transformers/pipelines/feature_extraction.py -src/transformers/pipelines/fill_mask.py -src/transformers/pipelines/image_classification.py -src/transformers/pipelines/image_segmentation.py -src/transformers/pipelines/image_to_text.py -src/transformers/pipelines/mask_generation.py -src/transformers/pipelines/object_detection.py -src/transformers/pipelines/pt_utils.py -src/transformers/pipelines/question_answering.py -src/transformers/pipelines/table_question_answering.py -src/transformers/pipelines/text_classification.py -src/transformers/pipelines/token_classification.py -src/transformers/pipelines/video_classification.py -src/transformers/pipelines/visual_question_answering.py -src/transformers/pipelines/zero_shot_audio_classification.py -src/transformers/pipelines/zero_shot_classification.py -src/transformers/pipelines/zero_shot_image_classification.py -src/transformers/pipelines/zero_shot_object_detection.py -src/transformers/processing_utils.py -src/transformers/pytorch_utils.py -src/transformers/sagemaker/trainer_sm.py -src/transformers/sagemaker/training_args_sm.py -src/transformers/testing_utils.py -src/transformers/tf_utils.py -src/transformers/time_series_utils.py -src/transformers/tokenization_utils.py -src/transformers/tokenization_utils_base.py -src/transformers/tokenization_utils_fast.py -src/transformers/tools/agent_types.py -src/transformers/tools/agents.py -src/transformers/tools/base.py -src/transformers/tools/document_question_answering.py -src/transformers/tools/evaluate_agent.py -src/transformers/tools/image_captioning.py -src/transformers/tools/image_question_answering.py -src/transformers/tools/image_segmentation.py -src/transformers/tools/prompts.py -src/transformers/tools/python_interpreter.py 
-src/transformers/tools/speech_to_text.py -src/transformers/tools/text_classification.py -src/transformers/tools/text_question_answering.py -src/transformers/tools/text_summarization.py -src/transformers/tools/text_to_speech.py -src/transformers/tools/translation.py -src/transformers/trainer.py -src/transformers/trainer_callback.py -src/transformers/trainer_pt_utils.py -src/transformers/trainer_seq2seq.py -src/transformers/trainer_tf.py -src/transformers/trainer_utils.py -src/transformers/training_args.py -src/transformers/training_args_seq2seq.py -src/transformers/training_args_tf.py -src/transformers/utils/backbone_utils.py -src/transformers/utils/bitsandbytes.py -src/transformers/utils/constants.py -src/transformers/utils/doc.py -src/transformers/utils/dummy_detectron2_objects.py -src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py -src/transformers/utils/dummy_flax_objects.py -src/transformers/utils/dummy_keras_nlp_objects.py -src/transformers/utils/dummy_music_objects.py -src/transformers/utils/dummy_pt_objects.py -src/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py -src/transformers/utils/dummy_sentencepiece_objects.py -src/transformers/utils/dummy_speech_objects.py -src/transformers/utils/dummy_tensorflow_text_objects.py -src/transformers/utils/dummy_tf_objects.py -src/transformers/utils/dummy_tokenizers_objects.py -src/transformers/utils/dummy_vision_objects.py -src/transformers/utils/fx.py -src/transformers/utils/generic.py -src/transformers/utils/hp_naming.py -src/transformers/utils/hub.py -src/transformers/utils/import_utils.py -src/transformers/utils/logging.py -src/transformers/utils/model_parallel_utils.py -src/transformers/utils/notebook.py -src/transformers/utils/peft_utils.py -src/transformers/utils/quantization_config.py -src/transformers/utils/sentencepiece_model_pb2.py -src/transformers/utils/sentencepiece_model_pb2_new.py -src/transformers/utils/versions.py +docs/source/en/_config.py +docs/source/en/accelerate.md +docs/source/en/add_new_model.md +docs/source/en/add_new_pipeline.md +docs/source/en/add_tensorflow_model.md +docs/source/en/attention.md +docs/source/en/benchmarks.md +docs/source/en/bertology.md +docs/source/en/big_models.md +docs/source/en/community.md +docs/source/en/contributing.md +docs/source/en/create_a_model.md +docs/source/en/custom_models.md +docs/source/en/custom_tools.md +docs/source/en/debugging.md +docs/source/en/fast_tokenizers.md +docs/source/en/glossary.md +docs/source/en/hpo_train.md +docs/source/en/index.md +docs/source/en/installation.md +docs/source/en/internal/audio_utils.md +docs/source/en/internal/file_utils.md +docs/source/en/internal/image_processing_utils.md +docs/source/en/internal/modeling_utils.md +docs/source/en/internal/pipelines_utils.md +docs/source/en/internal/time_series_utils.md +docs/source/en/internal/tokenization_utils.md +docs/source/en/internal/trainer_utils.md +docs/source/en/llm_tutorial.md +docs/source/en/main_classes/agent.md +docs/source/en/main_classes/callback.md +docs/source/en/main_classes/configuration.md +docs/source/en/main_classes/data_collator.md +docs/source/en/main_classes/deepspeed.md +docs/source/en/main_classes/feature_extractor.md +docs/source/en/main_classes/image_processor.md +docs/source/en/main_classes/keras_callbacks.md +docs/source/en/main_classes/logging.md +docs/source/en/main_classes/model.md +docs/source/en/main_classes/onnx.md +docs/source/en/main_classes/optimizer_schedules.md +docs/source/en/main_classes/output.md 
+docs/source/en/main_classes/pipelines.md +docs/source/en/main_classes/processors.md +docs/source/en/main_classes/quantization.md +docs/source/en/main_classes/tokenizer.md +docs/source/en/main_classes/trainer.md +docs/source/en/model_doc/albert.md +docs/source/en/model_doc/align.md +docs/source/en/model_doc/altclip.md +docs/source/en/model_doc/audio-spectrogram-transformer.md +docs/source/en/model_doc/auto.md +docs/source/en/model_doc/autoformer.md +docs/source/en/model_doc/bark.md +docs/source/en/model_doc/bart.md +docs/source/en/model_doc/barthez.md +docs/source/en/model_doc/bartpho.md +docs/source/en/model_doc/beit.md +docs/source/en/model_doc/bert-generation.md +docs/source/en/model_doc/bert-japanese.md +docs/source/en/model_doc/bert.md +docs/source/en/model_doc/bertweet.md +docs/source/en/model_doc/big_bird.md +docs/source/en/model_doc/bigbird_pegasus.md +docs/source/en/model_doc/biogpt.md +docs/source/en/model_doc/bit.md +docs/source/en/model_doc/blenderbot-small.md +docs/source/en/model_doc/blenderbot.md +docs/source/en/model_doc/blip-2.md +docs/source/en/model_doc/blip.md +docs/source/en/model_doc/bloom.md +docs/source/en/model_doc/bort.md +docs/source/en/model_doc/bridgetower.md +docs/source/en/model_doc/camembert.md +docs/source/en/model_doc/canine.md +docs/source/en/model_doc/chinese_clip.md +docs/source/en/model_doc/clap.md +docs/source/en/model_doc/clip.md +docs/source/en/model_doc/clipseg.md +docs/source/en/model_doc/codegen.md +docs/source/en/model_doc/conditional_detr.md +docs/source/en/model_doc/convbert.md +docs/source/en/model_doc/convnext.md +docs/source/en/model_doc/convnextv2.md +docs/source/en/model_doc/cpm.md +docs/source/en/model_doc/cpmant.md +docs/source/en/model_doc/ctrl.md +docs/source/en/model_doc/cvt.md +docs/source/en/model_doc/data2vec.md +docs/source/en/model_doc/deberta-v2.md +docs/source/en/model_doc/deberta.md +docs/source/en/model_doc/decision_transformer.md +docs/source/en/model_doc/deformable_detr.md +docs/source/en/model_doc/deit.md +docs/source/en/model_doc/deplot.md +docs/source/en/model_doc/deta.md +docs/source/en/model_doc/detr.md +docs/source/en/model_doc/dialogpt.md +docs/source/en/model_doc/dinat.md +docs/source/en/model_doc/dinov2.md +docs/source/en/model_doc/distilbert.md +docs/source/en/model_doc/dit.md +docs/source/en/model_doc/dpr.md +docs/source/en/model_doc/dpt.md +docs/source/en/model_doc/efficientformer.md +docs/source/en/model_doc/efficientnet.md +docs/source/en/model_doc/electra.md +docs/source/en/model_doc/encodec.md +docs/source/en/model_doc/ernie.md +docs/source/en/model_doc/ernie_m.md +docs/source/en/model_doc/esm.md +docs/source/en/model_doc/flan-t5.md +docs/source/en/model_doc/flan-ul2.md +docs/source/en/model_doc/flaubert.md +docs/source/en/model_doc/flava.md +docs/source/en/model_doc/fnet.md +docs/source/en/model_doc/focalnet.md +docs/source/en/model_doc/fsmt.md +docs/source/en/model_doc/funnel.md +docs/source/en/model_doc/git.md +docs/source/en/model_doc/glpn.md +docs/source/en/model_doc/gpt-sw3.md +docs/source/en/model_doc/gpt2.md +docs/source/en/model_doc/gpt_bigcode.md +docs/source/en/model_doc/gpt_neo.md +docs/source/en/model_doc/gpt_neox.md +docs/source/en/model_doc/gpt_neox_japanese.md +docs/source/en/model_doc/gptj.md +docs/source/en/model_doc/gptsan-japanese.md +docs/source/en/model_doc/graphormer.md +docs/source/en/model_doc/groupvit.md +docs/source/en/model_doc/herbert.md +docs/source/en/model_doc/hubert.md +docs/source/en/model_doc/ibert.md +docs/source/en/model_doc/idefics.md 
+docs/source/en/model_doc/imagegpt.md +docs/source/en/model_doc/informer.md +docs/source/en/model_doc/instructblip.md +docs/source/en/model_doc/jukebox.md +docs/source/en/model_doc/layoutlm.md +docs/source/en/model_doc/layoutlmv2.md +docs/source/en/model_doc/layoutlmv3.md +docs/source/en/model_doc/layoutxlm.md +docs/source/en/model_doc/led.md +docs/source/en/model_doc/levit.md +docs/source/en/model_doc/lilt.md +docs/source/en/model_doc/llama.md +docs/source/en/model_doc/llama2.md +docs/source/en/model_doc/longformer.md +docs/source/en/model_doc/longt5.md +docs/source/en/model_doc/luke.md +docs/source/en/model_doc/lxmert.md +docs/source/en/model_doc/m2m_100.md +docs/source/en/model_doc/marian.md +docs/source/en/model_doc/mask2former.md +docs/source/en/model_doc/maskformer.md +docs/source/en/model_doc/matcha.md +docs/source/en/model_doc/mbart.md +docs/source/en/model_doc/mctct.md +docs/source/en/model_doc/mega.md +docs/source/en/model_doc/megatron-bert.md +docs/source/en/model_doc/megatron_gpt2.md +docs/source/en/model_doc/mgp-str.md +docs/source/en/model_doc/mistral.md +docs/source/en/model_doc/mluke.md +docs/source/en/model_doc/mms.md +docs/source/en/model_doc/mobilebert.md +docs/source/en/model_doc/mobilenet_v1.md +docs/source/en/model_doc/mobilenet_v2.md +docs/source/en/model_doc/mobilevit.md +docs/source/en/model_doc/mobilevitv2.md +docs/source/en/model_doc/mpnet.md +docs/source/en/model_doc/mpt.md +docs/source/en/model_doc/mra.md +docs/source/en/model_doc/mt5.md +docs/source/en/model_doc/musicgen.md +docs/source/en/model_doc/mvp.md +docs/source/en/model_doc/nat.md +docs/source/en/model_doc/nezha.md +docs/source/en/model_doc/nllb-moe.md +docs/source/en/model_doc/nllb.md +docs/source/en/model_doc/nystromformer.md +docs/source/en/model_doc/oneformer.md +docs/source/en/model_doc/open-llama.md +docs/source/en/model_doc/openai-gpt.md +docs/source/en/model_doc/opt.md +docs/source/en/model_doc/owlvit.md +docs/source/en/model_doc/pegasus.md +docs/source/en/model_doc/pegasus_x.md +docs/source/en/model_doc/perceiver.md +docs/source/en/model_doc/phobert.md +docs/source/en/model_doc/pix2struct.md +docs/source/en/model_doc/plbart.md +docs/source/en/model_doc/poolformer.md +docs/source/en/model_doc/pop2piano.md +docs/source/en/model_doc/prophetnet.md +docs/source/en/model_doc/pvt.md +docs/source/en/model_doc/qdqbert.md +docs/source/en/model_doc/rag.md +docs/source/en/model_doc/realm.md +docs/source/en/model_doc/reformer.md +docs/source/en/model_doc/regnet.md +docs/source/en/model_doc/rembert.md +docs/source/en/model_doc/resnet.md +docs/source/en/model_doc/retribert.md +docs/source/en/model_doc/roberta-prelayernorm.md +docs/source/en/model_doc/roberta.md +docs/source/en/model_doc/roc_bert.md +docs/source/en/model_doc/roformer.md +docs/source/en/model_doc/rwkv.md +docs/source/en/model_doc/sam.md +docs/source/en/model_doc/segformer.md +docs/source/en/model_doc/sew-d.md +docs/source/en/model_doc/sew.md +docs/source/en/model_doc/speech-encoder-decoder.md +docs/source/en/model_doc/speech_to_text_2.md +docs/source/en/model_doc/speecht5.md +docs/source/en/model_doc/splinter.md +docs/source/en/model_doc/squeezebert.md +docs/source/en/model_doc/swiftformer.md +docs/source/en/model_doc/swin.md +docs/source/en/model_doc/swin2sr.md +docs/source/en/model_doc/swinv2.md +docs/source/en/model_doc/table-transformer.md +docs/source/en/model_doc/tapas.md +docs/source/en/model_doc/time_series_transformer.md +docs/source/en/model_doc/timesformer.md +docs/source/en/model_doc/trajectory_transformer.md 
+docs/source/en/model_doc/transfo-xl.md +docs/source/en/model_doc/trocr.md +docs/source/en/model_doc/tvlt.md +docs/source/en/model_doc/ul2.md +docs/source/en/model_doc/umt5.md +docs/source/en/model_doc/unispeech-sat.md +docs/source/en/model_doc/unispeech.md +docs/source/en/model_doc/upernet.md +docs/source/en/model_doc/van.md +docs/source/en/model_doc/videomae.md +docs/source/en/model_doc/vilt.md +docs/source/en/model_doc/vision-encoder-decoder.md +docs/source/en/model_doc/vision-text-dual-encoder.md +docs/source/en/model_doc/visual_bert.md +docs/source/en/model_doc/vit.md +docs/source/en/model_doc/vit_hybrid.md +docs/source/en/model_doc/vit_mae.md +docs/source/en/model_doc/vit_msn.md +docs/source/en/model_doc/vivit.md +docs/source/en/model_doc/wav2vec2-conformer.md +docs/source/en/model_doc/wav2vec2.md +docs/source/en/model_doc/wav2vec2_phoneme.md +docs/source/en/model_doc/wavlm.md +docs/source/en/model_doc/whisper.md +docs/source/en/model_doc/xclip.md +docs/source/en/model_doc/xglm.md +docs/source/en/model_doc/xlm-prophetnet.md +docs/source/en/model_doc/xlm-roberta-xl.md +docs/source/en/model_doc/xlm-roberta.md +docs/source/en/model_doc/xlm-v.md +docs/source/en/model_doc/xlm.md +docs/source/en/model_doc/xlnet.md +docs/source/en/model_doc/xls_r.md +docs/source/en/model_doc/xlsr_wav2vec2.md +docs/source/en/model_doc/xmod.md +docs/source/en/model_doc/yolos.md +docs/source/en/model_doc/yoso.md +docs/source/en/model_memory_anatomy.md +docs/source/en/model_sharing.md +docs/source/en/model_summary.md +docs/source/en/multilingual.md +docs/source/en/notebooks.md +docs/source/en/pad_truncation.md +docs/source/en/peft.md +docs/source/en/perf_hardware.md +docs/source/en/perf_infer_cpu.md +docs/source/en/perf_infer_gpu_one.md +docs/source/en/perf_torch_compile.md +docs/source/en/perf_train_cpu.md +docs/source/en/perf_train_cpu_many.md +docs/source/en/perf_train_gpu_many.md +docs/source/en/perf_train_gpu_one.md +docs/source/en/perf_train_special.md +docs/source/en/perf_train_tpu.md +docs/source/en/perf_train_tpu_tf.md +docs/source/en/performance.md +docs/source/en/perplexity.md +docs/source/en/philosophy.md +docs/source/en/pipeline_webserver.md +docs/source/en/pr_checks.md +docs/source/en/preprocessing.md +docs/source/en/run_scripts.md +docs/source/en/sagemaker.md +docs/source/en/serialization.md +docs/source/en/tasks/asr.md +docs/source/en/tasks/audio_classification.md +docs/source/en/tasks/document_question_answering.md +docs/source/en/tasks/idefics.md # causes other tests to fail +docs/source/en/tasks/image_captioning.md +docs/source/en/tasks/image_classification.md +docs/source/en/tasks/language_modeling.md +docs/source/en/tasks/masked_language_modeling.md +docs/source/en/tasks/monocular_depth_estimation.md +docs/source/en/tasks/multiple_choice.md +docs/source/en/tasks/object_detection.md +docs/source/en/tasks/question_answering.md +docs/source/en/tasks/semantic_segmentation.md +docs/source/en/tasks/sequence_classification.md +docs/source/en/tasks/summarization.md +docs/source/en/tasks/text-to-speech.md +docs/source/en/tasks/token_classification.md +docs/source/en/tasks/translation.md +docs/source/en/tasks/video_classification.md +docs/source/en/tasks/visual_question_answering.md +docs/source/en/tasks/zero_shot_image_classification.md +docs/source/en/tasks/zero_shot_object_detection.md +docs/source/en/tasks_explained.md +docs/source/en/tf_xla.md +docs/source/en/tflite.md +docs/source/en/tokenizer_summary.md +docs/source/en/torchscript.md +docs/source/en/training.md 
+docs/source/en/transformers_agents.md +docs/source/en/troubleshooting.md +src/transformers/activations.py +src/transformers/activations_tf.py +src/transformers/audio_utils.py +src/transformers/benchmark/benchmark.py +src/transformers/benchmark/benchmark_args.py +src/transformers/benchmark/benchmark_args_tf.py +src/transformers/benchmark/benchmark_args_utils.py +src/transformers/benchmark/benchmark_tf.py +src/transformers/benchmark/benchmark_utils.py +src/transformers/commands/add_new_model.py +src/transformers/commands/add_new_model_like.py +src/transformers/commands/convert.py +src/transformers/commands/download.py +src/transformers/commands/env.py +src/transformers/commands/lfs.py +src/transformers/commands/pt_to_tf.py +src/transformers/commands/run.py +src/transformers/commands/serving.py +src/transformers/commands/train.py +src/transformers/commands/transformers_cli.py +src/transformers/commands/user.py +src/transformers/configuration_utils.py +src/transformers/convert_graph_to_onnx.py +src/transformers/convert_pytorch_checkpoint_to_tf2.py +src/transformers/convert_slow_tokenizer.py +src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py +src/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py +src/transformers/data/data_collator.py +src/transformers/data/datasets/glue.py +src/transformers/data/datasets/language_modeling.py +src/transformers/data/datasets/squad.py +src/transformers/data/metrics/squad_metrics.py +src/transformers/data/processors/glue.py +src/transformers/data/processors/squad.py +src/transformers/data/processors/utils.py +src/transformers/data/processors/xnli.py +src/transformers/debug_utils.py +src/transformers/deepspeed.py +src/transformers/dependency_versions_check.py +src/transformers/dependency_versions_table.py +src/transformers/dynamic_module_utils.py +src/transformers/feature_extraction_sequence_utils.py +src/transformers/feature_extraction_utils.py +src/transformers/file_utils.py +src/transformers/hf_argparser.py +src/transformers/hyperparameter_search.py +src/transformers/image_processing_utils.py +src/transformers/image_transforms.py +src/transformers/image_utils.py +src/transformers/integrations/bitsandbytes.py +src/transformers/integrations/deepspeed.py +src/transformers/integrations/integration_utils.py +src/transformers/integrations/peft.py +src/transformers/keras_callbacks.py +src/transformers/modelcard.py +src/transformers/modeling_flax_outputs.py +src/transformers/modeling_flax_pytorch_utils.py +src/transformers/modeling_flax_utils.py +src/transformers/modeling_outputs.py +src/transformers/modeling_tf_outputs.py +src/transformers/modeling_tf_pytorch_utils.py +src/transformers/modeling_tf_utils.py +src/transformers/modeling_utils.py +src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/albert/modeling_flax_albert.py +src/transformers/models/align/configuration_align.py +src/transformers/models/align/convert_align_tf_to_hf.py +src/transformers/models/align/modeling_align.py +src/transformers/models/altclip/configuration_altclip.py +src/transformers/models/altclip/modeling_altclip.py +src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +src/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py +src/transformers/models/auto/auto_factory.py +src/transformers/models/auto/configuration_auto.py +src/transformers/models/auto/modeling_auto.py 
+src/transformers/models/auto/modeling_flax_auto.py +src/transformers/models/auto/modeling_tf_auto.py +src/transformers/models/autoformer/configuration_autoformer.py +src/transformers/models/autoformer/modeling_autoformer.py +src/transformers/models/bark/convert_suno_to_hf.py +src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/bart/modeling_flax_bart.py +src/transformers/models/bart/modeling_tf_bart.py +src/transformers/models/beit/convert_beit_unilm_to_pytorch.py +src/transformers/models/beit/modeling_flax_beit.py +src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py +src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py +src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py +src/transformers/models/bert/modeling_flax_bert.py +src/transformers/models/bert_generation/modeling_bert_generation.py +src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py +src/transformers/models/big_bird/modeling_flax_big_bird.py +src/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py +src/transformers/models/biogpt/configuration_biogpt.py +src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/biogpt/modeling_biogpt.py +src/transformers/models/bit/configuration_bit.py +src/transformers/models/bit/convert_bit_to_pytorch.py +src/transformers/models/bit/modeling_bit.py +src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/blenderbot/modeling_flax_blenderbot.py +src/transformers/models/blenderbot/modeling_tf_blenderbot.py +src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py +src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +src/transformers/models/blip/configuration_blip.py +src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py +src/transformers/models/blip/modeling_blip_text.py +src/transformers/models/blip/modeling_tf_blip_text.py +src/transformers/models/blip_2/configuration_blip_2.py +src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py +src/transformers/models/blip_2/modeling_blip_2.py # causes other tests to fail +src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py +src/transformers/models/bloom/modeling_bloom.py +src/transformers/models/bloom/modeling_flax_bloom.py +src/transformers/models/bridgetower/configuration_bridgetower.py +src/transformers/models/bridgetower/modeling_bridgetower.py +src/transformers/models/bros/convert_bros_to_pytorch.py +src/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py +src/transformers/models/camembert/modeling_camembert.py +src/transformers/models/camembert/modeling_tf_camembert.py +src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py +src/transformers/models/chinese_clip/configuration_chinese_clip.py +src/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py +src/transformers/models/chinese_clip/modeling_chinese_clip.py +src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py +src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py +src/transformers/models/clip/modeling_clip.py +src/transformers/models/clip/modeling_flax_clip.py 
+src/transformers/models/clip/modeling_tf_clip.py +src/transformers/models/clipseg/configuration_clipseg.py +src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py +src/transformers/models/codegen/modeling_codegen.py +src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py +src/transformers/models/convbert/modeling_convbert.py +src/transformers/models/convbert/modeling_tf_convbert.py +src/transformers/models/convnext/convert_convnext_to_pytorch.py +src/transformers/models/convnext/modeling_tf_convnext.py +src/transformers/models/convnextv2/configuration_convnextv2.py +src/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py +src/transformers/models/convnextv2/modeling_convnextv2.py +src/transformers/models/cpmant/configuration_cpmant.py +src/transformers/models/cpmant/modeling_cpmant.py +src/transformers/models/cpmant/tokenization_cpmant.py +src/transformers/models/ctrl/modeling_tf_ctrl.py +src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/cvt/modeling_tf_cvt.py +src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/data2vec/modeling_data2vec_text.py +src/transformers/models/data2vec/modeling_tf_data2vec_vision.py +src/transformers/models/deberta/modeling_tf_deberta.py +src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +src/transformers/models/decision_transformer/modeling_decision_transformer.py +src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py +src/transformers/models/deformable_detr/load_custom.py +src/transformers/models/deit/convert_deit_timm_to_pytorch.py +src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py +src/transformers/models/deprecated/mctct/configuration_mctct.py +src/transformers/models/deprecated/mctct/feature_extraction_mctct.py +src/transformers/models/deprecated/mctct/modeling_mctct.py +src/transformers/models/deprecated/mctct/processing_mctct.py +src/transformers/models/deprecated/mmbt/configuration_mmbt.py +src/transformers/models/deprecated/mmbt/modeling_mmbt.py +src/transformers/models/deprecated/open_llama/configuration_open_llama.py +src/transformers/models/deprecated/open_llama/modeling_open_llama.py +src/transformers/models/deprecated/retribert/configuration_retribert.py +src/transformers/models/deprecated/retribert/modeling_retribert.py +src/transformers/models/deprecated/retribert/tokenization_retribert.py +src/transformers/models/deprecated/retribert/tokenization_retribert_fast.py +src/transformers/models/deprecated/tapex/tokenization_tapex.py +src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py +src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py +src/transformers/models/deprecated/van/configuration_van.py +src/transformers/models/deprecated/van/convert_van_to_pytorch.py +src/transformers/models/deprecated/van/modeling_van.py 
+src/transformers/models/deta/convert_deta_resnet_to_pytorch.py +src/transformers/models/deta/convert_deta_swin_to_pytorch.py +src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/detr/convert_detr_to_pytorch.py +src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/dinov2/configuration_dinov2.py +src/transformers/models/dinov2/convert_dinov2_to_hf.py +src/transformers/models/dinov2/modeling_dinov2.py +src/transformers/models/distilbert/modeling_distilbert.py +src/transformers/models/distilbert/modeling_flax_distilbert.py +src/transformers/models/distilbert/modeling_tf_distilbert.py +src/transformers/models/dit/convert_dit_unilm_to_pytorch.py +src/transformers/models/donut/configuration_donut_swin.py +src/transformers/models/donut/convert_donut_to_pytorch.py +src/transformers/models/donut/modeling_donut_swin.py +src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py +src/transformers/models/dpr/modeling_dpr.py +src/transformers/models/dpr/modeling_tf_dpr.py +src/transformers/models/dpt/configuration_dpt.py +src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py +src/transformers/models/dpt/convert_dpt_to_pytorch.py +src/transformers/models/efficientformer/configuration_efficientformer.py +src/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/efficientformer/modeling_efficientformer.py +src/transformers/models/efficientnet/configuration_efficientnet.py +src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py +src/transformers/models/efficientnet/modeling_efficientnet.py +src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py +src/transformers/models/electra/modeling_flax_electra.py +src/transformers/models/encodec/configuration_encodec.py +src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py +src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py +src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +src/transformers/models/ernie/modeling_ernie.py +src/transformers/models/esm/configuration_esm.py +src/transformers/models/esm/convert_esm.py +src/transformers/models/esm/modeling_esm.py +src/transformers/models/esm/modeling_esmfold.py +src/transformers/models/esm/modeling_tf_esm.py +src/transformers/models/esm/openfold_utils/chunk_utils.py +src/transformers/models/esm/openfold_utils/data_transforms.py +src/transformers/models/esm/openfold_utils/feats.py +src/transformers/models/esm/openfold_utils/loss.py +src/transformers/models/esm/openfold_utils/protein.py +src/transformers/models/esm/openfold_utils/residue_constants.py +src/transformers/models/esm/openfold_utils/rigid_utils.py +src/transformers/models/esm/openfold_utils/tensor_utils.py +src/transformers/models/falcon/configuration_falcon.py +src/transformers/models/falcon/modeling_falcon.py +src/transformers/models/flaubert/configuration_flaubert.py +src/transformers/models/flaubert/modeling_flaubert.py +src/transformers/models/flaubert/modeling_tf_flaubert.py +src/transformers/models/flava/convert_dalle_to_flava_codebook.py +src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py +src/transformers/models/flava/modeling_flava.py +src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py 
+src/transformers/models/fnet/modeling_fnet.py +src/transformers/models/focalnet/configuration_focalnet.py +src/transformers/models/focalnet/convert_focalnet_to_hf_format.py +src/transformers/models/focalnet/modeling_focalnet.py +src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/fsmt/modeling_fsmt.py +src/transformers/models/funnel/configuration_funnel.py +src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py +src/transformers/models/funnel/modeling_funnel.py +src/transformers/models/funnel/modeling_tf_funnel.py +src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py +src/transformers/models/git/configuration_git.py +src/transformers/models/git/convert_git_to_pytorch.py +src/transformers/models/glpn/configuration_glpn.py +src/transformers/models/glpn/convert_glpn_to_pytorch.py +src/transformers/models/gpt2/CONVERSION.md +src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py +src/transformers/models/gpt2/modeling_flax_gpt2.py +src/transformers/models/gpt2/modeling_tf_gpt2.py +src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py +src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py +src/transformers/models/gpt_neo/modeling_gpt_neo.py +src/transformers/models/gpt_neox/modeling_gpt_neox.py +src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +src/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py +src/transformers/models/gptj/configuration_gptj.py +src/transformers/models/gptj/modeling_flax_gptj.py +src/transformers/models/gptj/modeling_tf_gptj.py +src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py +src/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py +src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +src/transformers/models/graphormer/collating_graphormer.py +src/transformers/models/graphormer/configuration_graphormer.py +src/transformers/models/graphormer/modeling_graphormer.py +src/transformers/models/groupvit/configuration_groupvit.py +src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py +src/transformers/models/hubert/configuration_hubert.py +src/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/hubert/modeling_tf_hubert.py +src/transformers/models/ibert/configuration_ibert.py +src/transformers/models/ibert/modeling_ibert.py +src/transformers/models/ibert/quant_modules.py +src/transformers/models/idefics/configuration_idefics.py +src/transformers/models/idefics/image_processing_idefics.py +src/transformers/models/idefics/modeling_idefics.py +src/transformers/models/idefics/perceiver.py +src/transformers/models/idefics/processing_idefics.py +src/transformers/models/idefics/vision.py +src/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py +src/transformers/models/informer/configuration_informer.py +src/transformers/models/informer/modeling_informer.py +src/transformers/models/instructblip/configuration_instructblip.py +src/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py +src/transformers/models/instructblip/modeling_instructblip.py 
+src/transformers/models/instructblip/processing_instructblip.py +src/transformers/models/jukebox/configuration_jukebox.py +src/transformers/models/jukebox/convert_jukebox.py +src/transformers/models/jukebox/modeling_jukebox.py +src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/led/configuration_led.py +src/transformers/models/led/modeling_led.py +src/transformers/models/led/modeling_tf_led.py +src/transformers/models/levit/convert_levit_timm_to_pytorch.py +src/transformers/models/levit/modeling_levit.py +src/transformers/models/lilt/configuration_lilt.py +src/transformers/models/llama/configuration_llama.py +src/transformers/models/llama/convert_llama_weights_to_hf.py +src/transformers/models/llama/modeling_llama.py +src/transformers/models/longformer/configuration_longformer.py +src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py +src/transformers/models/longt5/configuration_longt5.py +src/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py +src/transformers/models/longt5/modeling_flax_longt5.py +src/transformers/models/luke/configuration_luke.py +src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/luke/modeling_luke.py +src/transformers/models/lxmert/configuration_lxmert.py +src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/lxmert/modeling_lxmert.py +src/transformers/models/lxmert/modeling_tf_lxmert.py +src/transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py +src/transformers/models/m2m_100/modeling_m2m_100.py +src/transformers/models/marian/configuration_marian.py +src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py +src/transformers/models/marian/convert_marian_to_pytorch.py +src/transformers/models/marian/modeling_flax_marian.py +src/transformers/models/marian/modeling_tf_marian.py +src/transformers/models/markuplm/configuration_markuplm.py +src/transformers/models/markuplm/feature_extraction_markuplm.py +src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/maskformer/configuration_maskformer_swin.py +src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/maskformer/convert_maskformer_resnet_to_pytorch.py +src/transformers/models/maskformer/convert_maskformer_swin_to_pytorch.py +src/transformers/models/maskformer/modeling_maskformer_swin.py +src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py +src/transformers/models/mbart/modeling_flax_mbart.py +src/transformers/models/mega/configuration_mega.py +src/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/mega/modeling_mega.py +src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py +src/transformers/models/megatron_bert/modeling_megatron_bert.py +src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py +src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py +src/transformers/models/mgp_str/configuration_mgp_str.py +src/transformers/models/mgp_str/modeling_mgp_str.py +src/transformers/models/mistral/configuration_mistral.py +src/transformers/models/mistral/modeling_mistral.py +src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py 
+src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +src/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +src/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/mobilevit/configuration_mobilevit.py +src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py +src/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py +src/transformers/models/mpnet/configuration_mpnet.py +src/transformers/models/mpnet/modeling_mpnet.py +src/transformers/models/mpnet/modeling_tf_mpnet.py +src/transformers/models/mpt/configuration_mpt.py +src/transformers/models/mpt/modeling_mpt.py +src/transformers/models/mra/configuration_mra.py +src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py +src/transformers/models/mra/modeling_mra.py +src/transformers/models/mt5/configuration_mt5.py +src/transformers/models/mt5/modeling_flax_mt5.py +src/transformers/models/mt5/modeling_mt5.py +src/transformers/models/mt5/modeling_tf_mt5.py +src/transformers/models/musicgen/convert_musicgen_transformers.py +src/transformers/models/mvp/modeling_mvp.py +src/transformers/models/nezha/modeling_nezha.py +src/transformers/models/nllb_moe/configuration_nllb_moe.py +src/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py +src/transformers/models/nllb_moe/modeling_nllb_moe.py +src/transformers/models/nougat/convert_nougat_to_hf.py +src/transformers/models/nystromformer/configuration_nystromformer.py +src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/nystromformer/modeling_nystromformer.py +src/transformers/models/oneformer/convert_to_hf_oneformer.py +src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py +src/transformers/models/openai/modeling_openai.py +src/transformers/models/openai/modeling_tf_openai.py +src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/opt/modeling_flax_opt.py +src/transformers/models/owlvit/configuration_owlvit.py +src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py +src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py +src/transformers/models/pegasus/modeling_flax_pegasus.py +src/transformers/models/pegasus/modeling_tf_pegasus.py +src/transformers/models/pegasus_x/modeling_pegasus_x.py +src/transformers/models/perceiver/configuration_perceiver.py +src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py +src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py +src/transformers/models/persimmon/modeling_persimmon.py +src/transformers/models/pix2struct/configuration_pix2struct.py +src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py +src/transformers/models/pix2struct/image_processing_pix2struct.py +src/transformers/models/pix2struct/processing_pix2struct.py +src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py +src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py +src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py +src/transformers/models/pop2piano/feature_extraction_pop2piano.py +src/transformers/models/pop2piano/processing_pop2piano.py +src/transformers/models/pop2piano/tokenization_pop2piano.py 
+src/transformers/models/prophetnet/configuration_prophetnet.py +src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/prophetnet/modeling_prophetnet.py +src/transformers/models/pvt/configuration_pvt.py +src/transformers/models/pvt/convert_pvt_to_pytorch.py +src/transformers/models/pvt/image_processing_pvt.py +src/transformers/models/pvt/modeling_pvt.py +src/transformers/models/qdqbert/configuration_qdqbert.py +src/transformers/models/qdqbert/modeling_qdqbert.py +src/transformers/models/rag/configuration_rag.py +src/transformers/models/rag/modeling_rag.py +src/transformers/models/rag/modeling_tf_rag.py +src/transformers/models/rag/retrieval_rag.py +src/transformers/models/realm/modeling_realm.py +src/transformers/models/realm/retrieval_realm.py +src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py +src/transformers/models/regnet/configuration_regnet.py +src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py +src/transformers/models/regnet/convert_regnet_to_pytorch.py +src/transformers/models/regnet/modeling_flax_regnet.py +src/transformers/models/rembert/configuration_rembert.py +src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py +src/transformers/models/rembert/modeling_rembert.py +src/transformers/models/rembert/modeling_tf_rembert.py +src/transformers/models/resnet/convert_resnet_to_pytorch.py +src/transformers/models/resnet/modeling_flax_resnet.py +src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/roberta/modeling_flax_roberta.py +src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py +src/transformers/models/roc_bert/configuration_roc_bert.py +src/transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py +src/transformers/models/roformer/modeling_flax_roformer.py +src/transformers/models/roformer/modeling_roformer.py +src/transformers/models/roformer/modeling_tf_roformer.py +src/transformers/models/rwkv/configuration_rwkv.py +src/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py +src/transformers/models/rwkv/modeling_rwkv.py +src/transformers/models/sam/configuration_sam.py +src/transformers/models/sam/convert_sam_original_to_hf_format.py +src/transformers/models/sam/image_processing_sam.py +src/transformers/models/sam/modeling_sam.py +src/transformers/models/sam/modeling_tf_sam.py +src/transformers/models/sam/processing_sam.py +src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py +src/transformers/models/segformer/configuration_segformer.py +src/transformers/models/segformer/convert_segformer_original_to_pytorch.py +src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/sew_d/convert_sew_d_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py +src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py +src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py +src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py +src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py 
+src/transformers/models/speecht5/configuration_speecht5.py +src/transformers/models/speecht5/convert_hifigan.py +src/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/speecht5/number_normalizer.py +src/transformers/models/splinter/configuration_splinter.py +src/transformers/models/splinter/modeling_splinter.py +src/transformers/models/squeezebert/modeling_squeezebert.py +src/transformers/models/swiftformer/configuration_swiftformer.py +src/transformers/models/swiftformer/convert_swiftformer_original_to_hf.py +src/transformers/models/swiftformer/modeling_swiftformer.py +src/transformers/models/swin/convert_swin_simmim_to_pytorch.py +src/transformers/models/swin/convert_swin_timm_to_pytorch.py +src/transformers/models/swin/modeling_tf_swin.py +src/transformers/models/swin2sr/configuration_swin2sr.py +src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py +src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py +src/transformers/models/swinv2/modeling_swinv2.py +src/transformers/models/switch_transformers/configuration_switch_transformers.py +src/transformers/models/switch_transformers/convert_big_switch.py +src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py +src/transformers/models/switch_transformers/modeling_switch_transformers.py +src/transformers/models/t5/configuration_t5.py +src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py +src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py +src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py +src/transformers/models/t5/modeling_flax_t5.py +src/transformers/models/t5/modeling_t5.py +src/transformers/models/t5/modeling_tf_t5.py +src/transformers/models/table_transformer/configuration_table_transformer.py +src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/tapas/configuration_tapas.py +src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py +src/transformers/models/tapas/modeling_tapas.py +src/transformers/models/tapas/modeling_tf_tapas.py +src/transformers/models/timesformer/convert_timesformer_to_pytorch.py +src/transformers/models/timm_backbone/configuration_timm_backbone.py +src/transformers/models/timm_backbone/modeling_timm_backbone.py +src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py +src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py +src/transformers/models/transfo_xl/modeling_transfo_xl.py +src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py +src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py +src/transformers/models/tvlt/configuration_tvlt.py +src/transformers/models/tvlt/modeling_tvlt.py +src/transformers/models/umt5/configuration_umt5.py +src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py +src/transformers/models/umt5/modeling_umt5.py +src/transformers/models/unispeech/convert_unispeech_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/unispeech_sat/configuration_unispeech_sat.py +src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/unispeech_sat/convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/upernet/configuration_upernet.py 
+src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py +src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py +src/transformers/models/videomae/configuration_videomae.py +src/transformers/models/videomae/convert_videomae_to_pytorch.py +src/transformers/models/vilt/configuration_vilt.py +src/transformers/models/vilt/convert_vilt_original_to_pytorch.py +src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py +src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py +src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/visual_bert/modeling_visual_bert.py +src/transformers/models/vit/convert_dino_to_pytorch.py +src/transformers/models/vit/convert_vit_timm_to_pytorch.py +src/transformers/models/vit/modeling_flax_vit.py +src/transformers/models/vit_hybrid/configuration_vit_hybrid.py +src/transformers/models/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py +src/transformers/models/vit_hybrid/modeling_vit_hybrid.py +src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py +src/transformers/models/vit_mae/modeling_tf_vit_mae.py +src/transformers/models/vit_msn/configuration_vit_msn.py +src/transformers/models/vit_msn/convert_msn_to_pytorch.py +src/transformers/models/vivit/configuration_vivit.py +src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py +src/transformers/models/vivit/image_processing_vivit.py +src/transformers/models/vivit/modeling_vivit.py +src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py +src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/wavlm/convert_wavlm_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/wavlm/convert_wavlm_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/whisper/convert_openai_to_hf.py +src/transformers/models/whisper/english_normalizer.py +src/transformers/models/whisper/modeling_flax_whisper.py +src/transformers/models/x_clip/configuration_x_clip.py +src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py +src/transformers/models/xglm/configuration_xglm.py +src/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py +src/transformers/models/xglm/modeling_flax_xglm.py +src/transformers/models/xglm/modeling_tf_xglm.py +src/transformers/models/xglm/modeling_xglm.py +src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/xlm/modeling_tf_xlm.py +src/transformers/models/xlm/modeling_xlm.py +src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py +src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py +src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py +src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py +src/transformers/models/xlm_roberta/modeling_xlm_roberta.py +src/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py 
+src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py +src/transformers/models/xlnet/modeling_tf_xlnet.py +src/transformers/models/xlnet/modeling_xlnet.py +src/transformers/models/xmod/convert_xmod_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/yolos/convert_yolos_to_pytorch.py +src/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py +src/transformers/models/yoso/modeling_yoso.py +src/transformers/onnx/__main__.py +src/transformers/onnx/config.py +src/transformers/onnx/convert.py +src/transformers/onnx/features.py +src/transformers/onnx/utils.py +src/transformers/optimization.py +src/transformers/optimization_tf.py +src/transformers/pipelines/audio_classification.py +src/transformers/pipelines/audio_utils.py +src/transformers/pipelines/automatic_speech_recognition.py +src/transformers/pipelines/base.py +src/transformers/pipelines/conversational.py +src/transformers/pipelines/depth_estimation.py +src/transformers/pipelines/document_question_answering.py +src/transformers/pipelines/feature_extraction.py +src/transformers/pipelines/fill_mask.py +src/transformers/pipelines/image_classification.py +src/transformers/pipelines/image_segmentation.py +src/transformers/pipelines/image_to_text.py +src/transformers/pipelines/mask_generation.py +src/transformers/pipelines/object_detection.py +src/transformers/pipelines/pt_utils.py +src/transformers/pipelines/question_answering.py +src/transformers/pipelines/table_question_answering.py +src/transformers/pipelines/text_classification.py +src/transformers/pipelines/token_classification.py +src/transformers/pipelines/video_classification.py +src/transformers/pipelines/visual_question_answering.py +src/transformers/pipelines/zero_shot_audio_classification.py +src/transformers/pipelines/zero_shot_classification.py +src/transformers/pipelines/zero_shot_image_classification.py +src/transformers/pipelines/zero_shot_object_detection.py +src/transformers/processing_utils.py +src/transformers/pytorch_utils.py +src/transformers/sagemaker/trainer_sm.py +src/transformers/sagemaker/training_args_sm.py +src/transformers/testing_utils.py +src/transformers/tf_utils.py +src/transformers/time_series_utils.py +src/transformers/tokenization_utils.py +src/transformers/tokenization_utils_base.py +src/transformers/tokenization_utils_fast.py +src/transformers/tools/agent_types.py +src/transformers/tools/agents.py +src/transformers/tools/base.py +src/transformers/tools/document_question_answering.py +src/transformers/tools/evaluate_agent.py +src/transformers/tools/image_captioning.py +src/transformers/tools/image_question_answering.py +src/transformers/tools/image_segmentation.py +src/transformers/tools/prompts.py +src/transformers/tools/python_interpreter.py +src/transformers/tools/speech_to_text.py +src/transformers/tools/text_classification.py +src/transformers/tools/text_question_answering.py +src/transformers/tools/text_summarization.py +src/transformers/tools/text_to_speech.py +src/transformers/tools/translation.py +src/transformers/trainer.py +src/transformers/trainer_callback.py +src/transformers/trainer_pt_utils.py +src/transformers/trainer_seq2seq.py +src/transformers/trainer_tf.py +src/transformers/trainer_utils.py +src/transformers/training_args.py +src/transformers/training_args_seq2seq.py +src/transformers/training_args_tf.py +src/transformers/utils/backbone_utils.py +src/transformers/utils/bitsandbytes.py +src/transformers/utils/constants.py +src/transformers/utils/doc.py 
+src/transformers/utils/dummy_detectron2_objects.py +src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py +src/transformers/utils/dummy_flax_objects.py +src/transformers/utils/dummy_keras_nlp_objects.py +src/transformers/utils/dummy_music_objects.py +src/transformers/utils/dummy_pt_objects.py +src/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py +src/transformers/utils/dummy_sentencepiece_objects.py +src/transformers/utils/dummy_speech_objects.py +src/transformers/utils/dummy_tensorflow_text_objects.py +src/transformers/utils/dummy_tf_objects.py +src/transformers/utils/dummy_tokenizers_objects.py +src/transformers/utils/dummy_vision_objects.py +src/transformers/utils/fx.py +src/transformers/utils/generic.py +src/transformers/utils/hp_naming.py +src/transformers/utils/hub.py +src/transformers/utils/import_utils.py +src/transformers/utils/logging.py +src/transformers/utils/model_parallel_utils.py +src/transformers/utils/notebook.py +src/transformers/utils/peft_utils.py +src/transformers/utils/quantization_config.py +src/transformers/utils/sentencepiece_model_pb2.py +src/transformers/utils/sentencepiece_model_pb2_new.py +src/transformers/utils/versions.py diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index b2f9d2f8cec7..a60d6a558dcb 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -387,7 +387,7 @@ def get_all_doctest_files() -> List[str]: # These are files not doctested yet. with open("utils/not_doctested.txt") as fp: - not_doctested = set(fp.read().strip().split("\n")) + not_doctested = {x.split(" ")[0] for x in fp.read().strip().split("\n")} # So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%. test_files_to_run = [x for x in test_files_to_run if x not in not_doctested] @@ -415,7 +415,9 @@ def get_new_doctest_files(repo, base_commit, branching_commit) -> List[str]: with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f: new_content = f.read() # Compute the removed lines and return them - removed_content = set(old_content.split("\n")) - set(new_content.split("\n")) + removed_content = {x.split(" ")[0] for x in old_content.split("\n")} - { + x.split(" ")[0] for x in new_content.split("\n") + } return sorted(removed_content) return [] From 011b15c1c75a575fcaee5a50de02ff316881816a Mon Sep 17 00:00:00 2001 From: Maria Khalusova Date: Fri, 3 Nov 2023 08:03:13 -0400 Subject: [PATCH 075/268] [docs] Custom model doc update (#27213) doc update --- docs/source/en/custom_models.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/source/en/custom_models.md b/docs/source/en/custom_models.md index d709772eed06..4abc3ce5773a 100644 --- a/docs/source/en/custom_models.md +++ b/docs/source/en/custom_models.md @@ -272,6 +272,22 @@ Note that there is no need to specify an auto class for the configuration (there [`AutoConfig`]) but it's different for models. Your custom model could be suitable for many different tasks, so you have to specify which one of the auto classes is the correct one for your model. + + +Use `register_for_auto_class()` if you want the code files to be copied. If you instead prefer to use code on the Hub from another repo, +you don't need to call it. 
In cases where there's more than one auto class, you can modify the `config.json` directly using the +following structure: + +``` +"auto_map": { + "AutoConfig": "--", + "AutoModel": "--", + "AutoModelFor": "--", +}, +``` + + + Next, let's create the config and models as we did before: ```py From db69bd88fbf060382a10d23374deeb913e4225a1 Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 3 Nov 2023 13:17:46 +0000 Subject: [PATCH 076/268] Update the ConversationalPipeline docstring for chat templates (#27250) * Update the ConversationalPipeline docstring now that we're using chat templates * Direct access to conversation.messages * Explain the string init --- src/transformers/pipelines/conversational.py | 22 ++++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/transformers/pipelines/conversational.py b/src/transformers/pipelines/conversational.py index 96a16e5b0f8f..a97223c649e1 100644 --- a/src/transformers/pipelines/conversational.py +++ b/src/transformers/pipelines/conversational.py @@ -208,17 +208,19 @@ class ConversationalPipeline(Pipeline): ```python >>> from transformers import pipeline, Conversation + # Any model with a chat template can be used in a ConversationalPipeline. - >>> chatbot = pipeline(model="microsoft/DialoGPT-medium") - >>> conversation = Conversation("Going to the movies tonight - any suggestions?") + >>> chatbot = pipeline(model="facebook/blenderbot-400M-distill") + >>> # Conversation objects initialized with a string will treat it as a user message + >>> conversation = Conversation("I'm looking for a movie - what's your favourite one?") >>> conversation = chatbot(conversation) - >>> conversation.generated_responses[-1] - 'The Big Lebowski' + >>> conversation.messages[-1]["content"] + ' I don't really have a favorite movie, but I do like action movies. What about you?' - >>> conversation.add_user_input("Is it an action movie?") + >>> conversation.add_message({"role": "user", "content": "That's interesting, why do you like action movies?"}) >>> conversation = chatbot(conversation) - >>> conversation.generated_responses[-1] - "It's a comedy." + >>> conversation.messages[-1]["content"] + ' I think it's just because they're so fast-paced and action-fantastic.' ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) @@ -226,10 +228,8 @@ class ConversationalPipeline(Pipeline): This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"conversational"`. - The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, - currently: *'microsoft/DialoGPT-small'*, *'microsoft/DialoGPT-medium'*, *'microsoft/DialoGPT-large'*. See the - up-to-date list of available models on - [huggingface.co/models](https://huggingface.co/models?filter=conversational). + This pipeline can be used with any model that has a [chat + template](https://huggingface.co/docs/transformers/chat_templating) set. 
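As a minimal sketch of the chat-template-based usage described above (the checkpoint is only an example and the generated replies will vary):

```python
from transformers import Conversation, pipeline

# any checkpoint that ships a chat template works here
chatbot = pipeline("conversational", model="facebook/blenderbot-400M-distill")

# a plain string becomes the first user message
conversation = Conversation("Can you recommend a good science fiction novel?")
conversation = chatbot(conversation)
print(conversation.messages[-1]["content"])  # assistant reply

# append another user turn and run the pipeline again
conversation.add_message({"role": "user", "content": "Why do you recommend that one?"})
conversation = chatbot(conversation)
print(conversation.messages[-1]["content"])
```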
""" def __init__(self, *args, **kwargs): From f13f544ad9aff5a9595a424730a005ee7d0af537 Mon Sep 17 00:00:00 2001 From: Shiyu Li Date: Fri, 3 Nov 2023 10:00:33 -0400 Subject: [PATCH 077/268] Fix switch transformer mixed precision issue (#27220) * Fix mixed precision error for switch transformer * Fixup --- .../models/gptsan_japanese/modeling_gptsan_japanese.py | 2 +- .../models/switch_transformers/modeling_switch_transformers.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py index 7591ecb0b82a..dcbda1d7384f 100644 --- a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py @@ -286,7 +286,7 @@ def forward(self, hidden_states): next_states = hidden_states.clone() for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, :, idx].bool() - next_states[token_indices] = expert(hidden_states[token_indices]) + next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype) hidden_states = router_probs * next_states return hidden_states, (router_logits, expert_index) diff --git a/src/transformers/models/switch_transformers/modeling_switch_transformers.py b/src/transformers/models/switch_transformers/modeling_switch_transformers.py index 07c96a5aa828..60c100d37c48 100644 --- a/src/transformers/models/switch_transformers/modeling_switch_transformers.py +++ b/src/transformers/models/switch_transformers/modeling_switch_transformers.py @@ -318,7 +318,7 @@ def forward(self, hidden_states): next_states = hidden_states.clone() for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, :, idx].bool() - next_states[token_indices] = expert(hidden_states[token_indices]) + next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype) hidden_states = router_probs * next_states return hidden_states, (router_logits, expert_index) From ad8ff96224a1551dd3afc340b2f3c788766619b8 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Fri, 3 Nov 2023 15:23:13 +0100 Subject: [PATCH 078/268] [`Docs` / `SAM` ] Reflect correct changes to run inference without OOM (#27268) Update sam.md --- docs/source/en/model_doc/sam.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/source/en/model_doc/sam.md b/docs/source/en/model_doc/sam.md index fe0d24623fd7..d2a472957af9 100644 --- a/docs/source/en/model_doc/sam.md +++ b/docs/source/en/model_doc/sam.md @@ -57,7 +57,8 @@ raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") input_points = [[[450, 600]]] # 2D location of a window in the image inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device) -outputs = model(**inputs) +with torch.no_grad(): + outputs = model(**inputs) masks = processor.image_processor.post_process_masks( outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu() @@ -108,4 +109,4 @@ Resources: ## TFSamModel [[autodoc]] TFSamModel - - call \ No newline at end of file + - call From 5964f820db1568d26298b37dea9db328185c7f7c Mon Sep 17 00:00:00 2001 From: Maria Khalusova Date: Fri, 3 Nov 2023 10:57:03 -0400 Subject: [PATCH 079/268] [Docs] Model_doc structure/clarity improvements (#26876) * first batch of structure improvements for model_docs * second batch of 
structure improvements for model_docs * more structure improvements for model_docs * more structure improvements for model_docs * structure improvements for cv model_docs * more structural refactoring * addressed feedback about image processors --- docs/source/en/model_doc/albert.md | 26 ++++-- docs/source/en/model_doc/align.md | 9 +-- docs/source/en/model_doc/altclip.md | 11 +-- .../audio-spectrogram-transformer.md | 18 ++--- docs/source/en/model_doc/autoformer.md | 2 - docs/source/en/model_doc/bark.md | 13 ++- docs/source/en/model_doc/bart.md | 36 +++++---- docs/source/en/model_doc/barthez.md | 8 +- docs/source/en/model_doc/bartpho.md | 8 +- docs/source/en/model_doc/beit.md | 17 +++- docs/source/en/model_doc/bert-generation.md | 15 ++-- docs/source/en/model_doc/bert-japanese.md | 10 ++- docs/source/en/model_doc/bert.md | 32 ++++++-- docs/source/en/model_doc/bertweet.md | 11 ++- docs/source/en/model_doc/big_bird.md | 20 ++++- docs/source/en/model_doc/bigbird_pegasus.md | 8 +- docs/source/en/model_doc/biogpt.md | 10 +-- docs/source/en/model_doc/bit.md | 11 ++- docs/source/en/model_doc/blenderbot-small.md | 25 ++++-- docs/source/en/model_doc/blenderbot.md | 50 +++++++----- docs/source/en/model_doc/blip-2.md | 10 +-- docs/source/en/model_doc/blip.md | 22 +++-- docs/source/en/model_doc/bloom.md | 22 +++-- docs/source/en/model_doc/bort.md | 14 ++-- docs/source/en/model_doc/bridgetower.md | 7 +- docs/source/en/model_doc/bros.md | 11 ++- docs/source/en/model_doc/byt5.md | 8 +- docs/source/en/model_doc/camembert.md | 22 +++-- docs/source/en/model_doc/canine.md | 23 +++--- docs/source/en/model_doc/chinese_clip.md | 8 +- docs/source/en/model_doc/clap.md | 2 - docs/source/en/model_doc/clip.md | 19 ++++- docs/source/en/model_doc/clipseg.md | 14 ++-- docs/source/en/model_doc/code_llama.md | 23 ++++-- docs/source/en/model_doc/codegen.md | 4 +- docs/source/en/model_doc/conditional_detr.md | 2 +- docs/source/en/model_doc/convbert.md | 17 +++- docs/source/en/model_doc/convnext.md | 13 +-- docs/source/en/model_doc/convnextv2.md | 4 - docs/source/en/model_doc/cpm.md | 9 ++- docs/source/en/model_doc/cpmant.md | 5 +- docs/source/en/model_doc/ctrl.md | 18 ++++- docs/source/en/model_doc/cvt.md | 14 +++- docs/source/en/model_doc/data2vec.md | 21 +++-- docs/source/en/model_doc/deberta-v2.md | 11 ++- docs/source/en/model_doc/deberta.md | 10 +++ .../en/model_doc/decision_transformer.md | 4 +- docs/source/en/model_doc/deformable_detr.md | 9 +-- docs/source/en/model_doc/deit.md | 22 ++--- docs/source/en/model_doc/deplot.md | 12 ++- docs/source/en/model_doc/deta.md | 7 -- docs/source/en/model_doc/detr.md | 20 ++--- docs/source/en/model_doc/dialogpt.md | 9 ++- docs/source/en/model_doc/dinat.md | 22 ++--- docs/source/en/model_doc/dinov2.md | 5 -- docs/source/en/model_doc/distilbert.md | 23 +++++- docs/source/en/model_doc/dit.md | 15 ++-- docs/source/en/model_doc/donut.md | 4 +- docs/source/en/model_doc/dpr.md | 13 ++- docs/source/en/model_doc/efficientformer.md | 9 +++ docs/source/en/model_doc/electra.md | 20 ++++- docs/source/en/model_doc/encodec.md | 4 +- docs/source/en/model_doc/encoder-decoder.md | 12 +++ docs/source/en/model_doc/ernie.md | 6 +- docs/source/en/model_doc/ernie_m.md | 15 ++-- docs/source/en/model_doc/esm.md | 22 +++-- docs/source/en/model_doc/flan-t5.md | 8 +- docs/source/en/model_doc/flan-ul2.md | 10 +-- docs/source/en/model_doc/flaubert.md | 14 +++- docs/source/en/model_doc/flava.md | 2 - docs/source/en/model_doc/fnet.md | 12 +-- docs/source/en/model_doc/focalnet.md | 5 -- 
docs/source/en/model_doc/fsmt.md | 3 - docs/source/en/model_doc/funnel.md | 17 +++- docs/source/en/model_doc/git.md | 9 +-- docs/source/en/model_doc/glpn.md | 4 - docs/source/en/model_doc/gpt-sw3.md | 17 ++-- docs/source/en/model_doc/gpt2.md | 26 ++++-- docs/source/en/model_doc/gpt_bigcode.md | 8 +- docs/source/en/model_doc/gpt_neo.md | 16 +++- docs/source/en/model_doc/gpt_neox.md | 4 +- docs/source/en/model_doc/gpt_neox_japanese.md | 4 +- docs/source/en/model_doc/gptj.md | 15 +++- docs/source/en/model_doc/gptsan-japanese.md | 4 +- docs/source/en/model_doc/graphormer.md | 8 +- docs/source/en/model_doc/groupvit.md | 19 +++-- docs/source/en/model_doc/herbert.md | 13 ++- docs/source/en/model_doc/hubert.md | 17 +++- docs/source/en/model_doc/ibert.md | 2 +- docs/source/en/model_doc/idefics.md | 4 +- docs/source/en/model_doc/imagegpt.md | 6 +- docs/source/en/model_doc/informer.md | 2 - docs/source/en/model_doc/instructblip.md | 7 +- docs/source/en/model_doc/jukebox.md | 8 +- docs/source/en/model_doc/layoutlm.md | 13 ++- docs/source/en/model_doc/layoutlmv2.md | 2 +- docs/source/en/model_doc/layoutlmv3.md | 30 ++++--- docs/source/en/model_doc/layoutxlm.md | 10 ++- docs/source/en/model_doc/led.md | 20 ++++- docs/source/en/model_doc/levit.md | 7 +- docs/source/en/model_doc/lilt.md | 18 ++--- docs/source/en/model_doc/llama.md | 9 +-- docs/source/en/model_doc/llama2.md | 6 +- docs/source/en/model_doc/longformer.md | 17 +++- docs/source/en/model_doc/longt5.md | 18 ++++- docs/source/en/model_doc/luke.md | 18 ++--- docs/source/en/model_doc/lxmert.md | 17 +++- docs/source/en/model_doc/m2m_100.md | 16 ++-- docs/source/en/model_doc/marian.md | 25 ++++-- docs/source/en/model_doc/markuplm.md | 11 +-- docs/source/en/model_doc/mask2former.md | 17 ++-- docs/source/en/model_doc/maskformer.md | 15 ++-- docs/source/en/model_doc/matcha.md | 8 +- docs/source/en/model_doc/mbart.md | 14 +++- docs/source/en/model_doc/mctct.md | 11 +-- docs/source/en/model_doc/mega.md | 10 ++- docs/source/en/model_doc/megatron-bert.md | 12 +-- docs/source/en/model_doc/megatron_gpt2.md | 14 +++- docs/source/en/model_doc/mgp-str.md | 10 +-- docs/source/en/model_doc/mistral.md | 8 +- docs/source/en/model_doc/mluke.md | 8 +- docs/source/en/model_doc/mms.md | 12 ++- docs/source/en/model_doc/mobilebert.md | 16 +++- docs/source/en/model_doc/mobilenet_v1.md | 6 +- docs/source/en/model_doc/mobilenet_v2.md | 6 +- docs/source/en/model_doc/mobilevit.md | 16 +++- docs/source/en/model_doc/mobilevitv2.md | 9 +-- docs/source/en/model_doc/mpnet.md | 19 +++-- docs/source/en/model_doc/mpt.md | 5 +- docs/source/en/model_doc/mra.md | 6 -- docs/source/en/model_doc/mt5.md | 13 ++- docs/source/en/model_doc/mvp.md | 10 ++- docs/source/en/model_doc/nat.md | 24 +++--- docs/source/en/model_doc/nezha.md | 2 +- docs/source/en/model_doc/nllb-moe.md | 12 +-- docs/source/en/model_doc/nllb.md | 7 +- docs/source/en/model_doc/nougat.md | 8 +- docs/source/en/model_doc/nystromformer.md | 2 +- docs/source/en/model_doc/oneformer.md | 15 ++-- docs/source/en/model_doc/open-llama.md | 2 +- docs/source/en/model_doc/openai-gpt.md | 20 +++-- docs/source/en/model_doc/opt.md | 37 ++++++--- docs/source/en/model_doc/owlv2.md | 18 +++-- docs/source/en/model_doc/owlvit.md | 3 +- docs/source/en/model_doc/pegasus.md | 35 ++++---- docs/source/en/model_doc/pegasus_x.md | 12 +-- docs/source/en/model_doc/perceiver.md | 15 ++-- docs/source/en/model_doc/persimmon.md | 6 +- docs/source/en/model_doc/phobert.md | 11 ++- docs/source/en/model_doc/pix2struct.md | 1 - docs/source/en/model_doc/plbart.md 
| 13 ++- docs/source/en/model_doc/poolformer.md | 5 +- docs/source/en/model_doc/pop2piano.md | 20 +++-- docs/source/en/model_doc/prophetnet.md | 12 +-- docs/source/en/model_doc/qdqbert.md | 12 +-- docs/source/en/model_doc/rag.md | 17 +++- docs/source/en/model_doc/reformer.md | 14 ++-- docs/source/en/model_doc/regnet.md | 22 ++--- docs/source/en/model_doc/rembert.md | 13 ++- docs/source/en/model_doc/resnet.md | 16 ++-- .../en/model_doc/roberta-prelayernorm.md | 22 +++-- docs/source/en/model_doc/roberta.md | 18 ++++- docs/source/en/model_doc/roc_bert.md | 12 +-- docs/source/en/model_doc/roformer.md | 24 ++++-- docs/source/en/model_doc/rwkv.md | 3 +- docs/source/en/model_doc/segformer.md | 11 ++- docs/source/en/model_doc/sew-d.md | 8 +- docs/source/en/model_doc/sew.md | 8 +- docs/source/en/model_doc/speech_to_text.md | 12 ++- docs/source/en/model_doc/speech_to_text_2.md | 5 +- docs/source/en/model_doc/splinter.md | 8 +- docs/source/en/model_doc/squeezebert.md | 8 +- docs/source/en/model_doc/swiftformer.md | 5 -- docs/source/en/model_doc/swin.md | 17 ++-- docs/source/en/model_doc/swinv2.md | 3 - .../en/model_doc/switch_transformers.md | 9 +-- docs/source/en/model_doc/t5.md | 29 ++++--- docs/source/en/model_doc/t5v1.1.md | 12 ++- docs/source/en/model_doc/table-transformer.md | 9 +-- docs/source/en/model_doc/tapas.md | 17 +++- docs/source/en/model_doc/tapex.md | 12 ++- .../en/model_doc/time_series_transformer.md | 15 +--- docs/source/en/model_doc/timesformer.md | 11 +-- .../en/model_doc/trajectory_transformer.md | 7 +- docs/source/en/model_doc/transfo-xl.md | 16 +++- docs/source/en/model_doc/trocr.md | 2 +- docs/source/en/model_doc/tvlt.md | 16 ++-- docs/source/en/model_doc/ul2.md | 14 +++- docs/source/en/model_doc/umt5.md | 19 +++-- docs/source/en/model_doc/unispeech-sat.md | 10 +-- docs/source/en/model_doc/unispeech.md | 10 +-- docs/source/en/model_doc/upernet.md | 22 ++--- docs/source/en/model_doc/van.md | 4 +- docs/source/en/model_doc/videomae.md | 8 +- docs/source/en/model_doc/vilt.md | 20 ++--- .../en/model_doc/vision-encoder-decoder.md | 12 +++ .../en/model_doc/vision-text-dual-encoder.md | 12 +++ docs/source/en/model_doc/visual_bert.md | 8 +- docs/source/en/model_doc/vit.md | 80 +++++++++---------- docs/source/en/model_doc/vit_hybrid.md | 3 - docs/source/en/model_doc/vit_mae.md | 27 ++++--- docs/source/en/model_doc/vit_msn.md | 17 ++-- docs/source/en/model_doc/vitdet.md | 7 +- docs/source/en/model_doc/vitmatte.md | 8 +- docs/source/en/model_doc/vits.md | 3 +- docs/source/en/model_doc/vivit.md | 1 - .../source/en/model_doc/wav2vec2-conformer.md | 10 +-- docs/source/en/model_doc/wav2vec2.md | 18 ++++- docs/source/en/model_doc/wav2vec2_phoneme.md | 19 +++-- docs/source/en/model_doc/wavlm.md | 14 ++-- docs/source/en/model_doc/whisper.md | 18 +++-- docs/source/en/model_doc/xglm.md | 16 +++- docs/source/en/model_doc/xlm-prophetnet.md | 8 +- docs/source/en/model_doc/xlm-roberta-xl.md | 12 +-- docs/source/en/model_doc/xlm-roberta.md | 25 ++++-- docs/source/en/model_doc/xlm-v.md | 11 ++- docs/source/en/model_doc/xlm.md | 19 ++++- docs/source/en/model_doc/xlnet.md | 17 +++- docs/source/en/model_doc/xls_r.md | 12 ++- docs/source/en/model_doc/xlsr_wav2vec2.md | 8 +- docs/source/en/model_doc/xmod.md | 8 +- docs/source/en/model_doc/yolos.md | 12 +-- docs/source/en/model_doc/yoso.md | 13 +-- 223 files changed, 1785 insertions(+), 1105 deletions(-) diff --git a/docs/source/en/model_doc/albert.md b/docs/source/en/model_doc/albert.md index 9e821f2f4d02..b7a819b2ed46 100644 --- 
a/docs/source/en/model_doc/albert.md +++ b/docs/source/en/model_doc/albert.md @@ -45,7 +45,10 @@ self-supervised loss that focuses on modeling inter-sentence coherence, and show with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and SQuAD benchmarks while having fewer parameters compared to BERT-large.* -Tips: +This model was contributed by [lysandre](https://huggingface.co/lysandre). This model jax version was contributed by +[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT). + +## Usage tips - ALBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -56,11 +59,7 @@ Tips: - Layers are split in groups that share parameters (to save memory). Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. The model must predict if they have been swapped or not. - -This model was contributed by [lysandre](https://huggingface.co/lysandre). This model jax version was contributed by -[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT). - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -90,6 +89,9 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). This [[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput + + + ## AlbertModel [[autodoc]] AlbertModel @@ -124,6 +126,10 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). This [[autodoc]] AlbertForQuestionAnswering - forward + + + + ## TFAlbertModel [[autodoc]] TFAlbertModel @@ -159,6 +165,9 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). This [[autodoc]] TFAlbertForQuestionAnswering - call + + + ## FlaxAlbertModel [[autodoc]] FlaxAlbertModel @@ -193,3 +202,8 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). This [[autodoc]] FlaxAlbertForQuestionAnswering - __call__ + + + + + diff --git a/docs/source/en/model_doc/align.md b/docs/source/en/model_doc/align.md index faf76853f609..5e41dac6024a 100644 --- a/docs/source/en/model_doc/align.md +++ b/docs/source/en/model_doc/align.md @@ -24,7 +24,10 @@ The abstract from the paper is the following: *Pre-trained representations are becoming crucial for many NLP and perception tasks. While representation learning in NLP has transitioned to training on raw text without human annotations, visual and vision-language representations still rely heavily on curated training datasets that are expensive or require expert knowledge. For vision applications, representations are mostly learned using datasets with explicit class labels such as ImageNet or OpenImages. For vision-language, popular datasets like Conceptual Captions, MSCOCO, or CLIP all involve a non-trivial data collection (and cleaning) process. This costly curation process limits the size of datasets and hence hinders the scaling of trained models. In this paper, we leverage a noisy dataset of over one billion image alt-text pairs, obtained without expensive filtering or post-processing steps in the Conceptual Captions dataset. 
A simple dual-encoder architecture learns to align visual and language representations of the image and text pairs using a contrastive loss. We show that the scale of our corpus can make up for its noise and leads to state-of-the-art representations even with such a simple learning scheme. Our visual representation achieves strong performance when transferred to classification tasks such as ImageNet and VTAB. The aligned visual and language representations enables zero-shot image classification and also set new state-of-the-art results on Flickr30K and MSCOCO image-text retrieval benchmarks, even when compared with more sophisticated cross-attention models. The representations also enable cross-modality search with complex text and text + image queries.* -## Usage +This model was contributed by [Alara Dirik](https://huggingface.co/adirik). +The original code is not released, this implementation is based on the Kakao Brain implementation based on the original paper. + +## Usage example ALIGN uses EfficientNet to get visual features and BERT to get the text features. Both the text and visual features are then projected to a latent space with identical dimension. The dot product between the projected image and text features is then used as a similarity score. @@ -56,9 +59,6 @@ probs = logits_per_image.softmax(dim=1) print(probs) ``` -This model was contributed by [Alara Dirik](https://huggingface.co/adirik). -The original code is not released, this implementation is based on the Kakao Brain implementation based on the original paper. - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ALIGN. @@ -69,7 +69,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we will review it. The resource should ideally demonstrate something new instead of duplicating an existing resource. - ## AlignConfig [[autodoc]] AlignConfig diff --git a/docs/source/en/model_doc/altclip.md b/docs/source/en/model_doc/altclip.md index 23cdcb63fbd2..b1fc9b382694 100644 --- a/docs/source/en/model_doc/altclip.md +++ b/docs/source/en/model_doc/altclip.md @@ -31,7 +31,9 @@ teacher learning and contrastive learning. We validate our method through evalua performances on a bunch of tasks including ImageNet-CN, Flicker30k- CN, and COCO-CN. Further, we obtain very close performances with CLIP on almost all tasks, suggesting that one can simply alter the text encoder in CLIP for extended capabilities such as multilingual understanding.* -## Usage +This model was contributed by [jongjyh](https://huggingface.co/jongjyh). + +## Usage tips and example The usage of AltCLIP is very similar to the CLIP. the difference between CLIP is the text encoder. Note that we use bidirectional attention instead of casual attention and we take the [CLS] token in XLM-R to represent text embedding. @@ -50,7 +52,6 @@ The [`AltCLIPProcessor`] wraps a [`CLIPImageProcessor`] and a [`XLMRobertaTokeni encode the text and prepare the images. The following example shows how to get the image-text similarity scores using [`AltCLIPProcessor`] and [`AltCLIPModel`]. - ```python >>> from PIL import Image >>> import requests @@ -70,11 +71,11 @@ encode the text and prepare the images. 
The following example shows how to get t >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` -Tips: + -This model is build on `CLIPModel`, so use it like a original CLIP. +This model is based on `CLIPModel`, use it like you would use the original [CLIP](clip). -This model was contributed by [jongjyh](https://huggingface.co/jongjyh). + ## AltCLIPConfig diff --git a/docs/source/en/model_doc/audio-spectrogram-transformer.md b/docs/source/en/model_doc/audio-spectrogram-transformer.md index df9fe78c2d4c..587ec85d09b6 100644 --- a/docs/source/en/model_doc/audio-spectrogram-transformer.md +++ b/docs/source/en/model_doc/audio-spectrogram-transformer.md @@ -26,15 +26,6 @@ The abstract from the paper is the following: *In the past decade, convolutional neural networks (CNNs) have been widely adopted as the main building block for end-to-end audio classification models, which aim to learn a direct mapping from audio spectrograms to corresponding labels. To better capture long-range global context, a recent trend is to add a self-attention mechanism on top of the CNN, forming a CNN-attention hybrid model. However, it is unclear whether the reliance on a CNN is necessary, and if neural networks purely based on attention are sufficient to obtain good performance in audio classification. In this paper, we answer the question by introducing the Audio Spectrogram Transformer (AST), the first convolution-free, purely attention-based model for audio classification. We evaluate AST on various audio classification benchmarks, where it achieves new state-of-the-art results of 0.485 mAP on AudioSet, 95.6% accuracy on ESC-50, and 98.1% accuracy on Speech Commands V2.* -Tips: - -- When fine-tuning the Audio Spectrogram Transformer (AST) on your own dataset, it's recommended to take care of the input normalization (to make -sure the input has mean of 0 and std of 0.5). [`ASTFeatureExtractor`] takes care of this. Note that it uses the AudioSet -mean and std by default. You can check [`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py) to see how -the authors compute the stats for a downstream dataset. -- Note that the AST needs a low learning rate (the authors use a 10 times smaller learning rate compared to their CNN model proposed in the -[PSLA paper](https://arxiv.org/abs/2102.01243)) and converges quickly, so please search for a suitable learning rate and learning rate scheduler for your task. - drawing @@ -43,6 +34,15 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/YuanGongND/ast). +## Usage tips + +- When fine-tuning the Audio Spectrogram Transformer (AST) on your own dataset, it's recommended to take care of the input normalization (to make +sure the input has mean of 0 and std of 0.5). [`ASTFeatureExtractor`] takes care of this. Note that it uses the AudioSet +mean and std by default. You can check [`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py) to see how +the authors compute the stats for a downstream dataset. +- Note that the AST needs a low learning rate (the authors use a 10 times smaller learning rate compared to their CNN model proposed in the +[PSLA paper](https://arxiv.org/abs/2102.01243)) and converges quickly, so please search for a suitable learning rate and learning rate scheduler for your task. 
+ ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with the Audio Spectrogram Transformer. diff --git a/docs/source/en/model_doc/autoformer.md b/docs/source/en/model_doc/autoformer.md index 20977c71cae9..bb423e941c78 100644 --- a/docs/source/en/model_doc/autoformer.md +++ b/docs/source/en/model_doc/autoformer.md @@ -39,13 +39,11 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] AutoformerConfig - ## AutoformerModel [[autodoc]] AutoformerModel - forward - ## AutoformerForPrediction [[autodoc]] AutoformerForPrediction diff --git a/docs/source/en/model_doc/bark.md b/docs/source/en/model_doc/bark.md index e287df13fe04..0d9127d917d2 100644 --- a/docs/source/en/model_doc/bark.md +++ b/docs/source/en/model_doc/bark.md @@ -14,8 +14,7 @@ specific language governing permissions and limitations under the License. ## Overview -Bark is a transformer-based text-to-speech model proposed by Suno AI in [suno-ai/bark](https://github.com/suno-ai/bark). - +Bark is a transformer-based text-to-speech model proposed by Suno AI in [suno-ai/bark](https://github.com/suno-ai/bark). Bark is made of 4 main models: @@ -26,6 +25,9 @@ Bark is made of 4 main models: It should be noted that each of the first three modules can support conditional speaker embeddings to condition the output sound according to specific predefined voice. +This model was contributed by [Yoach Lacombe (ylacombe)](https://huggingface.co/ylacombe) and [Sanchit Gandhi (sanchit-gandhi)](https://github.com/sanchit-gandhi). +The original code can be found [here](https://github.com/suno-ai/bark). + ### Optimizing Bark Bark can be optimized with just a few extra lines of code, which **significantly reduces its memory footprint** and **accelerates inference**. @@ -86,7 +88,7 @@ model.enable_cpu_offload() Find out more on inference optimization techniques [here](https://huggingface.co/docs/transformers/perf_infer_gpu_one). -### Tips +### Usage tips Suno offers a library of voice presets in a number of languages [here](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c). These presets are also uploaded in the hub [here](https://huggingface.co/suno/bark-small/tree/main/speaker_embeddings) or [here](https://huggingface.co/suno/bark/tree/main/speaker_embeddings). @@ -142,11 +144,6 @@ To save the audio, simply take the sample rate from the model config and some sc >>> write_wav("bark_generation.wav", sample_rate, audio_array) ``` - -This model was contributed by [Yoach Lacombe (ylacombe)](https://huggingface.co/ylacombe) and [Sanchit Gandhi (sanchit-gandhi)](https://github.com/sanchit-gandhi). -The original code can be found [here](https://github.com/suno-ai/bark). - - ## BarkConfig [[autodoc]] BarkConfig diff --git a/docs/source/en/model_doc/bart.md b/docs/source/en/model_doc/bart.md index dcf149fd85e1..7986228915cf 100644 --- a/docs/source/en/model_doc/bart.md +++ b/docs/source/en/model_doc/bart.md @@ -25,9 +25,6 @@ rendered properly in your Markdown viewer. 
-**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign -@patrickvonplaten - ## Overview The Bart model was proposed in [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, @@ -45,7 +42,9 @@ According to the abstract, state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 6 ROUGE. -Tips: +This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The authors' code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/bart). + +## Usage tips: - BART is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -57,18 +56,6 @@ Tips: * permute sentences * rotate the document to make it start at a specific token -This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The Authors' code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/bart). - - -### Examples - -- Examples and scripts for fine-tuning BART and other models for sequence to sequence tasks can be found in - [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). -- An example of how to train [`BartForConditionalGeneration`] with a Hugging Face `datasets` - object can be found in this [forum discussion](https://discuss.huggingface.co/t/train-bart-for-conditional-generation-e-g-summarization/1904). -- [Distilled checkpoints](https://huggingface.co/models?search=distilbart) are described in this [paper](https://arxiv.org/abs/2010.13002). - - ## Implementation Notes - Bart doesn't use `token_type_ids` for sequence classification. Use [`BartTokenizer`] or @@ -112,6 +99,7 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - [`BartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb). - [`TFBartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb). - [`FlaxBartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/summarization). +- An example of how to train [`BartForConditionalGeneration`] with a Hugging Face `datasets` object can be found in this [forum discussion](https://discuss.huggingface.co/t/train-bart-for-conditional-generation-e-g-summarization/1904) - [Summarization](https://huggingface.co/course/chapter7/5?fw=pt#summarization) chapter of the 🤗 Hugging Face course. - [Summarization task guide](../tasks/summarization) @@ -134,6 +122,7 @@ See also: - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) +- [Distilled checkpoints](https://huggingface.co/models?search=distilbart) are described in this [paper](https://arxiv.org/abs/2010.13002). 
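As a minimal illustration of the summarization use case covered by the resources above (the distilled checkpoint and the input text are only examples):

```python
from transformers import BartForConditionalGeneration, BartTokenizer

# one of the distilled BART checkpoints mentioned above
checkpoint = "sshleifer/distilbart-cnn-12-6"
tokenizer = BartTokenizer.from_pretrained(checkpoint)
model = BartForConditionalGeneration.from_pretrained(checkpoint)

article = (
    "The tower is 324 metres tall, about the same height as an 81-storey building, "
    "and is the tallest structure in Paris."
)
inputs = tokenizer(article, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, num_beams=4, max_length=60)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```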
## BartConfig @@ -150,6 +139,10 @@ See also: [[autodoc]] BartTokenizerFast - all + + + + ## BartModel [[autodoc]] BartModel @@ -175,6 +168,9 @@ See also: [[autodoc]] BartForCausalLM - forward + + + ## TFBartModel [[autodoc]] TFBartModel @@ -190,6 +186,9 @@ See also: [[autodoc]] TFBartForSequenceClassification - call + + + ## FlaxBartModel [[autodoc]] FlaxBartModel @@ -222,3 +221,8 @@ See also: [[autodoc]] FlaxBartForCausalLM - __call__ + + + + + diff --git a/docs/source/en/model_doc/barthez.md b/docs/source/en/model_doc/barthez.md index fdeb8e2fed23..1b571e242f47 100644 --- a/docs/source/en/model_doc/barthez.md +++ b/docs/source/en/model_doc/barthez.md @@ -38,8 +38,14 @@ provides a significant boost over vanilla BARThez, and is on par with or outperf This model was contributed by [moussakam](https://huggingface.co/moussakam). The Authors' code can be found [here](https://github.com/moussaKam/BARThez). + -### Examples +BARThez implementation is the same as BART, except for tokenization. Refer to [BART documentation](bart) for information on +configuration classes and their parameters. BARThez-specific tokenizers are documented below. + + + +## Resources - BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check: [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). diff --git a/docs/source/en/model_doc/bartpho.md b/docs/source/en/model_doc/bartpho.md index 3529c11a7ed2..8f0a5f8bfe24 100644 --- a/docs/source/en/model_doc/bartpho.md +++ b/docs/source/en/model_doc/bartpho.md @@ -29,7 +29,9 @@ on a downstream task of Vietnamese text summarization show that in both automati outperforms the strong baseline mBART and improves the state-of-the-art. We release BARTpho to facilitate future research and applications of generative Vietnamese NLP tasks.* -Example of use: +This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BARTpho). + +## Usage example ```python >>> import torch @@ -54,7 +56,7 @@ Example of use: >>> features = bartpho(**input_ids) ``` -Tips: +## Usage tips - Following mBART, BARTpho uses the "large" architecture of BART with an additional layer-normalization layer on top of both the encoder and decoder. Thus, usage examples in the [documentation of BART](bart), when adapting to use @@ -79,8 +81,6 @@ Tips: Other languages, if employing this pre-trained multilingual SentencePiece model "vocab_file" for subword segmentation, can reuse BartphoTokenizer with their own language-specialized "monolingual_vocab_file". -This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BARTpho). - ## BartphoTokenizer [[autodoc]] BartphoTokenizer diff --git a/docs/source/en/model_doc/beit.md b/docs/source/en/model_doc/beit.md index 69586724713d..f7605ebcdf90 100644 --- a/docs/source/en/model_doc/beit.md +++ b/docs/source/en/model_doc/beit.md @@ -39,7 +39,10 @@ with previous pre-training methods. For example, base-size BEiT achieves 83.2% t significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).* -Tips: +This model was contributed by [nielsr](https://huggingface.co/nielsr). 
The JAX/FLAX version of this model was +contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit). + +## Usage tips - BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They outperform both the [original model (ViT)](vit) as well as [Data-efficient Image Transformers (DeiT)](deit) when fine-tuned on ImageNet-1K and CIFAR-100. You can check out demo notebooks regarding inference as well as @@ -68,9 +71,6 @@ alt="drawing" width="600"/> BEiT pre-training. Taken from the original paper. -This model was contributed by [nielsr](https://huggingface.co/nielsr). The JAX/FLAX version of this model was -contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BEiT. @@ -107,6 +107,9 @@ If you're interested in submitting a resource to be included here, please feel f - preprocess - post_process_semantic_segmentation + + + ## BeitModel [[autodoc]] BeitModel @@ -127,6 +130,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] BeitForSemanticSegmentation - forward + + + ## FlaxBeitModel [[autodoc]] FlaxBeitModel @@ -141,3 +147,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] FlaxBeitForImageClassification - __call__ + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/bert-generation.md b/docs/source/en/model_doc/bert-generation.md index 9cc7bac6c7e4..7edbf38694ed 100644 --- a/docs/source/en/model_doc/bert-generation.md +++ b/docs/source/en/model_doc/bert-generation.md @@ -33,10 +33,13 @@ GPT-2 and RoBERTa checkpoints and conducted an extensive empirical study on the encoder and decoder, with these checkpoints. Our models result in new state-of-the-art results on Machine Translation, Text Summarization, Sentence Splitting, and Sentence Fusion.* -Usage: +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be +found [here](https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder). -- The model can be used in combination with the [`EncoderDecoderModel`] to leverage two pretrained - BERT checkpoints for subsequent fine-tuning. +## Usage examples and tips + +The model can be used in combination with the [`EncoderDecoderModel`] to leverage two pretrained BERT checkpoints for +subsequent fine-tuning: ```python >>> # leverage checkpoints for Bert2Bert model... @@ -61,8 +64,7 @@ Usage: >>> loss.backward() ``` -- Pretrained [`EncoderDecoderModel`] are also directly available in the model hub, e.g., - +Pretrained [`EncoderDecoderModel`] are also directly available in the model hub, e.g.: ```python >>> # instantiate sentence fusion model @@ -85,9 +87,6 @@ Tips: - For summarization, sentence splitting, sentence fusion and translation, no special tokens are required for the input. Therefore, no EOS token should be added to the end of the input. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be -found [here](https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder). 
- ## BertGenerationConfig [[autodoc]] BertGenerationConfig diff --git a/docs/source/en/model_doc/bert-japanese.md b/docs/source/en/model_doc/bert-japanese.md index 208b775307a6..d68bb221d577 100644 --- a/docs/source/en/model_doc/bert-japanese.md +++ b/docs/source/en/model_doc/bert-japanese.md @@ -67,11 +67,15 @@ Example of using a model with Character tokenization: >>> outputs = bertjapanese(**inputs) ``` -Tips: +This model was contributed by [cl-tohoku](https://huggingface.co/cl-tohoku). -- This implementation is the same as BERT, except for tokenization method. Refer to the [documentation of BERT](bert) for more usage examples. + + +This implementation is the same as BERT, except for tokenization method. Refer to [BERT documentation](bert) for +API reference information. + + -This model was contributed by [cl-tohoku](https://huggingface.co/cl-tohoku). ## BertJapaneseTokenizer diff --git a/docs/source/en/model_doc/bert.md b/docs/source/en/model_doc/bert.md index 19d15cfc05a4..bdf4566b43ad 100644 --- a/docs/source/en/model_doc/bert.md +++ b/docs/source/en/model_doc/bert.md @@ -45,7 +45,9 @@ language processing tasks, including pushing the GLUE score to 80.5% (7.7% point accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).* -Tips: +This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/bert). + +## Usage tips - BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -59,10 +61,6 @@ Tips: - The model must predict the original sentence, but has a second objective: inputs are two sentences A and B (with a separation token in between). With probability 50%, the sentences are consecutive in the corpus, in the remaining 50% they are not related. The model has to predict if the sentences are consecutive or not. - - -This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/bert). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. 
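As a quick illustration of the masked language modeling objective described in the usage tips above, here is a minimal sketch using the `fill-mask` pipeline. The `bert-base-uncased` checkpoint and the example sentence are assumptions made for the demo:

```python
from transformers import pipeline

# assumed demo checkpoint; any BERT checkpoint with an MLM head works the same way
unmasker = pipeline("fill-mask", model="bert-base-uncased")

# the pipeline predicts the token hidden behind [MASK], mirroring the MLM pretraining objective
for prediction in unmasker("The capital of France is [MASK]."):
    print(prediction["token_str"], round(prediction["score"], 3))
```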
@@ -137,14 +135,23 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - create_token_type_ids_from_sequences - save_vocabulary + + + ## BertTokenizerFast [[autodoc]] BertTokenizerFast + + + ## TFBertTokenizer [[autodoc]] TFBertTokenizer + + + ## Bert specific outputs [[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput @@ -153,6 +160,10 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput + + + + ## BertModel [[autodoc]] BertModel @@ -198,6 +209,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] BertForQuestionAnswering - forward + + + ## TFBertModel [[autodoc]] TFBertModel @@ -243,6 +257,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFBertForQuestionAnswering - call + + + ## FlaxBertModel [[autodoc]] FlaxBertModel @@ -287,3 +304,8 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxBertForQuestionAnswering - __call__ + + + + + diff --git a/docs/source/en/model_doc/bertweet.md b/docs/source/en/model_doc/bertweet.md index 50629445aee8..c4c883b21ad7 100644 --- a/docs/source/en/model_doc/bertweet.md +++ b/docs/source/en/model_doc/bertweet.md @@ -28,7 +28,9 @@ al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa- 2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks: Part-of-speech tagging, Named-entity recognition and text classification.* -Example of use: +This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BERTweet). + +## Usage example ```python >>> import torch @@ -55,7 +57,12 @@ Example of use: >>> # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base") ``` -This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BERTweet). + + +This implementation is the same as BERT, except for tokenization method. Refer to [BERT documentation](bert) for +API reference information. + + ## BertweetTokenizer diff --git a/docs/source/en/model_doc/big_bird.md b/docs/source/en/model_doc/big_bird.md index b8bbb388d6e9..3d1ef91d5606 100644 --- a/docs/source/en/model_doc/big_bird.md +++ b/docs/source/en/model_doc/big_bird.md @@ -41,7 +41,10 @@ sequence as part of the sparse attention mechanism. The proposed sparse attentio BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.* -Tips: +This model was contributed by [vasudevgupta](https://huggingface.co/vasudevgupta). The original code can be found +[here](https://github.com/google-research/bigbird). + +## Usage tips - For an in-detail explanation on how BigBird's attention works, see [this blog post](https://huggingface.co/blog/big-bird). - BigBird comes with 2 implementations: **original_full** & **block_sparse**. For the sequence length < 1024, using @@ -53,10 +56,8 @@ Tips: - BigBird is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. -This model was contributed by [vasudevgupta](https://huggingface.co/vasudevgupta). The original code can be found -[here](https://github.com/google-research/bigbird). 
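As a sketch of how the two attention implementations mentioned above are typically selected, the snippet below overrides configuration attributes at load time. The checkpoint name and the specific values are illustrative assumptions:

```python
from transformers import BigBirdModel

# default: block sparse attention
model = BigBirdModel.from_pretrained("google/bigbird-roberta-base")

# example override of the sparse-attention hyperparameters (values shown are arbitrary)
model = BigBirdModel.from_pretrained(
    "google/bigbird-roberta-base", block_size=16, num_random_blocks=2
)

# switch to full attention, e.g. for short sequences
model = BigBirdModel.from_pretrained(
    "google/bigbird-roberta-base", attention_type="original_full"
)
```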
-## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -85,6 +86,9 @@ This model was contributed by [vasudevgupta](https://huggingface.co/vasudevgupta [[autodoc]] models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput + + + ## BigBirdModel [[autodoc]] BigBirdModel @@ -125,6 +129,9 @@ This model was contributed by [vasudevgupta](https://huggingface.co/vasudevgupta [[autodoc]] BigBirdForQuestionAnswering - forward + + + ## FlaxBigBirdModel [[autodoc]] FlaxBigBirdModel @@ -164,3 +171,8 @@ This model was contributed by [vasudevgupta](https://huggingface.co/vasudevgupta [[autodoc]] FlaxBigBirdForQuestionAnswering - __call__ + + + + + diff --git a/docs/source/en/model_doc/bigbird_pegasus.md b/docs/source/en/model_doc/bigbird_pegasus.md index d767f548a768..003e5643719b 100644 --- a/docs/source/en/model_doc/bigbird_pegasus.md +++ b/docs/source/en/model_doc/bigbird_pegasus.md @@ -41,7 +41,9 @@ sequence as part of the sparse attention mechanism. The proposed sparse attentio BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.* -Tips: +The original code can be found [here](https://github.com/google-research/bigbird). + +## Usage tips - For an in-detail explanation on how BigBird's attention works, see [this blog post](https://huggingface.co/blog/big-bird). - BigBird comes with 2 implementations: **original_full** & **block_sparse**. For the sequence length < 1024, using @@ -54,9 +56,7 @@ Tips: - BigBird is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. -The original code can be found [here](https://github.com/google-research/bigbird). - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) diff --git a/docs/source/en/model_doc/biogpt.md b/docs/source/en/model_doc/biogpt.md index 29327df21a02..1cac6d10990d 100644 --- a/docs/source/en/model_doc/biogpt.md +++ b/docs/source/en/model_doc/biogpt.md @@ -25,15 +25,15 @@ The abstract from the paper is the following: *Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98%, 38.42% and 40.76% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2% accuracy on PubMedQA, creating a new record. 
Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.* -Tips: +This model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/BioGPT). + +## Usage tips -- BioGPT is a model with absolute position embeddings so it’s usually advised to pad the inputs on the right rather than the left. +- BioGPT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - BioGPT was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows BioGPT to generate syntactically coherent text as it can be observed in the run_generation.py example script. - The model can take the `past_key_values` (for PyTorch) as input, which is the previously computed key/value attention pairs. Using this (past_key_values or past) value prevents the model from re-computing pre-computed values in the context of text generation. For PyTorch, see past_key_values argument of the BioGptForCausalLM.forward() method for more information on its usage. -This model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/BioGPT). - -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) diff --git a/docs/source/en/model_doc/bit.md b/docs/source/en/model_doc/bit.md index 80b9fdd2caff..7f8a8ea67c45 100644 --- a/docs/source/en/model_doc/bit.md +++ b/docs/source/en/model_doc/bit.md @@ -25,15 +25,15 @@ The abstract from the paper is the following: *Transfer of pre-trained representations improves sample efficiency and simplifies hyperparameter tuning when training deep neural networks for vision. We revisit the paradigm of pre-training on large supervised datasets and fine-tuning the model on a target task. We scale up pre-training, and propose a simple recipe that we call Big Transfer (BiT). By combining a few carefully selected components, and transferring using a simple heuristic, we achieve strong performance on over 20 datasets. BiT performs well across a surprisingly wide range of data regimes -- from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark (VTAB). On small datasets, BiT attains 76.8% on ILSVRC-2012 with 10 examples per class, and 97.0% on CIFAR-10 with 10 examples per class. We conduct detailed analysis of the main components that lead to high transfer performance.* -Tips: +This model was contributed by [nielsr](https://huggingface.co/nielsr). +The original code can be found [here](https://github.com/google-research/big_transfer). + +## Usage tips - BiT models are equivalent to ResNetv2 in terms of architecture, except that: 1) all batch normalization layers are replaced by [group normalization](https://arxiv.org/abs/1803.08494), 2) [weight standardization](https://arxiv.org/abs/1903.10520) is used for convolutional layers. The authors show that the combination of both is useful for training with large batch sizes, and has a significant impact on transfer learning. -This model was contributed by [nielsr](https://huggingface.co/nielsr). -The original code can be found [here](https://github.com/google-research/big_transfer). 
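To complement the architecture notes above, here is a minimal image-classification sketch. The `google/bit-50` checkpoint and the demo image URL are illustrative assumptions:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, BitForImageClassification

# illustrative checkpoint; other BiT checkpoints follow the same pattern
processor = AutoImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # standard demo image
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```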
- ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BiT. @@ -62,5 +62,4 @@ If you're interested in submitting a resource to be included here, please feel f ## BitForImageClassification [[autodoc]] BitForImageClassification - - forward - + - forward \ No newline at end of file diff --git a/docs/source/en/model_doc/blenderbot-small.md b/docs/source/en/model_doc/blenderbot-small.md index c126bc9b1451..d5f4a7d849b7 100644 --- a/docs/source/en/model_doc/blenderbot-small.md +++ b/docs/source/en/model_doc/blenderbot-small.md @@ -40,15 +40,16 @@ and code publicly available. Human evaluations show our best models are superior dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.* -Tips: - -- Blenderbot Small is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than - the left. - This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be found [here](https://github.com/facebookresearch/ParlAI). -## Documentation resources +## Usage tips + +Blenderbot Small is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than +the left. + + +## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) @@ -70,6 +71,9 @@ found [here](https://github.com/facebookresearch/ParlAI). [[autodoc]] BlenderbotSmallTokenizerFast + + + ## BlenderbotSmallModel [[autodoc]] BlenderbotSmallModel @@ -85,6 +89,9 @@ found [here](https://github.com/facebookresearch/ParlAI). [[autodoc]] BlenderbotSmallForCausalLM - forward + + + ## TFBlenderbotSmallModel [[autodoc]] TFBlenderbotSmallModel @@ -95,6 +102,9 @@ found [here](https://github.com/facebookresearch/ParlAI). [[autodoc]] TFBlenderbotSmallForConditionalGeneration - call + + + ## FlaxBlenderbotSmallModel [[autodoc]] FlaxBlenderbotSmallModel @@ -108,3 +118,6 @@ found [here](https://github.com/facebookresearch/ParlAI). - __call__ - encode - decode + + + diff --git a/docs/source/en/model_doc/blenderbot.md b/docs/source/en/model_doc/blenderbot.md index 5a10af77b698..42e1710cb2d5 100644 --- a/docs/source/en/model_doc/blenderbot.md +++ b/docs/source/en/model_doc/blenderbot.md @@ -16,8 +16,6 @@ rendered properly in your Markdown viewer. # Blenderbot -**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) . - ## Overview The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, @@ -36,26 +34,14 @@ and code publicly available. Human evaluations show our best models are superior dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.* -Tips: - -- Blenderbot is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than - the left. - This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The authors' code can be found [here](https://github.com/facebookresearch/ParlAI) . 
+## Usage tips and example -## Implementation Notes - -- Blenderbot uses a standard [seq2seq model transformer](https://arxiv.org/pdf/1706.03762.pdf) based architecture. -- Available checkpoints can be found in the [model hub](https://huggingface.co/models?search=blenderbot). -- This is the *default* Blenderbot model class. However, some smaller checkpoints, such as - `facebook/blenderbot_small_90M`, have a different architecture and consequently should be used with - [BlenderbotSmall](blenderbot-small). - - -## Usage +Blenderbot is a model with absolute position embeddings so it's usually advised to pad the inputs on the right +rather than the left. -Here is an example of model usage: +An example: ```python >>> from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration @@ -70,7 +56,16 @@ Here is an example of model usage: [" That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"] ``` -## Documentation resources +## Implementation Notes + +- Blenderbot uses a standard [seq2seq model transformer](https://arxiv.org/pdf/1706.03762.pdf) based architecture. +- Available checkpoints can be found in the [model hub](https://huggingface.co/models?search=blenderbot). +- This is the *default* Blenderbot model class. However, some smaller checkpoints, such as + `facebook/blenderbot_small_90M`, have a different architecture and consequently should be used with + [BlenderbotSmall](blenderbot-small). + + +## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) @@ -90,9 +85,13 @@ Here is an example of model usage: [[autodoc]] BlenderbotTokenizerFast - build_inputs_with_special_tokens + + + + ## BlenderbotModel -See `transformers.BartModel` for arguments to *forward* and *generate* +See [`~transformers.BartModel`] for arguments to *forward* and *generate* [[autodoc]] BlenderbotModel - forward @@ -109,6 +108,9 @@ See [`~transformers.BartForConditionalGeneration`] for arguments to *forward* an [[autodoc]] BlenderbotForCausalLM - forward + + + ## TFBlenderbotModel [[autodoc]] TFBlenderbotModel @@ -119,6 +121,9 @@ See [`~transformers.BartForConditionalGeneration`] for arguments to *forward* an [[autodoc]] TFBlenderbotForConditionalGeneration - call + + + ## FlaxBlenderbotModel [[autodoc]] FlaxBlenderbotModel @@ -132,3 +137,8 @@ See [`~transformers.BartForConditionalGeneration`] for arguments to *forward* an - __call__ - encode - decode + + + + + diff --git a/docs/source/en/model_doc/blip-2.md b/docs/source/en/model_doc/blip-2.md index 0890e612561a..d2a47e7af8f1 100644 --- a/docs/source/en/model_doc/blip-2.md +++ b/docs/source/en/model_doc/blip-2.md @@ -27,11 +27,6 @@ The abstract from the paper is the following: *The cost of vision-and-language pre-training has become increasingly prohibitive due to end-to-end training of large-scale models. This paper proposes BLIP-2, a generic and efficient pre-training strategy that bootstraps vision-language pre-training from off-the-shelf frozen pre-trained image encoders and frozen large language models. BLIP-2 bridges the modality gap with a lightweight Querying Transformer, which is pre-trained in two stages. The first stage bootstraps vision-language representation learning from a frozen image encoder. The second stage bootstraps vision-to-language generative learning from a frozen language model. 
BLIP-2 achieves state-of-the-art performance on various vision-language tasks, despite having significantly fewer trainable parameters than existing methods. For example, our model outperforms Flamingo80B by 8.7% on zero-shot VQAv2 with 54x fewer trainable parameters. We also demonstrate the model's emerging capabilities of zero-shot image-to-text generation that can follow natural language instructions.* -Tips: - -- BLIP-2 can be used for conditional text generation given an image and an optional text prompt. At inference time, it's recommended to use the [`generate`] method. -- One can use [`Blip2Processor`] to prepare images for the model, and decode the predicted tokens ID's back to text. - drawing @@ -40,6 +35,11 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/salesforce/LAVIS/tree/5ee63d688ba4cebff63acee04adaef2dee9af207). +## Usage tips + +- BLIP-2 can be used for conditional text generation given an image and an optional text prompt. At inference time, it's recommended to use the [`generate`] method. +- One can use [`Blip2Processor`] to prepare images for the model, and decode the predicted tokens ID's back to text. + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BLIP-2. diff --git a/docs/source/en/model_doc/blip.md b/docs/source/en/model_doc/blip.md index 8afed63311f8..bc122c942a67 100644 --- a/docs/source/en/model_doc/blip.md +++ b/docs/source/en/model_doc/blip.md @@ -20,7 +20,7 @@ rendered properly in your Markdown viewer. The BLIP model was proposed in [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi. -BLIP is a model that is able to perform various multi-modal tasks including +BLIP is a model that is able to perform various multi-modal tasks including: - Visual Question Answering - Image-Text retrieval (Image-text matching) - Image Captioning @@ -39,7 +39,6 @@ The original code can be found [here](https://github.com/salesforce/BLIP). - [Jupyter notebook](https://github.com/huggingface/notebooks/blob/main/examples/image_captioning_blip.ipynb) on how to fine-tune BLIP for image captioning on a custom dataset - ## BlipConfig [[autodoc]] BlipConfig @@ -57,12 +56,14 @@ The original code can be found [here](https://github.com/salesforce/BLIP). [[autodoc]] BlipProcessor - ## BlipImageProcessor [[autodoc]] BlipImageProcessor - preprocess + + + ## BlipModel [[autodoc]] BlipModel @@ -75,30 +76,29 @@ The original code can be found [here](https://github.com/salesforce/BLIP). [[autodoc]] BlipTextModel - forward - ## BlipVisionModel [[autodoc]] BlipVisionModel - forward - ## BlipForConditionalGeneration [[autodoc]] BlipForConditionalGeneration - forward - ## BlipForImageTextRetrieval [[autodoc]] BlipForImageTextRetrieval - forward - ## BlipForQuestionAnswering [[autodoc]] BlipForQuestionAnswering - forward + + + ## TFBlipModel [[autodoc]] TFBlipModel @@ -111,26 +111,24 @@ The original code can be found [here](https://github.com/salesforce/BLIP). 
[[autodoc]] TFBlipTextModel - call - ## TFBlipVisionModel [[autodoc]] TFBlipVisionModel - call - ## TFBlipForConditionalGeneration [[autodoc]] TFBlipForConditionalGeneration - call - ## TFBlipForImageTextRetrieval [[autodoc]] TFBlipForImageTextRetrieval - call - ## TFBlipForQuestionAnswering [[autodoc]] TFBlipForQuestionAnswering - - call \ No newline at end of file + - call + + diff --git a/docs/source/en/model_doc/bloom.md b/docs/source/en/model_doc/bloom.md index 3c155fa58782..a1d39d13ad00 100644 --- a/docs/source/en/model_doc/bloom.md +++ b/docs/source/en/model_doc/bloom.md @@ -56,16 +56,20 @@ See also: [[autodoc]] BloomConfig - all -## BloomModel - -[[autodoc]] BloomModel - - forward - ## BloomTokenizerFast [[autodoc]] BloomTokenizerFast - all + + + + +## BloomModel + +[[autodoc]] BloomModel + - forward + ## BloomForCausalLM [[autodoc]] BloomForCausalLM @@ -86,6 +90,9 @@ See also: [[autodoc]] BloomForQuestionAnswering - forward + + + ## FlaxBloomModel [[autodoc]] FlaxBloomModel @@ -95,3 +102,8 @@ See also: [[autodoc]] FlaxBloomForCausalLM - __call__ + + + + + diff --git a/docs/source/en/model_doc/bort.md b/docs/source/en/model_doc/bort.md index dccf2b560b68..1542d464d9fd 100644 --- a/docs/source/en/model_doc/bort.md +++ b/docs/source/en/model_doc/bort.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. -This model is in maintenance mode only, so we won't accept any new PRs changing its code. +This model is in maintenance mode only, we do not accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0. You can do so by running the following command: `pip install -U transformers==4.30.0`. @@ -43,13 +43,15 @@ hardware. It is also 7.9x faster on a CPU, as well as being better performing th architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%, absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.* -Tips: +This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/alexa/bort/). + +## Usage tips -- BORT's model architecture is based on BERT, so one can refer to [BERT's documentation page](bert) for the - model's API as well as usage examples. -- BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, so one can refer to [RoBERTa's documentation page](roberta) for the tokenizer's API as well as usage examples. +- BORT's model architecture is based on BERT, refer to [BERT's documentation page](bert) for the + model's API reference as well as usage examples. +- BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, refer to [RoBERTa's documentation page](roberta) for the tokenizer's API reference as well as usage examples. - BORT requires a specific fine-tuning algorithm, called [Agora](https://adewynter.github.io/notes/bort_algorithms_and_applications.html#fine-tuning-with-algebraic-topology) , that is sadly not open-sourced yet. It would be very useful for the community, if someone tries to implement the algorithm to make BORT fine-tuning work. -This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/alexa/bort/). 
+ diff --git a/docs/source/en/model_doc/bridgetower.md b/docs/source/en/model_doc/bridgetower.md index ba98cea91d21..013fea06c277 100644 --- a/docs/source/en/model_doc/bridgetower.md +++ b/docs/source/en/model_doc/bridgetower.md @@ -37,7 +37,9 @@ alt="drawing" width="600"/> BridgeTower architecture. Taken from the original paper. -## Usage +This model was contributed by [Anahita Bhiwandiwalla](https://huggingface.co/anahita-b), [Tiep Le](https://huggingface.co/Tile) and [Shaoyen Tseng](https://huggingface.co/shaoyent). The original code can be found [here](https://github.com/microsoft/BridgeTower). + +## Usage tips and examples BridgeTower consists of a visual encoder, a textual encoder and cross-modal encoder with multiple lightweight bridge layers. The goal of this approach was to build a bridge between each uni-modal encoder and the cross-modal encoder to enable comprehensive and detailed interaction at each layer of the cross-modal encoder. @@ -116,9 +118,6 @@ The following example shows how to run masked language modeling using [`BridgeTo .a cat looking out of the window. ``` -This model was contributed by [Anahita Bhiwandiwalla](https://huggingface.co/anahita-b), [Tiep Le](https://huggingface.co/Tile) and [Shaoyen Tseng](https://huggingface.co/shaoyent). The original code can be found [here](https://github.com/microsoft/BridgeTower). - - Tips: - This implementation of BridgeTower uses [`RobertaTokenizer`] to generate text embeddings and OpenAI's CLIP/ViT model to compute visual embeddings. diff --git a/docs/source/en/model_doc/bros.md b/docs/source/en/model_doc/bros.md index 1c8e3f50605c..419e725e75e8 100644 --- a/docs/source/en/model_doc/bros.md +++ b/docs/source/en/model_doc/bros.md @@ -31,12 +31,13 @@ AMLM is a 2D version of TMLM. It randomly masks text tokens and predicts with th BROS achieves comparable or better result on Key Information Extraction (KIE) benchmarks such as FUNSD, SROIE, CORD and SciTSR, without relying on explicit visual features. - The abstract from the paper is the following: *Key information extraction (KIE) from document images requires understanding the contextual and spatial semantics of texts in two-dimensional (2D) space. Many recent studies try to solve the task by developing pre-trained language models focusing on combining visual features from document images with texts and their layout. On the other hand, this paper tackles the problem by going back to the basic: effective combination of text and layout. Specifically, we propose a pre-trained language model, named BROS (BERT Relying On Spatiality), that encodes relative positions of texts in 2D space and learns from unlabeled documents with area-masking strategy. With this optimized training scheme for understanding texts in 2D space, BROS shows comparable or better performance compared to previous methods on four KIE benchmarks (FUNSD, SROIE*, CORD, and SciTSR) without relying on visual features. This paper also reveals two real-world challenges in KIE tasks-(1) minimizing the error from incorrect text ordering and (2) efficient learning from fewer downstream examples-and demonstrates the superiority of BROS over previous methods.* -Tips: +This model was contributed by [jinho8345](https://huggingface.co/jinho8345). The original code can be found [here](https://github.com/clovaai/bros). + +## Usage tips and examples - [`~transformers.BrosModel.forward`] requires `input_ids` and `bbox` (bounding box). Each bounding box should be in (x0, y0, x1, y1) format (top-left corner, bottom-right corner). 
Obtaining of Bounding boxes depends on external OCR system. The `x` coordinate should be normalized by document image width, and the `y` coordinate should be normalized by document image height. @@ -78,9 +79,9 @@ def make_box_first_token_mask(bboxes, words, tokenizer, max_seq_length=512): ``` -- Demo scripts can be found [here](https://github.com/clovaai/bros). +## Resources -This model was contributed by [jinho8345](https://huggingface.co/jinho8345). The original code can be found [here](https://github.com/clovaai/bros). +- Demo scripts can be found [here](https://github.com/clovaai/bros). ## BrosConfig @@ -102,13 +103,11 @@ This model was contributed by [jinho8345](https://huggingface.co/jinho8345). The [[autodoc]] BrosForTokenClassification - forward - ## BrosSpadeEEForTokenClassification [[autodoc]] BrosSpadeEEForTokenClassification - forward - ## BrosSpadeELForTokenClassification [[autodoc]] BrosSpadeELForTokenClassification diff --git a/docs/source/en/model_doc/byt5.md b/docs/source/en/model_doc/byt5.md index 2df7c4ddaa24..dc2942e33bbe 100644 --- a/docs/source/en/model_doc/byt5.md +++ b/docs/source/en/model_doc/byt5.md @@ -40,14 +40,18 @@ experiments.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be found [here](https://github.com/google-research/byt5). -ByT5's architecture is based on the T5v1.1 model, so one can refer to [T5v1.1's documentation page](t5v1.1). They + + +ByT5's architecture is based on the T5v1.1 model, refer to [T5v1.1's documentation page](t5v1.1) for the API reference. They only differ in how inputs should be prepared for the model, see the code examples below. + + Since ByT5 was pre-trained unsupervisedly, there's no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix. -### Example +## Usage example ByT5 works on raw UTF-8 bytes, so it can be used without a tokenizer: diff --git a/docs/source/en/model_doc/camembert.md b/docs/source/en/model_doc/camembert.md index 3ec4cd5dd0b1..dc217fe619bf 100644 --- a/docs/source/en/model_doc/camembert.md +++ b/docs/source/en/model_doc/camembert.md @@ -34,14 +34,16 @@ dependency parsing, named-entity recognition, and natural language inference. Ca for most of the tasks considered. We release the pretrained model for CamemBERT hoping to foster research and downstream applications for French NLP.* -Tips: +This model was contributed by [camembert](https://huggingface.co/camembert). The original code can be found [here](https://camembert-model.fr/). -- This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples - as well as the information relative to the inputs and outputs. + -This model was contributed by [camembert](https://huggingface.co/camembert). The original code can be found [here](https://camembert-model.fr/). +This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples as well +as the information relative to the inputs and outputs. -## Documentation resources + + +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -66,6 +68,9 @@ This model was contributed by [camembert](https://huggingface.co/camembert). 
The [[autodoc]] CamembertTokenizerFast + + + ## CamembertModel [[autodoc]] CamembertModel @@ -94,6 +99,9 @@ This model was contributed by [camembert](https://huggingface.co/camembert). The [[autodoc]] CamembertForQuestionAnswering + + + ## TFCamembertModel [[autodoc]] TFCamembertModel @@ -121,3 +129,7 @@ This model was contributed by [camembert](https://huggingface.co/camembert). The ## TFCamembertForQuestionAnswering [[autodoc]] TFCamembertForQuestionAnswering + + + + diff --git a/docs/source/en/model_doc/canine.md b/docs/source/en/model_doc/canine.md index 748ec63eccce..7729d8aa91d7 100644 --- a/docs/source/en/model_doc/canine.md +++ b/docs/source/en/model_doc/canine.md @@ -37,7 +37,9 @@ To use its finer-grained input effectively and efficiently, CANINE combines down sequence length, with a deep transformer stack, which encodes context. CANINE outperforms a comparable mBERT model by 2.8 F1 on TyDi QA, a challenging multilingual benchmark, despite having 28% fewer model parameters.* -Tips: +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/google-research/language/tree/master/language/canine). + +## Usage tips - CANINE uses no less than 3 Transformer encoders internally: 2 "shallow" encoders (which only consist of a single layer) and 1 "deep" encoder (which is a regular BERT encoder). First, a "shallow" encoder is used to contextualize @@ -50,19 +52,18 @@ Tips: (which has a predefined Unicode code point). For token classification tasks however, the downsampled sequence of tokens needs to be upsampled again to match the length of the original character sequence (which is 2048). The details for this can be found in the paper. -- Models: + +Model checkpoints: - [google/canine-c](https://huggingface.co/google/canine-c): Pre-trained with autoregressive character loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB). - [google/canine-s](https://huggingface.co/google/canine-s): Pre-trained with subword loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB). -This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/google-research/language/tree/master/language/canine). 
+## Usage example -### Example - -CANINE works on raw characters, so it can be used without a tokenizer: +CANINE works on raw characters, so it can be used **without a tokenizer**: ```python >>> from transformers import CanineModel @@ -96,17 +97,13 @@ sequences to the same length): >>> sequence_output = outputs.last_hidden_state ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Multiple choice task guide](../tasks/multiple_choice) -## CANINE specific outputs - -[[autodoc]] models.canine.modeling_canine.CanineModelOutputWithPooling - ## CanineConfig [[autodoc]] CanineConfig @@ -118,6 +115,10 @@ sequences to the same length): - get_special_tokens_mask - create_token_type_ids_from_sequences +## CANINE specific outputs + +[[autodoc]] models.canine.modeling_canine.CanineModelOutputWithPooling + ## CanineModel [[autodoc]] CanineModel diff --git a/docs/source/en/model_doc/chinese_clip.md b/docs/source/en/model_doc/chinese_clip.md index 430a734014c5..b2d27a844e9e 100644 --- a/docs/source/en/model_doc/chinese_clip.md +++ b/docs/source/en/model_doc/chinese_clip.md @@ -25,7 +25,9 @@ The abstract from the paper is the following: *The tremendous success of CLIP (Radford et al., 2021) has promoted the research and application of contrastive learning for vision-language pretraining. In this work, we construct a large-scale dataset of image-text pairs in Chinese, where most data are retrieved from publicly available datasets, and we pretrain Chinese CLIP models on the new dataset. We develop 5 Chinese CLIP models of multiple sizes, spanning from 77 to 958 million parameters. Furthermore, we propose a two-stage pretraining method, where the model is first trained with the image encoder frozen and then trained with all parameters being optimized, to achieve enhanced model performance. Our comprehensive experiments demonstrate that Chinese CLIP can achieve the state-of-the-art performance on MUGE, Flickr30K-CN, and COCO-CN in the setups of zero-shot learning and finetuning, and it is able to achieve competitive performance in zero-shot image classification based on the evaluation on the ELEVATER benchmark (Li et al., 2022). Our codes, pretrained models, and demos have been released.* -## Usage +The Chinese-CLIP model was contributed by [OFA-Sys](https://huggingface.co/OFA-Sys). + +## Usage example The code snippet below shows how to compute image & text features and similarities: @@ -59,15 +61,13 @@ The code snippet below shows how to compute image & text features and similariti >>> probs = logits_per_image.softmax(dim=1) # probs: [[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]] ``` -Currently, we release the following scales of pretrained Chinese-CLIP models at HF Model Hub: +Currently, following scales of pretrained Chinese-CLIP models are available on 🤗 Hub: - [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) - [OFA-Sys/chinese-clip-vit-large-patch14](https://huggingface.co/OFA-Sys/chinese-clip-vit-large-patch14) - [OFA-Sys/chinese-clip-vit-large-patch14-336px](https://huggingface.co/OFA-Sys/chinese-clip-vit-large-patch14-336px) - [OFA-Sys/chinese-clip-vit-huge-patch14](https://huggingface.co/OFA-Sys/chinese-clip-vit-huge-patch14) -The Chinese-CLIP model was contributed by [OFA-Sys](https://huggingface.co/OFA-Sys). 
- ## ChineseCLIPConfig [[autodoc]] ChineseCLIPConfig diff --git a/docs/source/en/model_doc/clap.md b/docs/source/en/model_doc/clap.md index 54082ec8aada..7bfc75e23c35 100644 --- a/docs/source/en/model_doc/clap.md +++ b/docs/source/en/model_doc/clap.md @@ -30,7 +30,6 @@ The abstract from the paper is the following: This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArtZucker) . The original code can be found [here](https://github.com/LAION-AI/Clap). - ## ClapConfig [[autodoc]] ClapConfig @@ -78,4 +77,3 @@ The original code can be found [here](https://github.com/LAION-AI/Clap). [[autodoc]] ClapAudioModelWithProjection - forward - diff --git a/docs/source/en/model_doc/clip.md b/docs/source/en/model_doc/clip.md index 29b074f1cbbc..ed4fd8df7899 100644 --- a/docs/source/en/model_doc/clip.md +++ b/docs/source/en/model_doc/clip.md @@ -40,7 +40,9 @@ for any dataset specific training. For instance, we match the accuracy of the or without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at this https URL.* -## Usage +This model was contributed by [valhalla](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/openai/CLIP). + +## Usage tips and example CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image classification. CLIP uses a ViT like transformer to get visual features and a causal language model to get the text @@ -77,8 +79,6 @@ encode the text and prepare the images. The following example shows how to get t >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` -This model was contributed by [valhalla](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/openai/CLIP). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CLIP. @@ -142,6 +142,9 @@ The resource should ideally demonstrate something new instead of duplicating an [[autodoc]] CLIPProcessor + + + ## CLIPModel [[autodoc]] CLIPModel @@ -164,12 +167,14 @@ The resource should ideally demonstrate something new instead of duplicating an [[autodoc]] CLIPVisionModelWithProjection - forward - ## CLIPVisionModel [[autodoc]] CLIPVisionModel - forward + + + ## TFCLIPModel [[autodoc]] TFCLIPModel @@ -187,6 +192,9 @@ The resource should ideally demonstrate something new instead of duplicating an [[autodoc]] TFCLIPVisionModel - call + + + ## FlaxCLIPModel [[autodoc]] FlaxCLIPModel @@ -208,3 +216,6 @@ The resource should ideally demonstrate something new instead of duplicating an [[autodoc]] FlaxCLIPVisionModel - __call__ + + + diff --git a/docs/source/en/model_doc/clipseg.md b/docs/source/en/model_doc/clipseg.md index c4c60a48d055..320095bc1905 100644 --- a/docs/source/en/model_doc/clipseg.md +++ b/docs/source/en/model_doc/clipseg.md @@ -41,13 +41,6 @@ to any binary segmentation task where a text or image query can be formulated. Finally, we find our system to adapt well to generalized queries involving affordances or properties* -Tips: - -- [`CLIPSegForImageSegmentation`] adds a decoder on top of [`CLIPSegModel`]. The latter is identical to [`CLIPModel`]. -- [`CLIPSegForImageSegmentation`] can generate image segmentations based on arbitrary prompts at test time. 
A prompt can be either a text -(provided to the model as `input_ids`) or an image (provided to the model as `conditional_pixel_values`). One can also provide custom -conditional embeddings (provided to the model as `conditional_embeddings`). - drawing @@ -56,6 +49,13 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/timojl/clipseg). +## Usage tips + +- [`CLIPSegForImageSegmentation`] adds a decoder on top of [`CLIPSegModel`]. The latter is identical to [`CLIPModel`]. +- [`CLIPSegForImageSegmentation`] can generate image segmentations based on arbitrary prompts at test time. A prompt can be either a text +(provided to the model as `input_ids`) or an image (provided to the model as `conditional_pixel_values`). One can also provide custom +conditional embeddings (provided to the model as `conditional_embeddings`). + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CLIPSeg. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. diff --git a/docs/source/en/model_doc/code_llama.md b/docs/source/en/model_doc/code_llama.md index a60cf1641533..38d50c87334d 100644 --- a/docs/source/en/model_doc/code_llama.md +++ b/docs/source/en/model_doc/code_llama.md @@ -24,7 +24,11 @@ The abstract from the paper is the following: *We release Code Llama, a family of large language models for code based on Llama 2 providing state-of-the-art performance among open models, infilling capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks. We provide multiple flavors to cover a wide range of applications: foundation models (Code Llama), Python specializations (Code Llama - Python), and instruction-following models (Code Llama - Instruct) with 7B, 13B and 34B parameters each. All models are trained on sequences of 16k tokens and show improvements on inputs with up to 100k tokens. 7B and 13B Code Llama and Code Llama - Instruct variants support infilling based on surrounding content. Code Llama reaches state-of-the-art performance among open models on several code benchmarks, with scores of up to 53% and 55% on HumanEval and MBPP, respectively. Notably, Code Llama - Python 7B outperforms Llama 2 70B on HumanEval and MBPP, and all our models outperform every other publicly available model on MultiPL-E. We release Code Llama under a permissive license that allows for both research and commercial use.* -Check out all Code Llama models [here](https://huggingface.co/models?search=code_llama) and the officially released ones in the [codellama org](https://huggingface.co/codellama). +Check out all Code Llama model checkpoints [here](https://huggingface.co/models?search=code_llama) and the officially released ones in the [codellama org](https://huggingface.co/codellama). + +This model was contributed by [ArthurZucker](https://huggingface.co/ArthurZ). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). + +## Usage tips and examples @@ -38,21 +42,22 @@ As mentioned above, the `dtype` of the storage weights is mostly irrelevant unle -Tips: -- These models have the same architecture as the `Llama2` models +Tips: - The infilling task is supported out of the box. 
You should be using the `tokenizer.fill_token` where you want your input to be filled. - The model conversion script is the same as for the `Llama2` family: -Here is a sample usage +Here is a sample usage: + ```bash python src/transformers/models/llama/convert_llama_weights_to_hf.py \ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path ``` + Note that executing the script requires enough CPU RAM to host the whole model in float16 precision (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). -- After conversion, the model and tokenizer can be loaded via: +After conversion, the model and tokenizer can be loaded via: ```python >>> from transformers import LlamaForCausalLM, CodeLlamaTokenizer @@ -95,9 +100,13 @@ If you only want the infilled part: Under the hood, the tokenizer [automatically splits by ``](https://huggingface.co/docs/transformers/main/model_doc/code_llama#transformers.CodeLlamaTokenizer.fill_token) to create a formatted input string that follows [the original training pattern](https://github.com/facebookresearch/codellama/blob/cb51c14ec761370ba2e2bc351374a79265d0465e/llama/generation.py#L402). This is more robust than preparing the pattern yourself: it avoids pitfalls, such as token glueing, that are very hard to debug. To see how much CPU and GPU memory you need for this model or others, try [this calculator](https://huggingface.co/spaces/hf-accelerate/model-memory-usage) which can help determine that value. -- The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string. +The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string. -This model was contributed by [ArthurZucker](https://huggingface.co/ArthurZ). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). + + +Code Llama has the same architecture as the `Llama2` models, refer to [Llama2's documentation page](llama2) for the API reference. +Find Code Llama tokenizer reference below. + ## CodeLlamaTokenizer diff --git a/docs/source/en/model_doc/codegen.md b/docs/source/en/model_doc/codegen.md index 695f45f9ae17..78be813db1a6 100644 --- a/docs/source/en/model_doc/codegen.md +++ b/docs/source/en/model_doc/codegen.md @@ -40,7 +40,7 @@ The original code can be found [here](https://github.com/salesforce/codegen). * `mono`: Initialized with `multi`, then further pre-trained on Python data * For example, `Salesforce/codegen-350M-mono` offers a 350 million-parameter checkpoint pre-trained sequentially on the Pile, multiple programming languages, and Python. 
-## How to use +## Usage example ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer @@ -60,7 +60,7 @@ def hello_world(): hello_world() ``` -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) diff --git a/docs/source/en/model_doc/conditional_detr.md b/docs/source/en/model_doc/conditional_detr.md index 8993fb384316..516e1c436855 100644 --- a/docs/source/en/model_doc/conditional_detr.md +++ b/docs/source/en/model_doc/conditional_detr.md @@ -31,7 +31,7 @@ alt="drawing" width="600"/> This model was contributed by [DepuMeng](https://huggingface.co/DepuMeng). The original code can be found [here](https://github.com/Atten4Vis/ConditionalDETR). -## Documentation resources +## Resources - [Object detection task guide](../tasks/object_detection) diff --git a/docs/source/en/model_doc/convbert.md b/docs/source/en/model_doc/convbert.md index 8a0aa7a946cc..17b5d7920c6c 100644 --- a/docs/source/en/model_doc/convbert.md +++ b/docs/source/en/model_doc/convbert.md @@ -44,12 +44,14 @@ ConvBERT significantly outperforms BERT and its variants in various downstream t fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost. Code and pre-trained models will be released.* -ConvBERT training tips are similar to those of BERT. - This model was contributed by [abhishek](https://huggingface.co/abhishek). The original implementation can be found here: https://github.com/yitu-opensource/ConvBert -## Documentation resources +## Usage tips + +ConvBERT training tips are similar to those of BERT. For usage tips refer to [BERT documentation](bert). + +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -73,6 +75,9 @@ here: https://github.com/yitu-opensource/ConvBert [[autodoc]] ConvBertTokenizerFast + + + ## ConvBertModel [[autodoc]] ConvBertModel @@ -103,6 +108,9 @@ here: https://github.com/yitu-opensource/ConvBert [[autodoc]] ConvBertForQuestionAnswering - forward + + + ## TFConvBertModel [[autodoc]] TFConvBertModel @@ -132,3 +140,6 @@ here: https://github.com/yitu-opensource/ConvBert [[autodoc]] TFConvBertForQuestionAnswering - call + + + diff --git a/docs/source/en/model_doc/convnext.md b/docs/source/en/model_doc/convnext.md index acbb0265b2e6..5222834b1f69 100644 --- a/docs/source/en/model_doc/convnext.md +++ b/docs/source/en/model_doc/convnext.md @@ -32,10 +32,6 @@ of a vision Transformer, and discover several key components that contribute to dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.* -Tips: - -- See the code examples below each model regarding usage. 
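For reference, a minimal classification sketch using the pipeline API; the `facebook/convnext-tiny-224` checkpoint and the demo image are illustrative assumptions rather than part of the original page:

```python
from transformers import pipeline

# illustrative checkpoint; larger ConvNeXt variants are used the same way
classifier = pipeline("image-classification", model="facebook/convnext-tiny-224")

# standard demo image of two cats used throughout the docs
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
print(predictions[0]["label"], round(predictions[0]["score"], 3))
```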
- drawing @@ -68,6 +64,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] ConvNextImageProcessor - preprocess + + + ## ConvNextModel [[autodoc]] ConvNextModel @@ -78,14 +77,18 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] ConvNextForImageClassification - forward + + ## TFConvNextModel [[autodoc]] TFConvNextModel - call - ## TFConvNextForImageClassification [[autodoc]] TFConvNextForImageClassification - call + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/convnextv2.md b/docs/source/en/model_doc/convnextv2.md index af08128c45ef..8cd142c2765f 100644 --- a/docs/source/en/model_doc/convnextv2.md +++ b/docs/source/en/model_doc/convnextv2.md @@ -25,10 +25,6 @@ The abstract from the paper is the following: *Driven by improved architectures and better representation learning frameworks, the field of visual recognition has enjoyed rapid modernization and performance boost in the early 2020s. For example, modern ConvNets, represented by ConvNeXt, have demonstrated strong performance in various scenarios. While these models were originally designed for supervised learning with ImageNet labels, they can also potentially benefit from self-supervised learning techniques such as masked autoencoders (MAE). However, we found that simply combining these two approaches leads to subpar performance. In this paper, we propose a fully convolutional masked autoencoder framework and a new Global Response Normalization (GRN) layer that can be added to the ConvNeXt architecture to enhance inter-channel feature competition. This co-design of self-supervised learning techniques and architectural improvement results in a new model family called ConvNeXt V2, which significantly improves the performance of pure ConvNets on various recognition benchmarks, including ImageNet classification, COCO detection, and ADE20K segmentation. We also provide pre-trained ConvNeXt V2 models of various sizes, ranging from an efficient 3.7M-parameter Atto model with 76.7% top-1 accuracy on ImageNet, to a 650M Huge model that achieves a state-of-the-art 88.9% accuracy using only public training data.* -Tips: - -- See the code examples below each model regarding usage. - drawing diff --git a/docs/source/en/model_doc/cpm.md b/docs/source/en/model_doc/cpm.md index a2ecf1a1e092..129c4ed3a377 100644 --- a/docs/source/en/model_doc/cpm.md +++ b/docs/source/en/model_doc/cpm.md @@ -37,7 +37,14 @@ NLP tasks in the settings of few-shot (even zero-shot) learning.* This model was contributed by [canwenxu](https://huggingface.co/canwenxu). The original implementation can be found here: https://github.com/TsinghuaAI/CPM-Generate -Note: We only have a tokenizer here, since the model architecture is the same as GPT-2. + + + +CPM's architecture is the same as GPT-2, except for tokenization method. Refer to [GPT-2 documentation](gpt2) for +API reference information. + + + ## CpmTokenizer diff --git a/docs/source/en/model_doc/cpmant.md b/docs/source/en/model_doc/cpmant.md index 2c4ad92a629e..4bcf774507fb 100644 --- a/docs/source/en/model_doc/cpmant.md +++ b/docs/source/en/model_doc/cpmant.md @@ -20,11 +20,10 @@ rendered properly in your Markdown viewer. CPM-Ant is an open-source Chinese pre-trained language model (PLM) with 10B parameters. It is also the first milestone of the live training process of CPM-Live. The training process is cost-effective and environment-friendly. 
CPM-Ant also achieves promising results with delta tuning on the CUGE benchmark. Besides the full model, we also provide various compressed versions to meet the requirements of different hardware configurations. [See more](https://github.com/OpenBMB/CPM-Live/tree/cpm-ant/cpm-live) -Tips: - This model was contributed by [OpenBMB](https://huggingface.co/openbmb). The original code can be found [here](https://github.com/OpenBMB/CPM-Live/tree/cpm-ant/cpm-live). -⚙️ Training & Inference +## Resources + - A tutorial on [CPM-Live](https://github.com/OpenBMB/CPM-Live/tree/cpm-ant/cpm-live). ## CpmAntConfig diff --git a/docs/source/en/model_doc/ctrl.md b/docs/source/en/model_doc/ctrl.md index 9c2413d27769..be9fa85c7073 100644 --- a/docs/source/en/model_doc/ctrl.md +++ b/docs/source/en/model_doc/ctrl.md @@ -41,7 +41,10 @@ providing more explicit control over text generation. These codes also allow CTR training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution.* -Tips: +This model was contributed by [keskarnitishr](https://huggingface.co/keskarnitishr). The original code can be found +[here](https://github.com/salesforce/ctrl). + +## Usage tips - CTRL makes use of control codes to generate text: it requires generations to be started by certain words, sentences or links to generate coherent text. Refer to the [original implementation](https://github.com/salesforce/ctrl) for @@ -56,10 +59,8 @@ Tips: pre-computed values in the context of text generation. See the [`forward`](model_doc/ctrl#transformers.CTRLModel.forward) method for more information on the usage of this argument. -This model was contributed by [keskarnitishr](https://huggingface.co/keskarnitishr). The original code can be found -[here](https://github.com/salesforce/ctrl). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Causal language modeling task guide](../tasks/language_modeling) @@ -73,6 +74,9 @@ This model was contributed by [keskarnitishr](https://huggingface.co/keskarnitis [[autodoc]] CTRLTokenizer - save_vocabulary + + + ## CTRLModel [[autodoc]] CTRLModel @@ -88,6 +92,9 @@ This model was contributed by [keskarnitishr](https://huggingface.co/keskarnitis [[autodoc]] CTRLForSequenceClassification - forward + + + ## TFCTRLModel [[autodoc]] TFCTRLModel @@ -102,3 +109,6 @@ This model was contributed by [keskarnitishr](https://huggingface.co/keskarnitis [[autodoc]] TFCTRLForSequenceClassification - call + + + diff --git a/docs/source/en/model_doc/cvt.md b/docs/source/en/model_doc/cvt.md index 6c9aea5ec863..503f97795c0e 100644 --- a/docs/source/en/model_doc/cvt.md +++ b/docs/source/en/model_doc/cvt.md @@ -33,15 +33,15 @@ performance gains are maintained when pretrained on larger datasets (\eg ImageNe ImageNet-22k, our CvT-W24 obtains a top-1 accuracy of 87.7\% on the ImageNet-1k val set. Finally, our results show that the positional encoding, a crucial component in existing Vision Transformers, can be safely removed in our model, simplifying the design for higher resolution vision tasks.* -Tips: +This model was contributed by [anugunj](https://huggingface.co/anugunj). The original code can be found [here](https://github.com/microsoft/CvT). + +## Usage tips - CvT models are regular Vision Transformers, but trained with convolutions. They outperform the [original model (ViT)](vit) when fine-tuned on ImageNet-1K and CIFAR-100. 
- You can check out demo notebooks regarding inference as well as fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace [`ViTFeatureExtractor`] by [`AutoImageProcessor`] and [`ViTForImageClassification`] by [`CvtForImageClassification`]). - The available checkpoints are either (1) pre-trained on [ImageNet-22k](http://www.image-net.org/) (a collection of 14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). -This model was contributed by [anugunj](https://huggingface.co/anugunj). The original code can be found [here](https://github.com/microsoft/CvT). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CvT. @@ -57,6 +57,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] CvtConfig + + + ## CvtModel [[autodoc]] CvtModel @@ -67,6 +70,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] CvtForImageClassification - forward + + + ## TFCvtModel [[autodoc]] TFCvtModel @@ -77,3 +83,5 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] TFCvtForImageClassification - call + + diff --git a/docs/source/en/model_doc/data2vec.md b/docs/source/en/model_doc/data2vec.md index dc05c44be90c..517a51ce46a3 100644 --- a/docs/source/en/model_doc/data2vec.md +++ b/docs/source/en/model_doc/data2vec.md @@ -35,19 +35,18 @@ the entire input. Experiments on the major benchmarks of speech recognition, ima natural language understanding demonstrate a new state of the art or competitive performance to predominant approaches. Models and code are available at www.github.com/pytorch/fairseq/tree/master/examples/data2vec.* -Tips: - -- Data2VecAudio, Data2VecText, and Data2VecVision have all been trained using the same self-supervised learning method. -- For Data2VecAudio, preprocessing is identical to [`Wav2Vec2Model`], including feature extraction -- For Data2VecText, preprocessing is identical to [`RobertaModel`], including tokenization. -- For Data2VecVision, preprocessing is identical to [`BeitModel`], including feature extraction. - This model was contributed by [edugp](https://huggingface.co/edugp) and [patrickvonplaten](https://huggingface.co/patrickvonplaten). [sayakpaul](https://github.com/sayakpaul) and [Rocketknight1](https://github.com/Rocketknight1) contributed Data2Vec for vision in TensorFlow. The original code (for NLP and Speech) can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/data2vec). The original code for vision can be found [here](https://github.com/facebookresearch/data2vec_vision/tree/main/beit). +## Usage tips + +- Data2VecAudio, Data2VecText, and Data2VecVision have all been trained using the same self-supervised learning method. +- For Data2VecAudio, preprocessing is identical to [`Wav2Vec2Model`], including feature extraction +- For Data2VecText, preprocessing is identical to [`RobertaModel`], including tokenization. +- For Data2VecVision, preprocessing is identical to [`BeitModel`], including feature extraction. 
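Since Data2VecText preprocessing mirrors RoBERTa, feature extraction follows the usual pattern; a minimal sketch assuming the `facebook/data2vec-text-base` checkpoint:

```python
import torch
from transformers import AutoTokenizer, Data2VecTextModel

# Assumed checkpoint; tokenization is identical to RoBERTa.
tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# One hidden state per input token.
print(outputs.last_hidden_state.shape)
```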
## Resources @@ -88,6 +87,8 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] Data2VecVisionConfig + + ## Data2VecAudioModel @@ -164,6 +165,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] Data2VecVisionForSemanticSegmentation - forward + + + ## TFData2VecVisionModel [[autodoc]] TFData2VecVisionModel @@ -178,3 +182,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] TFData2VecVisionForSemanticSegmentation - call + + + diff --git a/docs/source/en/model_doc/deberta-v2.md b/docs/source/en/model_doc/deberta-v2.md index 8dec57a17173..e3bd91e8e4fa 100644 --- a/docs/source/en/model_doc/deberta-v2.md +++ b/docs/source/en/model_doc/deberta-v2.md @@ -62,7 +62,7 @@ New in v2: This model was contributed by [DeBERTa](https://huggingface.co/DeBERTa). This model TF 2.0 implementation was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/DeBERTa). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -88,6 +88,9 @@ contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code - build_inputs_with_special_tokens - create_token_type_ids_from_sequences + + + ## DebertaV2Model [[autodoc]] DebertaV2Model @@ -123,6 +126,9 @@ contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code [[autodoc]] DebertaV2ForMultipleChoice - forward + + + ## TFDebertaV2Model [[autodoc]] TFDebertaV2Model @@ -157,3 +163,6 @@ contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code [[autodoc]] TFDebertaV2ForMultipleChoice - call + + + diff --git a/docs/source/en/model_doc/deberta.md b/docs/source/en/model_doc/deberta.md index ed66364a4b5a..342a3bc47960 100644 --- a/docs/source/en/model_doc/deberta.md +++ b/docs/source/en/model_doc/deberta.md @@ -94,6 +94,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - build_inputs_with_special_tokens - create_token_type_ids_from_sequences + + + ## DebertaModel [[autodoc]] DebertaModel @@ -123,6 +126,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] DebertaForQuestionAnswering - forward + + + ## TFDebertaModel [[autodoc]] TFDebertaModel @@ -152,3 +158,7 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFDebertaForQuestionAnswering - call + + + + diff --git a/docs/source/en/model_doc/decision_transformer.md b/docs/source/en/model_doc/decision_transformer.md index a46673d87ac8..07ef2ecbdc8e 100644 --- a/docs/source/en/model_doc/decision_transformer.md +++ b/docs/source/en/model_doc/decision_transformer.md @@ -33,9 +33,7 @@ This allows us to draw upon the simplicity and scalability of the Transformer ar Decision Transformer matches or exceeds the performance of state-of-the-art model-free offline RL baselines on Atari, OpenAI Gym, and Key-to-Door tasks.* -Tips: - -This version of the model is for tasks where the state is a vector, image-based states will come soon. +This version of the model is for tasks where the state is a vector. This model was contributed by [edbeeching](https://huggingface.co/edbeeching). The original code can be found [here](https://github.com/kzl/decision-transformer). 
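A minimal sketch of a forward pass through a randomly initialized Decision Transformer, to show the expected input and output shapes; the state/action dimensions and sequence length are arbitrary assumptions:

```python
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

# Arbitrary dimensions for illustration (roughly a locomotion-style environment).
config = DecisionTransformerConfig(state_dim=17, act_dim=6)
model = DecisionTransformerModel(config)

batch_size, seq_len = 1, 20
states = torch.randn(batch_size, seq_len, config.state_dim)
actions = torch.randn(batch_size, seq_len, config.act_dim)
rewards = torch.zeros(batch_size, seq_len, 1)
returns_to_go = torch.randn(batch_size, seq_len, 1)
timesteps = torch.arange(seq_len).unsqueeze(0)
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)

with torch.no_grad():
    outputs = model(
        states=states,
        actions=actions,
        rewards=rewards,
        returns_to_go=returns_to_go,
        timesteps=timesteps,
        attention_mask=attention_mask,
    )

# The model predicts an action for every timestep in the sequence.
print(outputs.action_preds.shape)  # (batch_size, seq_len, act_dim)
```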
diff --git a/docs/source/en/model_doc/deformable_detr.md b/docs/source/en/model_doc/deformable_detr.md index 0bceb0bdf39b..726fa0d0ca9a 100644 --- a/docs/source/en/model_doc/deformable_detr.md +++ b/docs/source/en/model_doc/deformable_detr.md @@ -25,11 +25,6 @@ The abstract from the paper is the following: *DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10 times less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach.* -Tips: - -- One can use [`DeformableDetrImageProcessor`] to prepare images (and optional targets) for the model. -- Training Deformable DETR is equivalent to training the original [DETR](detr) model. See the [resources](#resources) section below for demo notebooks. - drawing @@ -37,6 +32,10 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/fundamentalvision/Deformable-DETR). +## Usage tips + +- Training Deformable DETR is equivalent to training the original [DETR](detr) model. See the [resources](#resources) section below for demo notebooks. + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Deformable DETR. diff --git a/docs/source/en/model_doc/deit.md b/docs/source/en/model_doc/deit.md index ef32e05ebd92..7d9918a45eee 100644 --- a/docs/source/en/model_doc/deit.md +++ b/docs/source/en/model_doc/deit.md @@ -16,13 +16,6 @@ rendered properly in your Markdown viewer. # DeiT - - -This is a recently introduced model so the API hasn't been tested extensively. There may be some bugs or slight -breaking changes to fix it in the future. If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title). - - - ## Overview The DeiT model was proposed in [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre @@ -45,7 +38,9 @@ distillation, especially when using a convnet as a teacher. This leads us to rep for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.* -Tips: +This model was contributed by [nielsr](https://huggingface.co/nielsr). The TensorFlow version of this model was added by [amyeroberts](https://huggingface.co/amyeroberts). + +## Usage tips - Compared to ViT, DeiT models use a so-called distillation token to effectively learn from a teacher (which, in the DeiT paper, is a ResNet like-model). The distillation token is learned through backpropagation, by interacting with @@ -73,8 +68,6 @@ Tips: *facebook/deit-base-patch16-384*. Note that one should use [`DeiTImageProcessor`] in order to prepare images for the model. -This model was contributed by [nielsr](https://huggingface.co/nielsr). 
The TensorFlow version of this model was added by [amyeroberts](https://huggingface.co/amyeroberts). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DeiT. @@ -104,6 +97,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] DeiTImageProcessor - preprocess + + + ## DeiTModel [[autodoc]] DeiTModel @@ -124,6 +120,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] DeiTForImageClassificationWithTeacher - forward + + + ## TFDeiTModel [[autodoc]] TFDeiTModel @@ -143,3 +142,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] TFDeiTForImageClassificationWithTeacher - call + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/deplot.md b/docs/source/en/model_doc/deplot.md index f425a8268fdf..a77bee39de76 100644 --- a/docs/source/en/model_doc/deplot.md +++ b/docs/source/en/model_doc/deplot.md @@ -24,12 +24,10 @@ The abstract of the paper states the following: *Visual language such as charts and plots is ubiquitous in the human world. Comprehending plots and charts requires strong reasoning skills. Prior state-of-the-art (SOTA) models require at least tens of thousands of training examples and their reasoning capabilities are still much limited, especially on complex human-written queries. This paper presents the first one-shot solution to visual language reasoning. We decompose the challenge of visual language reasoning into two steps: (1) plot-to-text translation, and (2) reasoning over the translated text. The key in this method is a modality conversion module, named as DePlot, which translates the image of a plot or chart to a linearized table. The output of DePlot can then be directly used to prompt a pretrained large language model (LLM), exploiting the few-shot reasoning capabilities of LLMs. To obtain DePlot, we standardize the plot-to-table task by establishing unified task formats and metrics, and train DePlot end-to-end on this task. DePlot can then be used off-the-shelf together with LLMs in a plug-and-play fashion. Compared with a SOTA model finetuned on more than >28k data points, DePlot+LLM with just one-shot prompting achieves a 24.0% improvement over finetuned SOTA on human-written queries from the task of chart QA.* -## Model description - DePlot is a model that is trained using `Pix2Struct` architecture. You can find more information about `Pix2Struct` in the [Pix2Struct documentation](https://huggingface.co/docs/transformers/main/en/model_doc/pix2struct). DePlot is a Visual Question Answering subset of `Pix2Struct` architecture. It renders the input question on the image and predicts the answer. -## Usage +## Usage example Currently one checkpoint is available for DePlot: @@ -59,4 +57,10 @@ from transformers.optimization import Adafactor, get_cosine_schedule_with_warmup optimizer = Adafactor(self.parameters(), scale_parameter=False, relative_step=False, lr=0.01, weight_decay=1e-05) scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=1000, num_training_steps=40000) -``` \ No newline at end of file +``` + + + +DePlot is a model trained using `Pix2Struct` architecture. For API reference, see [`Pix2Struct` documentation](pix2struct). 
+ + \ No newline at end of file diff --git a/docs/source/en/model_doc/deta.md b/docs/source/en/model_doc/deta.md index d384f5564e5e..1eed98832ac7 100644 --- a/docs/source/en/model_doc/deta.md +++ b/docs/source/en/model_doc/deta.md @@ -26,10 +26,6 @@ The abstract from the paper is the following: *Detection Transformer (DETR) directly transforms queries to unique objects by using one-to-one bipartite matching during training and enables end-to-end object detection. Recently, these models have surpassed traditional detectors on COCO with undeniable elegance. However, they differ from traditional detectors in multiple designs, including model architecture and training schedules, and thus the effectiveness of one-to-one matching is not fully understood. In this work, we conduct a strict comparison between the one-to-one Hungarian matching in DETRs and the one-to-many label assignments in traditional detectors with non-maximum supervision (NMS). Surprisingly, we observe one-to-many assignments with NMS consistently outperform standard one-to-one matching under the same setting, with a significant gain of up to 2.5 mAP. Our detector that trains Deformable-DETR with traditional IoU-based label assignment achieved 50.2 COCO mAP within 12 epochs (1x schedule) with ResNet50 backbone, outperforming all existing traditional or transformer-based detectors in this setting. On multiple datasets, schedules, and architectures, we consistently show bipartite matching is unnecessary for performant detection transformers. Furthermore, we attribute the success of detection transformers to their expressive transformer architecture.* -Tips: - -- One can use [`DetaImageProcessor`] to prepare images and optional targets for the model. - drawing @@ -51,20 +47,17 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] DetaConfig - ## DetaImageProcessor [[autodoc]] DetaImageProcessor - preprocess - post_process_object_detection - ## DetaModel [[autodoc]] DetaModel - forward - ## DetaForObjectDetection [[autodoc]] DetaForObjectDetection diff --git a/docs/source/en/model_doc/detr.md b/docs/source/en/model_doc/detr.md index 2c03a0f8b851..c36bd4380edf 100644 --- a/docs/source/en/model_doc/detr.md +++ b/docs/source/en/model_doc/detr.md @@ -41,6 +41,8 @@ baselines.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/detr). +## How DETR works + Here's a TLDR explaining how [`~transformers.DetrForObjectDetection`] works: First, an image is sent through a pre-trained convolutional backbone (in the paper, the authors use @@ -79,7 +81,7 @@ where one first trains a [`~transformers.DetrForObjectDetection`] model to detec the mask head for 25 epochs. Experimentally, these two approaches give similar results. Note that predicting boxes is required for the training to be possible, since the Hungarian matching is computed using distances between boxes. -Tips: +## Usage tips - DETR uses so-called **object queries** to detect objects in an image. The number of queries determines the maximum number of objects that can be detected in a single image, and is set to 100 by default (see parameter @@ -165,14 +167,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! 
The resource should ideally demonstrate something new instead of duplicating an existing resource. -## DETR specific outputs - -[[autodoc]] models.detr.modeling_detr.DetrModelOutput - -[[autodoc]] models.detr.modeling_detr.DetrObjectDetectionOutput - -[[autodoc]] models.detr.modeling_detr.DetrSegmentationOutput - ## DetrConfig [[autodoc]] DetrConfig @@ -195,6 +189,14 @@ If you're interested in submitting a resource to be included here, please feel f - post_process_instance_segmentation - post_process_panoptic_segmentation +## DETR specific outputs + +[[autodoc]] models.detr.modeling_detr.DetrModelOutput + +[[autodoc]] models.detr.modeling_detr.DetrObjectDetectionOutput + +[[autodoc]] models.detr.modeling_detr.DetrSegmentationOutput + ## DetrModel [[autodoc]] DetrModel diff --git a/docs/source/en/model_doc/dialogpt.md b/docs/source/en/model_doc/dialogpt.md index 70929409b294..558b91d76d25 100644 --- a/docs/source/en/model_doc/dialogpt.md +++ b/docs/source/en/model_doc/dialogpt.md @@ -32,7 +32,9 @@ that leverage DialoGPT generate more relevant, contentful and context-consistent systems. The pre-trained model and training pipeline are publicly released to facilitate research into neural response generation and the development of more intelligent open-domain dialogue systems.* -Tips: +The original code can be found [here](https://github.com/microsoft/DialoGPT). + +## Usage tips - DialoGPT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -47,7 +49,8 @@ follow the OpenAI GPT-2 to model a multiturn dialogue session as a long text and modeling. We first concatenate all dialog turns within a dialogue session into a long text x_1,..., x_N (N is the sequence length), ended by the end-of-text token.* For more information please confer to the original paper. + -DialoGPT's architecture is based on the GPT2 model, so one can refer to [GPT2's documentation page](gpt2). +DialoGPT's architecture is based on the GPT2 model, refer to [GPT2's documentation page](gpt2) for API reference and examples. -The original code can be found [here](https://github.com/microsoft/DialoGPT). + diff --git a/docs/source/en/model_doc/dinat.md b/docs/source/en/model_doc/dinat.md index 2317b13b7f9c..23dfa3b74fb0 100644 --- a/docs/source/en/model_doc/dinat.md +++ b/docs/source/en/model_doc/dinat.md @@ -44,17 +44,6 @@ and ADE20K (48.5 PQ), and instance segmentation model on Cityscapes (44.5 AP) an It also matches the state of the art specialized semantic segmentation models on ADE20K (58.2 mIoU), and ranks second on Cityscapes (84.5 mIoU) (no extra data). * -Tips: -- One can use the [`AutoImageProcessor`] API to prepare images for the model. -- DiNAT can be used as a *backbone*. When `output_hidden_states = True`, -it will output both `hidden_states` and `reshaped_hidden_states`. The `reshaped_hidden_states` have a shape of `(batch, num_channels, height, width)` rather than `(batch_size, height, width, num_channels)`. - -Notes: -- DiNAT depends on [NATTEN](https://github.com/SHI-Labs/NATTEN/)'s implementation of Neighborhood Attention and Dilated Neighborhood Attention. -You can install it with pre-built wheels for Linux by referring to [shi-labs.com/natten](https://shi-labs.com/natten), or build on your system by running `pip install natten`. -Note that the latter will likely take time to compile. NATTEN does not support Windows devices yet. -- Patch size of 4 is only supported at the moment. - drawing @@ -65,6 +54,17 @@ Taken from the original paper. 
+ + ## DistilBertModel [[autodoc]] DistilBertModel @@ -174,6 +178,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] DistilBertForQuestionAnswering - forward + + + ## TFDistilBertModel [[autodoc]] TFDistilBertModel @@ -204,6 +211,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFDistilBertForQuestionAnswering - call + + + ## FlaxDistilBertModel [[autodoc]] FlaxDistilBertModel @@ -233,3 +243,10 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxDistilBertForQuestionAnswering - __call__ + + + + + + + diff --git a/docs/source/en/model_doc/dit.md b/docs/source/en/model_doc/dit.md index 7d5f873e78bb..7f6691a15bc4 100644 --- a/docs/source/en/model_doc/dit.md +++ b/docs/source/en/model_doc/dit.md @@ -37,6 +37,10 @@ alt="drawing" width="600"/> Summary of the approach. Taken from the [original paper](https://arxiv.org/abs/2203.02378). +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/dit). + +## Usage tips + One can directly use the weights of DiT with the AutoModel API: ```python @@ -66,10 +70,6 @@ model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-fine This particular checkpoint was fine-tuned on [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/), an important benchmark for document image classification. A notebook that illustrates inference for document image classification can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DiT/Inference_with_DiT_(Document_Image_Transformer)_for_document_image_classification.ipynb). -As DiT's architecture is equivalent to that of BEiT, one can refer to [BEiT's documentation page](beit) for all tips, code examples and notebooks. - -This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/dit). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DiT. @@ -78,4 +78,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - [`BeitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). -If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. \ No newline at end of file +If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. + + + + As DiT's architecture is equivalent to that of BEiT, one can refer to [BEiT's documentation page](beit) for all tips, code examples and notebooks. + diff --git a/docs/source/en/model_doc/donut.md b/docs/source/en/model_doc/donut.md index cfbf79972d57..6e5cfe648d09 100644 --- a/docs/source/en/model_doc/donut.md +++ b/docs/source/en/model_doc/donut.md @@ -34,14 +34,14 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). 
The original code can be found [here](https://github.com/clovaai/donut). -Tips: +## Usage tips - The quickest way to get started with Donut is by checking the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Donut), which show how to use the model at inference time as well as fine-tuning on custom data. - Donut is always used within the [VisionEncoderDecoder](vision-encoder-decoder) framework. -## Inference +## Inference examples Donut's [`VisionEncoderDecoder`] model accepts images as input and makes use of [`~generation.GenerationMixin.generate`] to autoregressively generate text given the input image. diff --git a/docs/source/en/model_doc/dpr.md b/docs/source/en/model_doc/dpr.md index 10bc76b72dd6..8b9f352b637b 100644 --- a/docs/source/en/model_doc/dpr.md +++ b/docs/source/en/model_doc/dpr.md @@ -43,7 +43,8 @@ benchmarks.* This model was contributed by [lhoestq](https://huggingface.co/lhoestq). The original code can be found [here](https://github.com/facebookresearch/DPR). -Tips: +## Usage tips + - DPR consists in three models: * Question encoder: encode questions as vectors @@ -86,6 +87,9 @@ Tips: [[autodoc]] models.dpr.modeling_dpr.DPRReaderOutput + + + ## DPRContextEncoder [[autodoc]] DPRContextEncoder @@ -101,6 +105,9 @@ Tips: [[autodoc]] DPRReader - forward + + + ## TFDPRContextEncoder [[autodoc]] TFDPRContextEncoder @@ -115,3 +122,7 @@ Tips: [[autodoc]] TFDPRReader - call + + + + diff --git a/docs/source/en/model_doc/efficientformer.md b/docs/source/en/model_doc/efficientformer.md index 1f16f9811b77..92ba90a9e5ed 100644 --- a/docs/source/en/model_doc/efficientformer.md +++ b/docs/source/en/model_doc/efficientformer.md @@ -56,6 +56,9 @@ The original code can be found [here](https://github.com/snap-research/Efficient [[autodoc]] EfficientFormerImageProcessor - preprocess + + + ## EfficientFormerModel [[autodoc]] EfficientFormerModel @@ -71,6 +74,9 @@ The original code can be found [here](https://github.com/snap-research/Efficient [[autodoc]] EfficientFormerForImageClassificationWithTeacher - forward + + + ## TFEfficientFormerModel [[autodoc]] TFEfficientFormerModel @@ -85,3 +91,6 @@ The original code can be found [here](https://github.com/snap-research/Efficient [[autodoc]] TFEfficientFormerForImageClassificationWithTeacher - call + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/electra.md b/docs/source/en/model_doc/electra.md index 26830950ae3a..700c49df7993 100644 --- a/docs/source/en/model_doc/electra.md +++ b/docs/source/en/model_doc/electra.md @@ -50,7 +50,9 @@ using 30x more compute) on the GLUE natural language understanding benchmark. Ou where it performs comparably to RoBERTa and XLNet while using less than 1/4 of their compute and outperforms them when using the same amount of compute.* -Tips: +This model was contributed by [lysandre](https://huggingface.co/lysandre). The original code can be found [here](https://github.com/google-research/electra). + +## Usage tips - ELECTRA is the pretraining approach, therefore there is nearly no changes done to the underlying model: BERT. The only change is the separation of the embedding size and the hidden size: the embedding size is generally smaller, @@ -66,9 +68,7 @@ Tips: [`ElectraForPreTraining`] model (the classification head will be randomly initialized as it doesn't exist in the generator). -This model was contributed by [lysandre](https://huggingface.co/lysandre). The original code can be found [here](https://github.com/google-research/electra). 
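To illustrate the discriminator objective mentioned in the ELECTRA tips above, a small, hedged sketch of replaced-token detection; the `google/electra-small-discriminator` checkpoint and the example sentence are assumptions:

```python
import torch
from transformers import ElectraForPreTraining, ElectraTokenizerFast

# Assumed discriminator checkpoint.
discriminator = ElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")

sentence = "The quick brown fox fake over the lazy dog"
inputs = tokenizer(sentence, return_tensors="pt")

with torch.no_grad():
    logits = discriminator(**inputs).logits

# Positive logits mean the discriminator flags the token as replaced.
predictions = (logits > 0).int()[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
print(list(zip(tokens, predictions)))
```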
- -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -95,6 +95,9 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). The o [[autodoc]] models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput + + + ## ElectraModel [[autodoc]] ElectraModel @@ -135,6 +138,9 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). The o [[autodoc]] ElectraForQuestionAnswering - forward + + + ## TFElectraModel [[autodoc]] TFElectraModel @@ -170,6 +176,9 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). The o [[autodoc]] TFElectraForQuestionAnswering - call + + + ## FlaxElectraModel [[autodoc]] FlaxElectraModel @@ -209,3 +218,6 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). The o [[autodoc]] FlaxElectraForQuestionAnswering - __call__ + + + diff --git a/docs/source/en/model_doc/encodec.md b/docs/source/en/model_doc/encodec.md index bc7f64676eee..856f8be2b80a 100644 --- a/docs/source/en/model_doc/encodec.md +++ b/docs/source/en/model_doc/encodec.md @@ -26,6 +26,9 @@ The abstract from the paper is the following: This model was contributed by [Matthijs](https://huggingface.co/Matthijs), [Patrick Von Platen](https://huggingface.co/patrickvonplaten) and [Arthur Zucker](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/facebookresearch/encodec). + +## Usage example + Here is a quick example of how to encode and decode an audio using this model: ```python @@ -45,7 +48,6 @@ Here is a quick example of how to encode and decode an audio using this model: >>> audio_values = model(inputs["input_values"], inputs["padding_mask"]).audio_values ``` - ## EncodecConfig [[autodoc]] EncodecConfig diff --git a/docs/source/en/model_doc/encoder-decoder.md b/docs/source/en/model_doc/encoder-decoder.md index 8e26a3b9e407..54c9f7506476 100644 --- a/docs/source/en/model_doc/encoder-decoder.md +++ b/docs/source/en/model_doc/encoder-decoder.md @@ -149,20 +149,32 @@ were contributed by [ydshieh](https://github.com/ydshieh). [[autodoc]] EncoderDecoderConfig + + + ## EncoderDecoderModel [[autodoc]] EncoderDecoderModel - forward - from_encoder_decoder_pretrained + + + ## TFEncoderDecoderModel [[autodoc]] TFEncoderDecoderModel - call - from_encoder_decoder_pretrained + + + ## FlaxEncoderDecoderModel [[autodoc]] FlaxEncoderDecoderModel - __call__ - from_encoder_decoder_pretrained + + + diff --git a/docs/source/en/model_doc/ernie.md b/docs/source/en/model_doc/ernie.md index a64291a7d4f5..a5110b2d7b73 100644 --- a/docs/source/en/model_doc/ernie.md +++ b/docs/source/en/model_doc/ernie.md @@ -23,7 +23,7 @@ including [ERNIE1.0](https://arxiv.org/abs/1904.09223), [ERNIE2.0](https://ojs.a These models are contributed by [nghuyong](https://huggingface.co/nghuyong) and the official code can be found in [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) (in PaddlePaddle). 
-### How to use +### Usage example Take `ernie-1.0-base-zh` as an example: ```Python @@ -32,7 +32,7 @@ tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh") model = AutoModel.from_pretrained("nghuyong/ernie-1.0-base-zh") ``` -### Supported Models +### Model checkpoints | Model Name | Language | Description | |:-------------------:|:--------:|:-------------------------------:| @@ -51,7 +51,7 @@ You can find all the supported models from huggingface's model hub: [huggingface repo: [PaddleNLP](https://paddlenlp.readthedocs.io/zh/latest/model_zoo/transformers/ERNIE/contents.html) and [ERNIE](https://github.com/PaddlePaddle/ERNIE/blob/repro). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/ernie_m.md b/docs/source/en/model_doc/ernie_m.md index 83e08e09bfcf..a99332cb655a 100644 --- a/docs/source/en/model_doc/ernie_m.md +++ b/docs/source/en/model_doc/ernie_m.md @@ -25,18 +25,17 @@ Hao Tian, Hua Wu, Haifeng Wang. The abstract from the paper is the following: *Recent studies have demonstrated that pre-trained cross-lingual models achieve impressive performance in downstream cross-lingual tasks. This improvement benefits from learning a large amount of monolingual and parallel corpora. Although it is generally acknowledged that parallel corpora are critical for improving the model performance, existing methods are often constrained by the size of parallel corpora, especially for lowresource languages. In this paper, we propose ERNIE-M, a new training method that encourages the model to align the representation of multiple languages with monolingual corpora, to overcome the constraint that the parallel corpus size places on the model performance. Our key insight is to integrate back-translation into the pre-training process. We generate pseudo-parallel sentence pairs on a monolingual corpus to enable the learning of semantic alignments between different languages, thereby enhancing the semantic modeling of cross-lingual models. Experimental results show that ERNIE-M outperforms existing cross-lingual models and delivers new state-of-the-art results in various cross-lingual downstream tasks.* +This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). The original code can be found [here](https://github.com/PaddlePaddle/PaddleNLP/tree/develop/paddlenlp/transformers/ernie_m). -Tips: - -1. Ernie-M is a BERT-like model so it is a stacked Transformer Encoder. -2. Instead of using MaskedLM for pretraining (like BERT) the authors used two novel techniques: `Cross-attention Masked Language Modeling` and `Back-translation Masked Language Modeling`. For now these two LMHead objectives are not implemented here. -3. It is a multilingual language model. -4. Next Sentence Prediction was not used in pretraining process. +## Usage tips -This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). The original code can be found [here](https://github.com/PaddlePaddle/PaddleNLP/tree/develop/paddlenlp/transformers/ernie_m). +- Ernie-M is a BERT-like model so it is a stacked Transformer Encoder. +- Instead of using MaskedLM for pretraining (like BERT) the authors used two novel techniques: `Cross-attention Masked Language Modeling` and `Back-translation Masked Language Modeling`. For now these two LMHead objectives are not implemented here. +- It is a multilingual language model. 
+- Next Sentence Prediction was not used in pretraining process. -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/esm.md b/docs/source/en/model_doc/esm.md index 47b25650847e..46bab860ff4d 100644 --- a/docs/source/en/model_doc/esm.md +++ b/docs/source/en/model_doc/esm.md @@ -17,6 +17,7 @@ rendered properly in your Markdown viewer. # ESM ## Overview + This page provides code and pre-trained weights for Transformer protein language models from Meta AI's Fundamental AI Research Team, providing the state-of-the-art ESMFold and ESM-2, and the previously released ESM-1b and ESM-1v. Transformer protein language models were introduced in the paper [Biological structure and function emerge from scaling @@ -73,11 +74,6 @@ sequences with low perplexity that are well understood by the language model. ES order of magnitude faster than AlphaFold2, enabling exploration of the structural space of metagenomic proteins in practical timescales.* - -Tips: - -- ESM models are trained with a masked language modeling (MLM) objective. - The original code can be found [here](https://github.com/facebookresearch/esm) and was was developed by the Fundamental AI Research team at Meta AI. ESM-1b, ESM-1v and ESM-2 were contributed to huggingface by [jasonliu](https://huggingface.co/jasonliu) @@ -87,10 +83,12 @@ ESMFold was contributed to huggingface by [Matt](https://huggingface.co/Rocketkn [Sylvain](https://huggingface.co/sgugger), with a big thank you to Nikita Smetanin, Roshan Rao and Tom Sercu for their help throughout the process! -The HuggingFace port of ESMFold uses portions of the [openfold](https://github.com/aqlaboratory/openfold) library. -The `openfold` library is licensed under the Apache License 2.0. +## Usage tips -## Documentation resources +- ESM models are trained with a masked language modeling (MLM) objective. +- The HuggingFace port of ESMFold uses portions of the [openfold](https://github.com/aqlaboratory/openfold) library. The `openfold` library is licensed under the Apache License 2.0. + +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -109,6 +107,8 @@ The `openfold` library is licensed under the Apache License 2.0. - create_token_type_ids_from_sequences - save_vocabulary + + ## EsmModel @@ -135,6 +135,9 @@ The `openfold` library is licensed under the Apache License 2.0. [[autodoc]] EsmForProteinFolding - forward + + + ## TFEsmModel [[autodoc]] TFEsmModel @@ -154,3 +157,6 @@ The `openfold` library is licensed under the Apache License 2.0. [[autodoc]] TFEsmForTokenClassification - call + + + diff --git a/docs/source/en/model_doc/flan-t5.md b/docs/source/en/model_doc/flan-t5.md index 5d781f75b179..c0fd6b0011cc 100644 --- a/docs/source/en/model_doc/flan-t5.md +++ b/docs/source/en/model_doc/flan-t5.md @@ -48,6 +48,10 @@ Google has released the following variants: - [google/flan-t5-xxl](https://huggingface.co/google/flan-t5-xxl). -One can refer to [T5's documentation page](t5) for all tips, code examples and notebooks. As well as the FLAN-T5 model card for more details regarding training and evaluation of the model. - The original checkpoints can be found [here](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints). 
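A short, hedged example of instruction-following generation with one of the FLAN-T5 checkpoints listed above; the prompt and generation settings are illustrative:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

inputs = tokenizer("translate English to German: How old are you?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```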
+ + + +Refer to [T5's documentation page](t5) for all API reference, code examples and notebooks. For more details regarding training and evaluation of the FLAN-T5, refer to the model card. + + \ No newline at end of file diff --git a/docs/source/en/model_doc/flan-ul2.md b/docs/source/en/model_doc/flan-ul2.md index 40fad51def6f..5487bb779760 100644 --- a/docs/source/en/model_doc/flan-ul2.md +++ b/docs/source/en/model_doc/flan-ul2.md @@ -21,7 +21,6 @@ rendered properly in your Markdown viewer. Flan-UL2 is an encoder decoder model based on the T5 architecture. It uses the same configuration as the [UL2](ul2) model released earlier last year. It was fine tuned using the "Flan" prompt tuning and dataset collection. Similar to `Flan-T5`, one can directly use FLAN-UL2 weights without finetuning the model: - According to the original blog here are the notable improvements: - The original UL2 model was only trained with receptive field of 512, which made it non-ideal for N-shot prompting where N is large. @@ -29,9 +28,6 @@ According to the original blog here are the notable improvements: - The original UL2 model also had mode switch tokens that was rather mandatory to get good performance. However, they were a little cumbersome as this requires often some changes during inference or finetuning. In this update/change, we continue training UL2 20B for an additional 100k steps (with small batch) to forget “mode tokens” before applying Flan instruction tuning. This Flan-UL2 checkpoint does not require mode tokens anymore. Google has released the following variants: - -One can refer to [T5's documentation page](t5) for all tips, code examples and notebooks. As well as the FLAN-T5 model card for more details regarding training and evaluation of the model. - The original checkpoints can be found [here](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints). @@ -51,6 +47,8 @@ The model is pretty heavy (~40GB in half precision) so if you just want to run t ['In a large skillet, brown the ground beef and onion over medium heat. Add the garlic'] ``` -## Inference + + +Refer to [T5's documentation page](t5) for API reference, tips, code examples and notebooks. -The inference protocol is exactly the same as any `T5` model, please have a look at the [T5's documentation page](t5) for more details. + diff --git a/docs/source/en/model_doc/flaubert.md b/docs/source/en/model_doc/flaubert.md index 3e85bd6fa9d9..04bcc2638ac9 100644 --- a/docs/source/en/model_doc/flaubert.md +++ b/docs/source/en/model_doc/flaubert.md @@ -50,7 +50,7 @@ This model was contributed by [formiel](https://huggingface.co/formiel). The ori Tips: - Like RoBERTa, without the sentence ordering prediction (so just trained on the MLM objective). 
-## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -66,6 +66,9 @@ Tips: [[autodoc]] FlaubertTokenizer + + + ## FlaubertModel [[autodoc]] FlaubertModel @@ -101,6 +104,9 @@ Tips: [[autodoc]] FlaubertForQuestionAnswering - forward + + + ## TFFlaubertModel [[autodoc]] TFFlaubertModel @@ -130,3 +136,9 @@ Tips: [[autodoc]] TFFlaubertForQuestionAnsweringSimple - call + + + + + + diff --git a/docs/source/en/model_doc/flava.md b/docs/source/en/model_doc/flava.md index ae9da0d184a5..d9f9f1de5146 100644 --- a/docs/source/en/model_doc/flava.md +++ b/docs/source/en/model_doc/flava.md @@ -33,10 +33,8 @@ at once -- a true vision and language foundation model should be good at vision cross- and multi-modal vision and language tasks. We introduce FLAVA as such a model and demonstrate impressive performance on a wide range of 35 tasks spanning these target modalities.* - This model was contributed by [aps](https://huggingface.co/aps). The original code can be found [here](https://github.com/facebookresearch/multimodal/tree/main/examples/flava). - ## FlavaConfig [[autodoc]] FlavaConfig diff --git a/docs/source/en/model_doc/fnet.md b/docs/source/en/model_doc/fnet.md index a6d862f8a1a7..1bcae678e632 100644 --- a/docs/source/en/model_doc/fnet.md +++ b/docs/source/en/model_doc/fnet.md @@ -37,15 +37,15 @@ sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finall and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models outperform Transformer counterparts.* -Tips on usage: +This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/google-research/google-research/tree/master/f_net). -- The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with - maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum - sequence length for fine-tuning and inference. +## Usage tips -This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/google-research/google-research/tree/master/f_net). +The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with +maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum +sequence length for fine-tuning and inference. -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/focalnet.md b/docs/source/en/model_doc/focalnet.md index 21a75440b136..c4c97980f069 100644 --- a/docs/source/en/model_doc/focalnet.md +++ b/docs/source/en/model_doc/focalnet.md @@ -27,14 +27,9 @@ The abstract from the paper is the following: *We propose focal modulation networks (FocalNets in short), where self-attention (SA) is completely replaced by a focal modulation mechanism for modeling token interactions in vision. 
Focal modulation comprises three components: (i) hierarchical contextualization, implemented using a stack of depth-wise convolutional layers, to encode visual contexts from short to long ranges, (ii) gated aggregation to selectively gather contexts for each query token based on its content, and (iii) element-wise modulation or affine transformation to inject the aggregated context into the query. Extensive experiments show FocalNets outperform the state-of-the-art SA counterparts (e.g., Swin and Focal Transformers) with similar computational costs on the tasks of image classification, object detection, and segmentation. Specifically, FocalNets with tiny and base size achieve 82.3% and 83.9% top-1 accuracy on ImageNet-1K. After pretrained on ImageNet-22K in 224 resolution, it attains 86.5% and 87.3% top-1 accuracy when finetuned with resolution 224 and 384, respectively. When transferred to downstream tasks, FocalNets exhibit clear superiority. For object detection with Mask R-CNN, FocalNet base trained with 1\times outperforms the Swin counterpart by 2.1 points and already surpasses Swin trained with 3\times schedule (49.0 v.s. 48.5). For semantic segmentation with UPerNet, FocalNet base at single-scale outperforms Swin by 2.4, and beats Swin at multi-scale (50.5 v.s. 49.7). Using large FocalNet and Mask2former, we achieve 58.5 mIoU for ADE20K semantic segmentation, and 57.9 PQ for COCO Panoptic Segmentation. Using huge FocalNet and DINO, we achieved 64.3 and 64.4 mAP on COCO minival and test-dev, respectively, establishing new SoTA on top of much larger attention-based models like Swinv2-G and BEIT-3.* -Tips: - -- One can use the [`AutoImageProcessor`] class to prepare images for the model. - This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/FocalNet). - ## FocalNetConfig [[autodoc]] FocalNetConfig diff --git a/docs/source/en/model_doc/fsmt.md b/docs/source/en/model_doc/fsmt.md index 49625f6c472e..9419dce71edf 100644 --- a/docs/source/en/model_doc/fsmt.md +++ b/docs/source/en/model_doc/fsmt.md @@ -16,9 +16,6 @@ rendered properly in your Markdown viewer. # FSMT -**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign -@stas00. - ## Overview FSMT (FairSeq MachineTranslation) models were introduced in [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616) by Nathan Ng, Kyra Yee, Alexei Baevski, Myle Ott, Michael Auli, Sergey Edunov. diff --git a/docs/source/en/model_doc/funnel.md b/docs/source/en/model_doc/funnel.md index 3cc4eb0aaed6..d6929691f400 100644 --- a/docs/source/en/model_doc/funnel.md +++ b/docs/source/en/model_doc/funnel.md @@ -47,7 +47,9 @@ via a decoder. Empirically, with comparable or fewer FLOPs, Funnel-Transformer o a wide variety of sequence-level prediction tasks, including text classification, language understanding, and reading comprehension.* -Tips: +This model was contributed by [sgugger](https://huggingface.co/sgugger). The original code can be found [here](https://github.com/laiguokun/Funnel-Transformer). + +## Usage tips - Since Funnel Transformer uses pooling, the sequence length of the hidden states changes after each block of layers. This way, their length is divided by 2, which speeds up the computation of the next hidden states. 
The base model therefore has a final sequence length that is a quarter of the original one. This model can be used @@ -62,9 +64,7 @@ Tips: [`FunnelBaseModel`], [`FunnelForSequenceClassification`] and [`FunnelForMultipleChoice`]. -This model was contributed by [sgugger](https://huggingface.co/sgugger). The original code can be found [here](https://github.com/laiguokun/Funnel-Transformer). - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -95,6 +95,9 @@ This model was contributed by [sgugger](https://huggingface.co/sgugger). The ori [[autodoc]] models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput + + + ## FunnelBaseModel [[autodoc]] FunnelBaseModel @@ -135,6 +138,9 @@ This model was contributed by [sgugger](https://huggingface.co/sgugger). The ori [[autodoc]] FunnelForQuestionAnswering - forward + + + ## TFFunnelBaseModel [[autodoc]] TFFunnelBaseModel @@ -174,3 +180,6 @@ This model was contributed by [sgugger](https://huggingface.co/sgugger). The ori [[autodoc]] TFFunnelForQuestionAnswering - call + + + diff --git a/docs/source/en/model_doc/git.md b/docs/source/en/model_doc/git.md index b0c96200af3c..bffa98b89e3b 100644 --- a/docs/source/en/model_doc/git.md +++ b/docs/source/en/model_doc/git.md @@ -27,11 +27,6 @@ The abstract from the paper is the following: *In this paper, we design and train a Generative Image-to-text Transformer, GIT, to unify vision-language tasks such as image/video captioning and question answering. While generative models provide a consistent network architecture between pre-training and fine-tuning, existing work typically contains complex structures (uni/multi-modal encoder/decoder) and depends on external modules such as object detectors/taggers and optical character recognition (OCR). In GIT, we simplify the architecture as one image encoder and one text decoder under a single language modeling task. We also scale up the pre-training data and the model size to boost the model performance. Without bells and whistles, our GIT establishes new state of the arts on 12 challenging benchmarks with a large margin. For instance, our model surpasses the human performance for the first time on TextCaps (138.2 vs. 125.5 in CIDEr). Furthermore, we present a new scheme of generation-based image classification and scene text recognition, achieving decent performance on standard benchmarks.* -Tips: - -- GIT is implemented in a very similar way to GPT-2, the only difference being that the model is also conditioned on `pixel_values`. -- One can use [`GitProcessor`] to prepare images for the model, and the `generate` method for autoregressive generation. - drawing @@ -40,6 +35,10 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/GenerativeImage2Text). +## Usage tips + +- GIT is implemented in a very similar way to GPT-2, the only difference being that the model is also conditioned on `pixel_values`. + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with GIT. 
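A minimal sketch of image captioning with GIT, conditioning generation on `pixel_values` as described above; the `microsoft/git-base-coco` checkpoint and the sample image are assumptions:

```python
import requests
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

# Assumed captioning checkpoint fine-tuned on COCO.
processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

pixel_values = processor(images=image, return_tensors="pt").pixel_values

# GIT generates the caption autoregressively, conditioned on the image features.
generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```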
diff --git a/docs/source/en/model_doc/glpn.md b/docs/source/en/model_doc/glpn.md index be9a7d2d7910..b57d1a7ccdda 100644 --- a/docs/source/en/model_doc/glpn.md +++ b/docs/source/en/model_doc/glpn.md @@ -33,10 +33,6 @@ The abstract from the paper is the following: *Depth estimation from a single image is an important task that can be applied to various fields in computer vision, and has grown rapidly with the development of convolutional neural networks. In this paper, we propose a novel structure and training strategy for monocular depth estimation to further improve the prediction accuracy of the network. We deploy a hierarchical transformer encoder to capture and convey the global context, and design a lightweight yet powerful decoder to generate an estimated depth map while considering local connectivity. By constructing connected paths between multi-scale local features and the global decoding stream with our proposed selective feature fusion module, the network can integrate both representations and recover fine details. In addition, the proposed decoder shows better performance than the previously proposed decoders, with considerably less computational complexity. Furthermore, we improve the depth-specific augmentation method by utilizing an important observation in depth estimation to enhance the model. Our network achieves state-of-the-art performance over the challenging depth dataset NYU Depth V2. Extensive experiments have been conducted to validate and show the effectiveness of the proposed approach. Finally, our model shows better generalisation ability and robustness than other comparative models.* -Tips: - -- One can use [`GLPNImageProcessor`] to prepare images for the model. - drawing diff --git a/docs/source/en/model_doc/gpt-sw3.md b/docs/source/en/model_doc/gpt-sw3.md index 286cac12c998..f4d34a07212c 100644 --- a/docs/source/en/model_doc/gpt-sw3.md +++ b/docs/source/en/model_doc/gpt-sw3.md @@ -32,12 +32,8 @@ causal language modeling (CLM) objective utilizing the NeMo Megatron GPT impleme This model was contributed by [AI Sweden](https://huggingface.co/AI-Sweden). -The implementation uses the [GPT2Model](https://huggingface.co/docs/transformers/model_doc/gpt2) coupled -with our `GPTSw3Tokenizer`. This means that `AutoTokenizer` and `AutoModelForCausalLM` map to our tokenizer -implementation and the corresponding GPT2 model implementation respectively. -*Note that sentencepiece is required to use our tokenizer and can be installed with:* `pip install transformers[sentencepiece]` or `pip install sentencepiece` +## Usage example -Example usage: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM @@ -52,12 +48,21 @@ Example usage: Träd är fina för att de är färgstarka. Men ibland är det fint ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Causal language modeling task guide](../tasks/language_modeling) + + +The implementation uses the `GPT2Model` coupled with our `GPTSw3Tokenizer`. Refer to [GPT2Model documentation](gpt2) +for API reference and examples. 
+ +Note that sentencepiece is required to use our tokenizer and can be installed with `pip install transformers[sentencepiece]` or `pip install sentencepiece` + + + ## GPTSw3Tokenizer [[autodoc]] GPTSw3Tokenizer diff --git a/docs/source/en/model_doc/gpt2.md b/docs/source/en/model_doc/gpt2.md index 878bf84a3fac..4708edde0b65 100644 --- a/docs/source/en/model_doc/gpt2.md +++ b/docs/source/en/model_doc/gpt2.md @@ -39,7 +39,13 @@ text. The diversity of the dataset causes this simple goal to contain naturally across diverse domains. GPT-2 is a direct scale-up of GPT, with more than 10X the parameters and trained on more than 10X the amount of data.* -Tips: +[Write With Transformer](https://transformer.huggingface.co/doc/gpt2-large) is a webapp created and hosted by +Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five +different sizes: small, medium, large, xl and a distilled version of the small checkpoint: *distilgpt-2*. + +This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://openai.com/blog/better-language-models/). + +## Usage tips - GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -54,12 +60,6 @@ Tips: - Enabling the *scale_attn_by_inverse_layer_idx* and *reorder_and_upcast_attn* flags will apply the training stability improvements from [Mistral](https://github.com/stanford-crfm/mistral/) (for PyTorch only). -[Write With Transformer](https://transformer.huggingface.co/doc/gpt2-large) is a webapp created and hosted by -Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five -different sizes: small, medium, large, xl and a distilled version of the small checkpoint: *distilgpt-2*. - -This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://openai.com/blog/better-language-models/). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with GPT2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. @@ -100,6 +100,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput + + + ## GPT2Model [[autodoc]] GPT2Model @@ -130,6 +133,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] GPT2ForTokenClassification - forward + + + ## TFGPT2Model [[autodoc]] TFGPT2Model @@ -158,6 +164,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFGPT2Tokenizer + + + ## FlaxGPT2Model [[autodoc]] FlaxGPT2Model @@ -167,3 +176,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxGPT2LMHeadModel - __call__ + + + diff --git a/docs/source/en/model_doc/gpt_bigcode.md b/docs/source/en/model_doc/gpt_bigcode.md index 8cc77a825de7..0f3bc72d03a5 100644 --- a/docs/source/en/model_doc/gpt_bigcode.md +++ b/docs/source/en/model_doc/gpt_bigcode.md @@ -20,13 +20,13 @@ rendered properly in your Markdown viewer. The GPTBigCode model was proposed in [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by BigCode. 
The listed authors are: Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra. -The abstract from the paper is the following:uery +The abstract from the paper is the following: *The BigCode project is an open-scientific collaboration working on the responsible development of large language models for code. This tech report describes the progress of the collaboration until December 2022, outlining the current state of the Personally Identifiable Information (PII) redaction pipeline, the experiments conducted to de-risk the model architecture, and the experiments investigating better preprocessing methods for the training data. We train 1.1B parameter models on the Java, JavaScript, and Python subsets of The Stack and evaluate them on the MultiPL-E text-to-code benchmark. We find that more aggressive filtering of near-duplicates can further boost performance and, surprisingly, that selecting files from repositories with 5+ GitHub stars deteriorates performance significantly. Our best model outperforms previous open-source multilingual code generation models (InCoder-6.7B and CodeGen-Multi-2.7B) in both left-to-right generation and infilling on the Java, JavaScript, and Python portions of MultiPL-E, despite being a substantially smaller model. All models are released under an OpenRAIL license at [this https URL.](https://huggingface.co/bigcode)* -The model is a an optimized [GPT2 model](https://huggingface.co/docs/transformers/model_doc/gpt2) with support for Multi-Query Attention. +The model is an optimized [GPT2 model](https://huggingface.co/docs/transformers/model_doc/gpt2) with support for Multi-Query Attention. -## Technical details +## Implementation details The main differences compared to GPT2. - Added support for Multi-Query Attention. @@ -85,7 +85,6 @@ Below is a expected speedup diagram that compares pure inference time between th [[autodoc]] GPTBigCodeConfig - ## GPTBigCodeModel [[autodoc]] GPTBigCodeModel @@ -96,7 +95,6 @@ Below is a expected speedup diagram that compares pure inference time between th [[autodoc]] GPTBigCodeForCausalLM - forward - ## GPTBigCodeForSequenceClassification [[autodoc]] GPTBigCodeForSequenceClassification diff --git a/docs/source/en/model_doc/gpt_neo.md b/docs/source/en/model_doc/gpt_neo.md index 6b925aad10e4..6a598ef6a961 100644 --- a/docs/source/en/model_doc/gpt_neo.md +++ b/docs/source/en/model_doc/gpt_neo.md @@ -27,7 +27,7 @@ The architecture is similar to GPT2 except that GPT Neo uses local attention in This model was contributed by [valhalla](https://huggingface.co/valhalla). -### Generation +## Usage example The `generate()` method can be used to generate text using GPT Neo model. @@ -54,7 +54,7 @@ The `generate()` method can be used to generate text using GPT Neo model. 
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0] ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Causal language modeling task guide](../tasks/language_modeling) @@ -63,6 +63,10 @@ The `generate()` method can be used to generate text using GPT Neo model. [[autodoc]] GPTNeoConfig + + + + ## GPTNeoModel [[autodoc]] GPTNeoModel @@ -88,6 +92,9 @@ The `generate()` method can be used to generate text using GPT Neo model. [[autodoc]] GPTNeoForTokenClassification - forward + + + ## FlaxGPTNeoModel [[autodoc]] FlaxGPTNeoModel @@ -97,3 +104,8 @@ The `generate()` method can be used to generate text using GPT Neo model. [[autodoc]] FlaxGPTNeoForCausalLM - __call__ + + + + + diff --git a/docs/source/en/model_doc/gpt_neox.md b/docs/source/en/model_doc/gpt_neox.md index 0ee7c8630c65..300001ad5bb1 100644 --- a/docs/source/en/model_doc/gpt_neox.md +++ b/docs/source/en/model_doc/gpt_neox.md @@ -38,7 +38,7 @@ model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b").half().cud GPT-NeoX-20B also has a different tokenizer from the one used in GPT-J-6B and GPT-Neo. The new tokenizer allocates additional tokens to whitespace characters, making the model more suitable for certain tasks like code generation. -### Generation +## Usage example The `generate()` method can be used to generate text using GPT Neo model. @@ -61,7 +61,7 @@ The `generate()` method can be used to generate text using GPT Neo model. >>> gen_text = tokenizer.batch_decode(gen_tokens)[0] ``` -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) diff --git a/docs/source/en/model_doc/gpt_neox_japanese.md b/docs/source/en/model_doc/gpt_neox_japanese.md index c21ba838792a..c69e643cae5b 100644 --- a/docs/source/en/model_doc/gpt_neox_japanese.md +++ b/docs/source/en/model_doc/gpt_neox_japanese.md @@ -25,7 +25,7 @@ Following the recommendations from Google's research on [PaLM](https://ai.google Development of the model was led by [Shinya Otani](https://github.com/SO0529), [Takayoshi Makabe](https://github.com/spider-man-tm), [Anuj Arora](https://github.com/Anuj040), and [Kyo Hattori](https://github.com/go5paopao) from [ABEJA, Inc.](https://www.abejainc.com/). For more information on this model-building activity, please refer [here (ja)](https://tech-blog.abeja.asia/entry/abeja-gpt-project-202207). -### Generation +### Usage example The `generate()` method can be used to generate text using GPT NeoX Japanese model. @@ -51,7 +51,7 @@ The `generate()` method can be used to generate text using GPT NeoX Japanese mod 人とAIが協調するためには、AIと人が共存し、AIを正しく理解する必要があります。 ``` -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) diff --git a/docs/source/en/model_doc/gptj.md b/docs/source/en/model_doc/gptj.md index 5ad80a010951..b515cf36dd40 100644 --- a/docs/source/en/model_doc/gptj.md +++ b/docs/source/en/model_doc/gptj.md @@ -23,7 +23,7 @@ causal language model trained on [the Pile](https://pile.eleuther.ai/) dataset. This model was contributed by [Stella Biderman](https://huggingface.co/stellaathena). -Tips: +## Usage tips - To load [GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B) in float32 one would need at least 2x model size RAM: 1x for initial weights and another 1x to load the checkpoint. 
So for GPT-J it would take at least 48GB @@ -56,7 +56,7 @@ Tips: size, the tokenizer for [GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B) contains 143 extra tokens `<|extratoken_1|>... <|extratoken_143|>`, so the `vocab_size` of tokenizer also becomes 50400. -### Generation +## Usage examples The [`~generation.GenerationMixin.generate`] method can be used to generate text using GPT-J model. @@ -138,6 +138,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] GPTJConfig - all + + + ## GPTJModel [[autodoc]] GPTJModel @@ -158,6 +161,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] GPTJForQuestionAnswering - forward + + + ## TFGPTJModel [[autodoc]] TFGPTJModel @@ -178,6 +184,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFGPTJForQuestionAnswering - call + + + ## FlaxGPTJModel [[autodoc]] FlaxGPTJModel @@ -187,3 +196,5 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxGPTJForCausalLM - __call__ + + diff --git a/docs/source/en/model_doc/gptsan-japanese.md b/docs/source/en/model_doc/gptsan-japanese.md index 48f67f850655..1e6b1b6e1cf6 100644 --- a/docs/source/en/model_doc/gptsan-japanese.md +++ b/docs/source/en/model_doc/gptsan-japanese.md @@ -24,7 +24,7 @@ GPTSAN is a Japanese language model using Switch Transformer. It has the same st in the T5 paper, and support both Text Generation and Masked Language Modeling tasks. These basic tasks similarly can fine-tune for translation or summarization. -### Generation +### Usage example The `generate()` method can be used to generate text using GPTSAN-Japanese model. @@ -56,7 +56,7 @@ This length applies to the text entered in `prefix_text` for the tokenizer. The tokenizer returns the mask of the `Prefix` part of Prefix-LM as `token_type_ids`. The model treats the part where `token_type_ids` is 1 as a `Prefix` part, that is, the input can refer to both tokens before and after. -Tips: +## Usage tips Specifying the Prefix part is done with a mask passed to self-attention. When token_type_ids=None or all zero, it is equivalent to regular causal mask diff --git a/docs/source/en/model_doc/graphormer.md b/docs/source/en/model_doc/graphormer.md index 16d61bccbef0..08e3f5fb3e9b 100644 --- a/docs/source/en/model_doc/graphormer.md +++ b/docs/source/en/model_doc/graphormer.md @@ -23,26 +23,24 @@ The abstract from the paper is the following: *The Transformer architecture has become a dominant choice in many domains, such as natural language processing and computer vision. Yet, it has not achieved competitive performance on popular leaderboards of graph-level prediction compared to mainstream GNN variants. Therefore, it remains a mystery how Transformers could perform well for graph representation learning. In this paper, we solve this mystery by presenting Graphormer, which is built upon the standard Transformer architecture, and could attain excellent results on a broad range of graph representation learning tasks, especially on the recent OGB Large-Scale Challenge. Our key insight to utilizing Transformer in the graph is the necessity of effectively encoding the structural information of a graph into the model. To this end, we propose several simple yet effective structural encoding methods to help Graphormer better model graph-structured data. 
Besides, we mathematically characterize the expressive power of Graphormer and exhibit that with our ways of encoding the structural information of graphs, many popular GNN variants could be covered as the special cases of Graphormer.* -Tips: +This model was contributed by [clefourrier](https://huggingface.co/clefourrier). The original code can be found [here](https://github.com/microsoft/Graphormer). + +## Usage tips This model will not work well on large graphs (more than 100 nodes/edges), as it will make the memory explode. You can reduce the batch size, increase your RAM, or decrease the `UNREACHABLE_NODE_DISTANCE` parameter in algos_graphormer.pyx, but it will be hard to go above 700 nodes/edges. This model does not use a tokenizer, but instead a special collator during training. -This model was contributed by [clefourrier](https://huggingface.co/clefourrier). The original code can be found [here](https://github.com/microsoft/Graphormer). - ## GraphormerConfig [[autodoc]] GraphormerConfig - ## GraphormerModel [[autodoc]] GraphormerModel - forward - ## GraphormerForGraphClassification [[autodoc]] GraphormerForGraphClassification diff --git a/docs/source/en/model_doc/groupvit.md b/docs/source/en/model_doc/groupvit.md index cf006e284b14..8728cf0da21b 100644 --- a/docs/source/en/model_doc/groupvit.md +++ b/docs/source/en/model_doc/groupvit.md @@ -25,13 +25,13 @@ The abstract from the paper is the following: *Grouping and recognition are important components of visual scene understanding, e.g., for object detection and semantic segmentation. With end-to-end deep learning systems, grouping of image regions usually happens implicitly via top-down supervision from pixel-level recognition labels. Instead, in this paper, we propose to bring back the grouping mechanism into deep networks, which allows semantic segments to emerge automatically with only text supervision. We propose a hierarchical Grouping Vision Transformer (GroupViT), which goes beyond the regular grid structure representation and learns to group image regions into progressively larger arbitrary-shaped segments. We train GroupViT jointly with a text encoder on a large-scale image-text dataset via contrastive losses. With only text supervision and without any pixel-level annotations, GroupViT learns to group together semantic regions and successfully transfers to the task of semantic segmentation in a zero-shot manner, i.e., without any further fine-tuning. It achieves a zero-shot accuracy of 52.3% mIoU on the PASCAL VOC 2012 and 22.4% mIoU on PASCAL Context datasets, and performs competitively to state-of-the-art transfer-learning methods requiring greater levels of supervision.* -Tips: - -- You may specify `output_segmentation=True` in the forward of `GroupViTModel` to get the segmentation logits of input texts. - This model was contributed by [xvjiarui](https://huggingface.co/xvjiarui). The TensorFlow version was contributed by [ariG23498](https://huggingface.co/ariG23498) with the help of [Yih-Dar SHIEH](https://huggingface.co/ydshieh), [Amy Roberts](https://huggingface.co/amyeroberts), and [Joao Gante](https://huggingface.co/joaogante). The original code can be found [here](https://github.com/NVlabs/GroupViT). +## Usage tips + +- You may specify `output_segmentation=True` in the forward of `GroupViTModel` to get the segmentation logits of input texts. + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with GroupViT. 
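As a concrete illustration of the `output_segmentation=True` tip above, the following sketch runs zero-shot segmentation with `GroupViTModel`; the checkpoint name and text prompts are illustrative assumptions.

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, GroupViTModel

# Checkpoint name and text prompts are illustrative assumptions.
processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a photo of a cat", "a photo of a remote control"]

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    # `output_segmentation=True` adds per-text segmentation logits to the output.
    outputs = model(**inputs, output_segmentation=True)

print(outputs.logits_per_image.shape)     # image-text similarity scores
print(outputs.segmentation_logits.shape)  # (batch_size, num_texts, height, width)
```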
@@ -52,6 +52,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] GroupViTVisionConfig + + + ## GroupViTModel [[autodoc]] GroupViTModel @@ -69,6 +72,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] GroupViTVisionModel - forward + + + ## TFGroupViTModel [[autodoc]] TFGroupViTModel @@ -84,4 +90,7 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h ## TFGroupViTVisionModel [[autodoc]] TFGroupViTVisionModel - - call \ No newline at end of file + - call + + + diff --git a/docs/source/en/model_doc/herbert.md b/docs/source/en/model_doc/herbert.md index ee927bddb025..0049d6bfcf3a 100644 --- a/docs/source/en/model_doc/herbert.md +++ b/docs/source/en/model_doc/herbert.md @@ -37,7 +37,11 @@ which has the best average performance and obtains the best results for three ou extensive evaluation, including several standard baselines and recently proposed, multilingual Transformer-based models.* -Examples of use: +This model was contributed by [rmroczkowski](https://huggingface.co/rmroczkowski). The original code can be found +[here](https://github.com/allegro/HerBERT). + + +## Usage example ```python >>> from transformers import HerbertTokenizer, RobertaModel @@ -56,9 +60,12 @@ Examples of use: >>> model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1") ``` -This model was contributed by [rmroczkowski](https://huggingface.co/rmroczkowski). The original code can be found -[here](https://github.com/allegro/HerBERT). + + +Herbert implementation is the same as `BERT` except for the tokenization method. Refer to [BERT documentation](bert) +for API reference and examples. + ## HerbertTokenizer diff --git a/docs/source/en/model_doc/hubert.md b/docs/source/en/model_doc/hubert.md index 5349e1388523..43ce590d3715 100644 --- a/docs/source/en/model_doc/hubert.md +++ b/docs/source/en/model_doc/hubert.md @@ -36,15 +36,15 @@ state-of-the-art wav2vec 2.0 performance on the Librispeech (960h) and Libri-lig 10h, 100h, and 960h fine-tuning subsets. Using a 1B parameter model, HuBERT shows up to 19% and 13% relative WER reduction on the more challenging dev-other and test-other evaluation subsets.* -Tips: +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). + +# Usage tips - Hubert is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - Hubert model was fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). 
- -## Documentation resources +## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) @@ -53,6 +53,9 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv [[autodoc]] HubertConfig + + + ## HubertModel [[autodoc]] HubertModel @@ -68,6 +71,9 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv [[autodoc]] HubertForSequenceClassification - forward + + + ## TFHubertModel [[autodoc]] TFHubertModel @@ -77,3 +83,6 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv [[autodoc]] TFHubertForCTC - call + + + diff --git a/docs/source/en/model_doc/ibert.md b/docs/source/en/model_doc/ibert.md index 9c5f9c3e8de6..9ea623951aec 100644 --- a/docs/source/en/model_doc/ibert.md +++ b/docs/source/en/model_doc/ibert.md @@ -40,7 +40,7 @@ been open-sourced.* This model was contributed by [kssteven](https://huggingface.co/kssteven). The original code can be found [here](https://github.com/kssteven418/I-BERT). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/idefics.md b/docs/source/en/model_doc/idefics.md index e0017df0c52f..9989f89d682e 100644 --- a/docs/source/en/model_doc/idefics.md +++ b/docs/source/en/model_doc/idefics.md @@ -31,9 +31,9 @@ This model was contributed by [HuggingFaceM4](https://huggingface.co/HuggingFace -Idefics modeling code in Transformers is for finetuning and inferencing the pre-trained Idefics models. +IDEFICS modeling code in Transformers is for finetuning and inferencing the pre-trained IDEFICS models. -To train a new Idefics model from scratch use the m4 codebase (a link will be provided once it's made public) +To train a new IDEFICS model from scratch use the m4 codebase (a link will be provided once it's made public) diff --git a/docs/source/en/model_doc/imagegpt.md b/docs/source/en/model_doc/imagegpt.md index 01eb7dde5fc2..53a7ba3b34b7 100644 --- a/docs/source/en/model_doc/imagegpt.md +++ b/docs/source/en/model_doc/imagegpt.md @@ -40,7 +40,7 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr), based on [this issue](https://github.com/openai/image-gpt/issues/7). The original code can be found [here](https://github.com/openai/image-gpt). -Tips: +## Usage tips - ImageGPT is almost exactly the same as [GPT-2](gpt2), with the exception that a different activation function is used (namely "quick gelu"), and the layer normalization layers don't mean center the inputs. 
ImageGPT @@ -92,7 +92,6 @@ If you're interested in submitting a resource to be included here, please feel f ## ImageGPTFeatureExtractor [[autodoc]] ImageGPTFeatureExtractor - - __call__ ## ImageGPTImageProcessor @@ -103,17 +102,14 @@ If you're interested in submitting a resource to be included here, please feel f ## ImageGPTModel [[autodoc]] ImageGPTModel - - forward ## ImageGPTForCausalImageModeling [[autodoc]] ImageGPTForCausalImageModeling - - forward ## ImageGPTForImageClassification [[autodoc]] ImageGPTForImageClassification - - forward diff --git a/docs/source/en/model_doc/informer.md b/docs/source/en/model_doc/informer.md index 0d2d82a3f573..8100b2844325 100644 --- a/docs/source/en/model_doc/informer.md +++ b/docs/source/en/model_doc/informer.md @@ -39,13 +39,11 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] InformerConfig - ## InformerModel [[autodoc]] InformerModel - forward - ## InformerForPrediction [[autodoc]] InformerForPrediction diff --git a/docs/source/en/model_doc/instructblip.md b/docs/source/en/model_doc/instructblip.md index d2cf80e50a5d..1a693493fff1 100644 --- a/docs/source/en/model_doc/instructblip.md +++ b/docs/source/en/model_doc/instructblip.md @@ -21,10 +21,6 @@ The abstract from the paper is the following: *General-purpose language models that can solve various language-domain tasks have emerged driven by the pre-training and instruction-tuning pipeline. However, building general-purpose vision-language models is challenging due to the increased task discrepancy introduced by the additional visual input. Although vision-language pre-training has been widely studied, vision-language instruction tuning remains relatively less explored. In this paper, we conduct a systematic and comprehensive study on vision-language instruction tuning based on the pre-trained BLIP-2 models. We gather a wide variety of 26 publicly available datasets, transform them into instruction tuning format and categorize them into two clusters for held-in instruction tuning and held-out zero-shot evaluation. Additionally, we introduce instruction-aware visual feature extraction, a crucial method that enables the model to extract informative features tailored to the given instruction. The resulting InstructBLIP models achieve state-of-the-art zero-shot performance across all 13 held-out datasets, substantially outperforming BLIP-2 and the larger Flamingo. Our models also lead to state-of-the-art performance when finetuned on individual downstream tasks (e.g., 90.7% accuracy on ScienceQA IMG). Furthermore, we qualitatively demonstrate the advantages of InstructBLIP over concurrent multimodal models.* -Tips: - -- InstructBLIP uses the same architecture as [BLIP-2](blip2) with a tiny but important difference: it also feeds the text prompt (instruction) to the Q-Former. - drawing @@ -33,6 +29,9 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/salesforce/LAVIS/tree/main/projects/instructblip). +## Usage tips + +InstructBLIP uses the same architecture as [BLIP-2](blip2) with a tiny but important difference: it also feeds the text prompt (instruction) to the Q-Former. 
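The tip above can be exercised with a short sketch of instructed generation; the checkpoint, prompt, and generation settings below are illustrative assumptions rather than recommended defaults.

```python
import requests
import torch
from PIL import Image
from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

# Checkpoint and prompt are illustrative; any InstructBLIP checkpoint follows the same pattern.
checkpoint = "Salesforce/instructblip-vicuna-7b"
processor = InstructBlipProcessor.from_pretrained(checkpoint)
model = InstructBlipForConditionalGeneration.from_pretrained(checkpoint)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "What is unusual about this image?"

# The processor prepares both the image and the instruction; the instruction is
# also routed to the Q-Former, which is the difference from BLIP-2.
inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
generated_ids = model.generate(**inputs, max_new_tokens=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())
```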
## InstructBlipConfig diff --git a/docs/source/en/model_doc/jukebox.md b/docs/source/en/model_doc/jukebox.md index 24a80164a2d8..a6d865d86cce 100644 --- a/docs/source/en/model_doc/jukebox.md +++ b/docs/source/en/model_doc/jukebox.md @@ -32,7 +32,11 @@ The metadata such as *artist, genre and timing* are passed to each prior, in the ![JukeboxModel](https://gist.githubusercontent.com/ArthurZucker/92c1acaae62ebf1b6a951710bdd8b6af/raw/c9c517bf4eff61393f6c7dec9366ef02bdd059a3/jukebox.svg) -Tips: +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). +The original code can be found [here](https://github.com/openai/jukebox). + +## Usage tips + - This model only supports inference. This is for a few reasons, mostly because it requires a crazy amount of memory to train. Feel free to open a PR and add what's missing to have a full integration with the hugging face traineer! - This model is very slow, and takes 8h to generate a minute long audio using the 5b top prior on a V100 GPU. In order automaticallay handle the device on which the model should execute, use `accelerate`. - Contrary to the paper, the order of the priors goes from `0` to `1` as it felt more intuitive : we sample starting from `0`. @@ -67,14 +71,12 @@ The original code can be found [here](https://github.com/openai/jukebox). - upsample - _sample - ## JukeboxPrior [[autodoc]] JukeboxPrior - sample - forward - ## JukeboxVQVAE [[autodoc]] JukeboxVQVAE diff --git a/docs/source/en/model_doc/layoutlm.md b/docs/source/en/model_doc/layoutlm.md index ebf6b1a4b4fc..34b429fb7376 100644 --- a/docs/source/en/model_doc/layoutlm.md +++ b/docs/source/en/model_doc/layoutlm.md @@ -46,7 +46,7 @@ document-level pretraining. It achieves new state-of-the-art results in several understanding (from 70.72 to 79.27), receipt understanding (from 94.02 to 95.24) and document image classification (from 93.07 to 94.42).* -Tips: +## Usage tips - In addition to *input_ids*, [`~transformers.LayoutLMModel.forward`] also expects the input `bbox`, which are the bounding boxes (i.e. 2D-positions) of the input tokens. These can be obtained using an external OCR engine such @@ -123,6 +123,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] LayoutLMTokenizerFast + + + ## LayoutLMModel [[autodoc]] LayoutLMModel @@ -143,6 +146,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] LayoutLMForQuestionAnswering + + + ## TFLayoutLMModel [[autodoc]] TFLayoutLMModel @@ -162,3 +168,8 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h ## TFLayoutLMForQuestionAnswering [[autodoc]] TFLayoutLMForQuestionAnswering + + + + + diff --git a/docs/source/en/model_doc/layoutlmv2.md b/docs/source/en/model_doc/layoutlmv2.md index f2a1c65a42b1..15286d4ddb76 100644 --- a/docs/source/en/model_doc/layoutlmv2.md +++ b/docs/source/en/model_doc/layoutlmv2.md @@ -56,7 +56,7 @@ python -m pip install torchvision tesseract ``` (If you are developing for LayoutLMv2, note that passing the doctests also requires the installation of these packages.) -Tips: +## Usage tips - The main difference between LayoutLMv1 and LayoutLMv2 is that the latter incorporates visual embeddings during pre-training (while LayoutLMv1 only adds visual embeddings during fine-tuning). 
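Since the LayoutLM-family tips above rely on `bbox` inputs normalized to a 0-1000 scale, here is a small helper sketch for converting absolute OCR boxes; the example coordinates and page size are made up for illustration.

```python
def normalize_bbox(bbox, width, height):
    """Scale an absolute (x0, y0, x1, y1) box to the 0-1000 range expected in `bbox`.

    `width` and `height` are the pixel dimensions of the original document image;
    the boxes themselves typically come from an external OCR engine.
    """
    x0, y0, x1, y1 = bbox
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]


# Example word box from a hypothetical 762x800 scanned page.
print(normalize_bbox((84, 109, 136, 120), width=762, height=800))
# [110, 136, 178, 150]
```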
diff --git a/docs/source/en/model_doc/layoutlmv3.md b/docs/source/en/model_doc/layoutlmv3.md index 22e2c3ff7186..87ff32f38356 100644 --- a/docs/source/en/model_doc/layoutlmv3.md +++ b/docs/source/en/model_doc/layoutlmv3.md @@ -26,16 +26,6 @@ The abstract from the paper is the following: *Self-supervised pre-training techniques have achieved remarkable progress in Document AI. Most multimodal pre-trained models use a masked language modeling objective to learn bidirectional representations on the text modality, but they differ in pre-training objectives for the image modality. This discrepancy adds difficulty to multimodal representation learning. In this paper, we propose LayoutLMv3 to pre-train multimodal Transformers for Document AI with unified text and image masking. Additionally, LayoutLMv3 is pre-trained with a word-patch alignment objective to learn cross-modal alignment by predicting whether the corresponding image patch of a text word is masked. The simple unified architecture and training objectives make LayoutLMv3 a general-purpose pre-trained model for both text-centric and image-centric Document AI tasks. Experimental results show that LayoutLMv3 achieves state-of-the-art performance not only in text-centric tasks, including form understanding, receipt understanding, and document visual question answering, but also in image-centric tasks such as document image classification and document layout analysis.* -Tips: - -- In terms of data processing, LayoutLMv3 is identical to its predecessor [LayoutLMv2](layoutlmv2), except that: - - images need to be resized and normalized with channels in regular RGB format. LayoutLMv2 on the other hand normalizes the images internally and expects the channels in BGR format. - - text is tokenized using byte-pair encoding (BPE), as opposed to WordPiece. - Due to these differences in data preprocessing, one can use [`LayoutLMv3Processor`] which internally combines a [`LayoutLMv3ImageProcessor`] (for the image modality) and a [`LayoutLMv3Tokenizer`]/[`LayoutLMv3TokenizerFast`] (for the text modality) to prepare all data for the model. -- Regarding usage of [`LayoutLMv3Processor`], we refer to the [usage guide](layoutlmv2#usage-layoutlmv2processor) of its predecessor. -- Demo notebooks for LayoutLMv3 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3). -- Demo scripts can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3). - drawing @@ -43,6 +33,14 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The TensorFlow version of this model was added by [chriskoo](https://huggingface.co/chriskoo), [tokec](https://huggingface.co/tokec), and [lre](https://huggingface.co/lre). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/layoutlmv3). +## Usage tips + +- In terms of data processing, LayoutLMv3 is identical to its predecessor [LayoutLMv2](layoutlmv2), except that: + - images need to be resized and normalized with channels in regular RGB format. LayoutLMv2 on the other hand normalizes the images internally and expects the channels in BGR format. + - text is tokenized using byte-pair encoding (BPE), as opposed to WordPiece. 
+ Due to these differences in data preprocessing, one can use [`LayoutLMv3Processor`] which internally combines a [`LayoutLMv3ImageProcessor`] (for the image modality) and a [`LayoutLMv3Tokenizer`]/[`LayoutLMv3TokenizerFast`] (for the text modality) to prepare all data for the model. +- Regarding usage of [`LayoutLMv3Processor`], we refer to the [usage guide](layoutlmv2#usage-layoutlmv2processor) of its predecessor. + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LayoutLMv3. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. @@ -53,6 +51,9 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2 +- Demo notebooks for LayoutLMv3 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3). +- Demo scripts can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3). + - [`LayoutLMv2ForSequenceClassification`] is supported by this [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/RVL-CDIP/Fine_tuning_LayoutLMv2ForSequenceClassification_on_RVL_CDIP.ipynb). @@ -103,6 +104,9 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2 [[autodoc]] LayoutLMv3Processor - __call__ + + + ## LayoutLMv3Model [[autodoc]] LayoutLMv3Model @@ -123,6 +127,9 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2 [[autodoc]] LayoutLMv3ForQuestionAnswering - forward + + + ## TFLayoutLMv3Model [[autodoc]] TFLayoutLMv3Model @@ -142,3 +149,6 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2 [[autodoc]] TFLayoutLMv3ForQuestionAnswering - call + + + diff --git a/docs/source/en/model_doc/layoutxlm.md b/docs/source/en/model_doc/layoutxlm.md index 8858560bbb21..f6b2cbef9d6f 100644 --- a/docs/source/en/model_doc/layoutxlm.md +++ b/docs/source/en/model_doc/layoutxlm.md @@ -33,6 +33,10 @@ introduce a multilingual form understanding benchmark dataset named XFUN, which for each language. Experiment results show that the LayoutXLM model has significantly outperformed the existing SOTA cross-lingual pre-trained models on the XFUN dataset.* +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm). + +## Usage tips and examples + One can directly plug in the weights of LayoutXLM into a LayoutLMv2 model, like so: ```python @@ -56,10 +60,10 @@ Similar to LayoutLMv2, you can use [`LayoutXLMProcessor`] (which internally appl [`LayoutXLMTokenizer`]/[`LayoutXLMTokenizerFast`] in sequence) to prepare all data for the model. -As LayoutXLM's architecture is equivalent to that of LayoutLMv2, one can refer to [LayoutLMv2's documentation page](layoutlmv2) for all tips, code examples and notebooks. - -This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm). + +As LayoutXLM's architecture is equivalent to that of LayoutLMv2, one can refer to [LayoutLMv2's documentation page](layoutlmv2) for all tips, code examples and notebooks. 
+ ## LayoutXLMTokenizer diff --git a/docs/source/en/model_doc/led.md b/docs/source/en/model_doc/led.md index 9ba9383a59d5..9a39b0b28ede 100644 --- a/docs/source/en/model_doc/led.md +++ b/docs/source/en/model_doc/led.md @@ -35,7 +35,7 @@ WikiHop and TriviaQA. We finally introduce the Longformer-Encoder-Decoder (LED), long document generative sequence-to-sequence tasks, and demonstrate its effectiveness on the arXiv summarization dataset.* -Tips: +## Usage tips - [`LEDForConditionalGeneration`] is an extension of [`BartForConditionalGeneration`] exchanging the traditional *self-attention* layer with @@ -52,15 +52,15 @@ Tips: errors. This can be done by executing `model.gradient_checkpointing_enable()`. Moreover, the `use_cache=False` flag can be used to disable the caching mechanism to save memory. -- A notebook showing how to evaluate LED, can be accessed [here](https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing). -- A notebook showing how to fine-tune LED, can be accessed [here](https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing). - LED is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). -## Documentation resources +## Resources +- [A notebook showing how to evaluate LED](https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing). +- [A notebook showing how to fine-tune LED](https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing). - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) - [Translation task guide](../tasks/translation) @@ -100,6 +100,9 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv [[autodoc]] models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput + + + ## LEDModel [[autodoc]] LEDModel @@ -120,6 +123,9 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv [[autodoc]] LEDForQuestionAnswering - forward + + + ## TFLEDModel [[autodoc]] TFLEDModel @@ -129,3 +135,9 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv [[autodoc]] TFLEDForConditionalGeneration - call + + + + + + diff --git a/docs/source/en/model_doc/levit.md b/docs/source/en/model_doc/levit.md index 8145be775f52..15dc2f4e1373 100644 --- a/docs/source/en/model_doc/levit.md +++ b/docs/source/en/model_doc/levit.md @@ -38,7 +38,9 @@ alt="drawing" width="600"/> LeViT Architecture. Taken from the original paper. -Tips: +This model was contributed by [anugunj](https://huggingface.co/anugunj). The original code can be found [here](https://github.com/facebookresearch/LeViT). + +## Usage tips - Compared to ViT, LeViT models use an additional distillation head to effectively learn from a teacher (which, in the LeViT paper, is a ResNet like-model). The distillation head is learned through backpropagation under supervision of a ResNet like-model. They also draw inspiration from convolution neural networks to use activation maps with decreasing resolutions to increase the efficiency. 
- There are 2 ways to fine-tune distilled models, either (1) in a classic way, by only placing a prediction head on top @@ -63,8 +65,6 @@ Tips: - You can check out demo notebooks regarding inference as well as fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace [`ViTFeatureExtractor`] by [`LevitImageProcessor`] and [`ViTForImageClassification`] by [`LevitForImageClassification`] or [`LevitForImageClassificationWithTeacher`]). -This model was contributed by [anugunj](https://huggingface.co/anugunj). The original code can be found [here](https://github.com/facebookresearch/LeViT). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LeViT. @@ -90,7 +90,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] LevitImageProcessor - preprocess - ## LevitModel [[autodoc]] LevitModel diff --git a/docs/source/en/model_doc/lilt.md b/docs/source/en/model_doc/lilt.md index 901deefd7ffe..fb279573fbfd 100644 --- a/docs/source/en/model_doc/lilt.md +++ b/docs/source/en/model_doc/lilt.md @@ -26,7 +26,15 @@ The abstract from the paper is the following: *Structured document understanding has attracted considerable attention and made significant progress recently, owing to its crucial role in intelligent document processing. However, most existing related models can only deal with the document data of specific language(s) (typically English) included in the pre-training collection, which is extremely limited. To address this issue, we propose a simple yet effective Language-independent Layout Transformer (LiLT) for structured document understanding. LiLT can be pre-trained on the structured documents of a single language and then directly fine-tuned on other languages with the corresponding off-the-shelf monolingual/multilingual pre-trained textual models. Experimental results on eight languages have shown that LiLT can achieve competitive or even superior performance on diverse widely-used downstream benchmarks, which enables language-independent benefit from the pre-training of document layout structure.* -Tips: + + + LiLT architecture. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). +The original code can be found [here](https://github.com/jpwang/lilt). + +## Usage tips - To combine the Language-Independent Layout Transformer with a new RoBERTa checkpoint from the [hub](https://huggingface.co/models?search=roberta), refer to [this guide](https://github.com/jpWang/LiLT#or-generate-your-own-checkpoint-optional). The script will result in `config.json` and `pytorch_model.bin` files being stored locally. After doing this, one can do the following (assuming you're logged in with your HuggingFace account): @@ -42,14 +50,6 @@ model.push_to_hub("name_of_repo_on_the_hub") - As [lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) uses the same vocabulary as [LayoutLMv3](layoutlmv3), one can use [`LayoutLMv3TokenizerFast`] to prepare data for the model. The same is true for [lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-infoxlm-base): one can use [`LayoutXLMTokenizerFast`] for that model. - - - LiLT architecture. Taken from the original paper. - -This model was contributed by [nielsr](https://huggingface.co/nielsr). -The original code can be found [here](https://github.com/jpwang/lilt). 
- ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LiLT. diff --git a/docs/source/en/model_doc/llama.md b/docs/source/en/model_doc/llama.md index e63e4b1ab3b3..9f55c425d448 100644 --- a/docs/source/en/model_doc/llama.md +++ b/docs/source/en/model_doc/llama.md @@ -24,7 +24,9 @@ The abstract from the paper is the following: *We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community. * -Tips: +This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). + +## Usage tips - Weights for the LLaMA models can be obtained from by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form) - After downloading the weights, they will need to be converted to the Hugging Face Transformers format using the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). The script can be called with the following (example) command: @@ -48,9 +50,6 @@ come in several checkpoints they each contain a part of each weight of the model - The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string. -This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). - - Based on the original LLaMA model, Meta AI has released some follow-up works: - **Llama2**: Llama2 is an improved version of Llama with some architectural tweaks (Grouped Query Attention), and is pre-trained on 2Trillion tokens. Refer to the documentation of Llama2 which can be found [here](llama2). 
@@ -82,7 +81,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] LlamaConfig - ## LlamaTokenizer [[autodoc]] LlamaTokenizer @@ -105,7 +103,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] LlamaModel - forward - ## LlamaForCausalLM [[autodoc]] LlamaForCausalLM diff --git a/docs/source/en/model_doc/llama2.md b/docs/source/en/model_doc/llama2.md index 0ff1e38f16a2..a817a866c0fa 100644 --- a/docs/source/en/model_doc/llama2.md +++ b/docs/source/en/model_doc/llama2.md @@ -24,7 +24,10 @@ The abstract from the paper is the following: *In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.* -Checkout all Llama2 models [here](https://huggingface.co/models?search=llama2) +Checkout all Llama2 model checkpoints [here](https://huggingface.co/models?search=llama2). +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ) with contributions from [Lysandre Debut](https://huggingface.co/lysandre). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). + +## Usage tips @@ -64,7 +67,6 @@ come in several checkpoints they each contain a part of each weight of the model - The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string. -This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ) with contributions from [Lysandre Debut](https://huggingface.co/lysandre). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). ## Resources diff --git a/docs/source/en/model_doc/longformer.md b/docs/source/en/model_doc/longformer.md index 9947195058cc..20ba7a922515 100644 --- a/docs/source/en/model_doc/longformer.md +++ b/docs/source/en/model_doc/longformer.md @@ -41,15 +41,15 @@ contrast to most prior work, we also pretrain Longformer and finetune it on a va pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.* -Tips: +This model was contributed by [beltagy](https://huggingface.co/beltagy). The Authors' code can be found [here](https://github.com/allenai/longformer). + +## Usage tips - Since the Longformer is based on RoBERTa, it doesn't have `token_type_ids`. You don't need to indicate which token belongs to which segment. Just separate your segments with the separation token `tokenizer.sep_token` (or ``). 
- A transformer model replacing the attention matrices by sparse matrices to go faster. Often, the local context (e.g., what are the two tokens left and right?) is enough to take action for a given token. Some preselected input tokens are still given global attention, but the attention matrix has way less parameters, resulting in a speed-up. See the local attention section for more information. -This model was contributed by [beltagy](https://huggingface.co/beltagy). The Authors' code can be found [here](https://github.com/allenai/longformer). - ## Longformer Self Attention Longformer self attention employs self attention on both a "local" context and a "global" context. Most tokens only @@ -93,7 +93,7 @@ mlm_labels = tokenizer.encode("This is a sentence from the training data", retur loss = model(input_ids, labels=input_ids, masked_lm_labels=mlm_labels)[0] ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -143,6 +143,9 @@ loss = model(input_ids, labels=input_ids, masked_lm_labels=mlm_labels)[0] [[autodoc]] models.longformer.modeling_tf_longformer.TFLongformerTokenClassifierOutput + + + ## LongformerModel [[autodoc]] LongformerModel @@ -173,6 +176,9 @@ loss = model(input_ids, labels=input_ids, masked_lm_labels=mlm_labels)[0] [[autodoc]] LongformerForQuestionAnswering - forward + + + ## TFLongformerModel [[autodoc]] TFLongformerModel @@ -202,3 +208,6 @@ loss = model(input_ids, labels=input_ids, masked_lm_labels=mlm_labels)[0] [[autodoc]] TFLongformerForMultipleChoice - call + + + diff --git a/docs/source/en/model_doc/longt5.md b/docs/source/en/model_doc/longt5.md index e8dcfe3b237f..40faa6d8c237 100644 --- a/docs/source/en/model_doc/longt5.md +++ b/docs/source/en/model_doc/longt5.md @@ -36,7 +36,10 @@ attention ideas from long-input transformers (ETC), and adopted pre-training str able to achieve state-of-the-art results on several summarization tasks and outperform the original T5 models on question answering tasks.* -Tips: +This model was contributed by [stancld](https://huggingface.co/stancld). +The original code can be found [here](https://github.com/google-research/longt5). + +## Usage tips - [`LongT5ForConditionalGeneration`] is an extension of [`T5ForConditionalGeneration`] exchanging the traditional encoder *self-attention* layer with efficient either *local* attention or *transient-global* (*tglobal*) attention. @@ -87,10 +90,8 @@ The complexity of this mechanism is `O(l(r + l/k))`. >>> rouge.compute(predictions=result["predicted_abstract"], references=result["abstract"]) ``` -This model was contributed by [stancld](https://huggingface.co/stancld). -The original code can be found [here](https://github.com/google-research/longt5). 
-## Documentation resources +## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) @@ -99,6 +100,9 @@ The original code can be found [here](https://github.com/google-research/longt5) [[autodoc]] LongT5Config + + + ## LongT5Model [[autodoc]] LongT5Model @@ -114,6 +118,9 @@ The original code can be found [here](https://github.com/google-research/longt5) [[autodoc]] LongT5EncoderModel - forward + + + ## FlaxLongT5Model [[autodoc]] FlaxLongT5Model @@ -127,3 +134,6 @@ The original code can be found [here](https://github.com/google-research/longt5) - __call__ - encode - decode + + + diff --git a/docs/source/en/model_doc/luke.md b/docs/source/en/model_doc/luke.md index 2947c7c41bdf..4e070b1c4bac 100644 --- a/docs/source/en/model_doc/luke.md +++ b/docs/source/en/model_doc/luke.md @@ -37,7 +37,9 @@ state-of-the-art results on five well-known datasets: Open Entity (entity typing CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering).* -Tips: +This model was contributed by [ikuyamada](https://huggingface.co/ikuyamada) and [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/studio-ousia/luke). + +## Usage tips - This implementation is the same as [`RobertaModel`] with the addition of entity embeddings as well as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities. @@ -75,13 +77,7 @@ Tips: head models by specifying `task="entity_classification"`, `task="entity_pair_classification"`, or `task="entity_span_classification"`. Please refer to the example code of each head models. - A demo notebook on how to fine-tune [`LukeForEntityPairClassification`] for relation - classification can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LUKE). - - There are also 3 notebooks available, which showcase how you can reproduce the results as reported in the paper with - the HuggingFace implementation of LUKE. They can be found [here](https://github.com/studio-ousia/luke/tree/master/notebooks). - -Example: +Usage example: ```python >>> from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification @@ -119,10 +115,10 @@ Example: >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` -This model was contributed by [ikuyamada](https://huggingface.co/ikuyamada) and [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/studio-ousia/luke). - -## Documentation resources +## Resources +- [A demo notebook on how to fine-tune [`LukeForEntityPairClassification`] for relation classification](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LUKE) +- [Notebooks showcasing how you to reproduce the results as reported in the paper with the HuggingFace implementation of LUKE](https://github.com/studio-ousia/luke/tree/master/notebooks) - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) diff --git a/docs/source/en/model_doc/lxmert.md b/docs/source/en/model_doc/lxmert.md index 114539f61e81..435994196b43 100644 --- a/docs/source/en/model_doc/lxmert.md +++ b/docs/source/en/model_doc/lxmert.md @@ -41,7 +41,9 @@ best result by 22% absolute (54% to 76%). 
Lastly, we demonstrate detailed ablati model components and pretraining strategies significantly contribute to our strong results; and also present several attention visualizations for the different encoders* -Tips: +This model was contributed by [eltoto1219](https://huggingface.co/eltoto1219). The original code can be found [here](https://github.com/airsplay/lxmert). + +## Usage tips - Bounding boxes are not necessary to be used in the visual feature embeddings, any kind of visual-spacial features will work. @@ -53,9 +55,7 @@ Tips: contains self-attention for each respective modality and cross-attention, only the cross attention is returned and both self attention outputs are disregarded. -This model was contributed by [eltoto1219](https://huggingface.co/eltoto1219). The original code can be found [here](https://github.com/airsplay/lxmert). - -## Documentation resources +## Resources - [Question answering task guide](../tasks/question_answering) @@ -83,6 +83,9 @@ This model was contributed by [eltoto1219](https://huggingface.co/eltoto1219). T [[autodoc]] models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput + + + ## LxmertModel [[autodoc]] LxmertModel @@ -98,6 +101,9 @@ This model was contributed by [eltoto1219](https://huggingface.co/eltoto1219). T [[autodoc]] LxmertForQuestionAnswering - forward + + + ## TFLxmertModel [[autodoc]] TFLxmertModel @@ -107,3 +113,6 @@ This model was contributed by [eltoto1219](https://huggingface.co/eltoto1219). T [[autodoc]] TFLxmertForPreTraining - call + + + diff --git a/docs/source/en/model_doc/m2m_100.md b/docs/source/en/model_doc/m2m_100.md index c2b4354c6d5f..fa808c2e94bb 100644 --- a/docs/source/en/model_doc/m2m_100.md +++ b/docs/source/en/model_doc/m2m_100.md @@ -38,7 +38,7 @@ open-source our scripts so that others may reproduce the data, evaluation, and f This model was contributed by [valhalla](https://huggingface.co/valhalla). -### Training and Generation +## Usage tips and examples M2M100 is a multilingual encoder-decoder (seq-to-seq) model primarily intended for translation tasks. As the model is multilingual it expects the sequences in a certain format: A special language id token is used as prefix in both the @@ -48,7 +48,7 @@ id for source text and target language id for target text, with `X` being the so The [`M2M100Tokenizer`] depends on `sentencepiece` so be sure to install it before running the examples. To install `sentencepiece` run `pip install sentencepiece`. -- Supervised Training +**Supervised Training** ```python from transformers import M2M100Config, M2M100ForConditionalGeneration, M2M100Tokenizer @@ -64,12 +64,12 @@ model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") loss = model(**model_inputs).loss # forward pass ``` -- Generation +**Generation** - M2M100 uses the `eos_token_id` as the `decoder_start_token_id` for generation with the target language id - being forced as the first generated token. To force the target language id as the first generated token, pass the - *forced_bos_token_id* parameter to the *generate* method. The following example shows how to translate between - Hindi to French and Chinese to English using the *facebook/m2m100_418M* checkpoint. +M2M100 uses the `eos_token_id` as the `decoder_start_token_id` for generation with the target language id +being forced as the first generated token. To force the target language id as the first generated token, pass the +*forced_bos_token_id* parameter to the *generate* method. 
The following example shows how to translate between +Hindi to French and Chinese to English using the *facebook/m2m100_418M* checkpoint. ```python >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer @@ -95,7 +95,7 @@ loss = model(**model_inputs).loss # forward pass "Life is like a box of chocolate." ``` -## Documentation resources +## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) diff --git a/docs/source/en/model_doc/marian.md b/docs/source/en/model_doc/marian.md index 8be41686594c..8078ea1427c9 100644 --- a/docs/source/en/model_doc/marian.md +++ b/docs/source/en/model_doc/marian.md @@ -25,14 +25,11 @@ rendered properly in your Markdown viewer. -**Bugs:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=sshleifer&labels=&template=bug-report.md&title) -and assign @patrickvonplaten. +## Overview -Translations should be similar, but not identical to output in the test set linked to in each model card. +A framework for translation models, using the same models as BART. Translations should be similar, but not identical to output in the test set linked to in each model card. +This model was contributed by [sshleifer](https://huggingface.co/sshleifer). -Tips: - -- A framework for translation models, using the same models as BART. ## Implementation Notes @@ -49,7 +46,7 @@ Tips: - the model starts generating with `pad_token_id` (which has 0 as a token_embedding) as the prefix (Bart uses ``), - Code to bulk convert models can be found in `convert_marian_to_pytorch.py`. -- This model was contributed by [sshleifer](https://huggingface.co/sshleifer). + ## Naming @@ -165,7 +162,7 @@ Example of translating english to many romance languages, using old-style 2 char 'Y esto al español'] ``` -## Documentation resources +## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) @@ -180,6 +177,9 @@ Example of translating english to many romance languages, using old-style 2 char [[autodoc]] MarianTokenizer - build_inputs_with_special_tokens + + + ## MarianModel [[autodoc]] MarianModel @@ -195,6 +195,9 @@ Example of translating english to many romance languages, using old-style 2 char [[autodoc]] MarianForCausalLM - forward + + + ## TFMarianModel [[autodoc]] TFMarianModel @@ -205,6 +208,9 @@ Example of translating english to many romance languages, using old-style 2 char [[autodoc]] TFMarianMTModel - call + + + ## FlaxMarianModel [[autodoc]] FlaxMarianModel @@ -214,3 +220,6 @@ Example of translating english to many romance languages, using old-style 2 char [[autodoc]] FlaxMarianMTModel - __call__ + + + diff --git a/docs/source/en/model_doc/markuplm.md b/docs/source/en/model_doc/markuplm.md index b286c4fc00c1..8150892e63f8 100644 --- a/docs/source/en/model_doc/markuplm.md +++ b/docs/source/en/model_doc/markuplm.md @@ -40,19 +40,19 @@ HTML/XML-based documents, where text and markup information is jointly pre-train pre-trained MarkupLM significantly outperforms the existing strong baseline models on several document understanding tasks. The pre-trained model and code will be publicly available.* -Tips: +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/markuplm). 
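To connect the Marian naming and implementation notes above to runnable code, here is a minimal bilingual translation sketch. The `Helsinki-NLP/opus-mt-en-de` checkpoint is an illustrative choice following the `opus-mt-{src}-{tgt}` scheme; multilingual checkpoints additionally expect a target-language token prepended to the source text, as in the romance-languages example above.

```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-de"  # opus-mt-{src}-{tgt}; en-de is illustrative
tokenizer = MarianTokenizer.from_pretrained(model_name)  # requires sentencepiece
model = MarianMTModel.from_pretrained(model_name)

batch = tokenizer(["Marian models are small enough to run on CPU."], return_tensors="pt", padding=True)
generated_ids = model.generate(**batch, max_new_tokens=60)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```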
+ +## Usage tips + - In addition to `input_ids`, [`~MarkupLMModel.forward`] expects 2 additional inputs, namely `xpath_tags_seq` and `xpath_subs_seq`. These are the XPATH tags and subscripts respectively for each token in the input sequence. - One can use [`MarkupLMProcessor`] to prepare all data for the model. Refer to the [usage guide](#usage-markuplmprocessor) for more info. -- Demo notebooks can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/MarkupLM). drawing MarkupLM architecture. Taken from the original paper. -This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/markuplm). - ## Usage: MarkupLMProcessor The easiest way to prepare data for the model is to use [`MarkupLMProcessor`], which internally combines a feature extractor @@ -197,8 +197,9 @@ all nodes and xpaths yourself, you can provide them directly to the processor. M dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` -## Documentation resources +## Resources +- [Demo notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/MarkupLM) - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) diff --git a/docs/source/en/model_doc/mask2former.md b/docs/source/en/model_doc/mask2former.md index ddfa5da2ba2c..bd5ab80728eb 100644 --- a/docs/source/en/model_doc/mask2former.md +++ b/docs/source/en/model_doc/mask2former.md @@ -25,16 +25,17 @@ The abstract from the paper is the following: *Image segmentation groups pixels with different semantics, e.g., category or instance membership. Each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K).* -Tips: -- Mask2Former uses the same preprocessing and postprocessing steps as [MaskFormer](maskformer). Use [`Mask2FormerImageProcessor`] or [`AutoImageProcessor`] to prepare images and optional targets for the model. -- To get the final segmentation, depending on the task, you can call [`~Mask2FormerImageProcessor.post_process_semantic_segmentation`] or [`~Mask2FormerImageProcessor.post_process_instance_segmentation`] or [`~Mask2FormerImageProcessor.post_process_panoptic_segmentation`]. All three tasks can be solved using [`Mask2FormerForUniversalSegmentation`] output, panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object/s (e.g. sky) together. - drawing Mask2Former architecture. Taken from the original paper. This model was contributed by [Shivalika Singh](https://huggingface.co/shivi) and [Alara Dirik](https://huggingface.co/adirik). 
The original code can be found [here](https://github.com/facebookresearch/Mask2Former). +## Usage tips + +- Mask2Former uses the same preprocessing and postprocessing steps as [MaskFormer](maskformer). Use [`Mask2FormerImageProcessor`] or [`AutoImageProcessor`] to prepare images and optional targets for the model. +- To get the final segmentation, depending on the task, you can call [`~Mask2FormerImageProcessor.post_process_semantic_segmentation`] or [`~Mask2FormerImageProcessor.post_process_instance_segmentation`] or [`~Mask2FormerImageProcessor.post_process_panoptic_segmentation`]. All three tasks can be solved using [`Mask2FormerForUniversalSegmentation`] output, panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object/s (e.g. sky) together. + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Mask2Former. @@ -44,16 +45,16 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we will review it. The resource should ideally demonstrate something new instead of duplicating an existing resource. +## Mask2FormerConfig + +[[autodoc]] Mask2FormerConfig + ## MaskFormer specific outputs [[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerModelOutput [[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerForUniversalSegmentationOutput -## Mask2FormerConfig - -[[autodoc]] Mask2FormerConfig - ## Mask2FormerModel [[autodoc]] Mask2FormerModel diff --git a/docs/source/en/model_doc/maskformer.md b/docs/source/en/model_doc/maskformer.md index 4695e54857f7..5566dec58593 100644 --- a/docs/source/en/model_doc/maskformer.md +++ b/docs/source/en/model_doc/maskformer.md @@ -31,7 +31,14 @@ The abstract from the paper is the following: *Modern approaches typically formulate semantic segmentation as a per-pixel classification task, while instance-level segmentation is handled with an alternative mask classification. Our key insight: mask classification is sufficiently general to solve both semantic- and instance-level segmentation tasks in a unified manner using the exact same model, loss, and training procedure. Following this observation, we propose MaskFormer, a simple mask classification model which predicts a set of binary masks, each associated with a single global class label prediction. Overall, the proposed mask classification-based method simplifies the landscape of effective approaches to semantic and panoptic segmentation tasks and shows excellent empirical results. In particular, we observe that MaskFormer outperforms per-pixel classification baselines when the number of classes is large. Our mask classification-based method outperforms both current state-of-the-art semantic (55.6 mIoU on ADE20K) and panoptic segmentation (52.7 PQ on COCO) models.* -Tips: +The figure below illustrates the architecture of MaskFormer. Taken from the [original paper](https://arxiv.org/abs/2107.06278). + + + +This model was contributed by [francesco](https://huggingface.co/francesco). The original code can be found [here](https://github.com/facebookresearch/MaskFormer). + +## Usage tips + - MaskFormer's Transformer decoder is identical to the decoder of [DETR](detr). During training, the authors of DETR did find it helpful to use auxiliary losses in the decoder, especially to help the model output the correct number of objects of each class. 
If you set the parameter `use_auxilary_loss` of [`MaskFormerConfig`] to `True`, then prediction feedforward neural networks and Hungarian losses are added after each decoder layer (with the FFNs sharing parameters). - If you want to train the model in a distributed environment across multiple nodes, then one should update the `get_num_masks` function inside in the `MaskFormerLoss` class of `modeling_maskformer.py`. When training on multiple nodes, this should be @@ -39,12 +46,6 @@ Tips: - One can use [`MaskFormerImageProcessor`] to prepare images for the model and optional targets for the model. - To get the final segmentation, depending on the task, you can call [`~MaskFormerImageProcessor.post_process_semantic_segmentation`] or [`~MaskFormerImageProcessor.post_process_panoptic_segmentation`]. Both tasks can be solved using [`MaskFormerForInstanceSegmentation`] output, panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object/s (e.g. sky) together. -The figure below illustrates the architecture of MaskFormer. Taken from the [original paper](https://arxiv.org/abs/2107.06278). - - - -This model was contributed by [francesco](https://huggingface.co/francesco). The original code can be found [here](https://github.com/facebookresearch/MaskFormer). - ## Resources diff --git a/docs/source/en/model_doc/matcha.md b/docs/source/en/model_doc/matcha.md index 20c403413feb..d4ee33059367 100644 --- a/docs/source/en/model_doc/matcha.md +++ b/docs/source/en/model_doc/matcha.md @@ -67,4 +67,10 @@ from transformers.optimization import Adafactor, get_cosine_schedule_with_warmup optimizer = Adafactor(self.parameters(), scale_parameter=False, relative_step=False, lr=0.01, weight_decay=1e-05) scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=1000, num_training_steps=40000) -``` \ No newline at end of file +``` + + + +MatCha is a model that is trained using `Pix2Struct` architecture. You can find more information about `Pix2Struct` in the [Pix2Struct documentation](https://huggingface.co/docs/transformers/main/en/model_doc/pix2struct). + + \ No newline at end of file diff --git a/docs/source/en/model_doc/mbart.md b/docs/source/en/model_doc/mbart.md index 8a614dd50556..e7fc0bd53efa 100644 --- a/docs/source/en/model_doc/mbart.md +++ b/docs/source/en/model_doc/mbart.md @@ -25,8 +25,6 @@ rendered properly in your Markdown viewer. 
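As a concrete sketch of the MaskFormer pre- and post-processing flow described in the tips above: the image processor prepares the inputs, and the task-specific `post_process_*` method turns the raw outputs into a segmentation map. The `facebook/maskformer-swin-base-coco` checkpoint and the COCO test-image URL are illustrative assumptions.

```python
import requests
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

checkpoint = "facebook/maskformer-swin-base-coco"  # illustrative panoptic checkpoint
processor = MaskFormerImageProcessor.from_pretrained(checkpoint)
model = MaskFormerForInstanceSegmentation.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Panoptic post-processing at the original resolution; `label_ids_to_fuse` could be
# passed here to merge instances of a class, as noted in the tips above.
result = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(result["segmentation"].shape, len(result["segments_info"]))
```

Semantic segmentation uses `post_process_semantic_segmentation` on the same outputs instead.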
-**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign -@patrickvonplaten ## Overview of MBart @@ -186,6 +184,9 @@ tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) [[autodoc]] MBart50TokenizerFast + + + ## MBartModel [[autodoc]] MBartModel @@ -207,6 +208,9 @@ tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) [[autodoc]] MBartForCausalLM - forward + + + ## TFMBartModel [[autodoc]] TFMBartModel @@ -217,6 +221,9 @@ tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) [[autodoc]] TFMBartForConditionalGeneration - call + + + ## FlaxMBartModel [[autodoc]] FlaxMBartModel @@ -244,3 +251,6 @@ tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) - __call__ - encode - decode + + + diff --git a/docs/source/en/model_doc/mctct.md b/docs/source/en/model_doc/mctct.md index 72d4bedfac69..7cf1a68f12e4 100644 --- a/docs/source/en/model_doc/mctct.md +++ b/docs/source/en/model_doc/mctct.md @@ -40,17 +40,15 @@ pseudo-labels for all languages, either from scratch or by fine-tuning. Experime Common Voice and unlabeled VoxPopuli datasets show that our recipe can yield a model with better performance for many languages that also transfers well to LibriSpeech.* - - This model was contributed by [cwkeam](https://huggingface.co/cwkeam). The original code can be found [here](https://github.com/flashlight/wav2letter/tree/main/recipes/mling_pl). -## Documentation resources +## Usage tips -- [Automatic speech recognition task guide](../tasks/asr) +The PyTorch version of this model is only available in torch 1.9 and higher. -Tips: +## Resources -- The PyTorch version of this model is only available in torch 1.9 and higher. +- [Automatic speech recognition task guide](../tasks/asr) ## MCTCTConfig @@ -70,7 +68,6 @@ Tips: - batch_decode - decode - ## MCTCTModel [[autodoc]] MCTCTModel diff --git a/docs/source/en/model_doc/mega.md b/docs/source/en/model_doc/mega.md index d4d68b9becd1..4ce62ca45a1d 100644 --- a/docs/source/en/model_doc/mega.md +++ b/docs/source/en/model_doc/mega.md @@ -28,15 +28,17 @@ The abstract from the paper is the following: *The design choices in the Transformer attention mechanism, including weak inductive bias and quadratic computational complexity, have limited its application for modeling long sequences. In this paper, we introduce Mega, a simple, theoretically grounded, single-head gated attention mechanism equipped with (exponential) moving average to incorporate inductive bias of position-aware local dependencies into the position-agnostic attention mechanism. We further propose a variant of Mega that offers linear time and space complexity yet yields only minimal quality loss, by efficiently splitting the whole sequence into multiple chunks with fixed length. Extensive experiments on a wide range of sequence modeling benchmarks, including the Long Range Arena, neural machine translation, auto-regressive language modeling, and image and speech classification, show that Mega achieves significant improvements over other sequence models, including variants of Transformers and recent state space models. * -Tips: +This model was contributed by [mnaylor](https://huggingface.co/mnaylor). +The original code can be found [here](https://github.com/facebookresearch/mega). + + +## Usage tips - MEGA can perform quite well with relatively few parameters. 
See Appendix D in the MEGA paper for examples of architectural specs which perform well in various settings. If using MEGA as a decoder, be sure to set `bidirectional=False` to avoid errors with default bidirectional. - Mega-chunk is a variant of mega that reduces time and spaces complexity from quadratic to linear. Utilize chunking with MegaConfig.use_chunking and control chunk size with MegaConfig.chunk_size -This model was contributed by [mnaylor](https://huggingface.co/mnaylor). -The original code can be found [here](https://github.com/facebookresearch/mega). -Implementation Notes: +## Implementation Notes - The original implementation of MEGA had an inconsistent expectation of attention masks for padding and causal self-attention between the softmax attention and Laplace/squared ReLU method. This implementation addresses that inconsistency. - The original implementation did not include token type embeddings; this implementation adds support for these, with the option controlled by MegaConfig.add_token_type_embeddings diff --git a/docs/source/en/model_doc/megatron-bert.md b/docs/source/en/model_doc/megatron-bert.md index 88ccff23587b..67000c8b843f 100644 --- a/docs/source/en/model_doc/megatron-bert.md +++ b/docs/source/en/model_doc/megatron-bert.md @@ -40,7 +40,11 @@ achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15. accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).* -Tips: +This model was contributed by [jdemouth](https://huggingface.co/jdemouth). The original code can be found [here](https://github.com/NVIDIA/Megatron-LM). +That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. In particular, +it contains a hybrid model parallel approach using "tensor parallel" and "pipeline parallel" techniques. + +## Usage tips We have provided pretrained [BERT-345M](https://ngc.nvidia.com/catalog/models/nvidia:megatron_bert_345m) checkpoints for use to evaluate or finetuning downstream tasks. @@ -78,11 +82,7 @@ python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpo python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_cased.zip ``` -This model was contributed by [jdemouth](https://huggingface.co/jdemouth). The original code can be found [here](https://github.com/NVIDIA/Megatron-LM). That repository contains a multi-GPU and multi-node implementation of the -Megatron Language models. In particular, it contains a hybrid model parallel approach using "tensor parallel" and -"pipeline parallel" techniques. - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/megatron_gpt2.md b/docs/source/en/model_doc/megatron_gpt2.md index 1eea7d82bf3a..284fd372c0e0 100644 --- a/docs/source/en/model_doc/megatron_gpt2.md +++ b/docs/source/en/model_doc/megatron_gpt2.md @@ -40,7 +40,11 @@ achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15. accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).* -Tips: +This model was contributed by [jdemouth](https://huggingface.co/jdemouth). The original code can be found [here](https://github.com/NVIDIA/Megatron-LM). 
+That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. In particular, it +contains a hybrid model parallel approach using "tensor parallel" and "pipeline parallel" techniques. + +## Usage tips We have provided pretrained [GPT2-345M](https://ngc.nvidia.com/catalog/models/nvidia:megatron_lm_345m) checkpoints for use to evaluate or finetuning downstream tasks. @@ -65,7 +69,9 @@ The following command allows you to do the conversion. We assume that the folder python3 $PATH_TO_TRANSFORMERS/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_gpt2_345m_v0_0.zip ``` -This model was contributed by [jdemouth](https://huggingface.co/jdemouth). The original code can be found [here](https://github.com/NVIDIA/Megatron-LM). That repository contains a multi-GPU and multi-node implementation of the -Megatron Language models. In particular, it contains a hybrid model parallel approach using "tensor parallel" and -"pipeline parallel" techniques. + + + MegatronGPT2 architecture is the same as OpenAI GPT-2 . Refer to [GPT-2 documentation](gpt2) for information on + configuration classes and their parameters. + \ No newline at end of file diff --git a/docs/source/en/model_doc/mgp-str.md b/docs/source/en/model_doc/mgp-str.md index e384c0620170..5a44a18b349d 100644 --- a/docs/source/en/model_doc/mgp-str.md +++ b/docs/source/en/model_doc/mgp-str.md @@ -29,12 +29,10 @@ alt="drawing" width="600"/> MGP-STR architecture. Taken from the original paper. -Tips: +MGP-STR is trained on two synthetic datasets [MJSynth]((http://www.robots.ox.ac.uk/~vgg/data/text/)) (MJ) and SynthText(http://www.robots.ox.ac.uk/~vgg/data/scenetext/) (ST) without fine-tuning on other datasets. It achieves state-of-the-art results on six standard Latin scene text benchmarks, including 3 regular text datasets (IC13, SVT, IIIT) and 3 irregular ones (IC15, SVTP, CUTE). +This model was contributed by [yuekun](https://huggingface.co/yuekun). The original code can be found [here](https://github.com/AlibabaResearch/AdvancedLiterateMachinery/tree/main/OCR/MGP-STR). -- MGP-STR is trained on two synthetic datasets [MJSynth]((http://www.robots.ox.ac.uk/~vgg/data/text/)) (MJ) and SynthText(http://www.robots.ox.ac.uk/~vgg/data/scenetext/) (ST) without fine-tuning on other datasets. It achieves state-of-the-art results on six standard Latin scene text benchmarks, including 3 regular text datasets (IC13, SVT, IIIT) and 3 irregular ones (IC15, SVTP, CUTE). -- This model was contributed by [yuekun](https://huggingface.co/yuekun). The original code can be found [here](https://github.com/AlibabaResearch/AdvancedLiterateMachinery/tree/main/OCR/MGP-STR). - -## Inference +## Inference example [`MgpstrModel`] accepts images as input and generates three types of predictions, which represent textual information at different granularities. The three types of predictions are fused to give the final prediction result. @@ -46,7 +44,7 @@ into a single instance to both extract the input features and decode the predict - Step-by-step Optical Character Recognition (OCR) -``` py +```py >>> from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition >>> import requests >>> from PIL import Image diff --git a/docs/source/en/model_doc/mistral.md b/docs/source/en/model_doc/mistral.md index 5972f72a614e..8e37bc2caf88 100644 --- a/docs/source/en/model_doc/mistral.md +++ b/docs/source/en/model_doc/mistral.md @@ -18,9 +18,9 @@ rendered properly in your Markdown viewer. 
## Overview -Mistral-7B-v0.1 is Mistral AI’s first Large Language Model (LLM). +Mistral-7B-v0.1 is Mistral AI's first Large Language Model (LLM). -## Model Details +### Model Details Mistral-7B-v0.1 is a decoder-based LM with the following architectural choices: * Sliding Window Attention - Trained with 8k context length and fixed cache size, with a theoretical attention span of 128K tokens @@ -31,11 +31,11 @@ We also provide an instruction fine-tuned model: `Mistral-7B-Instruct-v0.1` whic For more details please read our [release blog post](https://mistral.ai/news/announcing-mistral-7b/) -## License +### License Both `Mistral-7B-v0.1` and `Mistral-7B-Instruct-v0.1` are released under the Apache 2.0 license. -## Usage +## Usage tips `Mistral-7B-v0.1` and `Mistral-7B-Instruct-v0.1` can be found on the [Huggingface Hub](https://huggingface.co/mistralai) diff --git a/docs/source/en/model_doc/mluke.md b/docs/source/en/model_doc/mluke.md index ec9430848cea..719af76ad446 100644 --- a/docs/source/en/model_doc/mluke.md +++ b/docs/source/en/model_doc/mluke.md @@ -37,6 +37,10 @@ representations into the input allows us to extract more language-agnostic featu multilingual cloze prompt task with the mLAMA dataset. We show that entity-based prompt elicits correct factual knowledge more likely than using only word representations.* +This model was contributed by [ryo0634](https://huggingface.co/ryo0634). The original code can be found [here](https://github.com/studio-ousia/luke). + +## Usage tips + One can directly plug in the weights of mLUKE into a LUKE model, like so: ```python @@ -53,10 +57,12 @@ from transformers import MLukeTokenizer tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base") ``` + + As mLUKE's architecture is equivalent to that of LUKE, one can refer to [LUKE's documentation page](luke) for all tips, code examples and notebooks. -This model was contributed by [ryo0634](https://huggingface.co/ryo0634). The original code can be found [here](https://github.com/studio-ousia/luke). + ## MLukeTokenizer diff --git a/docs/source/en/model_doc/mms.md b/docs/source/en/model_doc/mms.md index 497eb40d7e3e..aefdbfd889f5 100644 --- a/docs/source/en/model_doc/mms.md +++ b/docs/source/en/model_doc/mms.md @@ -306,7 +306,6 @@ with torch.no_grad(): outputs = model(**inputs) ``` - ### Language Identification (LID) Different LID models are available based on the number of languages they can recognize - [126](https://huggingface.co/facebook/mms-lid-126), [256](https://huggingface.co/facebook/mms-lid-256), [512](https://huggingface.co/facebook/mms-lid-512), [1024](https://huggingface.co/facebook/mms-lid-1024), [2048](https://huggingface.co/facebook/mms-lid-2048), [4017](https://huggingface.co/facebook/mms-lid-4017). @@ -378,4 +377,13 @@ processor.id2label.values() ### Audio Pretrained Models -Pretrained models are available for two different sizes - [300M](https://huggingface.co/facebook/mms-300m) , [1Bil](https://huggingface.co/facebook/mms-1b). The architecture is based on the Wav2Vec2 model, so one can refer to [Wav2Vec2's documentation page](wav2vec2) for further details on how to finetune with models for various downstream tasks. +Pretrained models are available for two different sizes - [300M](https://huggingface.co/facebook/mms-300m) , +[1Bil](https://huggingface.co/facebook/mms-1b). + + + +The MMS for ASR architecture is based on the Wav2Vec2 model, refer to [Wav2Vec2's documentation page](wav2vec2) for further +details on how to finetune with models for various downstream tasks. 
+ +MMS-TTS uses the same model architecture as VITS, refer to [VITS's documentation page](vits) for API reference. + diff --git a/docs/source/en/model_doc/mobilebert.md b/docs/source/en/model_doc/mobilebert.md index e652756351d2..fbd9d34afb94 100644 --- a/docs/source/en/model_doc/mobilebert.md +++ b/docs/source/en/model_doc/mobilebert.md @@ -37,7 +37,9 @@ natural language inference tasks of GLUE, MobileBERT achieves a GLUEscore o 77.7 latency on a Pixel 4 phone. On the SQuAD v1.1/v2.0 question answering task, MobileBERT achieves a dev F1 score of 90.0/79.2 (1.5/2.1 higher than BERT_BASE).* -Tips: +This model was contributed by [vshampor](https://huggingface.co/vshampor). The original code can be found [here](https://github.com/google-research/mobilebert). + +## Usage tips - MobileBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -45,9 +47,8 @@ Tips: efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard. -This model was contributed by [vshampor](https://huggingface.co/vshampor). The original code can be found [here](https://github.com/google-research/mobilebert). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -73,6 +74,9 @@ This model was contributed by [vshampor](https://huggingface.co/vshampor). The o [[autodoc]] models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput + + + ## MobileBertModel [[autodoc]] MobileBertModel @@ -113,6 +117,9 @@ This model was contributed by [vshampor](https://huggingface.co/vshampor). The o [[autodoc]] MobileBertForQuestionAnswering - forward + + + ## TFMobileBertModel [[autodoc]] TFMobileBertModel @@ -152,3 +159,6 @@ This model was contributed by [vshampor](https://huggingface.co/vshampor). The o [[autodoc]] TFMobileBertForQuestionAnswering - call + + + diff --git a/docs/source/en/model_doc/mobilenet_v1.md b/docs/source/en/model_doc/mobilenet_v1.md index 56743efe1416..9f68035c63c2 100644 --- a/docs/source/en/model_doc/mobilenet_v1.md +++ b/docs/source/en/model_doc/mobilenet_v1.md @@ -24,7 +24,9 @@ The abstract from the paper is the following: *We present a class of efficient models called MobileNets for mobile and embedded vision applications. MobileNets are based on a streamlined architecture that uses depth-wise separable convolutions to build light weight deep neural networks. We introduce two simple global hyper-parameters that efficiently trade off between latency and accuracy. These hyper-parameters allow the model builder to choose the right sized model for their application based on the constraints of the problem. We present extensive experiments on resource and accuracy tradeoffs and show strong performance compared to other popular models on ImageNet classification. We then demonstrate the effectiveness of MobileNets across a wide range of applications and use cases including object detection, finegrain classification, face attributes and large scale geo-localization.* -Tips: +This model was contributed by [matthijs](https://huggingface.co/Matthijs). The original code and weights can be found [here](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md). 
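A small sketch of the MobileBERT tips above (right-side padding, masked-language-modelling objective); the `google/mobilebert-uncased` checkpoint, example sentence, and padding length are assumptions.

```python
import torch
from transformers import AutoTokenizer, MobileBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

# Pad on the right, as recommended for models with absolute position embeddings.
inputs = tokenizer(
    "MobileBERT runs efficiently on a [MASK] device.",
    return_tensors="pt",
    padding="max_length",
    max_length=32,
)
with torch.no_grad():
    logits = model(**inputs).logits

mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```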
+ +## Usage tips - The checkpoints are named **mobilenet\_v1\_*depth*\_*size***, for example **mobilenet\_v1\_1.0\_224**, where **1.0** is the depth multiplier (sometimes also referred to as "alpha" or the width multiplier) and **224** is the resolution of the input images the model was trained on. @@ -46,8 +48,6 @@ Unsupported features: - It's common to extract the output from the pointwise layers at indices 5, 11, 12, 13 for downstream purposes. Using `output_hidden_states=True` returns the output from all intermediate layers. There is currently no way to limit this to specific layers. -This model was contributed by [matthijs](https://huggingface.co/Matthijs). The original code and weights can be found [here](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with MobileNetV1. diff --git a/docs/source/en/model_doc/mobilenet_v2.md b/docs/source/en/model_doc/mobilenet_v2.md index bd4114dc71ad..ff22231ae0c1 100644 --- a/docs/source/en/model_doc/mobilenet_v2.md +++ b/docs/source/en/model_doc/mobilenet_v2.md @@ -26,7 +26,9 @@ The abstract from the paper is the following: *The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers opposite to traditional residual models which use expanded representations in the input an MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on Imagenet classification, COCO object detection, VOC image segmentation. We evaluate the trade-offs between accuracy, and number of operations measured by multiply-adds (MAdd), as well as the number of parameters.* -Tips: +This model was contributed by [matthijs](https://huggingface.co/Matthijs). The original code and weights can be found [here for the main model](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet) and [here for DeepLabV3+](https://github.com/tensorflow/models/tree/master/research/deeplab). + +## Usage tips - The checkpoints are named **mobilenet\_v2\_*depth*\_*size***, for example **mobilenet\_v2\_1.0\_224**, where **1.0** is the depth multiplier (sometimes also referred to as "alpha" or the width multiplier) and **224** is the resolution of the input images the model was trained on. @@ -50,8 +52,6 @@ Unsupported features: - The DeepLabV3+ segmentation head does not use the final convolution layer from the backbone, but this layer gets computed anyway. There is currently no way to tell [`MobileNetV2Model`] up to which layer it should run. -This model was contributed by [matthijs](https://huggingface.co/Matthijs). The original code and weights can be found [here for the main model](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet) and [here for DeepLabV3+](https://github.com/tensorflow/models/tree/master/research/deeplab). 
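Tying the checkpoint-naming tips above to code, here is a minimal MobileNetV2 image-classification sketch; the `google/mobilenet_v2_1.0_224` checkpoint (depth multiplier 1.0, 224x224 inputs) and the COCO test-image URL are illustrative.

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV2ForImageClassification

checkpoint = "google/mobilenet_v2_1.0_224"  # mobilenet_v2_{depth}_{size}
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = MobileNetV2ForImageClassification.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```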
- ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with MobileNetV2. diff --git a/docs/source/en/model_doc/mobilevit.md b/docs/source/en/model_doc/mobilevit.md index 2d815795689a..e724ffa380e2 100644 --- a/docs/source/en/model_doc/mobilevit.md +++ b/docs/source/en/model_doc/mobilevit.md @@ -24,7 +24,9 @@ The abstract from the paper is the following: *Light-weight convolutional neural networks (CNNs) are the de-facto for mobile vision tasks. Their spatial inductive biases allow them to learn representations with fewer parameters across different vision tasks. However, these networks are spatially local. To learn global representations, self-attention-based vision trans-formers (ViTs) have been adopted. Unlike CNNs, ViTs are heavy-weight. In this paper, we ask the following question: is it possible to combine the strengths of CNNs and ViTs to build a light-weight and low latency network for mobile vision tasks? Towards this end, we introduce MobileViT, a light-weight and general-purpose vision transformer for mobile devices. MobileViT presents a different perspective for the global processing of information with transformers, i.e., transformers as convolutions. Our results show that MobileViT significantly outperforms CNN- and ViT-based networks across different tasks and datasets. On the ImageNet-1k dataset, MobileViT achieves top-1 accuracy of 78.4% with about 6 million parameters, which is 3.2% and 6.2% more accurate than MobileNetv3 (CNN-based) and DeIT (ViT-based) for a similar number of parameters. On the MS-COCO object detection task, MobileViT is 5.7% more accurate than MobileNetv3 for a similar number of parameters.* -Tips: +This model was contributed by [matthijs](https://huggingface.co/Matthijs). The TensorFlow version of the model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code and weights can be found [here](https://github.com/apple/ml-cvnets). + +## Usage tips - MobileViT is more like a CNN than a Transformer model. It does not work on sequence data but on batches of images. Unlike ViT, there are no embeddings. The backbone model outputs a feature map. You can follow [this tutorial](https://keras.io/examples/vision/mobilevit) for a lightweight introduction. - One can use [`MobileViTImageProcessor`] to prepare images for the model. Note that if you do your own preprocessing, the pretrained checkpoints expect images to be in BGR pixel order (not RGB). @@ -58,9 +60,6 @@ with open(tflite_filename, "wb") as f: The resulting model will be just **about an MB** making it a good fit for mobile applications where resources and network bandwidth can be constrained. - -This model was contributed by [matthijs](https://huggingface.co/Matthijs). The TensorFlow version of the model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code and weights can be found [here](https://github.com/apple/ml-cvnets). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with MobileViT. 
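To complement the MobileViT tips above, a short semantic-segmentation sketch is shown below; the `apple/deeplabv3-mobilevit-small` checkpoint and the image URL are assumptions, and the image processor takes care of the expected pixel ordering.

```python
import requests
import torch
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

checkpoint = "apple/deeplabv3-mobilevit-small"  # illustrative DeepLabV3 segmentation checkpoint
processor = MobileViTImageProcessor.from_pretrained(checkpoint)
model = MobileViTForSemanticSegmentation.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Resize the logits back to the input resolution and take the per-pixel class index.
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)
```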
@@ -91,6 +90,9 @@ If you're interested in submitting a resource to be included here, please feel f - preprocess - post_process_semantic_segmentation + + + ## MobileViTModel [[autodoc]] MobileViTModel @@ -106,6 +108,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] MobileViTForSemanticSegmentation - forward + + + ## TFMobileViTModel [[autodoc]] TFMobileViTModel @@ -120,3 +125,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] TFMobileViTForSemanticSegmentation - call + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/mobilevitv2.md b/docs/source/en/model_doc/mobilevitv2.md index 4b6689ef2b40..c3a650fc7042 100644 --- a/docs/source/en/model_doc/mobilevitv2.md +++ b/docs/source/en/model_doc/mobilevitv2.md @@ -26,17 +26,16 @@ The abstract from the paper is the following: *Mobile vision transformers (MobileViT) can achieve state-of-the-art performance across several mobile vision tasks, including classification and detection. Though these models have fewer parameters, they have high latency as compared to convolutional neural network-based models. The main efficiency bottleneck in MobileViT is the multi-headed self-attention (MHA) in transformers, which requires O(k2) time complexity with respect to the number of tokens (or patches) k. Moreover, MHA requires costly operations (e.g., batch-wise matrix multiplication) for computing self-attention, impacting latency on resource-constrained devices. This paper introduces a separable self-attention method with linear complexity, i.e. O(k). A simple yet effective characteristic of the proposed method is that it uses element-wise operations for computing self-attention, making it a good choice for resource-constrained devices. The improved model, MobileViTV2, is state-of-the-art on several mobile vision tasks, including ImageNet object classification and MS-COCO object detection. With about three million parameters, MobileViTV2 achieves a top-1 accuracy of 75.6% on the ImageNet dataset, outperforming MobileViT by about 1% while running 3.2× faster on a mobile device.* -Tips: +This model was contributed by [shehan97](https://huggingface.co/shehan97). +The original code can be found [here](https://github.com/apple/ml-cvnets). + +## Usage tips - MobileViTV2 is more like a CNN than a Transformer model. It does not work on sequence data but on batches of images. Unlike ViT, there are no embeddings. The backbone model outputs a feature map. - One can use [`MobileViTImageProcessor`] to prepare images for the model. Note that if you do your own preprocessing, the pretrained checkpoints expect images to be in BGR pixel order (not RGB). - The available image classification checkpoints are pre-trained on [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). - The segmentation model uses a [DeepLabV3](https://arxiv.org/abs/1706.05587) head. The available semantic segmentation checkpoints are pre-trained on [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/). -This model was contributed by [shehan97](https://huggingface.co/shehan97). -The original code can be found [here](https://github.com/apple/ml-cvnets). 
- - ## MobileViTV2Config [[autodoc]] MobileViTV2Config diff --git a/docs/source/en/model_doc/mpnet.md b/docs/source/en/model_doc/mpnet.md index 97c140f631d1..c571da47b004 100644 --- a/docs/source/en/model_doc/mpnet.md +++ b/docs/source/en/model_doc/mpnet.md @@ -37,14 +37,14 @@ down-streaming tasks (GLUE, SQuAD, etc). Experimental results show that MPNet ou margin, and achieves better results on these tasks compared with previous state-of-the-art pre-trained methods (e.g., BERT, XLNet, RoBERTa) under the same model setting.* -Tips: +The original code can be found [here](https://github.com/microsoft/MPNet). -- MPNet doesn't have `token_type_ids`, you don't need to indicate which token belongs to which segment. just - separate your segments with the separation token `tokenizer.sep_token` (or `[sep]`). +## Usage tips -The original code can be found [here](https://github.com/microsoft/MPNet). +MPNet doesn't have `token_type_ids`, you don't need to indicate which token belongs to which segment. Just +separate your segments with the separation token `tokenizer.sep_token` (or `[sep]`). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -68,6 +68,9 @@ The original code can be found [here](https://github.com/microsoft/MPNet). [[autodoc]] MPNetTokenizerFast + + + ## MPNetModel [[autodoc]] MPNetModel @@ -98,6 +101,9 @@ The original code can be found [here](https://github.com/microsoft/MPNet). [[autodoc]] MPNetForQuestionAnswering - forward + + + ## TFMPNetModel [[autodoc]] TFMPNetModel @@ -127,3 +133,6 @@ The original code can be found [here](https://github.com/microsoft/MPNet). [[autodoc]] TFMPNetForQuestionAnswering - call + + + diff --git a/docs/source/en/model_doc/mpt.md b/docs/source/en/model_doc/mpt.md index fd0a3b5c46bf..f7e6fcc14382 100644 --- a/docs/source/en/model_doc/mpt.md +++ b/docs/source/en/model_doc/mpt.md @@ -30,13 +30,14 @@ The original code is available at the [`llm-foundry`](https://github.com/mosaic Read more about it [in the release blogpost](https://www.mosaicml.com/blog/mpt-7b) -Tips: +## Usage tips - Learn more about some techniques behind training of the model [in this section of llm-foundry repository](https://github.com/mosaicml/llm-foundry/blob/main/TUTORIAL.md#faqs) - If you want to use the advanced version of the model (triton kernels, direct flash attention integration), you can still use the original model implementation by adding `trust_remote_code=True` when calling `from_pretrained`. -- [Fine-tuning Notebook](https://colab.research.google.com/drive/1HCpQkLL7UXW8xJUJJ29X7QAeNJKO0frZ?usp=sharing) on how to fine-tune MPT-7B on a free Google Colab instance to turn the model into a Chatbot. +## Resources +- [Fine-tuning Notebook](https://colab.research.google.com/drive/1HCpQkLL7UXW8xJUJJ29X7QAeNJKO0frZ?usp=sharing) on how to fine-tune MPT-7B on a free Google Colab instance to turn the model into a Chatbot. ## MptConfig diff --git a/docs/source/en/model_doc/mra.md b/docs/source/en/model_doc/mra.md index 8c1c392ead12..cc4c0d9cc9c8 100644 --- a/docs/source/en/model_doc/mra.md +++ b/docs/source/en/model_doc/mra.md @@ -27,24 +27,20 @@ The abstract from the paper is the following: This model was contributed by [novice03](https://huggingface.co/novice03). The original code can be found [here](https://github.com/mlpen/mra-attention). 
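To illustrate the MPT loading options mentioned above, a brief generation sketch follows; the `mosaicml/mpt-7b` checkpoint, prompt, and generation settings are illustrative, and the remote-code path pulls in the original llm-foundry implementation with its extra dependencies.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "mosaicml/mpt-7b"  # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Default: the native Transformers implementation of MPT.
model = AutoModelForCausalLM.from_pretrained(checkpoint)

# Per the tip above, the original implementation (triton kernels, direct
# flash-attention integration) can be selected instead with:
# model = AutoModelForCausalLM.from_pretrained(checkpoint, trust_remote_code=True)

inputs = tokenizer("MPT-7B is a decoder-only transformer that", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```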
- ## MraConfig [[autodoc]] MraConfig - ## MraModel [[autodoc]] MraModel - forward - ## MraForMaskedLM [[autodoc]] MraForMaskedLM - forward - ## MraForSequenceClassification [[autodoc]] MraForSequenceClassification @@ -55,13 +51,11 @@ The original code can be found [here](https://github.com/mlpen/mra-attention). [[autodoc]] MraForMultipleChoice - forward - ## MraForTokenClassification [[autodoc]] MraForTokenClassification - forward - ## MraForQuestionAnswering [[autodoc]] MraForQuestionAnswering diff --git a/docs/source/en/model_doc/mt5.md b/docs/source/en/model_doc/mt5.md index beec9b535490..f7360092dec7 100644 --- a/docs/source/en/model_doc/mt5.md +++ b/docs/source/en/model_doc/mt5.md @@ -60,7 +60,7 @@ Google has released the following variants: This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be found [here](https://github.com/google-research/multilingual-t5). -## Documentation resources +## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) @@ -82,6 +82,8 @@ See [`T5Tokenizer`] for all details. See [`T5TokenizerFast`] for all details. + + ## MT5Model @@ -103,6 +105,9 @@ See [`T5TokenizerFast`] for all details. [[autodoc]] MT5ForQuestionAnswering + + + ## TFMT5Model [[autodoc]] TFMT5Model @@ -115,6 +120,9 @@ See [`T5TokenizerFast`] for all details. [[autodoc]] TFMT5EncoderModel + + + ## FlaxMT5Model [[autodoc]] FlaxMT5Model @@ -126,3 +134,6 @@ See [`T5TokenizerFast`] for all details. ## FlaxMT5EncoderModel [[autodoc]] FlaxMT5EncoderModel + + + diff --git a/docs/source/en/model_doc/mvp.md b/docs/source/en/model_doc/mvp.md index 043163f40b30..0d98e04cf091 100644 --- a/docs/source/en/model_doc/mvp.md +++ b/docs/source/en/model_doc/mvp.md @@ -28,15 +28,17 @@ According to the abstract, - MVP also has task-specific soft prompts to stimulate the model's capacity in performing a certain task. - MVP is specially designed for natural language generation and can be adapted to a wide range of generation tasks, including but not limited to summarization, data-to-text generation, open-ended dialogue system, story generation, question answering, question generation, task-oriented dialogue system, commonsense generation, paraphrase generation, text style transfer, and text simplification. Our model can also be adapted to natural language understanding tasks such as sequence classification and (extractive) question answering. -Tips: +This model was contributed by [Tianyi Tang](https://huggingface.co/StevenTang). The detailed information and instructions can be found [here](https://github.com/RUCAIBox/MVP). + +## Usage tips + - We have released a series of models [here](https://huggingface.co/models?filter=mvp), including MVP, MVP with task-specific prompts, and multi-task pre-trained variants. - If you want to use a model without prompts (standard Transformer), you can load it through `MvpForConditionalGeneration.from_pretrained('RUCAIBox/mvp')`. - If you want to use a model with task-specific prompts, such as summarization, you can load it through `MvpForConditionalGeneration.from_pretrained('RUCAIBox/mvp-summarization')`. - Our model supports lightweight prompt tuning following [Prefix-tuning](https://arxiv.org/abs/2101.00190) with method `set_lightweight_tuning()`. -This model was contributed by [Tianyi Tang](https://huggingface.co/StevenTang). The detailed information and instructions can be found [here](https://github.com/RUCAIBox/MVP). 
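Since the mT5 section above mostly lists the API surface, a short training-style sketch may help; the `google/mt5-small` checkpoint and the translation pair are assumptions, and because mT5 is pretrained with span corruption only, real use would start from fine-tuning rather than zero-shot generation.

```python
from transformers import MT5ForConditionalGeneration, MT5Tokenizer

tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")  # requires sentencepiece
model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")

inputs = tokenizer("Translate to German: The house is wonderful.", return_tensors="pt")
labels = tokenizer("Das Haus ist wunderbar.", return_tensors="pt").input_ids

# A single supervised forward pass; in a training loop, loss.backward() and an
# optimizer step would follow.
loss = model(**inputs, labels=labels).loss
print(float(loss))
```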
+## Usage examples -## Examples For summarization, it is an example to use MVP and MVP with summarization-specific prompts. ```python @@ -104,7 +106,7 @@ For lightweight tuning, *i.e.*, fixing the model and only tuning prompts, you ca >>> model.set_lightweight_tuning() ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) diff --git a/docs/source/en/model_doc/nat.md b/docs/source/en/model_doc/nat.md index 668951c241f5..ecb61ccb0a33 100644 --- a/docs/source/en/model_doc/nat.md +++ b/docs/source/en/model_doc/nat.md @@ -36,7 +36,18 @@ that boosts image classification and downstream vision performance. Experimental NAT-Tiny reaches 83.2% top-1 accuracy on ImageNet, 51.4% mAP on MS-COCO and 48.4% mIoU on ADE20K, which is 1.9% ImageNet accuracy, 1.0% COCO mAP, and 2.6% ADE20K mIoU improvement over a Swin model with similar size. * -Tips: + + + Neighborhood Attention compared to other attention patterns. +Taken from the original paper. + +This model was contributed by [Ali Hassani](https://huggingface.co/alihassanijr). +The original code can be found [here](https://github.com/SHI-Labs/Neighborhood-Attention-Transformer). + +## Usage tips + - One can use the [`AutoImageProcessor`] API to prepare images for the model. - NAT can be used as a *backbone*. When `output_hidden_states = True`, it will output both `hidden_states` and `reshaped_hidden_states`. @@ -50,16 +61,6 @@ or build on your system by running `pip install natten`. Note that the latter will likely take time to compile. NATTEN does not support Windows devices yet. - Patch size of 4 is only supported at the moment. - - - Neighborhood Attention compared to other attention patterns. -Taken from the original paper. - -This model was contributed by [Ali Hassani](https://huggingface.co/alihassanijr). -The original code can be found [here](https://github.com/SHI-Labs/Neighborhood-Attention-Transformer). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with NAT. @@ -75,7 +76,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] NatConfig - ## NatModel [[autodoc]] NatModel diff --git a/docs/source/en/model_doc/nezha.md b/docs/source/en/model_doc/nezha.md index 9c136cdf0660..872f576f1286 100644 --- a/docs/source/en/model_doc/nezha.md +++ b/docs/source/en/model_doc/nezha.md @@ -35,7 +35,7 @@ and natural language inference (XNLI).* This model was contributed by [sijunhe](https://huggingface.co/sijunhe). The original code can be found [here](https://github.com/huawei-noah/Pretrained-Language-Model/tree/master/NEZHA-PyTorch). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/nllb-moe.md b/docs/source/en/model_doc/nllb-moe.md index a98266b24927..eb2b7a7da26a 100644 --- a/docs/source/en/model_doc/nllb-moe.md +++ b/docs/source/en/model_doc/nllb-moe.md @@ -37,22 +37,24 @@ improvements to counteract overfitting while training on thousands of tasks. Cri a human-translated benchmark, Flores-200, and combined human evaluation with a novel toxicity benchmark covering all languages in Flores-200 to assess translation safety. 
Our model achieves an improvement of 44% BLEU relative to the previous state-of-the-art, laying important groundwork towards realizing a universal translation system.* -Tips: +This model was contributed by [Arthur Zucker](https://huggingface.co/ArtZucker). +The original code can be found [here](https://github.com/facebookresearch/fairseq). + +## Usage tips - M2M100ForConditionalGeneration is the base model for both NLLB and NLLB MoE - The NLLB-MoE is very similar to the NLLB model, but it's feed forward layer is based on the implementation of SwitchTransformers. - The tokenizer is the same as the NLLB models. -This model was contributed by [Arthur Zucker](https://huggingface.co/ArtZucker). -The original code can be found [here](https://github.com/facebookresearch/fairseq). - ## Implementation differences with SwitchTransformers + The biggest difference is the way the tokens are routed. NLLB-MoE uses a `top-2-gate` which means that for each input, only the top two experts are selected based on the highest predicted probabilities from the gating network, and the remaining experts are ignored. In `SwitchTransformers`, only the top-1 probabilities are computed, which means that tokens have less probability of being forwarded. Moreover, if a token is not routed to any expert, `SwitchTransformers` still adds its unmodified hidden states (kind of like a residual connection) while they are masked in `NLLB`'s top-2 routing mechanism. ## Generating with NLLB-MoE + The available checkpoints require around 350GB of storage. Make sure to use `accelerate` if you do not have enough RAM on your machine. While generating the target text set the `forced_bos_token_id` to the target language id. The following @@ -99,7 +101,7 @@ See example below for a translation from romanian to german: >>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0] ``` -## Documentation resources +## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) diff --git a/docs/source/en/model_doc/nllb.md b/docs/source/en/model_doc/nllb.md index ec50716c73c8..b0dffa185ec4 100644 --- a/docs/source/en/model_doc/nllb.md +++ b/docs/source/en/model_doc/nllb.md @@ -16,8 +16,9 @@ rendered properly in your Markdown viewer. # NLLB -**DISCLAIMER:** The default behaviour for the tokenizer has recently been fixed (and thus changed)! +## Updated tokenizer behavior +**DISCLAIMER:** The default behaviour for the tokenizer was fixed and thus changed in April 2023. The previous version adds `[self.eos_token_id, self.cur_lang_code]` at the end of the token sequence for both target and source tokenization. This is wrong as the NLLB paper mentions (page 48, 6.1.1. Model Architecture) : *Note that we prefix the source sequence with the source language, as opposed to the target @@ -56,7 +57,7 @@ Enabling the old behaviour can be done as follows: For more details, feel free to check the linked [PR](https://github.com/huggingface/transformers/pull/22313) and [Issue](https://github.com/huggingface/transformers/issues/19943). -## Overview of NLLB +## Overview The NLLB model was presented in [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by Marta R. 
Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, @@ -131,7 +132,7 @@ See example below for a translation from romanian to german: UN-Chef sagt, es gibt keine militärische Lösung in Syrien ``` -## Documentation resources +## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) diff --git a/docs/source/en/model_doc/nougat.md b/docs/source/en/model_doc/nougat.md index 3fcb97a541b8..a39e74eb213a 100644 --- a/docs/source/en/model_doc/nougat.md +++ b/docs/source/en/model_doc/nougat.md @@ -33,7 +33,7 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/nougat). -Tips: +## Usage tips - The quickest way to get started with Nougat is by checking the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Nougat), which show how to use the model @@ -89,6 +89,12 @@ into a single instance to both extract the input features and decode the predict See the [model hub](https://huggingface.co/models?filter=nougat) to look for Nougat checkpoints. + + +The model is identical to [Donut](donut) in terms of architecture. + + + ## NougatImageProcessor [[autodoc]] NougatImageProcessor diff --git a/docs/source/en/model_doc/nystromformer.md b/docs/source/en/model_doc/nystromformer.md index 6434944aba8a..185c4e1f011a 100644 --- a/docs/source/en/model_doc/nystromformer.md +++ b/docs/source/en/model_doc/nystromformer.md @@ -37,7 +37,7 @@ favorably relative to other efficient self-attention methods. Our code is availa This model was contributed by [novice03](https://huggingface.co/novice03). The original code can be found [here](https://github.com/mlpen/Nystromformer). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/oneformer.md b/docs/source/en/model_doc/oneformer.md index 5f8f46e1529e..97a6aa64f543 100644 --- a/docs/source/en/model_doc/oneformer.md +++ b/docs/source/en/model_doc/oneformer.md @@ -26,7 +26,14 @@ The abstract from the paper is the following: *Universal Image Segmentation is not a new concept. Past attempts to unify image segmentation in the last decades include scene parsing, panoptic segmentation, and, more recently, new panoptic architectures. However, such panoptic architectures do not truly unify image segmentation because they need to be trained individually on the semantic, instance, or panoptic segmentation to achieve the best performance. Ideally, a truly universal framework should be trained only once and achieve SOTA performance across all three image segmentation tasks. To that end, we propose OneFormer, a universal image segmentation framework that unifies segmentation with a multi-task train-once design. We first propose a task-conditioned joint training strategy that enables training on ground truths of each domain (semantic, instance, and panoptic segmentation) within a single multi-task training process. Secondly, we introduce a task token to condition our model on the task at hand, making our model task-dynamic to support multi-task training and inference. 
Thirdly, we propose using a query-text contrastive loss during training to establish better inter-task and inter-class distinctions. Notably, our single OneFormer model outperforms specialized Mask2Former models across all three segmentation tasks on ADE20k, CityScapes, and COCO, despite the latter being trained on each of the three tasks individually with three times the resources. With new ConvNeXt and DiNAT backbones, we observe even more performance improvement. We believe OneFormer is a significant step towards making image segmentation more universal and accessible.* -Tips: +The figure below illustrates the architecture of OneFormer. Taken from the [original paper](https://arxiv.org/abs/2211.06220). + + + +This model was contributed by [Jitesh Jain](https://huggingface.co/praeclarumjj3). The original code can be found [here](https://github.com/SHI-Labs/OneFormer). + +## Usage tips + - OneFormer requires two inputs during inference: *image* and *task token*. - During training, OneFormer only uses panoptic annotations. - If you want to train the model in a distributed environment across multiple nodes, then one should update the @@ -35,12 +42,6 @@ Tips: - One can use [`OneFormerProcessor`] to prepare input images and task inputs for the model and optional targets for the model. [`OneformerProcessor`] wraps [`OneFormerImageProcessor`] and [`CLIPTokenizer`] into a single instance to both prepare the images and encode the task inputs. - To get the final segmentation, depending on the task, you can call [`~OneFormerProcessor.post_process_semantic_segmentation`] or [`~OneFormerImageProcessor.post_process_instance_segmentation`] or [`~OneFormerImageProcessor.post_process_panoptic_segmentation`]. All three tasks can be solved using [`OneFormerForUniversalSegmentation`] output, panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object/s (e.g. sky) together. -The figure below illustrates the architecture of OneFormer. Taken from the [original paper](https://arxiv.org/abs/2211.06220). - - - -This model was contributed by [Jitesh Jain](https://huggingface.co/praeclarumjj3). The original code can be found [here](https://github.com/SHI-Labs/OneFormer). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with OneFormer. diff --git a/docs/source/en/model_doc/open-llama.md b/docs/source/en/model_doc/open-llama.md index 9663170c4083..01170e7e3be6 100644 --- a/docs/source/en/model_doc/open-llama.md +++ b/docs/source/en/model_doc/open-llama.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. -This model is in maintenance mode only, so we won't accept any new PRs changing its code. +This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.31.0. You can do so by running the following command: `pip install -U transformers==4.31.0`. diff --git a/docs/source/en/model_doc/openai-gpt.md b/docs/source/en/model_doc/openai-gpt.md index ff98930b576e..1fbfbbcd89e3 100644 --- a/docs/source/en/model_doc/openai-gpt.md +++ b/docs/source/en/model_doc/openai-gpt.md @@ -44,7 +44,12 @@ approach on a wide range of benchmarks for natural language understanding. 
Our g discriminatively trained models that use architectures specifically crafted for each task, significantly improving upon the state of the art in 9 out of the 12 tasks studied.* -Tips: +[Write With Transformer](https://transformer.huggingface.co/doc/gpt) is a webapp created and hosted by Hugging Face +showcasing the generative capabilities of several models. GPT is one of them. + +This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/openai/finetune-transformer-lm). + +## Usage tips - GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -52,10 +57,6 @@ Tips: token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as it can be observed in the *run_generation.py* example script. -[Write With Transformer](https://transformer.huggingface.co/doc/gpt) is a webapp created and hosted by Hugging Face -showcasing the generative capabilities of several models. GPT is one of them. - -This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/openai/finetune-transformer-lm). Note: @@ -116,6 +117,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput + + + ## OpenAIGPTModel [[autodoc]] OpenAIGPTModel @@ -136,6 +140,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] OpenAIGPTForSequenceClassification - forward + + + ## TFOpenAIGPTModel [[autodoc]] TFOpenAIGPTModel @@ -155,3 +162,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFOpenAIGPTForSequenceClassification - call + + + diff --git a/docs/source/en/model_doc/opt.md b/docs/source/en/model_doc/opt.md index 332c63600acb..68da201f99bc 100644 --- a/docs/source/en/model_doc/opt.md +++ b/docs/source/en/model_doc/opt.md @@ -25,13 +25,13 @@ The abstract from the paper is the following: *Large language models, which are often trained for hundreds of thousands of compute days, have shown remarkable capabilities for zero- and few-shot learning. Given their computational cost, these models are difficult to replicate without significant capital. For the few that are available through APIs, no access is granted to the full model weights, making them difficult to study. We present Open Pre-trained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M to 175B parameters, which we aim to fully and responsibly share with interested researchers. We show that OPT-175B is comparable to GPT-3, while requiring only 1/7th the carbon footprint to develop. We are also releasing our logbook detailing the infrastructure challenges we faced, along with code for experimenting with all of the released models.* +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [Younes Belkada](https://huggingface.co/ybelkada), and [Patrick Von Platen](https://huggingface.co/patrickvonplaten). +The original code can be found [here](https://github.com/facebookresearch/metaseq). + Tips: - OPT has the same architecture as [`BartDecoder`]. - Contrary to GPT2, OPT adds the EOS token `` to the beginning of every prompt. 
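As a quick illustration of the second tip above, the hedged sketch below makes the prepended EOS token visible and runs a short generation. The checkpoint name `facebook/opt-350m` and the prompt are used purely for illustration; in the public OPT checkpoints the EOS token is `</s>` with id 2.

```python
# Minimal sketch, assuming the publicly released facebook/opt-350m checkpoint.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
# Unlike GPT-2, the first input id should be the EOS token (</s>, id 2)
print(inputs.input_ids[0, :3])

generated_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```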
-This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [Younes Belkada](https://huggingface.co/ybelkada), and [Patrick Von Platen](https://huggingface.co/patrickvonplaten). -The original code can be found [here](https://github.com/facebookresearch/metaseq). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with OPT. If you're @@ -66,6 +66,9 @@ The resource should ideally demonstrate something new instead of duplicating an [[autodoc]] OPTConfig + + + ## OPTModel [[autodoc]] OPTModel @@ -76,6 +79,19 @@ The resource should ideally demonstrate something new instead of duplicating an [[autodoc]] OPTForCausalLM - forward +## OPTForSequenceClassification + +[[autodoc]] OPTForSequenceClassification + - forward + +## OPTForQuestionAnswering + +[[autodoc]] OPTForQuestionAnswering + - forward + + + + ## TFOPTModel [[autodoc]] TFOPTModel @@ -86,23 +102,18 @@ The resource should ideally demonstrate something new instead of duplicating an [[autodoc]] TFOPTForCausalLM - call -## OPTForSequenceClassification - -[[autodoc]] OPTForSequenceClassification - - forward - -## OPTForQuestionAnswering - -[[autodoc]] OPTForQuestionAnswering - - forward + + ## FlaxOPTModel [[autodoc]] FlaxOPTModel - __call__ - ## FlaxOPTForCausalLM [[autodoc]] FlaxOPTForCausalLM - __call__ + + + diff --git a/docs/source/en/model_doc/owlv2.md b/docs/source/en/model_doc/owlv2.md index 73063c59350e..12000af9ed4f 100644 --- a/docs/source/en/model_doc/owlv2.md +++ b/docs/source/en/model_doc/owlv2.md @@ -24,11 +24,6 @@ The abstract from the paper is the following: *Open-vocabulary object detection has benefited greatly from pretrained vision-language models, but is still limited by the amount of available detection training data. While detection training data can be expanded by using Web image-text pairs as weak supervision, this has not been done at scales comparable to image-level pretraining. Here, we scale up detection data with self-training, which uses an existing detector to generate pseudo-box annotations on image-text pairs. Major challenges in scaling self-training are the choice of label space, pseudo-annotation filtering, and training efficiency. We present the OWLv2 model and OWL-ST self-training recipe, which address these challenges. OWLv2 surpasses the performance of previous state-of-the-art open-vocabulary detectors already at comparable training scales (~10M examples). However, with OWL-ST, we can scale to over 1B examples, yielding further large improvement: With an L/14 architecture, OWL-ST improves AP on LVIS rare classes, for which the model has seen no human box annotations, from 31.2% to 44.6% (43% relative improvement). OWL-ST unlocks Web-scale training for open-world localization, similar to what has been seen for image classification and language modelling.* -Tips: - -- The architecture of OWLv2 is identical to [OWL-ViT](owlvit), however the object detection head now also includes an objectness classifier, which predicts the (query-agnostic) likelihood that a predicted box contains an object (as opposed to background). The objectness score can be used to rank or filter predictions independently of text queries. -- Usage of OWLv2 is identical to [OWL-ViT](owlvit) with a new, updated image processor ([`Owlv2ImageProcessor`]). - drawing @@ -37,13 +32,12 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). 
The original code can be found [here](https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit). -## Usage +## Usage example OWLv2 is, just like its predecessor [OWL-ViT](owlvit), a zero-shot text-conditioned object detection model. OWL-ViT uses [CLIP](clip) as its multi-modal backbone, with a ViT-like Transformer to get visual features and a causal language model to get the text features. To use CLIP for detection, OWL-ViT removes the final token pooling layer of the vision model and attaches a lightweight classification and box head to each transformer output token. Open-vocabulary classification is enabled by replacing the fixed classification layer weights with the class-name embeddings obtained from the text model. The authors first train CLIP from scratch and fine-tune it end-to-end with the classification and box heads on standard detection datasets using a bipartite matching loss. One or multiple text queries per image can be used to perform zero-shot text-conditioned object detection. [`Owlv2ImageProcessor`] can be used to resize (or rescale) and normalize images for the model and [`CLIPTokenizer`] is used to encode the text. [`Owlv2Processor`] wraps [`Owlv2ImageProcessor`] and [`CLIPTokenizer`] into a single instance to both encode the text and prepare the images. The following example shows how to perform object detection using [`Owlv2Processor`] and [`Owlv2ForObjectDetection`]. - ```python >>> import requests >>> from PIL import Image @@ -76,7 +70,15 @@ Detected a photo of a cat with confidence 0.665 at location [6.75, 38.97, 326.62 ## Resources -A demo notebook on using OWLv2 for zero- and one-shot (image-guided) object detection can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/OWLv2). +- A demo notebook on using OWLv2 for zero- and one-shot (image-guided) object detection can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/OWLv2). +- [Zero-shot object detection task guide](../tasks/zero_shot_object_detection) + + + +The architecture of OWLv2 is identical to [OWL-ViT](owlvit), however the object detection head now also includes an objectness classifier, which predicts the (query-agnostic) likelihood that a predicted box contains an object (as opposed to background). The objectness score can be used to rank or filter predictions independently of text queries. +Usage of OWLv2 is identical to [OWL-ViT](owlvit) with a new, updated image processor ([`Owlv2ImageProcessor`]). + + ## Owlv2Config diff --git a/docs/source/en/model_doc/owlvit.md b/docs/source/en/model_doc/owlvit.md index 712d0f62d788..0ba26eeb37b4 100644 --- a/docs/source/en/model_doc/owlvit.md +++ b/docs/source/en/model_doc/owlvit.md @@ -31,13 +31,12 @@ alt="drawing" width="600"/> This model was contributed by [adirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit). -## Usage +## Usage tips OWL-ViT is a zero-shot text-conditioned object detection model. OWL-ViT uses [CLIP](clip) as its multi-modal backbone, with a ViT-like Transformer to get visual features and a causal language model to get the text features. To use CLIP for detection, OWL-ViT removes the final token pooling layer of the vision model and attaches a lightweight classification and box head to each transformer output token. 
Open-vocabulary classification is enabled by replacing the fixed classification layer weights with the class-name embeddings obtained from the text model. The authors first train CLIP from scratch and fine-tune it end-to-end with the classification and box heads on standard detection datasets using a bipartite matching loss. One or multiple text queries per image can be used to perform zero-shot text-conditioned object detection. [`OwlViTImageProcessor`] can be used to resize (or rescale) and normalize images for the model and [`CLIPTokenizer`] is used to encode the text. [`OwlViTProcessor`] wraps [`OwlViTImageProcessor`] and [`CLIPTokenizer`] into a single instance to both encode the text and prepare the images. The following example shows how to perform object detection using [`OwlViTProcessor`] and [`OwlViTForObjectDetection`]. - ```python >>> import requests >>> from PIL import Image diff --git a/docs/source/en/model_doc/pegasus.md b/docs/source/en/model_doc/pegasus.md index 14608aae31c9..0622354e62de 100644 --- a/docs/source/en/model_doc/pegasus.md +++ b/docs/source/en/model_doc/pegasus.md @@ -25,9 +25,6 @@ rendered properly in your Markdown viewer. -**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=sshleifer&labels=&template=bug-report.md&title) -and assign @patrickvonplaten. - ## Overview @@ -42,13 +39,17 @@ According to the abstract, This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The Authors' code can be found [here](https://github.com/google-research/pegasus). -Tips: +## Usage tips - Sequence-to-sequence model with the same encoder-decoder model architecture as BART. Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization specific pretraining objective, called Gap Sentence Generation (GSG). * MLM: encoder input tokens are randomly replaced by a mask tokens and have to be predicted by the encoder (like in BERT) * GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, but which has a causal mask to hide the future words like a regular auto-regressive transformer decoder. +- FP16 is not supported (help/ideas on this appreciated!). +- The adafactor optimizer is recommended for pegasus fine-tuning. + + ## Checkpoints All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tuned for summarization, besides @@ -60,20 +61,11 @@ All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tun - Full replication results and correctly pre-processed data can be found in this [Issue](https://github.com/huggingface/transformers/issues/6844#issue-689259666). - [Distilled checkpoints](https://huggingface.co/models?search=distill-pegasus) are described in this [paper](https://arxiv.org/abs/2010.13002). -### Examples - -- [Script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus - on the XSUM dataset. Data download instructions at [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). -- FP16 is not supported (help/ideas on this appreciated!). -- The adafactor optimizer is recommended for pegasus fine-tuning. - - ## Implementation Notes - All models are transformer encoder-decoders with 16 layers in each component. 
- The implementation is completely inherited from [`BartForConditionalGeneration`] - Some key configuration differences: - - static, sinusoidal position embeddings - the model starts generating with pad_token_id (which has 0 token_embedding) as the prefix. - more beams are used (`num_beams=8`) @@ -82,7 +74,6 @@ All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tun - The code to convert checkpoints trained in the author's [repo](https://github.com/google-research/pegasus) can be found in `convert_pegasus_tf_to_pytorch.py`. - ## Usage Example ```python @@ -106,8 +97,10 @@ All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tun ... ) ``` -## Documentation resources +## Resources +- [Script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus + on the XSUM dataset. Data download instructions at [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) @@ -126,6 +119,9 @@ warning: `add_tokens` does not work at the moment. [[autodoc]] PegasusTokenizerFast + + + ## PegasusModel [[autodoc]] PegasusModel @@ -141,6 +137,9 @@ warning: `add_tokens` does not work at the moment. [[autodoc]] PegasusForCausalLM - forward + + + ## TFPegasusModel [[autodoc]] TFPegasusModel @@ -151,6 +150,9 @@ warning: `add_tokens` does not work at the moment. [[autodoc]] TFPegasusForConditionalGeneration - call + + + ## FlaxPegasusModel [[autodoc]] FlaxPegasusModel @@ -164,3 +166,6 @@ warning: `add_tokens` does not work at the moment. - __call__ - encode - decode + + + diff --git a/docs/source/en/model_doc/pegasus_x.md b/docs/source/en/model_doc/pegasus_x.md index a0fd670fc7c9..20af5731e900 100644 --- a/docs/source/en/model_doc/pegasus_x.md +++ b/docs/source/en/model_doc/pegasus_x.md @@ -26,10 +26,6 @@ The abstract from the paper is the following: *While large pretrained Transformer models have proven highly capable at tackling natural language tasks, handling long sequence inputs continues to be a significant challenge. One such task is long input summarization, where inputs are longer than the maximum input context of most pretrained models. Through an extensive set of experiments, we investigate what model architectural changes and pretraining paradigms can most efficiently adapt a pretrained Transformer for long input summarization. We find that a staggered, block-local Transformer with global encoder tokens strikes a good balance of performance and efficiency, and that an additional pretraining phase on long sequences meaningfully improves downstream summarization performance. Based on our findings, we introduce PEGASUS-X, an extension of the PEGASUS model with additional long input pretraining to handle inputs of up to 16K tokens. PEGASUS-X achieves strong performance on long input summarization tasks comparable with much larger models while adding few additional parameters and not requiring model parallelism to train.* -Tips: - -* PEGASUS-X uses the same tokenizer as PEGASUS. - This model was contributed by [zphang]( + +PEGASUS-X uses the same tokenizer as [PEGASUS](pegasus). 
+ + + ## PegasusXConfig [[autodoc]] PegasusXConfig - ## PegasusXModel [[autodoc]] PegasusXModel - forward - ## PegasusXForConditionalGeneration [[autodoc]] PegasusXForConditionalGeneration diff --git a/docs/source/en/model_doc/perceiver.md b/docs/source/en/model_doc/perceiver.md index 97921baed2b1..ee678c22f6f8 100644 --- a/docs/source/en/model_doc/perceiver.md +++ b/docs/source/en/model_doc/perceiver.md @@ -81,7 +81,13 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/deepmind/deepmind-research/tree/master/perceiver). -Tips: + + +Perceiver does **not** work with `torch.nn.DataParallel` due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035) + + + +## Resources - The quickest way to get started with the Perceiver is by checking the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Perceiver). @@ -89,13 +95,6 @@ Tips: is implemented in the library. Note that the models available in the library only showcase some examples of what you can do with the Perceiver. There are many more use cases, including question answering, named-entity recognition, object detection, audio classification, video classification, etc. - -**Note**: - -- Perceiver does **not** work with `torch.nn.DataParallel` due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035) - -## Documentation resources - - [Text classification task guide](../tasks/sequence_classification) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Image classification task guide](../tasks/image_classification) diff --git a/docs/source/en/model_doc/persimmon.md b/docs/source/en/model_doc/persimmon.md index cf13d070c622..fe9e66a0b717 100644 --- a/docs/source/en/model_doc/persimmon.md +++ b/docs/source/en/model_doc/persimmon.md @@ -26,6 +26,10 @@ The authors showcase their approach to model evaluation, focusing on practical t In terms of model details, the work outlines the architecture and training methodology of Persimmon-8B, providing insights into its design choices, sequence length, and dataset composition. The authors present a fast inference code that outperforms traditional implementations through operator fusion and CUDA graph utilization while maintaining code coherence. They express their anticipation of how the community will leverage this contribution to drive innovation, hinting at further upcoming releases as part of an ongoing series of developments. +This model was contributed by [ArthurZ](https://huggingface.co/ArthurZ). +The original code can be found [here](https://github.com/persimmon-ai-labs/adept-inference). + +## Usage tips @@ -67,8 +71,6 @@ model = PersimmonForCausalLM.from_pretrained("/output/path") tokenizer = PersimmonTokenizer.from_pretrained("/output/path") ``` -This model was contributed by [ArthurZ](https://huggingface.co/ArthurZ). -The original code can be found [here](https://github.com/persimmon-ai-labs/adept-inference). - Perismmon uses a `sentencepiece` based tokenizer, with a `Unigram` model. It supports bytefallback, which is only available in `tokenizers==0.14.0` for the fast tokenizer. The `LlamaTokenizer` is used as it is a standard wrapper around sentencepiece. The `chat` template will be updated with the templating functions in a follow up PR! 
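To round out the conversion snippet above, here is a minimal, hedged generation sketch. It assumes `/output/path` is the directory written by the conversion script and that the saved folder loads through the `Auto` classes; the prompt and generation settings are illustrative only.

```python
# Minimal sketch, assuming /output/path was produced by the conversion step above.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("/output/path")
model = AutoModelForCausalLM.from_pretrained("/output/path")

inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```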
diff --git a/docs/source/en/model_doc/phobert.md b/docs/source/en/model_doc/phobert.md index 5543a9b3541a..30a50275476e 100644 --- a/docs/source/en/model_doc/phobert.md +++ b/docs/source/en/model_doc/phobert.md @@ -28,7 +28,9 @@ best pre-trained multilingual model XLM-R (Conneau et al., 2020) and improves th Vietnamese-specific NLP tasks including Part-of-speech tagging, Dependency parsing, Named-entity recognition and Natural language inference.* -Example of use: +This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/PhoBERT). + +## Usage example ```python >>> import torch @@ -50,7 +52,12 @@ Example of use: >>> # phobert = TFAutoModel.from_pretrained("vinai/phobert-base") ``` -This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/PhoBERT). + + +PhoBERT implementation is the same as BERT, except for tokenization. Refer to [EART documentation](bert) for information on +configuration classes and their parameters. PhoBERT-specific tokenizer is documented below. + + ## PhobertTokenizer diff --git a/docs/source/en/model_doc/pix2struct.md b/docs/source/en/model_doc/pix2struct.md index b722a59b82e6..8dc179f5f863 100644 --- a/docs/source/en/model_doc/pix2struct.md +++ b/docs/source/en/model_doc/pix2struct.md @@ -39,7 +39,6 @@ The original code can be found [here](https://github.com/google-research/pix2str - [Fine-tuning Notebook](https://github.com/huggingface/notebooks/blob/main/examples/image_captioning_pix2struct.ipynb) - [All models](https://huggingface.co/models?search=pix2struct) - ## Pix2StructConfig [[autodoc]] Pix2StructConfig diff --git a/docs/source/en/model_doc/plbart.md b/docs/source/en/model_doc/plbart.md index c9f502021485..61af52e54d0d 100644 --- a/docs/source/en/model_doc/plbart.md +++ b/docs/source/en/model_doc/plbart.md @@ -16,10 +16,7 @@ rendered properly in your Markdown viewer. # PLBart -**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign -[@gchhablani](https://www.github.com/gchhablani). - -## Overview of PLBart +## Overview The PLBART model was proposed in [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. This is a BART-like model which can be used to perform code-summarization, code-generation, and code-translation tasks. The pre-trained model `plbart-base` has been trained using multilingual denoising task @@ -40,7 +37,7 @@ even with limited annotations.* This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The Authors' code can be found [here](https://github.com/wasiahmad/PLBART). -### Training of PLBart +## Usage examples PLBart is a multilingual encoder-decoder (sequence-to-sequence) model primarily intended for code-to-text, text-to-code, code-to-code tasks. As the model is multilingual it expects the sequences in a different format. A special language id token is added in both the @@ -53,7 +50,7 @@ In cases where the language code is needed, the regular [`~PLBartTokenizer.__cal when you pass texts as the first argument or with the keyword argument `text`, and will encode target text format if it's passed with the `text_target` keyword argument. 
-- Supervised training +### Supervised training ```python >>> from transformers import PLBartForConditionalGeneration, PLBartTokenizer @@ -65,7 +62,7 @@ it's passed with the `text_target` keyword argument. >>> model(**inputs) ``` -- Generation +### Generation While generating the target text set the `decoder_start_token_id` to the target language id. The following example shows how to translate Python to English using the `uclanlp/plbart-python-en_XX` model. @@ -82,7 +79,7 @@ it's passed with the `text_target` keyword argument. "Returns the maximum value of a b c." ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Causal language modeling task guide](../tasks/language_modeling) diff --git a/docs/source/en/model_doc/poolformer.md b/docs/source/en/model_doc/poolformer.md index 537c60bdbcf6..823c4412485c 100644 --- a/docs/source/en/model_doc/poolformer.md +++ b/docs/source/en/model_doc/poolformer.md @@ -28,8 +28,9 @@ The figure below illustrates the architecture of PoolFormer. Taken from the [ori +This model was contributed by [heytanay](https://huggingface.co/heytanay). The original code can be found [here](https://github.com/sail-sg/poolformer). -Tips: +## Usage tips - PoolFormer has a hierarchical architecture, where instead of Attention, a simple Average Pooling layer is present. All checkpoints of the model can be found on the [hub](https://huggingface.co/models?other=poolformer). - One can use [`PoolFormerImageProcessor`] to prepare images for the model. @@ -43,8 +44,6 @@ Tips: | m36 | [6, 6, 18, 6] | [96, 192, 384, 768] | 56 | 82.1 | | m48 | [8, 8, 24, 8] | [96, 192, 384, 768] | 73 | 82.5 | -This model was contributed by [heytanay](https://huggingface.co/heytanay). The original code can be found [here](https://github.com/sail-sg/poolformer). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with PoolFormer. diff --git a/docs/source/en/model_doc/pop2piano.md b/docs/source/en/model_doc/pop2piano.md index 95fd83f19237..8e52eda70cc0 100644 --- a/docs/source/en/model_doc/pop2piano.md +++ b/docs/source/en/model_doc/pop2piano.md @@ -32,7 +32,6 @@ is transformed to its waveform and passed to the encoder, which transforms it to uses these latent representations to generate token ids in an autoregressive way. Each token id corresponds to one of four different token types: time, velocity, note and 'special'. The token ids are then decoded to their equivalent MIDI file. - The abstract from the paper is the following: *Piano covers of pop music are enjoyed by many people. However, the @@ -49,22 +48,21 @@ directly from pop audio without using melody and chord extraction modules. We show that Pop2Piano, trained with our dataset, is capable of producing plausible piano covers.* +This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). +The original code can be found [here](https://github.com/sweetcocoa/pop2piano). -Tips: +## Usage tips -1. To use Pop2Piano, you will need to install the 🤗 Transformers library, as well as the following third party modules: +* To use Pop2Piano, you will need to install the 🤗 Transformers library, as well as the following third party modules: ``` pip install pretty-midi==0.2.9 essentia==2.1b6.dev1034 librosa scipy ``` Please note that you may need to restart your runtime after installation. -2. Pop2Piano is an Encoder-Decoder based model like T5. -3. 
Pop2Piano can be used to generate midi-audio files for a given audio sequence. -4. Choosing different composers in `Pop2PianoForConditionalGeneration.generate()` can lead to variety of different results. -5. Setting the sampling rate to 44.1 kHz when loading the audio file can give good performance. -6. Though Pop2Piano was mainly trained on Korean Pop music, it also does pretty well on other Western Pop or Hip Hop songs. - -This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). -The original code can be found [here](https://github.com/sweetcocoa/pop2piano). +* Pop2Piano is an Encoder-Decoder based model like T5. +* Pop2Piano can be used to generate midi-audio files for a given audio sequence. +* Choosing different composers in `Pop2PianoForConditionalGeneration.generate()` can lead to variety of different results. +* Setting the sampling rate to 44.1 kHz when loading the audio file can give good performance. +* Though Pop2Piano was mainly trained on Korean Pop music, it also does pretty well on other Western Pop or Hip Hop songs. ## Examples diff --git a/docs/source/en/model_doc/prophetnet.md b/docs/source/en/model_doc/prophetnet.md index 6ab0937da77e..7e63e0c0887e 100644 --- a/docs/source/en/model_doc/prophetnet.md +++ b/docs/source/en/model_doc/prophetnet.md @@ -25,10 +25,6 @@ rendered properly in your Markdown viewer. - -**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign -@patrickvonplaten - ## Overview The ProphetNet model was proposed in [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei @@ -49,15 +45,15 @@ dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Giga abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.* -Tips: +The Authors' code can be found [here](https://github.com/microsoft/ProphetNet). + +## Usage tips - ProphetNet is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - The model architecture is based on the original Transformer, but replaces the “standard” self-attention mechanism in the decoder by a a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism. -The Authors' code can be found [here](https://github.com/microsoft/ProphetNet). - -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) diff --git a/docs/source/en/model_doc/qdqbert.md b/docs/source/en/model_doc/qdqbert.md index 62a0e010843d..9ee42ff3b49d 100644 --- a/docs/source/en/model_doc/qdqbert.md +++ b/docs/source/en/model_doc/qdqbert.md @@ -32,22 +32,18 @@ by processors with high-throughput integer math pipelines. We also present a wor able to maintain accuracy within 1% of the floating-point baseline on all networks studied, including models that are more difficult to quantize, such as MobileNets and BERT-large.* -Tips: +This model was contributed by [shangz](https://huggingface.co/shangz). 
+ +## Usage tips - QDQBERT model adds fake quantization operations (pair of QuantizeLinear/DequantizeLinear ops) to (i) linear layer inputs and weights, (ii) matmul inputs, (iii) residual add inputs, in BERT model. - - QDQBERT requires the dependency of [Pytorch Quantization Toolkit](https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization). To install `pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com` - - QDQBERT model can be loaded from any checkpoint of HuggingFace BERT model (for example *bert-base-uncased*), and perform Quantization Aware Training/Post Training Quantization. - - A complete example of using QDQBERT model to perform Quatization Aware Training and Post Training Quantization for SQUAD task can be found at [transformers/examples/research_projects/quantization-qdqbert/](examples/research_projects/quantization-qdqbert/). -This model was contributed by [shangz](https://huggingface.co/shangz). - - ### Set default quantizers QDQBERT model adds fake quantization operations (pair of QuantizeLinear/DequantizeLinear ops) to BERT by @@ -118,7 +114,7 @@ the instructions in [torch.onnx](https://pytorch.org/docs/stable/onnx.html). Exa >>> torch.onnx.export(...) ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/rag.md b/docs/source/en/model_doc/rag.md index b467c6169f66..1891efe74263 100644 --- a/docs/source/en/model_doc/rag.md +++ b/docs/source/en/model_doc/rag.md @@ -52,8 +52,12 @@ parametric-only seq2seq baseline.* This model was contributed by [ola13](https://huggingface.co/ola13). -Tips: -- Retrieval-augmented generation (“RAG”) models combine the powers of pretrained dense retrieval (DPR) and Seq2Seq models. RAG models retrieve docs, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks. +## Usage tips + +Retrieval-augmented generation ("RAG") models combine the powers of pretrained dense retrieval (DPR) and Seq2Seq models. +RAG models retrieve docs, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq +modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt +to downstream tasks. ## RagConfig @@ -73,6 +77,9 @@ Tips: [[autodoc]] RagRetriever + + + ## RagModel [[autodoc]] RagModel @@ -90,6 +97,9 @@ Tips: - forward - generate + + + ## TFRagModel [[autodoc]] TFRagModel @@ -106,3 +116,6 @@ Tips: [[autodoc]] TFRagTokenForGeneration - call - generate + + + diff --git a/docs/source/en/model_doc/reformer.md b/docs/source/en/model_doc/reformer.md index 05274c7667b7..ec924dc50c44 100644 --- a/docs/source/en/model_doc/reformer.md +++ b/docs/source/en/model_doc/reformer.md @@ -25,8 +25,6 @@ rendered properly in your Markdown viewer. -**DISCLAIMER:** This model is still a work in progress, if you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title). - ## Overview The Reformer model was proposed in the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451.pdf) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. 
@@ -44,7 +42,7 @@ while being much more memory-efficient and much faster on long sequences.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be found [here](https://github.com/google/trax/tree/master/trax/models/reformer). -Tips: +## Usage tips - Reformer does **not** work with *torch.nn.DataParallel* due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035). - Use Axial position encoding (see below for more details). It’s a mechanism to avoid having a huge positional encoding matrix (when the sequence length is very big) by factorizing it into smaller matrices. @@ -52,7 +50,7 @@ Tips: - Avoid storing the intermediate results of each layer by using reversible transformer layers to obtain them during the backward pass (subtracting the residuals from the input of the next layer gives them back) or recomputing them for results inside a given layer (less efficient than storing them but saves memory). - Compute the feedforward operations by chunks and not on the whole batch. -## Axial Positional Encodings +### Axial Positional Encodings Axial Positional Encodings were first implemented in Google's [trax library](https://github.com/google/trax/blob/4d99ad4965bab1deba227539758d59f0df0fef48/trax/layers/research/position_encodings.py#L29) and developed by the authors of this model's paper. In models that are treating very long input sequences, the @@ -96,7 +94,7 @@ product has to be equal to `config.max_embedding_size`, which during training ha length* of the `input_ids`. -## LSH Self Attention +### LSH Self Attention In Locality sensitive hashing (LSH) self attention the key and query projection weights are tied. Therefore, the key query embedding vectors are also tied. LSH self attention uses the locality sensitive hashing mechanism proposed in @@ -129,7 +127,7 @@ Using LSH self attention, the memory and time complexity of the query-key matmul and time bottleneck in a transformer model, with \\(n_s\\) being the sequence length. -## Local Self Attention +### Local Self Attention Local self attention is essentially a "normal" self attention layer with key, query and value projections, but is chunked so that in each chunk of length `config.local_chunk_length` the query embedding vectors only attends to @@ -141,7 +139,7 @@ Using Local self attention, the memory and time complexity of the query-key matm and time bottleneck in a transformer model, with \\(n_s\\) being the sequence length. -## Training +### Training During training, we must ensure that the sequence length is set to a value that can be divided by the least common multiple of `config.lsh_chunk_length` and `config.local_chunk_length` and that the parameters of the Axial @@ -155,7 +153,7 @@ input_ids = tokenizer.encode("This is a sentence from the training data", return loss = model(input_ids, labels=input_ids)[0] ``` -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) diff --git a/docs/source/en/model_doc/regnet.md b/docs/source/en/model_doc/regnet.md index 89e89459bd7f..2a8f7e733d85 100644 --- a/docs/source/en/model_doc/regnet.md +++ b/docs/source/en/model_doc/regnet.md @@ -26,15 +26,13 @@ The abstract from the paper is the following: *In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. 
Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs.* -Tips: - -- One can use [`AutoImageProcessor`] to prepare images for the model. -- The huge 10B model from [Self-supervised Pretraining of Visual Features in the Wild](https://arxiv.org/abs/2103.01988), trained on one billion Instagram images, is available on the [hub](https://huggingface.co/facebook/regnet-y-10b-seer) - This model was contributed by [Francesco](https://huggingface.co/Francesco). The TensorFlow version of the model was contributed by [sayakpaul](https://huggingface.com/sayakpaul) and [ariG23498](https://huggingface.com/ariG23498). The original code can be found [here](https://github.com/facebookresearch/pycls). +The huge 10B model from [Self-supervised Pretraining of Visual Features in the Wild](https://arxiv.org/abs/2103.01988), +trained on one billion Instagram images, is available on the [hub](https://huggingface.co/facebook/regnet-y-10b-seer) + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with RegNet. @@ -50,37 +48,43 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] RegNetConfig + + ## RegNetModel [[autodoc]] RegNetModel - forward - ## RegNetForImageClassification [[autodoc]] RegNetForImageClassification - forward + + + ## TFRegNetModel [[autodoc]] TFRegNetModel - call - ## TFRegNetForImageClassification [[autodoc]] TFRegNetForImageClassification - call + + ## FlaxRegNetModel [[autodoc]] FlaxRegNetModel - __call__ - ## FlaxRegNetForImageClassification [[autodoc]] FlaxRegNetForImageClassification - - __call__ \ No newline at end of file + - __call__ + + diff --git a/docs/source/en/model_doc/rembert.md b/docs/source/en/model_doc/rembert.md index b2e4d0f5adae..b755d3423060 100644 --- a/docs/source/en/model_doc/rembert.md +++ b/docs/source/en/model_doc/rembert.md @@ -34,14 +34,14 @@ Transformer representations to be more general and more transferable to other ta findings, we are able to train models that achieve strong performance on the XTREME benchmark without increasing the number of parameters at the fine-tuning stage.* -Tips: +## Usage tips For fine-tuning, RemBERT can be thought of as a bigger version of mBERT with an ALBERT-like factorization of the embedding layer. The embeddings are not tied in pre-training, in contrast with BERT, which enables smaller input embeddings (preserved during fine-tuning) and bigger output embeddings (discarded at fine-tuning). The tokenizer is also similar to the Albert one rather than the BERT one. 
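The decoupled embedding sizes described above can be inspected directly from the model configuration. Treat the snippet below as a sketch: the attribute names follow [`RemBertConfig`], and `google/rembert` is assumed to be the released checkpoint, so the printed values are whatever that checkpoint ships with.

```python
# Sketch of the ALBERT-like embedding factorization: the input embedding is kept
# small, while the larger output embedding is only needed for pre-training.
from transformers import RemBertConfig

config = RemBertConfig.from_pretrained("google/rembert")
print(config.input_embedding_size)   # small, preserved during fine-tuning
print(config.output_embedding_size)  # large, discarded at fine-tuning
print(config.hidden_size)            # Transformer hidden size, independent of both
```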
-## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -70,6 +70,9 @@ also similar to the Albert one rather than the BERT one. - create_token_type_ids_from_sequences - save_vocabulary + + + ## RemBertModel [[autodoc]] RemBertModel @@ -105,6 +108,9 @@ also similar to the Albert one rather than the BERT one. [[autodoc]] RemBertForQuestionAnswering - forward + + + ## TFRemBertModel [[autodoc]] TFRemBertModel @@ -139,3 +145,6 @@ also similar to the Albert one rather than the BERT one. [[autodoc]] TFRemBertForQuestionAnswering - call + + + diff --git a/docs/source/en/model_doc/resnet.md b/docs/source/en/model_doc/resnet.md index 9bb36a776f16..b959266512f5 100644 --- a/docs/source/en/model_doc/resnet.md +++ b/docs/source/en/model_doc/resnet.md @@ -27,10 +27,6 @@ The abstract from the paper is the following: *Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.* -Tips: - -- One can use [`AutoImageProcessor`] to prepare images for the model. - The figure below illustrates the architecture of ResNet. Taken from the [original paper](https://arxiv.org/abs/1512.03385). 
@@ -52,30 +48,35 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] ResNetConfig + + ## ResNetModel [[autodoc]] ResNetModel - forward - ## ResNetForImageClassification [[autodoc]] ResNetForImageClassification - forward + + ## TFResNetModel [[autodoc]] TFResNetModel - call - ## TFResNetForImageClassification [[autodoc]] TFResNetForImageClassification - call + + + ## FlaxResNetModel [[autodoc]] FlaxResNetModel @@ -85,3 +86,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] FlaxResNetForImageClassification - __call__ + + + diff --git a/docs/source/en/model_doc/roberta-prelayernorm.md b/docs/source/en/model_doc/roberta-prelayernorm.md index 9822fd7af961..000c0a7d2d80 100644 --- a/docs/source/en/model_doc/roberta-prelayernorm.md +++ b/docs/source/en/model_doc/roberta-prelayernorm.md @@ -25,15 +25,15 @@ The abstract from the paper is the following: *fairseq is an open-source sequence modeling toolkit that allows researchers and developers to train custom models for translation, summarization, language modeling, and other text generation tasks. The toolkit is based on PyTorch and supports distributed training across multiple GPUs and machines. We also support fast mixed-precision training and inference on modern GPUs.* -Tips: +This model was contributed by [andreasmaden](https://huggingface.co/andreasmaden). +The original code can be found [here](https://github.com/princeton-nlp/DinkyTrain). + +## Usage tips - The implementation is the same as [Roberta](roberta) except instead of using _Add and Norm_ it does _Norm and Add_. _Add_ and _Norm_ refers to the Addition and LayerNormalization as described in [Attention Is All You Need](https://arxiv.org/abs/1706.03762). - This is identical to using the `--encoder-normalize-before` flag in [fairseq](https://fairseq.readthedocs.io/). -This model was contributed by [andreasmaden](https://huggingface.co/andreasmaden). -The original code can be found [here](https://github.com/princeton-nlp/DinkyTrain). - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -46,6 +46,9 @@ The original code can be found [here](https://github.com/princeton-nlp/DinkyTrai [[autodoc]] RobertaPreLayerNormConfig + + + ## RobertaPreLayerNormModel [[autodoc]] RobertaPreLayerNormModel @@ -81,6 +84,9 @@ The original code can be found [here](https://github.com/princeton-nlp/DinkyTrai [[autodoc]] RobertaPreLayerNormForQuestionAnswering - forward + + + ## TFRobertaPreLayerNormModel [[autodoc]] TFRobertaPreLayerNormModel @@ -116,6 +122,9 @@ The original code can be found [here](https://github.com/princeton-nlp/DinkyTrai [[autodoc]] TFRobertaPreLayerNormForQuestionAnswering - call + + + ## FlaxRobertaPreLayerNormModel [[autodoc]] FlaxRobertaPreLayerNormModel @@ -150,3 +159,6 @@ The original code can be found [here](https://github.com/princeton-nlp/DinkyTrai [[autodoc]] FlaxRobertaPreLayerNormForQuestionAnswering - __call__ + + + diff --git a/docs/source/en/model_doc/roberta.md b/docs/source/en/model_doc/roberta.md index 5a2ba6b5cf66..364b5b37e5f3 100644 --- a/docs/source/en/model_doc/roberta.md +++ b/docs/source/en/model_doc/roberta.md @@ -47,7 +47,9 @@ model published after it. Our best model achieves state-of-the-art results on GL highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. 
We release our models and code.* -Tips: +This model was contributed by [julien-c](https://huggingface.co/julien-c). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/roberta). + +## Usage tips - This implementation is the same as [`BertModel`] with a tiny embeddings tweak as well as a setup for Roberta pretrained models. @@ -63,8 +65,6 @@ Tips: * use BPE with bytes as a subunit and not characters (because of unicode characters) - [CamemBERT](camembert) is a wrapper around RoBERTa. Refer to this page for usage examples. -This model was contributed by [julien-c](https://huggingface.co/julien-c). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/roberta). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with RoBERTa. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. @@ -127,6 +127,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] RobertaTokenizerFast - build_inputs_with_special_tokens + + + ## RobertaModel [[autodoc]] RobertaModel @@ -162,6 +165,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] RobertaForQuestionAnswering - forward + + + ## TFRobertaModel [[autodoc]] TFRobertaModel @@ -197,6 +203,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFRobertaForQuestionAnswering - call + + + ## FlaxRobertaModel [[autodoc]] FlaxRobertaModel @@ -231,3 +240,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxRobertaForQuestionAnswering - __call__ + + + diff --git a/docs/source/en/model_doc/roc_bert.md b/docs/source/en/model_doc/roc_bert.md index 831c656fb817..30fadd5c2c10 100644 --- a/docs/source/en/model_doc/roc_bert.md +++ b/docs/source/en/model_doc/roc_bert.md @@ -35,7 +35,7 @@ in the toxic content detection task under human-made attacks.* This model was contributed by [weiweishi](https://huggingface.co/weiweishi). -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -49,7 +49,6 @@ This model was contributed by [weiweishi](https://huggingface.co/weiweishi). [[autodoc]] RoCBertConfig - all - ## RoCBertTokenizer [[autodoc]] RoCBertTokenizer @@ -58,31 +57,26 @@ This model was contributed by [weiweishi](https://huggingface.co/weiweishi). - create_token_type_ids_from_sequences - save_vocabulary - ## RoCBertModel [[autodoc]] RoCBertModel - forward - ## RoCBertForPreTraining [[autodoc]] RoCBertForPreTraining - forward - ## RoCBertForCausalLM [[autodoc]] RoCBertForCausalLM - forward - ## RoCBertForMaskedLM [[autodoc]] RoCBertForMaskedLM - forward - ## RoCBertForSequenceClassification [[autodoc]] transformers.RoCBertForSequenceClassification @@ -93,14 +87,12 @@ This model was contributed by [weiweishi](https://huggingface.co/weiweishi). 
[[autodoc]] transformers.RoCBertForMultipleChoice - forward - ## RoCBertForTokenClassification [[autodoc]] transformers.RoCBertForTokenClassification - forward - ## RoCBertForQuestionAnswering [[autodoc]] RoCBertForQuestionAnswering - - forward \ No newline at end of file + - forward diff --git a/docs/source/en/model_doc/roformer.md b/docs/source/en/model_doc/roformer.md index f15a1062965f..5d8f146c43fd 100644 --- a/docs/source/en/model_doc/roformer.md +++ b/docs/source/en/model_doc/roformer.md @@ -33,15 +33,13 @@ transformer with rotary position embedding, or RoFormer, achieves superior perfo release the theoretical analysis along with some preliminary experiment results on Chinese data. The undergoing experiment for English benchmark will soon be updated.* -Tips: - -- RoFormer is a BERT-like autoencoding model with rotary position embeddings. Rotary position embeddings have shown - improved performance on classification tasks with long texts. - - This model was contributed by [junnyu](https://huggingface.co/junnyu). The original code can be found [here](https://github.com/ZhuiyiTechnology/roformer). -## Documentation resources +## Usage tips +RoFormer is a BERT-like autoencoding model with rotary position embeddings. Rotary position embeddings have shown +improved performance on classification tasks with long texts. + +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -67,6 +65,9 @@ This model was contributed by [junnyu](https://huggingface.co/junnyu). The origi [[autodoc]] RoFormerTokenizerFast - build_inputs_with_special_tokens + + + ## RoFormerModel [[autodoc]] RoFormerModel @@ -102,6 +103,9 @@ This model was contributed by [junnyu](https://huggingface.co/junnyu). The origi [[autodoc]] RoFormerForQuestionAnswering - forward + + + ## TFRoFormerModel [[autodoc]] TFRoFormerModel @@ -137,6 +141,9 @@ This model was contributed by [junnyu](https://huggingface.co/junnyu). The origi [[autodoc]] TFRoFormerForQuestionAnswering - call + + + ## FlaxRoFormerModel [[autodoc]] FlaxRoFormerModel @@ -166,3 +173,6 @@ This model was contributed by [junnyu](https://huggingface.co/junnyu). The origi [[autodoc]] FlaxRoFormerForQuestionAnswering - __call__ + + + diff --git a/docs/source/en/model_doc/rwkv.md b/docs/source/en/model_doc/rwkv.md index 9293db14cc63..3dfcf7ba4b55 100644 --- a/docs/source/en/model_doc/rwkv.md +++ b/docs/source/en/model_doc/rwkv.md @@ -27,7 +27,7 @@ This can be more efficient than a regular Transformer and can deal with sentence This model was contributed by [sgugger](https://huggingface.co/sgugger). The original code can be found [here](https://github.com/BlinkDL/RWKV-LM). -Example of use as an RNN: +## Usage example ```py import torch @@ -73,7 +73,6 @@ output = model.generate(inputs["input_ids"], max_new_tokens=64, stopping_criteri [[autodoc]] RwkvConfig - ## RwkvModel [[autodoc]] RwkvModel diff --git a/docs/source/en/model_doc/segformer.md b/docs/source/en/model_doc/segformer.md index 0f535351af5c..4edd646cd4fa 100644 --- a/docs/source/en/model_doc/segformer.md +++ b/docs/source/en/model_doc/segformer.md @@ -43,7 +43,7 @@ The figure below illustrates the architecture of SegFormer. Taken from the [orig This model was contributed by [nielsr](https://huggingface.co/nielsr). The TensorFlow version of the model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code can be found [here](https://github.com/NVlabs/SegFormer). 
-Tips: +## Usage tips - SegFormer consists of a hierarchical Transformer encoder, and a lightweight all-MLP decoder head. [`SegformerModel`] is the hierarchical Transformer encoder (which in the paper is also referred to @@ -123,6 +123,9 @@ If you're interested in submitting a resource to be included here, please feel f - preprocess - post_process_semantic_segmentation + + + ## SegformerModel [[autodoc]] SegformerModel @@ -143,6 +146,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] SegformerForSemanticSegmentation - forward + + + ## TFSegformerDecodeHead [[autodoc]] TFSegformerDecodeHead @@ -162,3 +168,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] TFSegformerForSemanticSegmentation - call + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/sew-d.md b/docs/source/en/model_doc/sew-d.md index b70c59061b57..013e404bd045 100644 --- a/docs/source/en/model_doc/sew-d.md +++ b/docs/source/en/model_doc/sew-d.md @@ -32,15 +32,15 @@ variety of training setups. For example, under the 100h-960h semi-supervised set inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.* -Tips: +This model was contributed by [anton-l](https://huggingface.co/anton-l). + +## Usage tips - SEW-D is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - SEWDForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. -This model was contributed by [anton-l](https://huggingface.co/anton-l). - -## Documentation resources +## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) diff --git a/docs/source/en/model_doc/sew.md b/docs/source/en/model_doc/sew.md index ebf128ea429f..ee8a36a4dcb2 100644 --- a/docs/source/en/model_doc/sew.md +++ b/docs/source/en/model_doc/sew.md @@ -32,15 +32,15 @@ variety of training setups. For example, under the 100h-960h semi-supervised set inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.* -Tips: +This model was contributed by [anton-l](https://huggingface.co/anton-l). + +## Usage tips - SEW is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - SEWForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. -This model was contributed by [anton-l](https://huggingface.co/anton-l). - -## Documentation resources +## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) diff --git a/docs/source/en/model_doc/speech_to_text.md b/docs/source/en/model_doc/speech_to_text.md index cb13a1871ae6..23512b323af6 100644 --- a/docs/source/en/model_doc/speech_to_text.md +++ b/docs/source/en/model_doc/speech_to_text.md @@ -27,7 +27,6 @@ transcripts/translations autoregressively. Speech2Text has been fine-tuned on se This model was contributed by [valhalla](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text). 
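The SEW and SEW-D tips above state that the CTC head's output has to be decoded with [`Wav2Vec2CTCTokenizer`]. The following is a minimal, illustrative sketch of that greedy decoding loop, assuming the `asapp/sew-tiny-100k-ft-ls100h` checkpoint and a dummy one-second 16 kHz waveform in place of real audio:

```python
import numpy as np
import torch
from transformers import AutoProcessor, SEWForCTC

# Assumed fine-tuned checkpoint; any SEW/SEW-D CTC checkpoint should behave the same way.
processor = AutoProcessor.from_pretrained("asapp/sew-tiny-100k-ft-ls100h")
model = SEWForCTC.from_pretrained("asapp/sew-tiny-100k-ft-ls100h")

# Dummy raw waveform standing in for a real 16 kHz recording.
waveform = np.zeros(16000, dtype=np.float32)
inputs = processor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # (batch, time, vocab)

predicted_ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
transcription = processor.batch_decode(predicted_ids)
print(transcription)
```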
- ## Inference Speech2Text is a speech model that accepts a float tensor of log-mel filter-bank features extracted from the speech @@ -44,7 +43,6 @@ install those packages before running the examples. You could either install tho `pip install transformers"[speech, sentencepiece]"` or install the packages separately with `pip install torchaudio sentencepiece`. Also `torchaudio` requires the development version of the [libsndfile](http://www.mega-nerd.com/libsndfile/) package which can be installed via a system package manager. On Ubuntu it can be installed as follows: `apt install libsndfile1-dev` - - ASR and Speech Translation ```python @@ -98,7 +96,6 @@ be installed as follows: `apt install libsndfile1-dev` See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look for Speech2Text checkpoints. - ## Speech2TextConfig [[autodoc]] Speech2TextConfig @@ -125,6 +122,9 @@ See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look - batch_decode - decode + + + ## Speech2TextModel [[autodoc]] Speech2TextModel @@ -135,6 +135,9 @@ See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look [[autodoc]] Speech2TextForConditionalGeneration - forward + + + ## TFSpeech2TextModel [[autodoc]] TFSpeech2TextModel @@ -144,3 +147,6 @@ See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look [[autodoc]] TFSpeech2TextForConditionalGeneration - call + + + diff --git a/docs/source/en/model_doc/speech_to_text_2.md b/docs/source/en/model_doc/speech_to_text_2.md index 1abdeced580e..6648e67f629d 100644 --- a/docs/source/en/model_doc/speech_to_text_2.md +++ b/docs/source/en/model_doc/speech_to_text_2.md @@ -31,8 +31,7 @@ This model was contributed by [Patrick von Platen](https://huggingface.co/patric The original code can be found [here](https://github.com/pytorch/fairseq/blob/1f7ef9ed1e1061f8c7f88f8b94c7186834398690/fairseq/models/wav2vec/wav2vec2_asr.py#L266). - -Tips: +## Usage tips - Speech2Text2 achieves state-of-the-art results on the CoVoST Speech Translation dataset. For more information, see the [official models](https://huggingface.co/models?other=speech2text2) . @@ -98,7 +97,7 @@ predicted token ids. See [model hub](https://huggingface.co/models?filter=speech2text2) to look for Speech2Text2 checkpoints. -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) diff --git a/docs/source/en/model_doc/splinter.md b/docs/source/en/model_doc/splinter.md index f16169d9b218..a46c55966c0e 100644 --- a/docs/source/en/model_doc/splinter.md +++ b/docs/source/en/model_doc/splinter.md @@ -34,7 +34,9 @@ are replaced with a special token, viewed as a question representation, that is the answer span. The resulting model obtains surprisingly good results on multiple benchmarks (e.g., 72.7 F1 on SQuAD with only 128 training examples), while maintaining competitive performance in the high-resource setting. -Tips: +This model was contributed by [yuvalkirstain](https://huggingface.co/yuvalkirstain) and [oriram](https://huggingface.co/oriram). The original code can be found [here](https://github.com/oriram/splinter). + +## Usage tips - Splinter was trained to predict answers spans conditioned on a special [QUESTION] token. These tokens contextualize to question representations which are used to predict the answers. This layer is called QASS, and is the default @@ -49,9 +51,7 @@ Tips: doesn't (*tau/splinter-base* and *tau/splinter-large*). 
This is done to support randomly initializing this layer at fine-tuning, as it is shown to yield better results for some cases in the paper. -This model was contributed by [yuvalkirstain](https://huggingface.co/yuvalkirstain) and [oriram](https://huggingface.co/oriram). The original code can be found [here](https://github.com/oriram/splinter). - -## Documentation resources +## Resources - [Question answering task guide](../tasks/question-answering) diff --git a/docs/source/en/model_doc/squeezebert.md b/docs/source/en/model_doc/squeezebert.md index 515a2ef31781..e2bb378fe5bb 100644 --- a/docs/source/en/model_doc/squeezebert.md +++ b/docs/source/en/model_doc/squeezebert.md @@ -38,7 +38,9 @@ self-attention layers with grouped convolutions, and we use this technique in a SqueezeBERT, which runs 4.3x faster than BERT-base on the Pixel 3 while achieving competitive accuracy on the GLUE test set. The SqueezeBERT code will be released.* -Tips: +This model was contributed by [forresti](https://huggingface.co/forresti). + +## Usage tips - SqueezeBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. @@ -48,9 +50,7 @@ Tips: - For best results when finetuning on sequence classification tasks, it is recommended to start with the *squeezebert/squeezebert-mnli-headless* checkpoint. -This model was contributed by [forresti](https://huggingface.co/forresti). - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/swiftformer.md b/docs/source/en/model_doc/swiftformer.md index 67c9597d2123..30c6941f0f46 100644 --- a/docs/source/en/model_doc/swiftformer.md +++ b/docs/source/en/model_doc/swiftformer.md @@ -26,14 +26,9 @@ The abstract from the paper is the following: *Self-attention has become a defacto choice for capturing global context in various vision applications. However, its quadratic computational complexity with respect to image resolution limits its use in real-time applications, especially for deployment on resource-constrained mobile devices. Although hybrid approaches have been proposed to combine the advantages of convolutions and self-attention for a better speed-accuracy trade-off, the expensive matrix multiplication operations in self-attention remain a bottleneck. In this work, we introduce a novel efficient additive attention mechanism that effectively replaces the quadratic matrix multiplication operations with linear element-wise multiplications. Our design shows that the key-value interaction can be replaced with a linear layer without sacrificing any accuracy. Unlike previous state-of-the-art methods, our efficient formulation of self-attention enables its usage at all stages of the network. Using our proposed efficient additive attention, we build a series of models called "SwiftFormer" which achieves state-of-the-art performance in terms of both accuracy and mobile inference speed. Our small variant achieves 78.5% top-1 ImageNet-1K accuracy with only 0.8 ms latency on iPhone 14, which is more accurate and 2x faster compared to MobileViT-v2.* -Tips: - - One can use the [`ViTImageProcessor`] API to prepare images for the model. - - This model was contributed by [shehan97](https://huggingface.co/shehan97). The original code can be found [here](https://github.com/Amshaker/SwiftFormer). 
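For SwiftFormer, the point about preparing images with the ViT-style image processor is easiest to see in code. A minimal classification sketch, assuming the `MBZUAI/swiftformer-xs` checkpoint and a random image in place of a real photo:

```python
import numpy as np
import torch
from transformers import AutoImageProcessor, SwiftFormerForImageClassification

# Assumed checkpoint name; AutoImageProcessor resolves to the ViT-style processor mentioned above.
processor = AutoImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")

# Random RGB image standing in for real input (a PIL image works the same way).
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```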
- ## SwiftFormerConfig [[autodoc]] SwiftFormerConfig diff --git a/docs/source/en/model_doc/swin.md b/docs/source/en/model_doc/swin.md index 37bb86db951a..e23c882a3f09 100644 --- a/docs/source/en/model_doc/swin.md +++ b/docs/source/en/model_doc/swin.md @@ -36,11 +36,6 @@ prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures.* -Tips: -- One can use the [`AutoImageProcessor`] API to prepare images for the model. -- Swin pads the inputs supporting any input height and width (if divisible by `32`). -- Swin can be used as a *backbone*. When `output_hidden_states = True`, it will output both `hidden_states` and `reshaped_hidden_states`. The `reshaped_hidden_states` have a shape of `(batch, num_channels, height, width)` rather than `(batch_size, sequence_length, num_channels)`. - drawing @@ -48,6 +43,10 @@ alt="drawing" width="600"/> This model was contributed by [novice03](https://huggingface.co/novice03). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). The original code can be found [here](https://github.com/microsoft/Swin-Transformer). +## Usage tips + +- Swin pads the inputs supporting any input height and width (if divisible by `32`). +- Swin can be used as a *backbone*. When `output_hidden_states = True`, it will output both `hidden_states` and `reshaped_hidden_states`. The `reshaped_hidden_states` have a shape of `(batch, num_channels, height, width)` rather than `(batch_size, sequence_length, num_channels)`. ## Resources @@ -68,6 +67,8 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] SwinConfig + + ## SwinModel @@ -84,6 +85,9 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] transformers.SwinForImageClassification - forward + + + ## TFSwinModel [[autodoc]] TFSwinModel @@ -98,3 +102,6 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] transformers.TFSwinForImageClassification - call + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/swinv2.md b/docs/source/en/model_doc/swinv2.md index e08389527ece..25233dca3395 100644 --- a/docs/source/en/model_doc/swinv2.md +++ b/docs/source/en/model_doc/swinv2.md @@ -24,9 +24,6 @@ The abstract from the paper is the following: *Large-scale NLP models have been shown to significantly improve the performance on language tasks with no signs of saturation. They also demonstrate amazing few-shot capabilities like that of human beings. This paper aims to explore large-scale models in computer vision. We tackle three major issues in training and application of large vision models, including training instability, resolution gaps between pre-training and fine-tuning, and hunger on labelled data. Three main techniques are proposed: 1) a residual-post-norm method combined with cosine attention to improve training stability; 2) A log-spaced continuous position bias method to effectively transfer models pre-trained using low-resolution images to downstream tasks with high-resolution inputs; 3) A self-supervised pre-training method, SimMIM, to reduce the needs of vast labeled images. 
Through these techniques, this paper successfully trained a 3 billion-parameter Swin Transformer V2 model, which is the largest dense vision model to date, and makes it capable of training with images of up to 1,536×1,536 resolution. It set new performance records on 4 representative vision tasks, including ImageNet-V2 image classification, COCO object detection, ADE20K semantic segmentation, and Kinetics-400 video action classification. Also note our training is much more efficient than that in Google's billion-level visual models, which consumes 40 times less labelled data and 40 times less training time.* -Tips: -- One can use the [`AutoImageProcessor`] API to prepare images for the model. - This model was contributed by [nandwalritik](https://huggingface.co/nandwalritik). The original code can be found [here](https://github.com/microsoft/Swin-Transformer). diff --git a/docs/source/en/model_doc/switch_transformers.md b/docs/source/en/model_doc/switch_transformers.md index 8f6a231b7ef7..5080f711ace0 100644 --- a/docs/source/en/model_doc/switch_transformers.md +++ b/docs/source/en/model_doc/switch_transformers.md @@ -23,19 +23,18 @@ The SwitchTransformers model was proposed in [Switch Transformers: Scaling to Tr The Switch Transformer model uses a sparse T5 encoder-decoder architecture, where the MLP are replaced by a Mixture of Experts (MoE). A routing mechanism (top 1 in this case) associates each token to one of the expert, where each expert is a dense MLP. While switch transformers have a lot more weights than their equivalent dense models, the sparsity allows better scaling and better finetuning performance at scale. During a forward pass, only a fraction of the weights are used. The routing mechanism allows the model to select relevant weights on the fly which increases the model capacity without increasing the number of operations. - The abstract from the paper is the following: *In deep learning, models typically reuse the same parameters for all inputs. Mixture of Experts (MoE) defies this and instead selects different parameters for each incoming example. The result is a sparsely-activated model -- with outrageous numbers of parameters -- but a constant computational cost. However, despite several notable successes of MoE, widespread adoption has been hindered by complexity, communication costs and training instability -- we address these with the Switch Transformer. We simplify the MoE routing algorithm and design intuitive improved models with reduced communication and computational costs. Our proposed training techniques help wrangle the instabilities and we show large sparse models may be trained, for the first time, with lower precision (bfloat16) formats. We design models based off T5-Base and T5-Large to obtain up to 7x increases in pre-training speed with the same computational resources. These improvements extend into multilingual settings where we measure gains over the mT5-Base version across all 101 languages. Finally, we advance the current scale of language models by pre-training up to trillion parameter models on the "Colossal Clean Crawled Corpus" and achieve a 4x speedup over the T5-XXL model.* -Tips: +This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArtZucker) . +The original code can be found [here](https://github.com/google/flaxformer/tree/main/flaxformer/architectures/moe). 
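For the sparse T5-style encoder-decoder described in the Switch Transformers overview above, a short generation sketch may help; it assumes the `google/switch-base-8` checkpoint and the T5-style span-corruption format with a sentinel token, matching the pretraining objective noted in the usage tips that follow:

```python
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration

# Assumed checkpoint; the tokenizer is the regular T5 tokenizer shipped with each repository.
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8")

# The released weights were pretrained on masked language modeling,
# so we query them with a sentinel token rather than a task prefix.
input_ids = tokenizer(
    "The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt"
).input_ids
outputs = model.generate(input_ids, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```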
+ +## Usage tips - SwitchTransformers uses the [`T5Tokenizer`], which can be loaded directly from each model's repository. - The released weights are pretrained on English [Masked Language Modeling](https://moon-ci-docs.huggingface.co/docs/transformers/pr_19323/en/glossary#general-terms) task, and should be finetuned. -This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArtZucker) . -The original code can be found [here](https://github.com/google/flaxformer/tree/main/flaxformer/architectures/moe). - ## Resources - [Translation task guide](../tasks/translation) diff --git a/docs/source/en/model_doc/t5.md b/docs/source/en/model_doc/t5.md index 2e833e8e1a67..704d05987b9b 100644 --- a/docs/source/en/model_doc/t5.md +++ b/docs/source/en/model_doc/t5.md @@ -45,7 +45,11 @@ with scale and our new "Colossal Clean Crawled Corpus", we achieve state-of-the- summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.* -Tips: +All checkpoints can be found on the [hub](https://huggingface.co/models?search=t5). + +This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/text-to-text-transfer-transformer). + +## Usage tips - T5 is an encoder-decoder model pre-trained on a multi-task mixture of unsupervised and supervised tasks and for which each task is converted into a text-to-text format. T5 works well on a variety of tasks out-of-the-box by prepending a @@ -91,12 +95,6 @@ Based on the original T5 model, Google has released some follow-up works: - **UMT5**: UmT5 is a multilingual T5 model trained on an improved and refreshed mC4 multilingual corpus, 29 trillion characters across 107 language, using a new sampling method, UniMax. Refer to the documentation of mT5 which can be found [here](umt5). -All checkpoints can be found on the [hub](https://huggingface.co/models?search=t5). - -This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/text-to-text-transfer-transformer). - - - ## Training T5 is an encoder-decoder model and converts all NLP problems into a text-to-text format. It is trained using teacher @@ -249,8 +247,6 @@ batches to the longest example is not recommended on TPU as it triggers a recomp encountered during training thus significantly slowing down the training. only padding up to the longest example in a batch) leads to very slow training on TPU. - - ## Inference At inference time, it is recommended to use [`~generation.GenerationMixin.generate`]. This @@ -316,9 +312,6 @@ The predicted tokens will then be placed between the sentinel tokens. [' park offers the park.'] ``` - - - ## Performance If you'd like a faster training and inference performance, install [apex](https://github.com/NVIDIA/apex#quick-start) and then the model will automatically use `apex.normalization.FusedRMSNorm` instead of `T5LayerNorm`. The former uses an optimized fused kernel which is several times faster than the latter. 
@@ -386,6 +379,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] T5TokenizerFast + + + ## T5Model [[autodoc]] T5Model @@ -411,6 +407,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] T5ForQuestionAnswering - forward + + + ## TFT5Model [[autodoc]] TFT5Model @@ -426,6 +425,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFT5EncoderModel - call + + + ## FlaxT5Model [[autodoc]] FlaxT5Model @@ -444,3 +446,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxT5EncoderModel - __call__ + + + diff --git a/docs/source/en/model_doc/t5v1.1.md b/docs/source/en/model_doc/t5v1.1.md index 900e26f521dd..e18696f629df 100644 --- a/docs/source/en/model_doc/t5v1.1.md +++ b/docs/source/en/model_doc/t5v1.1.md @@ -20,6 +20,10 @@ rendered properly in your Markdown viewer. T5v1.1 was released in the [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) repository by Colin Raffel et al. It's an improved version of the original T5 model. +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be +found [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511). + +## Usage tips One can directly plug in the weights of T5v1.1 into a T5 model, like so: @@ -59,7 +63,9 @@ Google has released the following variants: - [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl). -One can refer to [T5's documentation page](t5) for all tips, code examples and notebooks. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be -found [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511). + + +Refer to [T5's documentation page](t5) for all API reference, tips, code examples and notebooks. + + \ No newline at end of file diff --git a/docs/source/en/model_doc/table-transformer.md b/docs/source/en/model_doc/table-transformer.md index 7ea7ae8cd352..850e7f50aa61 100644 --- a/docs/source/en/model_doc/table-transformer.md +++ b/docs/source/en/model_doc/table-transformer.md @@ -33,16 +33,15 @@ significant increase in training performance and a more reliable estimate of mod object detection models trained on PubTables-1M produce excellent results for all three tasks of detection, structure recognition, and functional analysis without the need for any special customization for these tasks.* -Tips: - -- The authors released 2 models, one for [table detection](https://huggingface.co/microsoft/table-transformer-detection) in documents, one for [table structure recognition](https://huggingface.co/microsoft/table-transformer-structure-recognition) (the task of recognizing the individual rows, columns etc. in a table). -- One can use the [`AutoImageProcessor`] API to prepare images and optional targets for the model. This will load a [`DetrImageProcessor`] behind the scenes. - drawing Table detection and table structure recognition clarified. Taken from the original paper. 
+The authors released 2 models, one for [table detection](https://huggingface.co/microsoft/table-transformer-detection) in +documents, one for [table structure recognition](https://huggingface.co/microsoft/table-transformer-structure-recognition) +(the task of recognizing the individual rows, columns etc. in a table). + This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/table-transformer). diff --git a/docs/source/en/model_doc/tapas.md b/docs/source/en/model_doc/tapas.md index 1c76015f2857..78d2f3ee1380 100644 --- a/docs/source/en/model_doc/tapas.md +++ b/docs/source/en/model_doc/tapas.md @@ -44,7 +44,7 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The Tensorflow version of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/tapas). -Tips: +## Usage tips - TAPAS is a model that uses relative position embeddings by default (restarting the position embeddings at every cell of the table). Note that this is something that was added after the publication of the original TAPAS paper. According to the authors, this usually results in a slightly better performance, and allows you to encode longer sequences without running out of embeddings. This is reflected in the `reset_position_index_per_cell` parameter of [`TapasConfig`], which is set to `True` by default. The default versions of the models available on the [hub](https://huggingface.co/models?search=tapas) all use relative position embeddings. You can still use the ones with absolute position embeddings by passing in an additional argument `revision="no_reset"` when calling the `from_pretrained()` method. Note that it's usually advised to pad the inputs on the right rather than the left. - TAPAS is based on BERT, so `TAPAS-base` for example corresponds to a `BERT-base` architecture. Of course, `TAPAS-large` will result in the best performance (the results reported in the paper are from `TAPAS-large`). Results of the various sized models are shown on the [original Github repository](https://github.com/google-research/tapas>). @@ -573,7 +573,7 @@ Predicted answer: SUM > 87, 53, 69 In case of a conversational set-up, then each table-question pair must be provided **sequentially** to the model, such that the `prev_labels` token types can be overwritten by the predicted `labels` of the previous table-question pair. Again, more info can be found in [this notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb) (for PyTorch) and [this notebook](https://github.com/kamalkraj/Tapas-Tutorial/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb) (for TensorFlow). 
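The TAPAS tip above about position embeddings amounts to a single argument in practice. A minimal sketch, assuming the `google/tapas-base` checkpoint:

```python
from transformers import TapasModel

# Default checkpoints use relative position embeddings that restart at every table cell
# (reset_position_index_per_cell=True in TapasConfig).
model = TapasModel.from_pretrained("google/tapas-base")

# The same checkpoint with absolute position embeddings is selected via the `no_reset` revision.
model_absolute = TapasModel.from_pretrained("google/tapas-base", revision="no_reset")
```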
-## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Masked language modeling task guide](../tasks/masked_language_modeling) @@ -590,6 +590,9 @@ In case of a conversational set-up, then each table-question pair must be provid - convert_logits_to_predictions - save_vocabulary + + + ## TapasModel [[autodoc]] TapasModel - forward @@ -606,6 +609,9 @@ In case of a conversational set-up, then each table-question pair must be provid [[autodoc]] TapasForQuestionAnswering - forward + + + ## TFTapasModel [[autodoc]] TFTapasModel - call @@ -620,4 +626,9 @@ In case of a conversational set-up, then each table-question pair must be provid ## TFTapasForQuestionAnswering [[autodoc]] TFTapasForQuestionAnswering - - call \ No newline at end of file + - call + + + + + diff --git a/docs/source/en/model_doc/tapex.md b/docs/source/en/model_doc/tapex.md index 52234b5c59bc..15ac2463fd85 100644 --- a/docs/source/en/model_doc/tapex.md +++ b/docs/source/en/model_doc/tapex.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. -This model is in maintenance mode only, so we won't accept any new PRs changing its code. +This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0. You can do so by running the following command: `pip install -U transformers==4.30.0`. @@ -49,7 +49,7 @@ on the weakly-supervised WikiSQL denotation accuracy to 89.5% (+2.3%), the WikiT to 74.5% (+3.5%), and the TabFact accuracy to 84.2% (+3.2%). To our knowledge, this is the first work to exploit table pre-training via synthetic executable programs and to achieve new state-of-the-art results on various downstream tasks.* -Tips: +## Usage tips - TAPEX is a generative (seq2seq) model. One can directly plug in the weights of TAPEX into a BART model. - TAPEX has checkpoints on the hub that are either pre-trained only, or fine-tuned on WTQ, SQA, WikiSQL and TabFact. @@ -58,7 +58,7 @@ Tips: - TAPEX has its own tokenizer, that allows to prepare all data for the model easily. One can pass Pandas DataFrames and strings to the tokenizer, and it will automatically create the `input_ids` and `attention_mask` (as shown in the usage examples below). -## Usage: inference +### Usage: inference Below, we illustrate how to use TAPEX for table question answering. As one can see, one can directly plug in the weights of TAPEX into a BART model. We use the [Auto API](auto), which will automatically instantiate the appropriate tokenizer ([`TapexTokenizer`]) and model ([`BartForConditionalGeneration`]) for us, @@ -135,6 +135,12 @@ benchmark for table fact checking (it achieves 84% accuracy). The code example b Refused ``` + + +TAPEX architecture is the same as BART, except for tokenization. Refer to [BART documentation](bart) for information on +configuration classes and their parameters. TAPEX-specific tokenizer is documented below. + + ## TapexTokenizer diff --git a/docs/source/en/model_doc/time_series_transformer.md b/docs/source/en/model_doc/time_series_transformer.md index 208798aa1c68..c5bfcfc15ea2 100644 --- a/docs/source/en/model_doc/time_series_transformer.md +++ b/docs/source/en/model_doc/time_series_transformer.md @@ -16,18 +16,12 @@ rendered properly in your Markdown viewer. # Time Series Transformer - - -This is a recently introduced model so the API hasn't been tested extensively. 
There may be some bugs or slight -breaking changes to fix it in the future. If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title). - - - ## Overview The Time Series Transformer model is a vanilla encoder-decoder Transformer for time series forecasting. +This model was contributed by [kashif](https://huggingface.co/kashif). -Tips: +## Usage tips - Similar to other models in the library, [`TimeSeriesTransformerModel`] is the raw Transformer without any head on top, and [`TimeSeriesTransformerForPrediction`] adds a distribution head on top of the former, which can be used for time-series forecasting. Note that this is a so-called probabilistic forecasting model, not a @@ -56,9 +50,6 @@ of the context as initial input for the decoder). - At inference time, we give the final value of the `past_values` as input to the decoder. Next, we can sample from the model to make a prediction at the next time step, which is then fed to the decoder in order to make the next prediction (also called autoregressive generation). - -This model was contributed by [kashif](https://huggingface.co/kashif). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. @@ -70,13 +61,11 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TimeSeriesTransformerConfig - ## TimeSeriesTransformerModel [[autodoc]] TimeSeriesTransformerModel - forward - ## TimeSeriesTransformerForPrediction [[autodoc]] TimeSeriesTransformerForPrediction diff --git a/docs/source/en/model_doc/timesformer.md b/docs/source/en/model_doc/timesformer.md index d87fde4fb2b3..fe75bee5b289 100644 --- a/docs/source/en/model_doc/timesformer.md +++ b/docs/source/en/model_doc/timesformer.md @@ -25,14 +25,15 @@ The abstract from the paper is the following: *We present a convolution-free approach to video classification built exclusively on self-attention over space and time. Our method, named "TimeSformer," adapts the standard Transformer architecture to video by enabling spatiotemporal feature learning directly from a sequence of frame-level patches. Our experimental study compares different self-attention schemes and suggests that "divided attention," where temporal attention and spatial attention are separately applied within each block, leads to the best video classification accuracy among the design choices considered. Despite the radically new design, TimeSformer achieves state-of-the-art results on several action recognition benchmarks, including the best reported accuracy on Kinetics-400 and Kinetics-600. Finally, compared to 3D convolutional networks, our model is faster to train, it can achieve dramatically higher test efficiency (at a small drop in accuracy), and it can also be applied to much longer video clips (over one minute long). Code and models are available at: [this https URL](https://github.com/facebookresearch/TimeSformer).* -Tips: - -There are many pretrained variants. Select your pretrained model based on the dataset it is trained on. Moreover, the number of input frames per clip changes based on the model size so you should consider this parameter while selecting your pretrained model. 
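Because the number of input frames per clip differs between the pretrained TimeSformer variants, a sketch makes the expected input shape concrete. It assumes the `facebook/timesformer-base-finetuned-k400` checkpoint, which takes 8 frames per clip, and uses random frames in place of a sampled video:

```python
import numpy as np
import torch
from transformers import AutoImageProcessor, TimesformerForVideoClassification

# Assumed checkpoint; other variants expect a different number of frames per clip.
processor = AutoImageProcessor.from_pretrained("facebook/timesformer-base-finetuned-k400")
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")

# 8 random RGB frames standing in for a real video clip.
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="pt")  # pixel_values of shape (1, 8, 3, 224, 224)

with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```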
- This model was contributed by [fcakyon](https://huggingface.co/fcakyon). The original code can be found [here](https://github.com/facebookresearch/TimeSformer). -## Documentation resources +## Usage tips + +There are many pretrained variants. Select your pretrained model based on the dataset it is trained on. Moreover, +the number of input frames per clip changes based on the model size so you should consider this parameter while selecting your pretrained model. + +## Resources - [Video classification task guide](../tasks/video_classification) diff --git a/docs/source/en/model_doc/trajectory_transformer.md b/docs/source/en/model_doc/trajectory_transformer.md index 548642f7bb9f..45616255871a 100644 --- a/docs/source/en/model_doc/trajectory_transformer.md +++ b/docs/source/en/model_doc/trajectory_transformer.md @@ -43,19 +43,18 @@ in offline RL algorithms. We demonstrate the flexibility of this approach across imitation learning, goal-conditioned RL, and offline RL. Further, we show that this approach can be combined with existing model-free algorithms to yield a state-of-the-art planner in sparse-reward, long-horizon tasks.* -Tips: +This model was contributed by [CarlCochet](https://huggingface.co/CarlCochet). The original code can be found [here](https://github.com/jannerm/trajectory-transformer). + +## Usage tips This Transformer is used for deep reinforcement learning. To use it, you need to create sequences from actions, states and rewards from all previous timesteps. This model will treat all these elements together as one big sequence (a trajectory). -This model was contributed by [CarlCochet](https://huggingface.co/CarlCochet). The original code can be found [here](https://github.com/jannerm/trajectory-transformer). - ## TrajectoryTransformerConfig [[autodoc]] TrajectoryTransformerConfig - ## TrajectoryTransformerModel [[autodoc]] TrajectoryTransformerModel diff --git a/docs/source/en/model_doc/transfo-xl.md b/docs/source/en/model_doc/transfo-xl.md index beb5ba2fea83..d75e3a37b990 100644 --- a/docs/source/en/model_doc/transfo-xl.md +++ b/docs/source/en/model_doc/transfo-xl.md @@ -45,7 +45,9 @@ bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens.* -Tips: +This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/kimiyoung/transformer-xl). + +## Usage tips - Transformer-XL uses relative sinusoidal positional embeddings. Padding can be done on the left or on the right. The original implementation trains on SQuAD with padding on the left, therefore the padding defaults are set to left. @@ -54,7 +56,6 @@ Tips: - Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention scores. This allows the model to pay attention to information that was in the previous segment as well as the current one. By stacking multiple attention layers, the receptive field can be increased to multiple previous segments. - This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would give the same results in the current input and the current hidden state at a given position) and needs to make some adjustments in the way attention scores are computed. -This model was contributed by [thomwolf](https://huggingface.co/thomwolf). 
The original code can be found [here](https://github.com/kimiyoung/transformer-xl). @@ -62,7 +63,7 @@ TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyT -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Causal language modeling task guide](../tasks/language_modeling) @@ -86,6 +87,9 @@ TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyT [[autodoc]] models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput + + + ## TransfoXLModel [[autodoc]] TransfoXLModel @@ -101,6 +105,9 @@ TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyT [[autodoc]] TransfoXLForSequenceClassification - forward + + + ## TFTransfoXLModel [[autodoc]] TFTransfoXLModel @@ -116,6 +123,9 @@ TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyT [[autodoc]] TFTransfoXLForSequenceClassification - call + + + ## Internal Layers [[autodoc]] AdaptiveEmbedding diff --git a/docs/source/en/model_doc/trocr.md b/docs/source/en/model_doc/trocr.md index bfab93ad663b..c471a13bbd23 100644 --- a/docs/source/en/model_doc/trocr.md +++ b/docs/source/en/model_doc/trocr.md @@ -43,7 +43,7 @@ Please refer to the [`VisionEncoderDecoder`] class on how to use this model. This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/6f60612e7cc86a2a1ae85c47231507a587ab4e01/trocr). -Tips: +## Usage tips - The quickest way to get started with TrOCR is by checking the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR), which show how to use the model diff --git a/docs/source/en/model_doc/tvlt.md b/docs/source/en/model_doc/tvlt.md index 5ddb6badb71f..f09ea8af863c 100644 --- a/docs/source/en/model_doc/tvlt.md +++ b/docs/source/en/model_doc/tvlt.md @@ -25,14 +25,6 @@ The abstract from the paper is the following: *In this work, we present the Textless Vision-Language Transformer (TVLT), where homogeneous transformer blocks take raw visual and audio inputs for vision-and-language representation learning with minimal modality-specific design, and do not use text-specific modules such as tokenization or automatic speech recognition (ASR). TVLT is trained by reconstructing masked patches of continuous video frames and audio spectrograms (masked autoencoding) and contrastive modeling to align video and audio. TVLT attains performance comparable to its text-based counterpart on various multimodal tasks, such as visual question answering, image retrieval, video retrieval, and multimodal sentiment analysis, with 28x faster inference speed and only 1/3 of the parameters. Our findings suggest the possibility of learning compact and efficient visual-linguistic representations from low-level visual and audio signals without assuming the prior existence of text.* -Tips: - -- TVLT is a model that takes both `pixel_values` and `audio_values` as input. One can use [`TvltProcessor`] to prepare data for the model. - This processor wraps an image processor (for the image/video modality) and an audio feature extractor (for the audio modality) into one. -- TVLT is trained with images/videos and audios of various sizes: the authors resize and crop the input images/videos to 224 and limit the length of audio spectrogram to 2048. 
To make batching of videos and audios possible, the authors use a `pixel_mask` that indicates which pixels are real/padding and `audio_mask` that indicates which audio values are real/padding. -- The design of TVLT is very similar to that of a standard Vision Transformer (ViT) and masked autoencoder (MAE) as in [ViTMAE](vitmae). The difference is that the model includes embedding layers for the audio modality. -- The PyTorch version of this model is only available in torch 1.10 and higher. -

drawing @@ -42,6 +34,14 @@ alt="drawing" width="600"/> The original code can be found [here](https://github.com/zinengtang/TVLT). This model was contributed by [Zineng Tang](https://huggingface.co/ZinengTang). +## Usage tips + +- TVLT is a model that takes both `pixel_values` and `audio_values` as input. One can use [`TvltProcessor`] to prepare data for the model. + This processor wraps an image processor (for the image/video modality) and an audio feature extractor (for the audio modality) into one. +- TVLT is trained with images/videos and audios of various sizes: the authors resize and crop the input images/videos to 224 and limit the length of audio spectrogram to 2048. To make batching of videos and audios possible, the authors use a `pixel_mask` that indicates which pixels are real/padding and `audio_mask` that indicates which audio values are real/padding. +- The design of TVLT is very similar to that of a standard Vision Transformer (ViT) and masked autoencoder (MAE) as in [ViTMAE](vitmae). The difference is that the model includes embedding layers for the audio modality. +- The PyTorch version of this model is only available in torch 1.10 and higher. + ## TvltConfig [[autodoc]] TvltConfig diff --git a/docs/source/en/model_doc/ul2.md b/docs/source/en/model_doc/ul2.md index 3863f23a7d73..f4d01c40b0c1 100644 --- a/docs/source/en/model_doc/ul2.md +++ b/docs/source/en/model_doc/ul2.md @@ -24,12 +24,20 @@ The abstract from the paper is the following: *Existing pre-trained models are generally geared towards a particular class of problems. To date, there seems to be still no consensus on what the right architecture and pre-training setup should be. This paper presents a unified framework for pre-training models that are universally effective across datasets and setups. We begin by disentangling architectural archetypes with pre-training objectives -- two concepts that are commonly conflated. Next, we present a generalized and unified perspective for self-supervision in NLP and show how different pre-training objectives can be cast as one another and how interpolating between different objectives can be effective. We then propose Mixture-of-Denoisers (MoD), a pre-training objective that combines diverse pre-training paradigms together. We furthermore introduce a notion of mode switching, wherein downstream fine-tuning is associated with specific pre-training schemes. We conduct extensive ablative experiments to compare multiple pre-training objectives and find that our method pushes the Pareto-frontier by outperforming T5 and/or GPT-like models across multiple diverse setups. Finally, by scaling our model up to 20B parameters, we achieve SOTA performance on 50 well-established supervised NLP tasks ranging from language generation (with automated and human evaluation), language understanding, text classification, question answering, commonsense reasoning, long text reasoning, structured knowledge grounding and information retrieval. Our model also achieve strong results at in-context learning, outperforming 175B GPT-3 on zero-shot SuperGLUE and tripling the performance of T5-XXL on one-shot summarization.* -Tips: +This model was contributed by [DanielHesslow](https://huggingface.co/Seledorn). The original code can be found [here](https://github.com/google-research/google-research/tree/master/ul2). + +## Usage tips - UL2 is an encoder-decoder model pre-trained on a mixture of denoising functions as well as fine-tuned on an array of downstream tasks. 
- UL2 has the same architecture as [T5v1.1](t5v1.1) but uses the Gated-SiLU activation function instead of Gated-GELU. - The authors release checkpoints of one architecture which can be seen [here](https://huggingface.co/google/ul2) -The original code can be found [here](https://github.com/google-research/google-research/tree/master/ul2). + + +As UL2 has the same architecture as T5v1.1, refer to [T5's documentation page](t5) for API reference, tips, code examples and notebooks. + + + + + -This model was contributed by [DanielHesslow](https://huggingface.co/Seledorn). diff --git a/docs/source/en/model_doc/umt5.md b/docs/source/en/model_doc/umt5.md index 4e6375bd465a..6a7498c24338 100644 --- a/docs/source/en/model_doc/umt5.md +++ b/docs/source/en/model_doc/umt5.md @@ -33,13 +33,6 @@ The abstract from the paper is the following: *Pretrained multilingual large language models have typically used heuristic temperature-based sampling to balance between different languages. However previous work has not systematically evaluated the efficacy of different pretraining language distributions across model scales. In this paper, we propose a new sampling method, UniMax, that delivers more uniform coverage of head languages while mitigating overfitting on tail languages by explicitly capping the number of repeats over each language's corpus. We perform an extensive series of ablations testing a range of sampling strategies on a suite of multilingual benchmarks, while varying model scale. We find that UniMax outperforms standard temperature-based sampling, and the benefits persist as scale increases. As part of our contribution, we release: (i) an improved and refreshed mC4 multilingual corpus consisting of 29 trillion characters across 107 languages, and (ii) a suite of pretrained umT5 model checkpoints trained with UniMax sampling.* -Tips: - -- UMT5 was only pre-trained on [mC4](https://huggingface.co/datasets/mc4) excluding any supervised training. -Therefore, this model has to be fine-tuned before it is usable on a downstream task, unlike the original T5 model. -- Since umT5 was pre-trained in an unsupervise manner, there's no real advantage to using a task prefix during single-task -fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix. - Google has released the following variants: - [google/umt5-small](https://huggingface.co/google/umt5-small) @@ -50,7 +43,12 @@ Google has released the following variants: This model was contributed by [agemagician](https://huggingface.co/agemagician) and [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/google-research/t5x). -One can refer to [T5's documentation page](t5) for more tips, code examples and notebooks. +## Usage tips + +- UMT5 was only pre-trained on [mC4](https://huggingface.co/datasets/mc4) excluding any supervised training. +Therefore, this model has to be fine-tuned before it is usable on a downstream task, unlike the original T5 model. +- Since umT5 was pre-trained in an unsupervise manner, there's no real advantage to using a task prefix during single-task +fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix. ## Differences with mT5? `UmT5` is based on mT5, with a non-shared relative positional bias that is computed for each layer. This means that the model set `has_relative_bias` for each layer. @@ -73,6 +71,11 @@ The conversion script is also different because the model was saved in t5x's lat ['nyone who drink a alcohol A A. 
This I'] ``` + + +Refer to [T5's documentation page](t5) for more tips, code examples and notebooks. + + ## UMT5Config [[autodoc]] UMT5Config diff --git a/docs/source/en/model_doc/unispeech-sat.md b/docs/source/en/model_doc/unispeech-sat.md index 25489d9eeffd..e2a21148115e 100644 --- a/docs/source/en/model_doc/unispeech-sat.md +++ b/docs/source/en/model_doc/unispeech-sat.md @@ -37,7 +37,10 @@ state-of-the-art performance in universal representation learning, especially fo tasks. An ablation study is performed verifying the efficacy of each proposed method. Finally, we scale up training dataset to 94 thousand hours public audio data and achieve further performance improvement in all SUPERB tasks.* -Tips: +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be +found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech-SAT). + +## Usage tips - UniSpeechSat is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. @@ -45,10 +48,7 @@ Tips: decoded using [`Wav2Vec2CTCTokenizer`]. - UniSpeechSat performs especially well on speaker verification, speaker identification, and speaker diarization tasks. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be -found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech-SAT). - -## Documentation resources +## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) diff --git a/docs/source/en/model_doc/unispeech.md b/docs/source/en/model_doc/unispeech.md index 8338aa1bda2e..2b2b13bed52c 100644 --- a/docs/source/en/model_doc/unispeech.md +++ b/docs/source/en/model_doc/unispeech.md @@ -33,17 +33,17 @@ recognition by a maximum of 13.4% and 17.8% relative phone error rate reductions testing languages). The transferability of UniSpeech is also demonstrated on a domain-shift speech recognition task, i.e., a relative word error rate reduction of 6% against the previous approach.* -Tips: +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be +found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech). + +## Usage tips - UniSpeech is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. - UniSpeech model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be -found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech). - -## Documentation resources +## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) diff --git a/docs/source/en/model_doc/upernet.md b/docs/source/en/model_doc/upernet.md index db651acaa406..418c3ef1786b 100644 --- a/docs/source/en/model_doc/upernet.md +++ b/docs/source/en/model_doc/upernet.md @@ -33,17 +33,7 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). 
The original code is based on OpenMMLab's mmsegmentation [here](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/uper_head.py). -## Resources - -A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with UPerNet. - -- Demo notebooks for UPerNet can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UPerNet). -- [`UperNetForSemanticSegmentation`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/semantic-segmentation) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb). -- See also: [Semantic segmentation task guide](../tasks/semantic_segmentation) - -If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. - -## Usage +## Usage examples UPerNet is a general framework for semantic segmentation. It can be used with any vision backbone, like so: @@ -69,6 +59,16 @@ model = UperNetForSemanticSegmentation(config) Note that this will randomly initialize all the weights of the model. +## Resources + +A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with UPerNet. + +- Demo notebooks for UPerNet can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UPerNet). +- [`UperNetForSemanticSegmentation`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/semantic-segmentation) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb). +- See also: [Semantic segmentation task guide](../tasks/semantic_segmentation) + +If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. + ## UperNetConfig [[autodoc]] UperNetConfig diff --git a/docs/source/en/model_doc/van.md b/docs/source/en/model_doc/van.md index b9539602d3b8..83e4959b3016 100644 --- a/docs/source/en/model_doc/van.md +++ b/docs/source/en/model_doc/van.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. -This model is in maintenance mode only, so we won't accept any new PRs changing its code. +This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0. You can do so by running the following command: `pip install -U transformers==4.30.0`. @@ -60,13 +60,11 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] VanConfig - ## VanModel [[autodoc]] VanModel - forward - ## VanForImageClassification [[autodoc]] VanForImageClassification diff --git a/docs/source/en/model_doc/videomae.md b/docs/source/en/model_doc/videomae.md index 5a3620040ad9..75eb9617380c 100644 --- a/docs/source/en/model_doc/videomae.md +++ b/docs/source/en/model_doc/videomae.md @@ -25,11 +25,6 @@ The abstract from the paper is the following: *Pre-training video transformers on extra large-scale datasets is generally required to achieve premier performance on relatively small datasets. 
In this paper, we show that video masked autoencoders (VideoMAE) are data-efficient learners for self-supervised video pre-training (SSVP). We are inspired by the recent ImageMAE and propose customized video tube masking and reconstruction. These simple designs turn out to be effective for overcoming information leakage caused by the temporal correlation during video reconstruction. We obtain three important findings on SSVP: (1) An extremely high proportion of masking ratio (i.e., 90% to 95%) still yields favorable performance of VideoMAE. The temporally redundant video content enables higher masking ratio than that of images. (2) VideoMAE achieves impressive results on very small datasets (i.e., around 3k-4k videos) without using any extra data. This is partially ascribed to the challenging task of video reconstruction to enforce high-level structure learning. (3) VideoMAE shows that data quality is more important than data quantity for SSVP. Domain shift between pre-training and target datasets are important issues in SSVP. Notably, our VideoMAE with the vanilla ViT backbone can achieve 83.9% on Kinects-400, 75.3% on Something-Something V2, 90.8% on UCF101, and 61.1% on HMDB51 without using any extra data.* -Tips: - -- One can use [`VideoMAEImageProcessor`] to prepare videos for the model. It will resize + normalize all frames of a video for you. -- [`VideoMAEForPreTraining`] includes the decoder on top for self-supervised pre-training. - drawing @@ -50,7 +45,6 @@ to fine-tune a VideoMAE model on a custom dataset. - [Video classification task guide](../tasks/video_classification) - [A 🤗 Space](https://huggingface.co/spaces/sayakpaul/video-classification-ucf101-subset) showing how to perform inference with a video classification model. - ## VideoMAEConfig [[autodoc]] VideoMAEConfig @@ -72,6 +66,8 @@ to fine-tune a VideoMAE model on a custom dataset. ## VideoMAEForPreTraining +`VideoMAEForPreTraining` includes the decoder on top for self-supervised pre-training. + [[autodoc]] transformers.VideoMAEForPreTraining - forward diff --git a/docs/source/en/model_doc/vilt.md b/docs/source/en/model_doc/vilt.md index 2e2f4a140d20..2b0ac022da4b 100644 --- a/docs/source/en/model_doc/vilt.md +++ b/docs/source/en/model_doc/vilt.md @@ -34,7 +34,14 @@ Vision-and-Language Transformer (ViLT), monolithic in the sense that the process simplified to just the same convolution-free manner that we process textual inputs. We show that ViLT is up to tens of times faster than previous VLP models, yet with competitive or better downstream task performance.* -Tips: + + + ViLT architecture. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/dandelin/ViLT). + +## Usage tips - The quickest way to get started with ViLT is by checking the [example notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ViLT) (which showcase both inference and fine-tuning on custom data). @@ -45,17 +52,6 @@ Tips: which pixel values are real and which are padding. [`ViltProcessor`] automatically creates this for you. - The design of ViLT is very similar to that of a standard Vision Transformer (ViT). The only difference is that the model includes additional embedding layers for the language modality. - - - - ViLT architecture. Taken from the original paper. - -This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/dandelin/ViLT). 
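To make the `ViltProcessor` workflow from the tips above concrete, here is a minimal visual question answering sketch. It assumes the fine-tuned VQA checkpoint `dandelin/vilt-b32-finetuned-vqa` and a publicly hosted example image; swap both for your own data.

```python
import requests
import torch
from PIL import Image
from transformers import ViltProcessor, ViltForQuestionAnswering

# An arbitrary test image and a free-form question about it
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
question = "How many cats are there?"

# The processor bundles the image processor and the tokenizer into one object
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

# Creates `input_ids`, `pixel_values` (and `pixel_mask` when batching images of different sizes)
inputs = processor(image, question, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# The VQA head scores a fixed vocabulary of candidate answers
print("Predicted answer:", model.config.id2label[logits.argmax(-1).item()])
```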
- - -Tips: - - The PyTorch version of this model is only available in torch 1.10 and higher. ## ViltConfig diff --git a/docs/source/en/model_doc/vision-encoder-decoder.md b/docs/source/en/model_doc/vision-encoder-decoder.md index 0beeaeae108b..89d89896a2e2 100644 --- a/docs/source/en/model_doc/vision-encoder-decoder.md +++ b/docs/source/en/model_doc/vision-encoder-decoder.md @@ -151,20 +151,32 @@ were contributed by [ydshieh](https://github.com/ydshieh). [[autodoc]] VisionEncoderDecoderConfig + + + ## VisionEncoderDecoderModel [[autodoc]] VisionEncoderDecoderModel - forward - from_encoder_decoder_pretrained + + + ## TFVisionEncoderDecoderModel [[autodoc]] TFVisionEncoderDecoderModel - call - from_encoder_decoder_pretrained + + + ## FlaxVisionEncoderDecoderModel [[autodoc]] FlaxVisionEncoderDecoderModel - __call__ - from_encoder_decoder_pretrained + + + diff --git a/docs/source/en/model_doc/vision-text-dual-encoder.md b/docs/source/en/model_doc/vision-text-dual-encoder.md index 6fa9728cac46..7cb68a261875 100644 --- a/docs/source/en/model_doc/vision-text-dual-encoder.md +++ b/docs/source/en/model_doc/vision-text-dual-encoder.md @@ -36,17 +36,29 @@ new zero-shot vision tasks such as image classification or retrieval. [[autodoc]] VisionTextDualEncoderProcessor + + + ## VisionTextDualEncoderModel [[autodoc]] VisionTextDualEncoderModel - forward + + + ## FlaxVisionTextDualEncoderModel [[autodoc]] FlaxVisionTextDualEncoderModel - __call__ + + + ## TFVisionTextDualEncoderModel [[autodoc]] TFVisionTextDualEncoderModel - call + + + diff --git a/docs/source/en/model_doc/visual_bert.md b/docs/source/en/model_doc/visual_bert.md index 7d84c0d9faec..1db218f1a531 100644 --- a/docs/source/en/model_doc/visual_bert.md +++ b/docs/source/en/model_doc/visual_bert.md @@ -32,7 +32,9 @@ simpler. Further analysis demonstrates that VisualBERT can ground elements of la explicit supervision and is even sensitive to syntactic relationships, tracking, for example, associations between verbs and image regions corresponding to their arguments.* -Tips: +This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/uclanlp/visualbert). + +## Usage tips 1. Most of the checkpoints provided work with the [`VisualBertForPreTraining`] configuration. Other checkpoints provided are the fine-tuned checkpoints for down-stream tasks - VQA ('visualbert-vqa'), VCR @@ -43,8 +45,6 @@ Tips: We do not provide the detector and its weights as a part of the package, but it will be available in the research projects, and the states can be loaded directly into the detector provided. -## Usage - VisualBERT is a multi-modal vision and language model. It can be used for visual question answering, multiple choice, visual reasoning and region-to-phrase correspondence tasks. VisualBERT uses a BERT-like transformer to prepare embeddings for image-text pairs. Both the text and visual features are then projected to a latent space with identical @@ -92,8 +92,6 @@ The following example shows how to get the last hidden state using [`VisualBertM >>> last_hidden_state = outputs.last_hidden_state ``` -This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/uclanlp/visualbert). 
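The demo above relies on a project-specific `get_visual_embeddings` helper. As a self-contained sketch of the extra visual inputs and their expected shapes, the snippet below feeds random region features to the VQA head; in real use those features would come from an object detector, and the `uclanlp/visualbert-vqa` checkpoint name is taken from the tips above.

```python
import torch
from transformers import BertTokenizer, VisualBertForQuestionAnswering

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")

# Text inputs are prepared exactly as for BERT
inputs = tokenizer("What is the man holding?", return_tensors="pt")

# Placeholder region features: in practice these come from an object detector
# (e.g. the Faster R-CNN provided in the research projects mentioned above).
num_regions = 36
visual_embeds = torch.randn(1, num_regions, model.config.visual_embedding_dim)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)

inputs.update(
    {
        "visual_embeds": visual_embeds,
        "visual_token_type_ids": visual_token_type_ids,
        "visual_attention_mask": visual_attention_mask,
    }
)

with torch.no_grad():
    outputs = model(**inputs)

# One logit per candidate answer in the VQA answer vocabulary
predicted_answer_idx = outputs.logits.argmax(-1).item()
```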
- ## VisualBertConfig [[autodoc]] VisualBertConfig diff --git a/docs/source/en/model_doc/vit.md b/docs/source/en/model_doc/vit.md index 409580d09481..25c3a6c8f537 100644 --- a/docs/source/en/model_doc/vit.md +++ b/docs/source/en/model_doc/vit.md @@ -24,7 +24,6 @@ Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minder Uszkoreit, Neil Houlsby. It's the first paper that successfully trains a Transformer encoder on ImageNet, attaining very good results compared to familiar convolutional architectures. - The abstract from the paper is the following: *While the Transformer architecture has become the de-facto standard for natural language processing tasks, its @@ -36,30 +35,6 @@ data and transferred to multiple mid-sized or small image recognition benchmarks Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.* -Tips: - -- Demo notebooks regarding inference as well as fine-tuning ViT on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer). -- To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, - which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image, which can be - used for classification. The authors also add absolute position embeddings, and feed the resulting sequence of - vectors to a standard Transformer encoder. -- As the Vision Transformer expects each image to be of the same size (resolution), one can use - [`ViTImageProcessor`] to resize (or rescale) and normalize images for the model. -- Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of - each checkpoint. For example, `google/vit-base-patch16-224` refers to a base-sized architecture with patch - resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=vit). -- The available checkpoints are either (1) pre-trained on [ImageNet-21k](http://www.image-net.org/) (a collection of - 14 million images and 21k classes) only, or (2) also fine-tuned on [ImageNet](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million - images and 1,000 classes). -- The Vision Transformer was pre-trained using a resolution of 224x224. During fine-tuning, it is often beneficial to - use a higher resolution than pre-training [(Touvron et al., 2019)](https://arxiv.org/abs/1906.06423), [(Kolesnikov - et al., 2020)](https://arxiv.org/abs/1912.11370). In order to fine-tune at higher resolution, the authors perform - 2D interpolation of the pre-trained position embeddings, according to their location in the original image. -- The best results are obtained with supervised pre-training, which is not the case in NLP. The authors also performed - an experiment with a self-supervised pre-training objective, namely masked patched prediction (inspired by masked - language modeling). With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant - improvement of 2% to training from scratch, but still 4% behind supervised pre-training. - drawing @@ -87,28 +62,35 @@ Following the original Vision Transformer, some follow-up works have been made: This model was contributed by [nielsr](https://huggingface.co/nielsr). 
The original code (written in JAX) can be found [here](https://github.com/google-research/vision_transformer). -Note that we converted the weights from Ross Wightman's [timm library](https://github.com/rwightman/pytorch-image-models), who already converted the weights from JAX to PyTorch. Credits -go to him! - -## Resources - -A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT. - - - -- [`ViTForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). -- A blog on fine-tuning [`ViTForImageClassification`] on a custom dataset can be found [here](https://huggingface.co/blog/fine-tune-vit). -- More demo notebooks to fine-tune [`ViTForImageClassification`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer). -- [Image classification task guide](../tasks/image_classification) - -Besides that: +Note that we converted the weights from Ross Wightman's [timm library](https://github.com/rwightman/pytorch-image-models), +who already converted the weights from JAX to PyTorch. Credits go to him! -- [`ViTForMaskedImageModeling`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). +## Usage tips -If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. +- To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, + which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image, which can be + used for classification. The authors also add absolute position embeddings, and feed the resulting sequence of + vectors to a standard Transformer encoder. +- As the Vision Transformer expects each image to be of the same size (resolution), one can use + [`ViTImageProcessor`] to resize (or rescale) and normalize images for the model. +- Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of + each checkpoint. For example, `google/vit-base-patch16-224` refers to a base-sized architecture with patch + resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=vit). +- The available checkpoints are either (1) pre-trained on [ImageNet-21k](http://www.image-net.org/) (a collection of + 14 million images and 21k classes) only, or (2) also fine-tuned on [ImageNet](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million + images and 1,000 classes). +- The Vision Transformer was pre-trained using a resolution of 224x224. During fine-tuning, it is often beneficial to + use a higher resolution than pre-training [(Touvron et al., 2019)](https://arxiv.org/abs/1906.06423), [(Kolesnikov + et al., 2020)](https://arxiv.org/abs/1912.11370). In order to fine-tune at higher resolution, the authors perform + 2D interpolation of the pre-trained position embeddings, according to their location in the original image. 
+- The best results are obtained with supervised pre-training, which is not the case in NLP. The authors also performed + an experiment with a self-supervised pre-training objective, namely masked patched prediction (inspired by masked + language modeling). With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant + improvement of 2% to training from scratch, but still 4% behind supervised pre-training. ## Resources +Demo notebooks regarding inference as well as fine-tuning ViT on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer). A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. `ViTForImageClassification` is supported by: @@ -134,7 +116,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - A blog post on [Deploying Hugging Face ViT on Vertex AI](https://huggingface.co/blog/deploy-vertex-ai) - A blog post on [Deploying Hugging Face ViT on Kubernetes with TF Serving](https://huggingface.co/blog/deploy-tfserving-kubernetes) - ## ViTConfig [[autodoc]] ViTConfig @@ -144,12 +125,14 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] ViTFeatureExtractor - __call__ - ## ViTImageProcessor [[autodoc]] ViTImageProcessor - preprocess + + + ## ViTModel [[autodoc]] ViTModel @@ -165,6 +148,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] ViTForImageClassification - forward + + + ## TFViTModel [[autodoc]] TFViTModel @@ -175,6 +161,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFViTForImageClassification - call + + + ## FlaxVitModel [[autodoc]] FlaxViTModel @@ -184,3 +173,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxViTForImageClassification - __call__ + + + diff --git a/docs/source/en/model_doc/vit_hybrid.md b/docs/source/en/model_doc/vit_hybrid.md index 84969cd0f622..52c0d35bc135 100644 --- a/docs/source/en/model_doc/vit_hybrid.md +++ b/docs/source/en/model_doc/vit_hybrid.md @@ -25,7 +25,6 @@ Uszkoreit, Neil Houlsby. It's the first paper that successfully trains a Transfo very good results compared to familiar convolutional architectures. ViT hybrid is a slight variant of the [plain Vision Transformer](vit), by leveraging a convolutional backbone (specifically, [BiT](bit)) whose features are used as initial "tokens" for the Transformer. - The abstract from the paper is the following: *While the Transformer architecture has become the de-facto standard for natural language processing tasks, its @@ -40,7 +39,6 @@ substantially fewer computational resources to train.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code (written in JAX) can be found [here](https://github.com/google-research/vision_transformer). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT Hybrid. @@ -52,7 +50,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! 
The resource should ideally demonstrate something new instead of duplicating an existing resource. - ## ViTHybridConfig [[autodoc]] ViTHybridConfig diff --git a/docs/source/en/model_doc/vit_mae.md b/docs/source/en/model_doc/vit_mae.md index c14cc7e57c90..27d6d26816ae 100644 --- a/docs/source/en/model_doc/vit_mae.md +++ b/docs/source/en/model_doc/vit_mae.md @@ -32,7 +32,15 @@ enables us to train large models efficiently and effectively: we accelerate trai models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pre-training and shows promising scaling behavior.* -Tips: + + + MAE architecture. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). TensorFlow version of the model was contributed by [sayakpaul](https://github.com/sayakpaul) and +[ariG23498](https://github.com/ariG23498) (equal contribution). The original code can be found [here](https://github.com/facebookresearch/mae). + +## Usage tips - MAE (masked auto encoding) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training objective is relatively simple: by masking a large portion (75%) of the image patches, the model must reconstruct raw pixel values. One can use [`ViTMAEForPreTraining`] for this purpose. @@ -44,14 +52,6 @@ consists of Transformer blocks) takes as input. Each mask token is a shared, lea sin/cos position embeddings are added both to the input of the encoder and the decoder. - For a visual understanding of how MAEs work you can check out this [post](https://keras.io/examples/vision/masked_image_modeling/). - - - MAE architecture. Taken from the original paper. - -This model was contributed by [nielsr](https://huggingface.co/nielsr). TensorFlow version of the model was contributed by [sayakpaul](https://github.com/sayakpaul) and -[ariG23498](https://github.com/ariG23498) (equal contribution). The original code can be found [here](https://github.com/facebookresearch/mae). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViTMAE. @@ -65,26 +65,31 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] ViTMAEConfig + + ## ViTMAEModel [[autodoc]] ViTMAEModel - forward - ## ViTMAEForPreTraining [[autodoc]] transformers.ViTMAEForPreTraining - forward + + ## TFViTMAEModel [[autodoc]] TFViTMAEModel - call - ## TFViTMAEForPreTraining [[autodoc]] transformers.TFViTMAEForPreTraining - call + + + diff --git a/docs/source/en/model_doc/vit_msn.md b/docs/source/en/model_doc/vit_msn.md index ded0245194f8..666b7dd0dfda 100644 --- a/docs/source/en/model_doc/vit_msn.md +++ b/docs/source/en/model_doc/vit_msn.md @@ -33,7 +33,13 @@ while producing representations of a high semantic level that perform competitiv on ImageNet-1K, with only 5,000 annotated images, our base MSN model achieves 72.4% top-1 accuracy, and with 1% of ImageNet-1K labels, we achieve 75.7% top-1 accuracy, setting a new state-of-the-art for self-supervised learning on this benchmark.* -Tips: +drawing + + MSN architecture. Taken from the original paper. + +This model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code can be found [here](https://github.com/facebookresearch/msn). + +## Usage tips - MSN (masked siamese networks) is a method for self-supervised pre-training of Vision Transformers (ViTs). 
The pre-training objective is to match the prototypes assigned to the unmasked views of the images to that of the masked views of the same images. @@ -43,13 +49,6 @@ use the [`ViTMSNForImageClassification`] class which is initialized from [`ViTMS - MSN is particularly useful in the low-shot and extreme low-shot regimes. Notably, it achieves 75.7% top-1 accuracy with only 1% of ImageNet-1K labels when fine-tuned. - -drawing - - MSN architecture. Taken from the original paper. - -This model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code can be found [here](https://github.com/facebookresearch/msn). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT MSN. @@ -65,13 +64,11 @@ If you're interested in submitting a resource to be included here, please feel f [[autodoc]] ViTMSNConfig - ## ViTMSNModel [[autodoc]] ViTMSNModel - forward - ## ViTMSNForImageClassification [[autodoc]] ViTMSNForImageClassification diff --git a/docs/source/en/model_doc/vitdet.md b/docs/source/en/model_doc/vitdet.md index 657e467ee319..81bf787d6cda 100644 --- a/docs/source/en/model_doc/vitdet.md +++ b/docs/source/en/model_doc/vitdet.md @@ -21,13 +21,12 @@ The abstract from the paper is the following: *We explore the plain, non-hierarchical Vision Transformer (ViT) as a backbone network for object detection. This design enables the original ViT architecture to be fine-tuned for object detection without needing to redesign a hierarchical backbone for pre-training. With minimal adaptations for fine-tuning, our plain-backbone detector can achieve competitive results. Surprisingly, we observe: (i) it is sufficient to build a simple feature pyramid from a single-scale feature map (without the common FPN design) and (ii) it is sufficient to use window attention (without shifting) aided with very few cross-window propagation blocks. With plain ViT backbones pre-trained as Masked Autoencoders (MAE), our detector, named ViTDet, can compete with the previous leading methods that were all based on hierarchical backbones, reaching up to 61.3 AP_box on the COCO dataset using only ImageNet-1K pre-training. We hope our study will draw attention to research on plain-backbone detectors.* -Tips: - -- For the moment, only the backbone is available. - This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/detectron2/tree/main/projects/ViTDet). +Tips: + +- At the moment, only the backbone is available. ## VitDetConfig diff --git a/docs/source/en/model_doc/vitmatte.md b/docs/source/en/model_doc/vitmatte.md index 479b398f8066..5a6d501030fc 100644 --- a/docs/source/en/model_doc/vitmatte.md +++ b/docs/source/en/model_doc/vitmatte.md @@ -21,10 +21,6 @@ The abstract from the paper is the following: *Recently, plain vision Transformers (ViTs) have shown impressive performance on various computer vision tasks, thanks to their strong modeling capacity and large-scale pretraining. However, they have not yet conquered the problem of image matting. We hypothesize that image matting could also be boosted by ViTs and present a new efficient and robust ViT-based matting system, named ViTMatte. Our method utilizes (i) a hybrid attention mechanism combined with a convolution neck to help ViTs achieve an excellent performance-computation trade-off in matting tasks. 
(ii) Additionally, we introduce the detail capture module, which just consists of simple lightweight convolutions to complement the detailed information required by matting. To the best of our knowledge, ViTMatte is the first work to unleash the potential of ViT on image matting with concise adaptation. It inherits many superior properties from ViT to matting, including various pretraining strategies, concise architecture design, and flexible inference strategies. We evaluate ViTMatte on Composition-1k and Distinctions-646, the most commonly used benchmark for image matting, our method achieves state-of-the-art performance and outperforms prior matting works by a large margin.* -Tips: - -- The model expects both the image and trimap (concatenated) as input. One can use [`ViTMatteImageProcessor`] for this purpose. - This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/hustvl/ViTMatte). @@ -39,6 +35,10 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - A demo notebook regarding inference with [`VitMatteForImageMatting`], including background replacement, can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ViTMatte). + + +The model expects both the image and trimap (concatenated) as input. Use [`ViTMatteImageProcessor`] for this purpose. + ## VitMatteConfig diff --git a/docs/source/en/model_doc/vits.md b/docs/source/en/model_doc/vits.md index 1b57df4027dd..73001d82ed56 100644 --- a/docs/source/en/model_doc/vits.md +++ b/docs/source/en/model_doc/vits.md @@ -16,7 +16,6 @@ specific language governing permissions and limitations under the License. The VITS model was proposed in [Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech](https://arxiv.org/abs/2106.06103) by Jaehyeon Kim, Jungil Kong, Juhee Son. - VITS (**V**ariational **I**nference with adversarial learning for end-to-end **T**ext-to-**S**peech) is an end-to-end speech synthesis model that predicts a speech waveform conditional on an input text sequence. It is a conditional variational autoencoder (VAE) comprised of a posterior encoder, decoder, and conditional prior. @@ -42,7 +41,7 @@ as these checkpoints use the same architecture and a slightly modified tokenizer This model was contributed by [Matthijs](https://huggingface.co/Matthijs) and [sanchit-gandhi](https://huggingface.co/sanchit-gandhi). The original code can be found [here](https://github.com/jaywalnut310/vits). -## Model Usage +## Usage examples Both the VITS and MMS-TTS checkpoints can be used with the same API. Since the flow-based model is non-deterministic, it is good practice to set a seed to ensure reproducibility of the outputs. For languages with a Roman alphabet, diff --git a/docs/source/en/model_doc/vivit.md b/docs/source/en/model_doc/vivit.md index 755629a76752..4426493a0ff5 100644 --- a/docs/source/en/model_doc/vivit.md +++ b/docs/source/en/model_doc/vivit.md @@ -21,7 +21,6 @@ The abstract from the paper is the following: *We present pure-transformer based models for video classification, drawing upon the recent success of such models in image classification. Our model extracts spatio-temporal tokens from the input video, which are then encoded by a series of transformer layers. In order to handle the long sequences of tokens encountered in video, we propose several, efficient variants of our model which factorise the spatial- and temporal-dimensions of the input. 
Although transformer-based models are known to only be effective when large training datasets are available, we show how we can effectively regularise the model during training and leverage pretrained image models to be able to train on comparatively small datasets. We conduct thorough ablation studies, and achieve state-of-the-art results on multiple video classification benchmarks including Kinetics 400 and 600, Epic Kitchens, Something-Something v2 and Moments in Time, outperforming prior methods based on deep 3D convolutional networks.* - This model was contributed by [jegormeister](https://huggingface.co/jegormeister). The original code (written in JAX) can be found [here](https://github.com/google-research/scenic/tree/main/scenic/projects/vivit). ## VivitConfig diff --git a/docs/source/en/model_doc/wav2vec2-conformer.md b/docs/source/en/model_doc/wav2vec2-conformer.md index 87e255cd0c6e..c32c03bb0cb7 100644 --- a/docs/source/en/model_doc/wav2vec2-conformer.md +++ b/docs/source/en/model_doc/wav2vec2-conformer.md @@ -24,7 +24,10 @@ The official results of the model can be found in Table 3 and Table 4 of the pap The Wav2Vec2-Conformer weights were released by the Meta AI team within the [Fairseq library](https://github.com/pytorch/fairseq/blob/main/examples/wav2vec/README.md#pre-trained-models). -Tips: +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). +The original code can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec). + +## Usage tips - Wav2Vec2-Conformer follows the same architecture as Wav2Vec2, but replaces the *Attention*-block with a *Conformer*-block as introduced in [Conformer: Convolution-augmented Transformer for Speech Recognition](https://arxiv.org/abs/2005.08100). @@ -34,10 +37,7 @@ an improved word error rate. - Wav2Vec2-Conformer can use either no relative position embeddings, Transformer-XL-like position embeddings, or rotary position embeddings by setting the correct `config.position_embeddings_type`. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). -The original code can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec). - -## Documentation resources +## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) diff --git a/docs/source/en/model_doc/wav2vec2.md b/docs/source/en/model_doc/wav2vec2.md index 3a67f66d9d1f..81d8f332aced 100644 --- a/docs/source/en/model_doc/wav2vec2.md +++ b/docs/source/en/model_doc/wav2vec2.md @@ -31,14 +31,14 @@ of the art on the 100 hour subset while using 100 times less labeled data. Using pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.* -Tips: +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). + +## Usage tips - Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). - ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Wav2Vec2. 
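Before diving into those resources, here is a minimal sketch of the workflow described in the usage tips above: the processor turns a raw float waveform into model inputs and the CTC output is decoded with the tokenizer. The `facebook/wav2vec2-base-960h` checkpoint and the tiny LibriSpeech test sample are illustrative placeholders for your own audio.

```python
import torch
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# Any 16 kHz mono waveform works; here we take one sample from a tiny test dataset
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = dataset[0]["audio"]

# The processor turns the raw float array into padded, normalized model inputs
inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# CTC decoding: pick the most likely token per frame, then collapse repeats and blanks
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)[0]
print(transcription)
```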
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. @@ -167,6 +167,9 @@ Otherwise, [`~Wav2Vec2ProcessorWithLM.batch_decode`] performance will be slower [[autodoc]] models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput + + + ## Wav2Vec2Model [[autodoc]] Wav2Vec2Model @@ -198,6 +201,9 @@ Otherwise, [`~Wav2Vec2ProcessorWithLM.batch_decode`] performance will be slower [[autodoc]] Wav2Vec2ForPreTraining - forward + + + ## TFWav2Vec2Model [[autodoc]] TFWav2Vec2Model @@ -213,6 +219,9 @@ Otherwise, [`~Wav2Vec2ProcessorWithLM.batch_decode`] performance will be slower [[autodoc]] TFWav2Vec2ForCTC - call + + + ## FlaxWav2Vec2Model [[autodoc]] FlaxWav2Vec2Model @@ -227,3 +236,6 @@ Otherwise, [`~Wav2Vec2ProcessorWithLM.batch_decode`] performance will be slower [[autodoc]] FlaxWav2Vec2ForPreTraining - __call__ + + + diff --git a/docs/source/en/model_doc/wav2vec2_phoneme.md b/docs/source/en/model_doc/wav2vec2_phoneme.md index a852bef637b2..93e0656f493c 100644 --- a/docs/source/en/model_doc/wav2vec2_phoneme.md +++ b/docs/source/en/model_doc/wav2vec2_phoneme.md @@ -31,7 +31,13 @@ mapping phonemes of the training languages to the target language using articula this simple method significantly outperforms prior work which introduced task-specific architectures and used only part of a monolingually pretrained model.* -Tips: +Relevant checkpoints can be found under https://huggingface.co/models?other=phoneme-recognition. + +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten) + +The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec). + +## Usage tips - Wav2Vec2Phoneme uses the exact same architecture as Wav2Vec2 - Wav2Vec2Phoneme is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. @@ -39,17 +45,16 @@ Tips: decoded using [`Wav2Vec2PhonemeCTCTokenizer`]. - Wav2Vec2Phoneme can be fine-tuned on multiple language at once and decode unseen languages in a single forward pass to a sequence of phonemes -- By default the model outputs a sequence of phonemes. In order to transform the phonemes to a sequence of words one +- By default, the model outputs a sequence of phonemes. In order to transform the phonemes to a sequence of words one should make use of a dictionary and language model. -Relevant checkpoints can be found under https://huggingface.co/models?other=phoneme-recognition. -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten) - -The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec). + -Wav2Vec2Phoneme's architecture is based on the Wav2Vec2 model, so one can refer to [`Wav2Vec2`]'s documentation page except for the tokenizer. +Wav2Vec2Phoneme's architecture is based on the Wav2Vec2 model, for API reference, check out [`Wav2Vec2`](wav2vec2)'s documentation page +except for the tokenizer. + ## Wav2Vec2PhonemeCTCTokenizer diff --git a/docs/source/en/model_doc/wavlm.md b/docs/source/en/model_doc/wavlm.md index 2754304d8264..13f62980756d 100644 --- a/docs/source/en/model_doc/wavlm.md +++ b/docs/source/en/model_doc/wavlm.md @@ -35,7 +35,12 @@ additional overlapped utterances are created unsupervisely and incorporated duri the training dataset from 60k hours to 94k hours. 
WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.* -Tips: +Relevant checkpoints can be found under https://huggingface.co/models?other=wavlm. + +This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be +found [here](https://github.com/microsoft/unilm/tree/master/wavlm). + +## Usage tips - WavLM is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. @@ -43,12 +48,7 @@ Tips: using [`Wav2Vec2CTCTokenizer`]. - WavLM performs especially well on speaker verification, speaker identification, and speaker diarization tasks. -Relevant checkpoints can be found under https://huggingface.co/models?other=wavlm. - -This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be -found [here](https://github.com/microsoft/unilm/tree/master/wavlm). - -## Documentation resources +## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) diff --git a/docs/source/en/model_doc/whisper.md b/docs/source/en/model_doc/whisper.md index 2f1cfc5e22b3..4ea7e943813b 100644 --- a/docs/source/en/model_doc/whisper.md +++ b/docs/source/en/model_doc/whisper.md @@ -24,18 +24,16 @@ The abstract from the paper is the following: *We study the capabilities of speech processing systems trained simply to predict large amounts of transcripts of audio on the internet. When scaled to 680,000 hours of multilingual and multitask supervision, the resulting models generalize well to standard benchmarks and are often competitive with prior fully supervised results but in a zeroshot transfer setting without the need for any finetuning. When compared to humans, the models approach their accuracy and robustness. We are releasing models and inference code to serve as a foundation for further work on robust speech processing.* +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). +The original code can be found [here](https://github.com/openai/whisper). -Tips: +## Usage tips - The model usually performs well without requiring any finetuning. - The architecture follows a classic encoder-decoder architecture, which means that it relies on the [`~generation.GenerationMixin.generate`] function for inference. - Inference is currently only implemented for short-form i.e. audio is pre-segmented into <=30s segments. Long-form (including timestamps) will be implemented in a future release. - One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted ID's back into text. -This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). -The original code can be found [here](https://github.com/openai/whisper). - - ## WhisperConfig [[autodoc]] WhisperConfig @@ -76,6 +74,9 @@ The original code can be found [here](https://github.com/openai/whisper). - batch_decode - decode + + + ## WhisperModel [[autodoc]] WhisperModel @@ -98,6 +99,8 @@ The original code can be found [here](https://github.com/openai/whisper). 
[[autodoc]] WhisperForAudioClassification - forward + + ## TFWhisperModel @@ -109,6 +112,8 @@ The original code can be found [here](https://github.com/openai/whisper). [[autodoc]] TFWhisperForConditionalGeneration - call + + ## FlaxWhisperModel @@ -125,3 +130,6 @@ The original code can be found [here](https://github.com/openai/whisper). [[autodoc]] FlaxWhisperForAudioClassification - __call__ + + + diff --git a/docs/source/en/model_doc/xglm.md b/docs/source/en/model_doc/xglm.md index 1b184c17e803..470e42c747be 100644 --- a/docs/source/en/model_doc/xglm.md +++ b/docs/source/en/model_doc/xglm.md @@ -42,7 +42,7 @@ in social value tasks such as hate speech detection in five languages and find i This model was contributed by [Suraj](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/xglm). -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) @@ -62,6 +62,9 @@ This model was contributed by [Suraj](https://huggingface.co/valhalla). The orig [[autodoc]] XGLMTokenizerFast + + + ## XGLMModel [[autodoc]] XGLMModel @@ -72,6 +75,9 @@ This model was contributed by [Suraj](https://huggingface.co/valhalla). The orig [[autodoc]] XGLMForCausalLM - forward + + + ## TFXGLMModel [[autodoc]] TFXGLMModel @@ -82,6 +88,9 @@ This model was contributed by [Suraj](https://huggingface.co/valhalla). The orig [[autodoc]] TFXGLMForCausalLM - call + + + ## FlaxXGLMModel [[autodoc]] FlaxXGLMModel @@ -90,4 +99,7 @@ This model was contributed by [Suraj](https://huggingface.co/valhalla). The orig ## FlaxXGLMForCausalLM [[autodoc]] FlaxXGLMForCausalLM - - __call__ \ No newline at end of file + - __call__ + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/xlm-prophetnet.md b/docs/source/en/model_doc/xlm-prophetnet.md index 5e7ba5b7e3f5..7a61aeb3e34a 100644 --- a/docs/source/en/model_doc/xlm-prophetnet.md +++ b/docs/source/en/model_doc/xlm-prophetnet.md @@ -36,7 +36,7 @@ Zhang, Ming Zhou on 13 Jan, 2020. XLM-ProphetNet is an encoder-decoder model and can predict n-future tokens for "ngram" language modeling instead of just the next token. Its architecture is identical to ProhpetNet, but the model was trained on the multi-lingual -"wiki100" Wikipedia dump. +"wiki100" Wikipedia dump. XLM-ProphetNet's model architecture and pretraining objective is same as ProphetNet, but XLM-ProphetNet was pre-trained on the cross-lingual dataset XGLUE. The abstract from the paper is the following: @@ -52,11 +52,7 @@ state-of-the-art results on all these datasets compared to the models using the The Authors' code can be found [here](https://github.com/microsoft/ProphetNet). -Tips: - -- XLM-ProphetNet's model architecture and pretraining objective is same as ProphetNet, but XLM-ProphetNet was pre-trained on the cross-lingual dataset XGLUE. - -## Documentation resources +## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) diff --git a/docs/source/en/model_doc/xlm-roberta-xl.md b/docs/source/en/model_doc/xlm-roberta-xl.md index b65929460706..f9cb78c0bf4e 100644 --- a/docs/source/en/model_doc/xlm-roberta-xl.md +++ b/docs/source/en/model_doc/xlm-roberta-xl.md @@ -24,15 +24,15 @@ The abstract from the paper is the following: *Recent work has demonstrated the effectiveness of cross-lingual language model pretraining for cross-lingual understanding. 
In this study, we present the results of two larger multilingual masked language models, with 3.5B and 10.7B parameters. Our two new models dubbed XLM-R XL and XLM-R XXL outperform XLM-R by 1.8% and 2.4% average accuracy on XNLI. Our model also outperforms the RoBERTa-Large model on several English tasks of the GLUE benchmark by 0.3% on average while handling 99 more languages. This suggests pretrained models with larger capacity may obtain both strong performance on high-resource languages while greatly improving low-resource languages. We make our code and models publicly available.* -Tips: +This model was contributed by [Soonhwan-Kwon](https://github.com/Soonhwan-Kwon) and [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/xlmr). -- XLM-RoBERTa-XL is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does - not require `lang` tensors to understand which language is used, and should be able to determine the correct - language from the input ids. +## Usage tips -This model was contributed by [Soonhwan-Kwon](https://github.com/Soonhwan-Kwon) and [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/xlmr). +XLM-RoBERTa-XL is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does +not require `lang` tensors to understand which language is used, and should be able to determine the correct +language from the input ids. -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/xlm-roberta.md b/docs/source/en/model_doc/xlm-roberta.md index 935003156fd1..58540015232e 100644 --- a/docs/source/en/model_doc/xlm-roberta.md +++ b/docs/source/en/model_doc/xlm-roberta.md @@ -46,16 +46,14 @@ languages at scale. Finally, we show, for the first time, the possibility of mul per-language performance; XLM-Ris very competitive with strong monolingual models on the GLUE and XNLI benchmarks. We will make XLM-R code, data, and models publicly available.* -Tips: +This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/xlmr). + +## Usage tips - XLM-RoBERTa is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does not require `lang` tensors to understand which language is used, and should be able to determine the correct language from the input ids. - Uses RoBERTa tricks on the XLM approach, but does not use the translation language modeling objective. It only uses masked language modeling on sentences coming from one language. -- This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples - as well as the information relative to the inputs and outputs. - -This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/xlmr). 
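A small sketch of the point above: the same XLM-RoBERTa checkpoint handles text in different languages without any `lang` tensor. The example sentences are arbitrary.

```python
from transformers import pipeline

# One checkpoint, several languages, no language identifier anywhere
unmasker = pipeline("fill-mask", model="xlm-roberta-base")

print(unmasker("Hello, I'm a <mask> model.")[0]["token_str"])
print(unmasker("Bonjour, je suis un modèle <mask>.")[0]["token_str"])
```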
## Resources @@ -110,6 +108,11 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - A blog post on how to [Deploy Serverless XLM RoBERTa on AWS Lambda](https://www.philschmid.de/multilingual-serverless-xlm-roberta-with-huggingface). + + +This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples as well as the information relative to the inputs and outputs. + + ## XLMRobertaConfig [[autodoc]] XLMRobertaConfig @@ -126,6 +129,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] XLMRobertaTokenizerFast + + + ## XLMRobertaModel [[autodoc]] XLMRobertaModel @@ -161,6 +167,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] XLMRobertaForQuestionAnswering - forward + + + ## TFXLMRobertaModel [[autodoc]] TFXLMRobertaModel @@ -196,6 +205,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] TFXLMRobertaForQuestionAnswering - call + + + ## FlaxXLMRobertaModel [[autodoc]] FlaxXLMRobertaModel @@ -230,3 +242,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h [[autodoc]] FlaxXLMRobertaForQuestionAnswering - __call__ + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/xlm-v.md b/docs/source/en/model_doc/xlm-v.md index 38bed0dc46b5..049a1f35ad9a 100644 --- a/docs/source/en/model_doc/xlm-v.md +++ b/docs/source/en/model_doc/xlm-v.md @@ -35,7 +35,10 @@ a multilingual language model with a one million token vocabulary. XLM-V outperf tested on ranging from natural language inference (XNLI), question answering (MLQA, XQuAD, TyDiQA), and named entity recognition (WikiAnn) to low-resource tasks (Americas NLI, MasakhaNER).* -Tips: +This model was contributed by [stefan-it](https://huggingface.co/stefan-it), including detailed experiments with XLM-V on downstream tasks. +The experiments repository can be found [here](https://github.com/stefan-it/xlm-v-experiments). + +## Usage tips - XLM-V is compatible with the XLM-RoBERTa model architecture, only model weights from [`fairseq`](https://github.com/facebookresearch/fairseq) library had to be converted. @@ -43,5 +46,7 @@ Tips: A XLM-V (base size) model is available under the [`facebook/xlm-v-base`](https://huggingface.co/facebook/xlm-v-base) identifier. -This model was contributed by [stefan-it](https://huggingface.co/stefan-it), including detailed experiments with XLM-V on downstream tasks. -The experiments repository can be found [here](https://github.com/stefan-it/xlm-v-experiments). + + +XLM-V architecture is the same as XLM-RoBERTa, refer to [XLM-RoBERTa documentation](xlm-roberta) for API reference, and examples. + \ No newline at end of file diff --git a/docs/source/en/model_doc/xlm.md b/docs/source/en/model_doc/xlm.md index 8b5b31a2dbef..0ee11c6addc5 100644 --- a/docs/source/en/model_doc/xlm.md +++ b/docs/source/en/model_doc/xlm.md @@ -46,7 +46,9 @@ obtain 34.3 BLEU on WMT'16 German-English, improving the previous state of the a machine translation, we obtain a new state of the art of 38.5 BLEU on WMT'16 Romanian-English, outperforming the previous best approach by more than 4 BLEU. Our code and pretrained models will be made publicly available.* -Tips: +This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/facebookresearch/XLM/). 
+ +## Usage tips - XLM has many different checkpoints, which were trained using different objectives: CLM, MLM or TLM. Make sure to select the correct objective for your task (e.g. MLM checkpoints are not suitable for generation). @@ -57,9 +59,7 @@ Tips: * Masked language modeling (MLM) which is like RoBERTa. One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages, with dynamic masking of the tokens. * A combination of MLM and translation language modeling (TLM). This consists of concatenating a sentence in two different languages, with random masking. To predict one of the masked tokens, the model can use both, the surrounding context in language 1 and the context given by language 2. -This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/facebookresearch/XLM/). - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -84,6 +84,9 @@ This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The o [[autodoc]] models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput + + + ## XLMModel [[autodoc]] XLMModel @@ -119,6 +122,9 @@ This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The o [[autodoc]] XLMForQuestionAnswering - forward + + + ## TFXLMModel [[autodoc]] TFXLMModel @@ -148,3 +154,8 @@ This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The o [[autodoc]] TFXLMForQuestionAnsweringSimple - call + + + + + diff --git a/docs/source/en/model_doc/xlnet.md b/docs/source/en/model_doc/xlnet.md index 3685728cd72e..d2209c3d550e 100644 --- a/docs/source/en/model_doc/xlnet.md +++ b/docs/source/en/model_doc/xlnet.md @@ -44,7 +44,9 @@ formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state- pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.* -Tips: +This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/zihangdai/xlnet/). + +## Usage tips - The specific attention pattern can be controlled at training and test time using the `perm_mask` input. - Due to the difficulty of training a fully auto-regressive model over various factorization order, XLNet is pretrained @@ -56,9 +58,7 @@ Tips: - XLNet is not a traditional autoregressive model but uses a training strategy that builds on that. It permutes the tokens in the sentence, then allows the model to use the last n tokens to predict the token n+1. Since this is all done with a mask, the sentence is actually fed in the model in the right order, but instead of masking the first n tokens for n+1, XLNet uses a mask that hides the previous tokens in some given permutation of 1,…,sequence length. - XLNet also uses the same recurrence mechanism as Transformer-XL to build long-term dependencies. -This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/zihangdai/xlnet/). 
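The sketch below illustrates the `perm_mask`/`target_mapping` inputs mentioned in the tips above by predicting a single masked token with `xlnet-base-cased`. It mirrors the pattern used in the library's docstrings and is meant as a starting point rather than a recipe.

```python
import torch
from transformers import XLNetLMHeadModel, XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
model = XLNetLMHeadModel.from_pretrained("xlnet-base-cased")

input_ids = torch.tensor(
    tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
).unsqueeze(0)

# perm_mask[b, i, j] = 1.0 means token i may NOT attend to token j:
# here no token is allowed to see the last (masked) position.
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0

# target_mapping selects the positions we actually want predictions for (only the last token)
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
target_mapping[0, 0, -1] = 1.0

with torch.no_grad():
    outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)

# Shape (batch_size, num_targets, vocab_size): one distribution per requested position
next_token_logits = outputs.logits
```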
- -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -110,6 +110,9 @@ This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The o [[autodoc]] models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput + + + ## XLNetModel [[autodoc]] XLNetModel @@ -145,6 +148,9 @@ This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The o [[autodoc]] XLNetForQuestionAnswering - forward + + + ## TFXLNetModel [[autodoc]] TFXLNetModel @@ -174,3 +180,6 @@ This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The o [[autodoc]] TFXLNetForQuestionAnsweringSimple - call + + + \ No newline at end of file diff --git a/docs/source/en/model_doc/xls_r.md b/docs/source/en/model_doc/xls_r.md index 8e22004244ca..2226c813e72b 100644 --- a/docs/source/en/model_doc/xls_r.md +++ b/docs/source/en/model_doc/xls_r.md @@ -34,14 +34,18 @@ language identification. Moreover, we show that with sufficient model size, cros English-only pretraining when translating English speech into other languages, a setting which favors monolingual pretraining. We hope XLS-R can help to improve speech processing tasks for many more languages of the world.* -Tips: +Relevant checkpoints can be found under https://huggingface.co/models?other=xls_r. + +The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec). + +## Usage tips - XLS-R is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - XLS-R model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. -Relevant checkpoints can be found under https://huggingface.co/models?other=xls_r. + -XLS-R's architecture is based on the Wav2Vec2 model, so one can refer to [Wav2Vec2's documentation page](wav2vec2). +XLS-R's architecture is based on the Wav2Vec2 model, refer to [Wav2Vec2's documentation page](wav2vec2) for API reference. -The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec). + \ No newline at end of file diff --git a/docs/source/en/model_doc/xlsr_wav2vec2.md b/docs/source/en/model_doc/xlsr_wav2vec2.md index 643d37416d38..d1b5444c2469 100644 --- a/docs/source/en/model_doc/xlsr_wav2vec2.md +++ b/docs/source/en/model_doc/xlsr_wav2vec2.md @@ -34,12 +34,16 @@ individual models. Analysis shows that the latent discrete speech representation increased sharing for related languages. We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages.* -Tips: +The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec). + +## Usage tips - XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. + + XLSR-Wav2Vec2's architecture is based on the Wav2Vec2 model, so one can refer to [Wav2Vec2's documentation page](wav2vec2). -The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec). 
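For example, because the architecture is plain Wav2Vec2, a pretrained multilingual checkpoint can be loaded with the standard classes for feature extraction. This is only a sketch: the `facebook/wav2vec2-large-xlsr-53` checkpoint name is assumed to ship a preprocessor config, and the silent dummy waveform stands in for real 16 kHz audio.

```python
import torch
from transformers import AutoFeatureExtractor, Wav2Vec2Model

# The pretrained multilingual checkpoint loads with the plain Wav2Vec2 classes
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53")
model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-large-xlsr-53")

# One second of silent 16 kHz audio, purely to keep the sketch self-contained
waveform = [0.0] * 16000
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (batch, frames, hidden_size)
```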
+ diff --git a/docs/source/en/model_doc/xmod.md b/docs/source/en/model_doc/xmod.md index 5a3409bbc4c3..47797fa64902 100644 --- a/docs/source/en/model_doc/xmod.md +++ b/docs/source/en/model_doc/xmod.md @@ -25,13 +25,15 @@ The abstract from the paper is the following: *Multilingual pre-trained models are known to suffer from the curse of multilinguality, which causes per-language performance to drop as they cover more languages. We address this issue by introducing language-specific modules, which allows us to grow the total capacity of the model, while keeping the total number of trainable parameters per language constant. In contrast with prior work that learns language-specific components post-hoc, we pre-train the modules of our Cross-lingual Modular (X-MOD) models from the start. Our experiments on natural language inference, named entity recognition and question answering show that our approach not only mitigates the negative interference between languages, but also enables positive transfer, resulting in improved monolingual and cross-lingual performance. Furthermore, our approach enables adding languages post-hoc with no measurable drop in performance, no longer limiting the model usage to the set of pre-trained languages.* +This model was contributed by [jvamvas](https://huggingface.co/jvamvas). +The original code can be found [here](https://github.com/facebookresearch/fairseq/tree/58cc6cca18f15e6d56e3f60c959fe4f878960a60/fairseq/models/xmod) and the original documentation is found [here](https://github.com/facebookresearch/fairseq/tree/58cc6cca18f15e6d56e3f60c959fe4f878960a60/examples/xmod). + +## Usage tips + Tips: - X-MOD is similar to [XLM-R](xlm-roberta), but a difference is that the input language needs to be specified so that the correct language adapter can be activated. - The main models – base and large – have adapters for 81 languages. -This model was contributed by [jvamvas](https://huggingface.co/jvamvas). -The original code can be found [here](https://github.com/facebookresearch/fairseq/tree/58cc6cca18f15e6d56e3f60c959fe4f878960a60/fairseq/models/xmod) and the original documentation is found [here](https://github.com/facebookresearch/fairseq/tree/58cc6cca18f15e6d56e3f60c959fe4f878960a60/examples/xmod). - ## Adapter Usage ### Input language diff --git a/docs/source/en/model_doc/yolos.md b/docs/source/en/model_doc/yolos.md index 6185c3a06757..5386c373ac83 100644 --- a/docs/source/en/model_doc/yolos.md +++ b/docs/source/en/model_doc/yolos.md @@ -25,10 +25,6 @@ The abstract from the paper is the following: *Can Transformer perform 2D object- and region-level recognition from a pure sequence-to-sequence perspective with minimal knowledge about the 2D spatial structure? To answer this question, we present You Only Look at One Sequence (YOLOS), a series of object detection models based on the vanilla Vision Transformer with the fewest possible modifications, region priors, as well as inductive biases of the target task. We find that YOLOS pre-trained on the mid-sized ImageNet-1k dataset only can already achieve quite competitive performance on the challenging COCO object detection benchmark, e.g., YOLOS-Base directly adopted from BERT-Base architecture can obtain 42.0 box AP on COCO val. We also discuss the impacts as well as limitations of current pre-train schemes and model scaling strategies for Transformer in vision through YOLOS.* -Tips: - -- One can use [`YolosImageProcessor`] for preparing images (and optional targets) for the model. 
Contrary to [DETR](detr), YOLOS doesn't require a `pixel_mask` to be created. - drawing @@ -47,6 +43,12 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. + + +Use [`YolosImageProcessor`] for preparing images (and optional targets) for the model. Contrary to [DETR](detr), YOLOS doesn't require a `pixel_mask` to be created. + + + ## YolosConfig [[autodoc]] YolosConfig @@ -65,13 +67,11 @@ If you're interested in submitting a resource to be included here, please feel f - pad - post_process_object_detection - ## YolosModel [[autodoc]] YolosModel - forward - ## YolosForObjectDetection [[autodoc]] YolosForObjectDetection diff --git a/docs/source/en/model_doc/yoso.md b/docs/source/en/model_doc/yoso.md index 4b98cd348c9a..a3dfa3fed855 100644 --- a/docs/source/en/model_doc/yoso.md +++ b/docs/source/en/model_doc/yoso.md @@ -37,7 +37,9 @@ length where we see favorable performance relative to a standard pretrained Tran for evaluating performance on long sequences, our method achieves results consistent with softmax self-attention but with sizable speed-ups and memory savings and often outperforms other efficient self-attention methods. Our code is available at this https URL* -Tips: +This model was contributed by [novice03](https://huggingface.co/novice03). The original code can be found [here](https://github.com/mlpen/YOSO). + +## Usage tips - The YOSO attention algorithm is implemented through custom CUDA kernels, functions written in CUDA C++ that can be executed multiple times in parallel on a GPU. @@ -52,9 +54,7 @@ alt="drawing" width="600"/> YOSO Attention Algorithm. Taken from the original paper. -This model was contributed by [novice03](https://huggingface.co/novice03). The original code can be found [here](https://github.com/mlpen/YOSO). - -## Documentation resources +## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) @@ -66,19 +66,16 @@ This model was contributed by [novice03](https://huggingface.co/novice03). The o [[autodoc]] YosoConfig - ## YosoModel [[autodoc]] YosoModel - forward - ## YosoForMaskedLM [[autodoc]] YosoForMaskedLM - forward - ## YosoForSequenceClassification [[autodoc]] YosoForSequenceClassification @@ -89,13 +86,11 @@ This model was contributed by [novice03](https://huggingface.co/novice03). 
The o [[autodoc]] YosoForMultipleChoice - forward - ## YosoForTokenClassification [[autodoc]] YosoForTokenClassification - forward - ## YosoForQuestionAnswering [[autodoc]] YosoForQuestionAnswering From 1ac2463dfee0a133311c3c585bf7253b0400d6d3 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Fri, 3 Nov 2023 21:37:54 +0530 Subject: [PATCH 080/268] [`FA2`] Add flash attention for for `DistilBert` (#26489) * flash attention added for DistilBert * fixes * removed padding_masks * Update modeling_distilbert.py * Update test_modeling_distilbert.py * style fix --- docs/source/en/model_doc/distilbert.md | 31 +++ .../models/distilbert/modeling_distilbert.py | 210 +++++++++++++++++- .../distilbert/test_modeling_distilbert.py | 112 +++++++++- 3 files changed, 348 insertions(+), 5 deletions(-) diff --git a/docs/source/en/model_doc/distilbert.md b/docs/source/en/model_doc/distilbert.md index 7633adcae42b..5742380c517b 100644 --- a/docs/source/en/model_doc/distilbert.md +++ b/docs/source/en/model_doc/distilbert.md @@ -133,6 +133,37 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h - A blog post on how to [deploy DistilBERT with Amazon SageMaker](https://huggingface.co/blog/deploy-hugging-face-models-easily-with-amazon-sagemaker). - A blog post on how to [Deploy BERT with Hugging Face Transformers, Amazon SageMaker and Terraform module](https://www.philschmid.de/terraform-huggingface-amazon-sagemaker). + +## Combining DistilBERT and Flash Attention 2 + +First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature. + +```bash +pip install -U flash-attn --no-build-isolation +``` + +Make also sure that you have a hardware that is compatible with Flash-Attention 2. Read more about it in the official documentation of flash-attn repository. Make also sure to load your model in half-precision (e.g. `torch.float16`) + +To load and run a model using Flash Attention 2, refer to the snippet below: + +```python +>>> import torch +>>> from transformers import AutoTokenizer, AutoModel + +>>> device = "cuda" # the device to load the model onto + +>>> tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased') +>>> model = AutoModel.from_pretrained("distilbert-base-uncased", torch_dtype=torch.float16, use_flash_attention_2=True) + +>>> text = "Replace me by any text you'd like." 
+ +>>> encoded_input = tokenizer(text, return_tensors='pt').to(device) +>>> model.to(device) + +>>> output = model(**encoded_input) +``` + + ## DistilBertConfig [[autodoc]] DistilBertConfig diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index c66519a72457..144fde42e0bf 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -24,6 +24,7 @@ import numpy as np import torch +import torch.nn.functional as F from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -44,12 +45,18 @@ add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, + is_flash_attn_2_available, logging, replace_return_docstrings, ) from .configuration_distilbert import DistilBertConfig +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "distilbert-base-uncased" _CONFIG_FOR_DOC = "DistilBertConfig" @@ -69,6 +76,19 @@ # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE # +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor): if is_deepspeed_zero3_enabled(): import deepspeed @@ -141,10 +161,12 @@ def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] class MultiHeadSelfAttention(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() + self.config = config self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) + self.is_causal = False # Have an even number of multi heads that divide the dimensions if self.dim % self.n_heads != 0: @@ -240,6 +262,178 @@ def unshape(x: torch.Tensor) -> torch.Tensor: return (context,) +class DistilBertFlashAttention2(MultiHeadSelfAttention): + """ + DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module + stays untouched. The only required change would be on the forward pass where it needs to correctly call the public + API of flash attention and deal with padding tokens in case the input contains any of them. + """ + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, ...]: + """ + Parameters: + query: torch.tensor(bs, seq_length, dim) + key: torch.tensor(bs, seq_length, dim) + value: torch.tensor(bs, seq_length, dim) + mask: torch.tensor(bs, seq_length) + + Returns: + weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs, + seq_length, dim) Contextualized layer. 
Optional: only if `output_attentions=True` + """ + batch_size, q_length, dim = query.size() + + dim_per_head = self.dim // self.n_heads + + def reshape(x: torch.Tensor) -> torch.Tensor: + """separate heads""" + return x.view(batch_size, -1, self.n_heads, dim_per_head) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + query_states = reshape(self.q_lin(query)) + key_states = reshape(self.k_lin(key)) + value_states = reshape(self.v_lin(value)) + + attn_dropout = self.config.attention_dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + + if query_states.dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_lin.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_weights = self._flash_attention_forward( + query_states, key_states, value_states, mask, q_length, dropout=attn_dropout + ) + + attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head) + attn_output = self.out_lin(attn_weights_reshaped) + + if output_attentions: + return (attn_output, attn_weights) + else: + return (attn_output,) + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with causal=True->causal=False + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. 
Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->n_heads + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.n_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + class FFN(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() @@ -269,7 +463,11 @@ def __init__(self, config: PretrainedConfig): if config.dim % config.n_heads != 0: raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly") - self.attention = MultiHeadSelfAttention(config) + self.attention = ( + MultiHeadSelfAttention(config) + if not getattr(config, "_flash_attn_2_enabled", False) + else DistilBertFlashAttention2(config) + ) self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) self.ffn = FFN(config) @@ -407,6 +605,7 @@ class DistilBertPreTrainedModel(PreTrainedModel): load_tf_weights = None base_model_prefix = "distilbert" supports_gradient_checkpointing = True + _supports_flash_attn_2 = True def _init_weights(self, module: nn.Module): """Initialize the weights.""" @@ -588,14 +787,17 @@ def forward( device = input_ids.device if input_ids is not None else inputs_embeds.device - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length) - # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embeddings = self.embeddings(input_ids, inputs_embeds) # (bs, seq_length, dim) + if getattr(self.config, "_flash_attn_2_enabled", False): + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length) + return self.transformer( x=embeddings, attn_mask=attention_mask, diff --git a/tests/models/distilbert/test_modeling_distilbert.py b/tests/models/distilbert/test_modeling_distilbert.py index 22e976535369..b6d3c0f57aad 100644 --- a/tests/models/distilbert/test_modeling_distilbert.py +++ b/tests/models/distilbert/test_modeling_distilbert.py @@ -16,8 +16,10 @@ import tempfile import unittest +from pytest import mark + from transformers import DistilBertConfig, is_torch_available -from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device +from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask @@ -285,6 +287,114 @@ def test_torchscript_device_change(self): loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) + # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test. 
+ @require_flash_attn + @require_torch_accelerator + @mark.flash_attn_test + @slow + def test_flash_attn_2_inference(self): + import torch + + for model_class in self.all_model_classes: + dummy_input = torch.LongTensor( + [ + [1, 2, 3, 4], + [1, 2, 8, 9], + [1, 2, 11, 12], + [1, 2, 13, 14], + ] + ).to(torch_device) + dummy_attention_mask = torch.LongTensor( + [ + [0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1], + ] + ).to(torch_device) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model_fa = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True + ) + model_fa.to(torch_device) + + model = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False + ) + model.to(torch_device) + + logits = model(dummy_input, output_hidden_states=True).hidden_states[-1] + logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1] + + self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)) + + output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) + logits_fa = output_fa.hidden_states[-1] + + output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) + logits = output.hidden_states[-1] + + self.assertTrue(torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2)) + + # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test. + @require_flash_attn + @require_torch_accelerator + @mark.flash_attn_test + @slow + def test_flash_attn_2_inference_padding_right(self): + import torch + + for model_class in self.all_model_classes: + dummy_input = torch.LongTensor( + [ + [1, 2, 3, 4], + [1, 2, 8, 9], + [1, 2, 11, 12], + [1, 2, 13, 14], + ] + ).to(torch_device) + dummy_attention_mask = torch.LongTensor( + [ + [0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1], + ] + ).to(torch_device) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model_fa = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True + ) + model_fa.to(torch_device) + + model = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False + ) + model.to(torch_device) + + logits = model(dummy_input, output_hidden_states=True).hidden_states[-1] + logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1] + + self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)) + + output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) + logits_fa = output_fa.hidden_states[-1] + + output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) + logits = output.hidden_states[-1] + + self.assertTrue(torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2)) + @require_torch class DistilBertModelIntergrationTest(unittest.TestCase): From bf7cfac20a08fa401d35d5a3d14246f4119d1f52 Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Fri, 3 Nov 2023 11:16:55 -0500 Subject: [PATCH 081/268] translate autoclass_tutorial to chinese (#27269) * translate autoclass_tutorial.md to 
chinese * translate update --- docs/source/zh/_toctree.yml | 2 + docs/source/zh/autoclass_tutorial.md | 149 +++++++++++++++++++++++++++ 2 files changed, 151 insertions(+) create mode 100644 docs/source/zh/autoclass_tutorial.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 6ba316d7a425..b899e5fe7dff 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -9,6 +9,8 @@ - sections: - local: pipeline_tutorial title: 使用pipelines进行推理 + - local: autoclass_tutorial + title: 使用AutoClass编写可移植的代码 - local: preprocessing title: 预处理数据 - local: training diff --git a/docs/source/zh/autoclass_tutorial.md b/docs/source/zh/autoclass_tutorial.md new file mode 100644 index 000000000000..936080a83153 --- /dev/null +++ b/docs/source/zh/autoclass_tutorial.md @@ -0,0 +1,149 @@ + + +# 使用AutoClass加载预训练实例 + +由于存在许多不同的Transformer架构,因此为您的checkpoint创建一个可用架构可能会具有挑战性。通过`AutoClass`可以自动推断并从给定的checkpoint加载正确的架构, 这也是🤗 Transformers易于使用、简单且灵活核心规则的重要一部分。`from_pretrained()`方法允许您快速加载任何架构的预训练模型,因此您不必花费时间和精力从头开始训练模型。生成这种与checkpoint无关的代码意味着,如果您的代码适用于一个checkpoint,它将适用于另一个checkpoint - 只要它们是为了类似的任务进行训练的 - 即使架构不同。 + + + +请记住,架构指的是模型的结构,而checkpoints是给定架构的权重。例如,[BERT](https://huggingface.co/bert-base-uncased)是一种架构,而`bert-base-uncased`是一个checkpoint。模型是一个通用术语,可以指代架构或checkpoint。 + + + + +在这个教程中,学习如何: + +* 加载预训练的分词器(`tokenizer`) +* 加载预训练的图像处理器(`image processor`) +* 加载预训练的特征提取器(`feature extractor`) +* 加载预训练的处理器(`processor`) +* 加载预训练的模型。 + + +## AutoTokenizer + +几乎所有的NLP任务都以`tokenizer`开始。`tokenizer`将您的输入转换为模型可以处理的格式。 + +使用[`AutoTokenizer.from_pretrained`]加载`tokenizer`: + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") +``` + +然后按照如下方式对输入进行分词: + +```py +>>> sequence = "In a hole in the ground there lived a hobbit." +>>> print(tokenizer(sequence)) +{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], + 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} +``` + +## AutoImageProcessor + +对于视觉任务,`image processor`将图像处理成正确的输入格式。 + +```py +>>> from transformers import AutoImageProcessor + +>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") +``` + + +## AutoFeatureExtractor + +对于音频任务,`feature extractor`将音频信号处理成正确的输入格式。 + +使用[`AutoFeatureExtractor.from_pretrained`]加载`feature extractor`: + +```py +>>> from transformers import AutoFeatureExtractor + +>>> feature_extractor = AutoFeatureExtractor.from_pretrained( +... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" +... 
) +``` + +## AutoProcessor + +多模态任务需要一种`processor`,将两种类型的预处理工具结合起来。例如,[LayoutLMV2](model_doc/layoutlmv2)模型需要一个`image processo`来处理图像和一个`tokenizer`来处理文本;`processor`将两者结合起来。 + +使用[`AutoProcessor.from_pretrained`]加载`processor`: + + +```py +>>> from transformers import AutoProcessor + +>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") +``` + +## AutoModel + + + + +最后,`AutoModelFor`类让你可以加载给定任务的预训练模型(参见[这里](model_doc/auto)获取可用任务的完整列表)。例如,使用[`AutoModelForSequenceClassification.from_pretrained`]加载用于序列分类的模型: + +```py +>>> from transformers import AutoModelForSequenceClassification + +>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") +``` + +轻松地重复使用相同的checkpoint来为不同任务加载模型架构: + + +```py +>>> from transformers import AutoModelForTokenClassification + +>>> model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased") +``` + + + +对于PyTorch模型,`from_pretrained()`方法使用`torch.load()`,它内部使用已知是不安全的`pickle`。一般来说,永远不要加载来自不可信来源或可能被篡改的模型。对于托管在Hugging Face Hub上的公共模型,这种安全风险在一定程度上得到了缓解,因为每次提交都会进行[恶意软件扫描](https://huggingface.co/docs/hub/security-malware)。请参阅[Hub文档](https://huggingface.co/docs/hub/security)以了解最佳实践,例如使用GPG进行[签名提交验证](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg)。 + +TensorFlow和Flax的checkpoints不受影响,并且可以在PyTorch架构中使用`from_tf`和`from_flax`关键字参数,通过`from_pretrained`方法进行加载,来绕过此问题。 + + + +一般来说,我们建议使用`AutoTokenizer`类和`AutoModelFor`类来加载预训练的模型实例。这样可以确保每次加载正确的架构。在下一个[教程](preprocessing)中,学习如何使用新加载的`tokenizer`, `image processor`, `feature extractor`和`processor`对数据集进行预处理以进行微调。 + + + +最后,`TFAutoModelFor`类允许您加载给定任务的预训练模型(请参阅[这里](model_doc/auto)获取可用任务的完整列表)。例如,使用[`TFAutoModelForSequenceClassification.from_pretrained`]加载用于序列分类的模型: + +```py +>>> from transformers import TFAutoModelForSequenceClassification + +>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") +``` + +轻松地重复使用相同的checkpoint来为不同任务加载模型架构: + +```py +>>> from transformers import TFAutoModelForTokenClassification + +>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased") +``` +一般来说,我们推荐使用`AutoTokenizer`类和`TFAutoModelFor`类来加载模型的预训练实例。这样可以确保每次加载正确的架构。在下一个[教程](preprocessing)中,学习如何使用新加载的`tokenizer`, `image processor`, `feature extractor`和`processor`对数据集进行预处理以进行微调。 + + + From cc3e4781854a52cf090ffde28d884a527dab6708 Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Fri, 3 Nov 2023 12:19:41 -0500 Subject: [PATCH 082/268] translate run_scripts.md to chinese (#27246) * translate run_scripts.md to chinese * translate run_scripts.md to chinese * translate run_scripts.md to chinese --- docs/source/zh/_toctree.yml | 2 + docs/source/zh/run_scripts.md | 359 ++++++++++++++++++++++++++++++++++ 2 files changed, 361 insertions(+) create mode 100644 docs/source/zh/run_scripts.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index b899e5fe7dff..05ed5de31d99 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -15,6 +15,8 @@ title: 预处理数据 - local: training title: 微调预训练模型 + - local: run_scripts + title: 通过脚本训练模型 - local: accelerate title: 使用🤗Accelerate进行分布式训练 - local: peft diff --git a/docs/source/zh/run_scripts.md b/docs/source/zh/run_scripts.md new file mode 100644 index 000000000000..e5cc56487dab --- /dev/null +++ b/docs/source/zh/run_scripts.md @@ -0,0 +1,359 @@ + + +# 使用脚本进行训练 + +除了 🤗 Transformers 
[notebooks](./noteboks/README),还有示例脚本演示了如何使用[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch)、[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow)或[JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax)训练模型以解决特定任务。 + +您还可以在这些示例中找到我们在[研究项目](https://github.com/huggingface/transformers/tree/main/examples/research_projects)和[遗留示例](https://github.com/huggingface/transformers/tree/main/examples/legacy)中使用过的脚本,这些脚本主要是由社区贡献的。这些脚本已不再被积极维护,需要使用特定版本的🤗 Transformers, 可能与库的最新版本不兼容。 + +示例脚本可能无法在初始配置下直接解决每个问题,您可能需要根据要解决的问题调整脚本。为了帮助您,大多数脚本都完全暴露了数据预处理的方式,允许您根据需要对其进行编辑。 + +如果您想在示例脚本中实现任何功能,请在[论坛](https://discuss.huggingface.co/)或[issue](https://github.com/huggingface/transformers/issues)上讨论,然后再提交Pull Request。虽然我们欢迎修复错误,但不太可能合并添加更多功能的Pull Request,因为这会降低可读性。 + +本指南将向您展示如何在[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization)和[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization)中运行示例摘要训练脚本。除非另有说明,否则所有示例都可以在两个框架中工作。 + +## 设置 + +要成功运行示例脚本的最新版本,您必须在新虚拟环境中**从源代码安装 🤗 Transformers**: + +```bash +git clone https://github.com/huggingface/transformers +cd transformers +pip install . +``` + +对于旧版本的示例脚本,请点击下面的切换按钮: + +

+ 老版本🤗 Transformers示例 + +
+ +然后切换您clone的 🤗 Transformers 仓到特定的版本,例如v3.5.1: + +```bash +git checkout tags/v3.5.1 +``` + +在安装了正确的库版本后,进入您选择的版本的`example`文件夹并安装例子要求的环境: + +```bash +pip install -r requirements.txt +``` + +## 运行脚本 + + + + +示例脚本从🤗 [Datasets](https://huggingface.co/docs/datasets/)库下载并预处理数据集。然后,脚本通过[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer)使用支持摘要任务的架构对数据集进行微调。以下示例展示了如何在[CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail)数据集上微调[T5-small](https://huggingface.co/t5-small)。由于T5模型的训练方式,它需要一个额外的`source_prefix`参数。这个提示让T5知道这是一个摘要任务。 + +```bash +python examples/pytorch/summarization/run_summarization.py \ + --model_name_or_path t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ + --predict_with_generate +``` + + + +示例脚本从 🤗 [Datasets](https://huggingface.co/docs/datasets/) 库下载并预处理数据集。然后,脚本使用 Keras 在支持摘要的架构上微调数据集。以下示例展示了如何在 [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) 数据集上微调 [T5-small](https://huggingface.co/t5-small)。T5 模型由于训练方式需要额外的 `source_prefix` 参数。这个提示让 T5 知道这是一个摘要任务。 + +```bash +python examples/tensorflow/summarization/run_summarization.py \ + --model_name_or_path t5-small \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size 8 \ + --per_device_eval_batch_size 16 \ + --num_train_epochs 3 \ + --do_train \ + --do_eval +``` + + + +## 分布式训练和混合精度 + +[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) 支持分布式训练和混合精度,这意味着你也可以在脚本中使用它。要启用这两个功能,可以做如下设置: + +- 添加 `fp16` 参数以启用混合精度。 +- 使用 `nproc_per_node` 参数设置使用的GPU数量。 + + +```bash +python -m torch.distributed.launch \ + --nproc_per_node 8 pytorch/summarization/run_summarization.py \ + --fp16 \ + --model_name_or_path t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ + --predict_with_generate +``` + +TensorFlow脚本使用[`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy)进行分布式训练,您无需在训练脚本中添加任何其他参数。如果可用,TensorFlow脚本将默认使用多个GPU。 + +## 在TPU上运行脚本 + + + + +张量处理单元(TPUs)是专门设计用于加速性能的。PyTorch使用[XLA](https://www.tensorflow.org/xla)深度学习编译器支持TPU(更多细节请参见[这里](https://github.com/pytorch/xla/blob/master/README.md))。要使用TPU,请启动`xla_spawn.py`脚本并使用`num_cores`参数设置要使用的TPU核心数量。 + +```bash +python xla_spawn.py --num_cores 8 \ + summarization/run_summarization.py \ + --model_name_or_path t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ + --predict_with_generate +``` + + + +张量处理单元(TPUs)是专门设计用于加速性能的。TensorFlow脚本使用[`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy)在TPU上进行训练。要使用TPU,请将TPU资源的名称传递给`tpu`参数。 + +```bash +python run_summarization.py \ + --tpu name_of_tpu_resource \ + --model_name_or_path t5-small \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size 8 \ + --per_device_eval_batch_size 16 \ + --num_train_epochs 3 \ + --do_train \ + 
--do_eval +``` + + + +## 基于🤗 Accelerate运行脚本 + +🤗 [Accelerate](https://huggingface.co/docs/accelerate) 是一个仅支持 PyTorch 的库,它提供了一种统一的方法来在不同类型的设置(仅 CPU、多个 GPU、多个TPU)上训练模型,同时保持对 PyTorch 训练循环的完全可见性。如果你还没有安装 🤗 Accelerate,请确保你已经安装了它: + +> 注意:由于 Accelerate 正在快速发展,因此必须安装 git 版本的 accelerate 来运行脚本。 + +```bash +pip install git+https://github.com/huggingface/accelerate +``` + +你需要使用`run_summarization_no_trainer.py`脚本,而不是`run_summarization.py`脚本。🤗 Accelerate支持的脚本需要在文件夹中有一个`task_no_trainer.py`文件。首先运行以下命令以创建并保存配置文件: + +```bash +accelerate config +``` +检测您的设置以确保配置正确: + +```bash +accelerate test +``` + +现在您可以开始训练模型了: + +```bash +accelerate launch run_summarization_no_trainer.py \ + --model_name_or_path t5-small \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir ~/tmp/tst-summarization +``` + +## 使用自定义数据集 + +摘要脚本支持自定义数据集,只要它们是CSV或JSON Line文件。当你使用自己的数据集时,需要指定一些额外的参数: +- `train_file` 和 `validation_file` 分别指定您的训练和验证文件的路径。 +- `text_column` 是输入要进行摘要的文本。 +- `summary_column` 是目标输出的文本。 + +使用自定义数据集的摘要脚本看起来是这样的: + + +```bash +python examples/pytorch/summarization/run_summarization.py \ + --model_name_or_path t5-small \ + --do_train \ + --do_eval \ + --train_file path_to_csv_or_jsonlines_file \ + --validation_file path_to_csv_or_jsonlines_file \ + --text_column text_column_name \ + --summary_column summary_column_name \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --predict_with_generate +``` + +## 测试脚本 + +通常,在提交整个数据集之前,最好先在较少的数据集示例上运行脚本,以确保一切按预期工作,因为完整数据集的处理可能需要花费几个小时的时间。使用以下参数将数据集截断为最大样本数: + +- `max_train_samples` +- `max_eval_samples` +- `max_predict_samples` + + +```bash +python examples/pytorch/summarization/run_summarization.py \ + --model_name_or_path t5-small \ + --max_train_samples 50 \ + --max_eval_samples 50 \ + --max_predict_samples 50 \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ + --predict_with_generate +``` + +并非所有示例脚本都支持`max_predict_samples`参数。如果您不确定您的脚本是否支持此参数,请添加`-h`参数进行检查: + +```bash +examples/pytorch/summarization/run_summarization.py -h +``` + +## 从checkpoint恢复训练 + +另一个有用的选项是从之前的checkpoint恢复训练。这将确保在训练中断时,您可以从之前停止的地方继续进行,而无需重新开始。有两种方法可以从checkpoint恢复训练。 + +第一种方法使用`output_dir previous_output_dir`参数从存储在`output_dir`中的最新的checkpoint恢复训练。在这种情况下,您应该删除`overwrite_output_dir`: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +第二种方法使用`resume_from_checkpoint path_to_specific_checkpoint`参数从特定的checkpoint文件夹恢复训练。 + + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ + --resume_from_checkpoint path_to_specific_checkpoint \ + --predict_with_generate 
+``` + +## 分享模型 + +所有脚本都可以将您的最终模型上传到[Model Hub](https://huggingface.co/models)。在开始之前,请确保您已登录Hugging Face: + +```bash +huggingface-cli login +``` + +然后,在脚本中添加`push_to_hub`参数。这个参数会创建一个带有您Hugging Face用户名和`output_dir`中指定的文件夹名称的仓库。 + +为了给您的仓库指定一个特定的名称,使用`push_to_hub_model_id`参数来添加它。该仓库将自动列出在您的命名空间下。 + +以下示例展示了如何上传具有特定仓库名称的模型: + + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --push_to_hub \ + --push_to_hub_model_id finetuned-t5-cnn_dailymail \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ + --predict_with_generate +``` \ No newline at end of file From b026b5ca6dd178f50d91778f58d32d85d3a83c5f Mon Sep 17 00:00:00 2001 From: Mayank Mishra <32954280+mayank31398@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:56:18 +0530 Subject: [PATCH 083/268] Fix tokenizer export for LLamaTokenizerFast (#27222) * fix tokenizer * fix tokenizer --- .../models/code_llama/tokenization_code_llama_fast.py | 2 ++ src/transformers/models/llama/tokenization_llama_fast.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/transformers/models/code_llama/tokenization_code_llama_fast.py b/src/transformers/models/code_llama/tokenization_code_llama_fast.py index ae954afa5f6e..e2429aaec5d1 100644 --- a/src/transformers/models/code_llama/tokenization_code_llama_fast.py +++ b/src/transformers/models/code_llama/tokenization_code_llama_fast.py @@ -149,6 +149,8 @@ def __init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, prefix_token=prefix_token, middle_token=middle_token, suffix_token=suffix_token, diff --git a/src/transformers/models/llama/tokenization_llama_fast.py b/src/transformers/models/llama/tokenization_llama_fast.py index 1fc2b6749b55..c63ea44a6d2f 100644 --- a/src/transformers/models/llama/tokenization_llama_fast.py +++ b/src/transformers/models/llama/tokenization_llama_fast.py @@ -128,6 +128,8 @@ def __init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, use_default_system_prompt=use_default_system_prompt, **kwargs, ) From d788d37d24a85fbfe83a0bb6ee83046783fa9ab2 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 6 Nov 2023 11:27:22 +0100 Subject: [PATCH 084/268] Fix daily CI image build (#27307) fix Co-authored-by: ydshieh --- docker/transformers-all-latest-gpu/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 843c06cbd745..e2056caed4cc 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -11,7 +11,7 @@ SHELL ["sh", "-lc"] ARG PYTORCH='2.1.0' # (not always a valid torch version) -ARG INTEL_TORCH_EXT='1.11.0' +ARG INTEL_TORCH_EXT='2.1.0' # Example: `cu102`, `cu113`, etc. 
ARG CUDA='cu118' @@ -37,7 +37,7 @@ RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime] RUN python3 -m pip uninstall -y flax jax -RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT+cpu -f https://developer.intel.com/ipex-whl-stable-cpu +RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT -f https://developer.intel.com/ipex-whl-stable-cpu RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract RUN python3 -m pip install -U "itsdangerous<2.1.0" From eef7ea98c31a333bacdc7ae7a2372bde772be8e4 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 6 Nov 2023 11:27:48 +0100 Subject: [PATCH 085/268] Update doctest workflow file (#27306) fix Co-authored-by: ydshieh --- .github/workflows/doctests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/doctests.yml b/.github/workflows/doctests.yml index 236ccbcd253a..82944ed2dfe8 100644 --- a/.github/workflows/doctests.yml +++ b/.github/workflows/doctests.yml @@ -20,7 +20,7 @@ env: jobs: run_doctests: - runs-on: [single-gpu, nvidia-gpu, t4, doctest-ci] + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: huggingface/transformers-all-latest-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ From d7dcfa8917d77c4f7a9d8bc22b7c2a0fb485752d Mon Sep 17 00:00:00 2001 From: Pingzhi Li Date: Mon, 6 Nov 2023 20:16:03 +0800 Subject: [PATCH 086/268] Remove an unexpected argument for FlaxResNetBasicLayerCollection (#27272) Remove unexpected argument for FlaxResNetBasicLayerCollection --- src/transformers/models/resnet/modeling_flax_resnet.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/transformers/models/resnet/modeling_flax_resnet.py b/src/transformers/models/resnet/modeling_flax_resnet.py index 875716d3f5be..c76da498710d 100644 --- a/src/transformers/models/resnet/modeling_flax_resnet.py +++ b/src/transformers/models/resnet/modeling_flax_resnet.py @@ -215,7 +215,6 @@ def setup(self): self.layer = FlaxResNetBasicLayerCollection( out_channels=self.out_channels, stride=self.stride, - activation=self.activation, dtype=self.dtype, ) self.activation_func = ACT2FN[self.activation] From 1ffc4dee5b0057b5f3de7deea577557b1fa0adb6 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Mon, 6 Nov 2023 21:44:21 +0800 Subject: [PATCH 087/268] enable memory tracker metrics for npu (#27280) --- src/transformers/trainer_utils.py | 15 +++++++++++++++ tests/trainer/test_trainer.py | 6 +++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index dd793c02036e..e6f26d0df519 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -459,6 +459,11 @@ def __init__(self, skip_memory_metrics=False): elif is_torch_xpu_available(): import torch + self.torch = torch + self.gpu = {} + elif is_torch_npu_available(): + import torch + self.torch = torch self.gpu = {} else: @@ -517,6 +522,9 @@ def start(self): elif is_torch_xpu_available(): self.torch.xpu.reset_peak_memory_stats() self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + self.torch.npu.reset_peak_memory_stats() + self.torch.npu.empty_cache() # gpu if self.torch is not None: @@ -524,6 +532,8 @@ def start(self): self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() elif is_torch_xpu_available(): self.gpu_mem_used_at_start = 
self.torch.xpu.memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_at_start = self.torch.npu.memory_allocated() # cpu self.cpu_mem_used_at_start = self.cpu_mem_used() @@ -551,6 +561,8 @@ def stop(self, stage): self.torch.cuda.empty_cache() elif is_torch_xpu_available(): self.torch.xpu.empty_cache() + elif is_torch_npu_available(): + self.torch.npu.empty_cache() # concepts: # - alloc_delta: the difference of allocated memory between the end and the start @@ -565,6 +577,9 @@ def stop(self, stage): elif is_torch_xpu_available(): self.gpu_mem_used_now = self.torch.xpu.memory_allocated() self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() + elif is_torch_npu_available(): + self.gpu_mem_used_now = self.torch.npu.memory_allocated() + self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated() else: raise ValueError("No available GPU device found!") diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index ae6d8f7ae388..9df5ac84d747 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1944,18 +1944,18 @@ def check_mem_metrics(self, trainer, check_func): metrics = trainer.train().metrics check_func("init_mem_cpu_alloc_delta", metrics) check_func("train_mem_cpu_alloc_delta", metrics) - if torch.cuda.device_count() > 0: + if backend_device_count(torch_device) > 0: check_func("init_mem_gpu_alloc_delta", metrics) check_func("train_mem_gpu_alloc_delta", metrics) metrics = trainer.evaluate() check_func("eval_mem_cpu_alloc_delta", metrics) - if torch.cuda.device_count() > 0: + if backend_device_count(torch_device) > 0: check_func("eval_mem_gpu_alloc_delta", metrics) metrics = trainer.predict(RegressionDataset()).metrics check_func("test_mem_cpu_alloc_delta", metrics) - if torch.cuda.device_count() > 0: + if backend_device_count(torch_device) > 0: check_func("test_mem_gpu_alloc_delta", metrics) def test_mem_metrics(self): From 147f774671c72ab24d17547030bb1f2803925d3b Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Mon, 6 Nov 2023 15:11:00 +0100 Subject: [PATCH 088/268] [`PretrainedTokenizer`] add some of the most important functions to the doc (#27313) --- docs/source/en/main_classes/tokenizer.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/source/en/main_classes/tokenizer.md b/docs/source/en/main_classes/tokenizer.md index bf43014ee51d..2ad7e450404e 100644 --- a/docs/source/en/main_classes/tokenizer.md +++ b/docs/source/en/main_classes/tokenizer.md @@ -55,6 +55,8 @@ to a given token). [[autodoc]] PreTrainedTokenizer - __call__ + - add_tokens + - add_special_tokens - apply_chat_template - batch_decode - decode @@ -69,6 +71,8 @@ loaded very simply into 🤗 transformers. Take a look at the [Using tokenizers [[autodoc]] PreTrainedTokenizerFast - __call__ + - add_tokens + - add_special_tokens - apply_chat_template - batch_decode - decode From e9dbd3926317a4effb1d033d8454ff18280d0b7d Mon Sep 17 00:00:00 2001 From: Akshay Chintalapati <64036106+akshayvkt@users.noreply.github.com> Date: Mon, 6 Nov 2023 06:21:48 -0800 Subject: [PATCH 089/268] Update sequence_classification.md (#27281) I'm adding accelerate as one of the libraries to install because otherwise when running the Trainer, the model errorr out with the error. ImportError: Using the `Trainer` with `PyTorch` requires `accelerate>=0.20.1`: Please run `pip install transformers[torch]` or `pip install accelerate -U` Further context: 1. 
I've tried this across different environments so I believe that the environment is not the issue. 2. I had the latest transformers library version running. 3. Typically even after install accelerate and import it, it wouldn't resolve the issue until I restart the notebook and try again. --- docs/source/en/tasks/sequence_classification.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/tasks/sequence_classification.md b/docs/source/en/tasks/sequence_classification.md index b67d43453d27..7068e7ce088e 100644 --- a/docs/source/en/tasks/sequence_classification.md +++ b/docs/source/en/tasks/sequence_classification.md @@ -44,7 +44,7 @@ The task illustrated in this tutorial is supported by the following model archit Before you begin, make sure you have all the necessary libraries installed: ```bash -pip install transformers datasets evaluate +pip install transformers datasets evaluate accelerate ``` We encourage you to login to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to login: From a6e0d5a219c3a05171e695d375b1e35cf2915d71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iker=20Garc=C3=ADa-Ferrero?= Date: Mon, 6 Nov 2023 18:20:06 +0100 Subject: [PATCH 090/268] Fix VideoMAEforPretrained dtype error (#27296) * Fix dtype error * Fix mean and std dtype * make style --- src/transformers/models/videomae/modeling_videomae.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/videomae/modeling_videomae.py b/src/transformers/models/videomae/modeling_videomae.py index f78198451d08..aac69b6c536b 100644 --- a/src/transformers/models/videomae/modeling_videomae.py +++ b/src/transformers/models/videomae/modeling_videomae.py @@ -848,8 +848,9 @@ def forward( else: # first, unnormalize the frames device = pixel_values.device - mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, None, :, None, None] - std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, None, :, None, None] + dtype = pixel_values.dtype + mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None] + std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None] frames = pixel_values * std + mean # in [0, 1] batch_size, time, num_channels, height, width = frames.shape From 1b20e2bb421175270d625a9610ebaa00c445fcb4 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 6 Nov 2023 19:05:50 +0100 Subject: [PATCH 091/268] Fix `Kosmos2Processor` batch mode (#27323) * fix * fix * fix --------- Co-authored-by: ydshieh --- .../models/kosmos2/processing_kosmos2.py | 4 ++- tests/models/kosmos2/test_modeling_kosmos2.py | 28 +++++++++---------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/transformers/models/kosmos2/processing_kosmos2.py b/src/transformers/models/kosmos2/processing_kosmos2.py index 5dc0fad0de01..6943c12c4871 100644 --- a/src/transformers/models/kosmos2/processing_kosmos2.py +++ b/src/transformers/models/kosmos2/processing_kosmos2.py @@ -211,7 +211,9 @@ def __call__( image_embeds_position_mask.append(mask) if isinstance(text, list): - sorted_length = sorted([(idx, len(x)) for idx, x in enumerate(text_encoding.input_ids)]) + sorted_length = sorted( + [(idx, len(x)) for idx, x in enumerate(text_encoding.input_ids)], key=lambda x: x[-1] + ) _, min_len_not_padded = sorted_length[0] idx, _ = sorted_length[-1] diff --git 
a/tests/models/kosmos2/test_modeling_kosmos2.py b/tests/models/kosmos2/test_modeling_kosmos2.py index 3f55ad9759dd..5491ded1bc81 100644 --- a/tests/models/kosmos2/test_modeling_kosmos2.py +++ b/tests/models/kosmos2/test_modeling_kosmos2.py @@ -686,7 +686,7 @@ def test_snowman_image_captioning_batch(self): model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device) - prompt = ["An image of", "Describe this image in detail:"] + prompt = ["Describe this image in detail:", "An image of"] # left padding processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") @@ -699,10 +699,6 @@ def test_snowman_image_captioning_batch(self): # left padding gives identical results as non-padding EXPECTED_PROCESSED_TEXT_0 = ( - " An image of a snowman " - "warming himself by a fire." - ) - EXPECTED_PROCESSED_TEXT_1 = ( " Describe this image in detail: The image features a snowman sitting by a campfire" " in the snow. He is wearing a hat" ", scarf" @@ -712,21 +708,21 @@ def test_snowman_image_captioning_batch(self): "nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy " "atmosphere." ) + EXPECTED_PROCESSED_TEXT_1 = ( + " An image of a snowman " + "warming himself by a fire." + ) self.assertListEqual(processed_text, [EXPECTED_PROCESSED_TEXT_0, EXPECTED_PROCESSED_TEXT_1]) - EXPECTED_FINAL_TEXT_0 = "An image of a snowman warming himself by a fire." - EXPECTED_FINAL_TEXT_1 = ( + EXPECTED_FINAL_TEXT_0 = ( "Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is " "wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be " "enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere." ) + EXPECTED_FINAL_TEXT_1 = "An image of a snowman warming himself by a fire." 
self.assertListEqual(all_final_text, [EXPECTED_FINAL_TEXT_0, EXPECTED_FINAL_TEXT_1]) EXPECTED_ENTITIES_0 = [ - ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), - ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), - ] - EXPECTED_ENTITIES_1 = [ ("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), ("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), ("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), @@ -734,6 +730,10 @@ def test_snowman_image_captioning_batch(self): ("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]), ("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]), ] + EXPECTED_ENTITIES_1 = [ + ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), + ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), + ] self.assertListEqual(all_entities, [EXPECTED_ENTITIES_0, EXPECTED_ENTITIES_1]) # right padding @@ -746,6 +746,6 @@ def test_snowman_image_captioning_batch(self): all_entities = [x[1] for x in final_text_with_entities] # For right padding, only the non-padded sequences will give the same results as non-padding - self.assertEqual(processed_text[1], EXPECTED_PROCESSED_TEXT_1) - self.assertEqual(all_final_text[1], EXPECTED_FINAL_TEXT_1) - self.assertListEqual(all_entities[1], EXPECTED_ENTITIES_1) + self.assertEqual(processed_text[0], EXPECTED_PROCESSED_TEXT_0) + self.assertEqual(all_final_text[0], EXPECTED_FINAL_TEXT_0) + self.assertListEqual(all_entities[0], EXPECTED_ENTITIES_0) From 9beb2737d758160e845b66742a0c01201e38007f Mon Sep 17 00:00:00 2001 From: Maria Khalusova Date: Mon, 6 Nov 2023 14:45:03 -0500 Subject: [PATCH 092/268] [docs] fixed links with 404 (#27327) * fixed links with 404 * make style --- docs/source/en/create_a_model.md | 2 +- docs/source/en/main_classes/processors.md | 2 +- docs/source/en/main_classes/quantization.md | 2 +- docs/source/en/model_doc/clap.md | 2 +- docs/source/en/model_doc/mobilebert.md | 2 +- docs/source/en/model_doc/nllb-moe.md | 2 +- docs/source/en/model_doc/regnet.md | 2 +- docs/source/en/model_doc/roberta-prelayernorm.md | 2 +- docs/source/en/model_doc/switch_transformers.md | 2 +- docs/source/en/model_doc/tapas.md | 2 +- docs/source/en/perf_train_gpu_many.md | 4 ++-- docs/source/en/perf_train_gpu_one.md | 4 ++-- docs/source/en/philosophy.md | 2 +- docs/source/en/sagemaker.md | 1 - docs/source/es/create_a_model.md | 2 +- docs/source/es/sagemaker.md | 1 - docs/source/it/create_a_model.md | 2 +- docs/source/ja/create_a_model.md | 2 +- docs/source/ja/main_classes/processors.md | 2 +- docs/source/ja/perf_train_gpu_many.md | 4 ++-- docs/source/ja/perf_train_gpu_one.md | 4 ++-- docs/source/ja/philosophy.md | 2 +- docs/source/ko/create_a_model.md | 2 +- docs/source/ko/perf_train_gpu_many.md | 4 ++-- docs/source/ko/philosophy.md | 2 +- docs/source/ko/sagemaker.md | 1 - docs/source/pt/create_a_model.md | 2 +- docs/source/zh/create_a_model.md | 2 +- src/transformers/models/albert/modeling_flax_albert.py | 7 ++++--- src/transformers/models/beit/modeling_flax_beit.py | 7 ++++--- src/transformers/models/bert/modeling_flax_bert.py | 7 ++++--- src/transformers/models/big_bird/modeling_flax_big_bird.py | 7 ++++--- src/transformers/models/clip/modeling_flax_clip.py | 7 ++++--- .../models/distilbert/modeling_flax_distilbert.py | 7 ++++--- src/transformers/models/regnet/modeling_flax_regnet.py | 7 ++++--- src/transformers/models/resnet/modeling_flax_resnet.py | 7 ++++--- src/transformers/models/roberta/modeling_flax_roberta.py | 7 
++++--- .../modeling_flax_roberta_prelayernorm.py | 7 ++++--- src/transformers/models/roformer/modeling_flax_roformer.py | 7 ++++--- .../modeling_flax_vision_text_dual_encoder.py | 7 ++++--- src/transformers/models/vit/modeling_flax_vit.py | 7 ++++--- .../models/xlm_roberta/modeling_flax_xlm_roberta.py | 7 ++++--- .../modeling_flax_{{cookiecutter.lowercase_modelname}}.py | 2 +- .../open_model_proposals/ADD_BIG_BIRD.md | 2 +- 44 files changed, 88 insertions(+), 77 deletions(-) diff --git a/docs/source/en/create_a_model.md b/docs/source/en/create_a_model.md index ba384d437b80..a70a734c2e3f 100644 --- a/docs/source/en/create_a_model.md +++ b/docs/source/en/create_a_model.md @@ -110,7 +110,7 @@ You can also save your configuration file as a dictionary or even just the diffe ## Model -The next step is to create a [model](main_classes/models). The model - also loosely referred to as the architecture - defines what each layer is doing and what operations are happening. Attributes like `num_hidden_layers` from the configuration are used to define the architecture. Every model shares the base class [`PreTrainedModel`] and a few common methods like resizing input embeddings and pruning self-attention heads. In addition, all models are also either a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) or [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. This means models are compatible with each of their respective framework's usage. +The next step is to create a [model](main_classes/models). The model - also loosely referred to as the architecture - defines what each layer is doing and what operations are happening. Attributes like `num_hidden_layers` from the configuration are used to define the architecture. Every model shares the base class [`PreTrainedModel`] and a few common methods like resizing input embeddings and pruning self-attention heads. In addition, all models are also either a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) or [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. This means models are compatible with each of their respective framework's usage. diff --git a/docs/source/en/main_classes/processors.md b/docs/source/en/main_classes/processors.md index 9763122ef4f9..5e943fc9fdd5 100644 --- a/docs/source/en/main_classes/processors.md +++ b/docs/source/en/main_classes/processors.md @@ -86,7 +86,7 @@ This library hosts the processor to load the XNLI data: Please note that since the gold labels are available on the test set, evaluation is performed on the test set. -An example using these processors is given in the [run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/text-classification/run_xnli.py) script. +An example using these processors is given in the [run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_xnli.py) script. 
## SQuAD diff --git a/docs/source/en/main_classes/quantization.md b/docs/source/en/main_classes/quantization.md index 4de6a50d0913..7200039e3f50 100644 --- a/docs/source/en/main_classes/quantization.md +++ b/docs/source/en/main_classes/quantization.md @@ -95,7 +95,7 @@ The benchmark was run on a NVIDIA-A100 instance and the model used was [`TheBlok -You can find the full results together with packages versions in [this link](https://github.com/huggingface/optimum-benchmark/tree/main/examples/running-mistral). +You can find the full results together with packages versions in [this link](https://github.com/huggingface/optimum-benchmark/tree/main/examples/running-mistrals). From the results it appears that AWQ quantization method is the fastest quantization method for inference, text generation and among the lowest peak memory for text generation. However, AWQ seems to have the largest forward latency per batch size. diff --git a/docs/source/en/model_doc/clap.md b/docs/source/en/model_doc/clap.md index 7bfc75e23c35..2bd2814e1b06 100644 --- a/docs/source/en/model_doc/clap.md +++ b/docs/source/en/model_doc/clap.md @@ -27,7 +27,7 @@ The abstract from the paper is the following: *Contrastive learning has shown remarkable success in the field of multimodal representation learning. In this paper, we propose a pipeline of contrastive language-audio pretraining to develop an audio representation by combining audio data with natural language descriptions. To accomplish this target, we first release LAION-Audio-630K, a large collection of 633,526 audio-text pairs from different data sources. Second, we construct a contrastive language-audio pretraining model by considering different audio encoders and text encoders. We incorporate the feature fusion mechanism and keyword-to-caption augmentation into the model design to further enable the model to process audio inputs of variable lengths and enhance the performance. Third, we perform comprehensive experiments to evaluate our model across three tasks: text-to-audio retrieval, zero-shot audio classification, and supervised audio classification. The results demonstrate that our model achieves superior performance in text-to-audio retrieval task. In audio classification tasks, the model achieves state-of-the-art performance in the zeroshot setting and is able to obtain performance comparable to models' results in the non-zero-shot setting. LAION-Audio-6* -This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArtZucker) . +This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArthurZ) . The original code can be found [here](https://github.com/LAION-AI/Clap). ## ClapConfig diff --git a/docs/source/en/model_doc/mobilebert.md b/docs/source/en/model_doc/mobilebert.md index fbd9d34afb94..5c9a230d0d5c 100644 --- a/docs/source/en/model_doc/mobilebert.md +++ b/docs/source/en/model_doc/mobilebert.md @@ -37,7 +37,7 @@ natural language inference tasks of GLUE, MobileBERT achieves a GLUEscore o 77.7 latency on a Pixel 4 phone. On the SQuAD v1.1/v2.0 question answering task, MobileBERT achieves a dev F1 score of 90.0/79.2 (1.5/2.1 higher than BERT_BASE).* -This model was contributed by [vshampor](https://huggingface.co/vshampor). The original code can be found [here](https://github.com/google-research/mobilebert). +This model was contributed by [vshampor](https://huggingface.co/vshampor). 
The original code can be found [here](https://github.com/google-research/google-research/tree/master/mobilebert). ## Usage tips diff --git a/docs/source/en/model_doc/nllb-moe.md b/docs/source/en/model_doc/nllb-moe.md index eb2b7a7da26a..5c283fb3f0e1 100644 --- a/docs/source/en/model_doc/nllb-moe.md +++ b/docs/source/en/model_doc/nllb-moe.md @@ -37,7 +37,7 @@ improvements to counteract overfitting while training on thousands of tasks. Cri a human-translated benchmark, Flores-200, and combined human evaluation with a novel toxicity benchmark covering all languages in Flores-200 to assess translation safety. Our model achieves an improvement of 44% BLEU relative to the previous state-of-the-art, laying important groundwork towards realizing a universal translation system.* -This model was contributed by [Arthur Zucker](https://huggingface.co/ArtZucker). +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/facebookresearch/fairseq). ## Usage tips diff --git a/docs/source/en/model_doc/regnet.md b/docs/source/en/model_doc/regnet.md index 2a8f7e733d85..acd833c77c2d 100644 --- a/docs/source/en/model_doc/regnet.md +++ b/docs/source/en/model_doc/regnet.md @@ -27,7 +27,7 @@ The abstract from the paper is the following: *In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs.* This model was contributed by [Francesco](https://huggingface.co/Francesco). The TensorFlow version of the model -was contributed by [sayakpaul](https://huggingface.com/sayakpaul) and [ariG23498](https://huggingface.com/ariG23498). +was contributed by [sayakpaul](https://huggingface.co/sayakpaul) and [ariG23498](https://huggingface.co/ariG23498). The original code can be found [here](https://github.com/facebookresearch/pycls). The huge 10B model from [Self-supervised Pretraining of Visual Features in the Wild](https://arxiv.org/abs/2103.01988), diff --git a/docs/source/en/model_doc/roberta-prelayernorm.md b/docs/source/en/model_doc/roberta-prelayernorm.md index 000c0a7d2d80..f748e273e8f8 100644 --- a/docs/source/en/model_doc/roberta-prelayernorm.md +++ b/docs/source/en/model_doc/roberta-prelayernorm.md @@ -25,7 +25,7 @@ The abstract from the paper is the following: *fairseq is an open-source sequence modeling toolkit that allows researchers and developers to train custom models for translation, summarization, language modeling, and other text generation tasks. 
The toolkit is based on PyTorch and supports distributed training across multiple GPUs and machines. We also support fast mixed-precision training and inference on modern GPUs.* -This model was contributed by [andreasmaden](https://huggingface.co/andreasmaden). +This model was contributed by [andreasmaden](https://huggingface.co/andreasmadsen). The original code can be found [here](https://github.com/princeton-nlp/DinkyTrain). ## Usage tips diff --git a/docs/source/en/model_doc/switch_transformers.md b/docs/source/en/model_doc/switch_transformers.md index 5080f711ace0..ca6748167f5e 100644 --- a/docs/source/en/model_doc/switch_transformers.md +++ b/docs/source/en/model_doc/switch_transformers.md @@ -27,7 +27,7 @@ The abstract from the paper is the following: *In deep learning, models typically reuse the same parameters for all inputs. Mixture of Experts (MoE) defies this and instead selects different parameters for each incoming example. The result is a sparsely-activated model -- with outrageous numbers of parameters -- but a constant computational cost. However, despite several notable successes of MoE, widespread adoption has been hindered by complexity, communication costs and training instability -- we address these with the Switch Transformer. We simplify the MoE routing algorithm and design intuitive improved models with reduced communication and computational costs. Our proposed training techniques help wrangle the instabilities and we show large sparse models may be trained, for the first time, with lower precision (bfloat16) formats. We design models based off T5-Base and T5-Large to obtain up to 7x increases in pre-training speed with the same computational resources. These improvements extend into multilingual settings where we measure gains over the mT5-Base version across all 101 languages. Finally, we advance the current scale of language models by pre-training up to trillion parameter models on the "Colossal Clean Crawled Corpus" and achieve a 4x speedup over the T5-XXL model.* -This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArtZucker) . +This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/google/flaxformer/tree/main/flaxformer/architectures/moe). ## Usage tips diff --git a/docs/source/en/model_doc/tapas.md b/docs/source/en/model_doc/tapas.md index 78d2f3ee1380..79bbe3e819cf 100644 --- a/docs/source/en/model_doc/tapas.md +++ b/docs/source/en/model_doc/tapas.md @@ -47,7 +47,7 @@ This model was contributed by [nielsr](https://huggingface.co/nielsr). The Tenso ## Usage tips - TAPAS is a model that uses relative position embeddings by default (restarting the position embeddings at every cell of the table). Note that this is something that was added after the publication of the original TAPAS paper. According to the authors, this usually results in a slightly better performance, and allows you to encode longer sequences without running out of embeddings. This is reflected in the `reset_position_index_per_cell` parameter of [`TapasConfig`], which is set to `True` by default. The default versions of the models available on the [hub](https://huggingface.co/models?search=tapas) all use relative position embeddings. 
You can still use the ones with absolute position embeddings by passing in an additional argument `revision="no_reset"` when calling the `from_pretrained()` method. Note that it's usually advised to pad the inputs on the right rather than the left. -- TAPAS is based on BERT, so `TAPAS-base` for example corresponds to a `BERT-base` architecture. Of course, `TAPAS-large` will result in the best performance (the results reported in the paper are from `TAPAS-large`). Results of the various sized models are shown on the [original Github repository](https://github.com/google-research/tapas>). +- TAPAS is based on BERT, so `TAPAS-base` for example corresponds to a `BERT-base` architecture. Of course, `TAPAS-large` will result in the best performance (the results reported in the paper are from `TAPAS-large`). Results of the various sized models are shown on the [original GitHub repository](https://github.com/google-research/tapas). - TAPAS has checkpoints fine-tuned on SQA, which are capable of answering questions related to a table in a conversational set-up. This means that you can ask follow-up questions such as "what is his age?" related to the previous question. Note that the forward pass of TAPAS is a bit different in case of a conversational set-up: in that case, you have to feed every table-question pair one by one to the model, such that the `prev_labels` token type ids can be overwritten by the predicted `labels` of the model to the previous question. See "Usage" section for more info. - TAPAS is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard. Note that TAPAS can be used as an encoder in the EncoderDecoderModel framework, to combine it with an autoregressive text decoder such as GPT-2. diff --git a/docs/source/en/perf_train_gpu_many.md b/docs/source/en/perf_train_gpu_many.md index ecabbcd06f36..1795782949d1 100644 --- a/docs/source/en/perf_train_gpu_many.md +++ b/docs/source/en/perf_train_gpu_many.md @@ -270,7 +270,7 @@ which is discussed next. Implementations: -- [DeepSpeed](https://www.deepspeed.ai/features/#the-zero-redundancy-optimizer) ZeRO-DP stages 1+2+3 +- [DeepSpeed](https://www.deepspeed.ai/tutorials/zero/) ZeRO-DP stages 1+2+3 - [`Accelerate` integration](https://huggingface.co/docs/accelerate/en/usage_guides/deepspeed) - [`transformers` integration](main_classes/trainer#trainer-integrations) @@ -434,7 +434,7 @@ This section is based on the original much more [detailed TP overview](https://g by [@anton-l](https://github.com/anton-l). Alternative names: -- DeepSpeed calls it [tensor slicing](https://www.deepspeed.ai/features/#model-parallelism) +- DeepSpeed calls it [tensor slicing](https://www.deepspeed.ai/training/#model-parallelism) Implementations: - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) has an internal implementation, as it's very model-specific diff --git a/docs/source/en/perf_train_gpu_one.md b/docs/source/en/perf_train_gpu_one.md index 17b62c3a1379..25117241f78f 100644 --- a/docs/source/en/perf_train_gpu_one.md +++ b/docs/source/en/perf_train_gpu_one.md @@ -394,7 +394,7 @@ Choose which backend to use by specifying it via `torch_compile_backend` in the **Inference-only backend**s: * `dynamo.optimize("ofi")` - Uses Torchscript optimize_for_inference. 
[Read more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html) -* `dynamo.optimize("fx2trt")` - Uses Nvidia TensorRT for inference optimizations. [Read more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst) +* `dynamo.optimize("fx2trt")` - Uses NVIDIA TensorRT for inference optimizations. [Read more](https://pytorch.org/TensorRT/tutorials/getting_started_with_fx_path.html) * `dynamo.optimize("onnxrt")` - Uses ONNXRT for inference on CPU/GPU. [Read more](https://onnxruntime.ai/) * `dynamo.optimize("ipex")` - Uses IPEX for inference on CPU. [Read more](https://github.com/intel/intel-extension-for-pytorch) @@ -505,7 +505,7 @@ Most related papers and implementations are built around Tensorflow/TPUs: - [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) - [GLaM: Generalist Language Model (GLaM)](https://ai.googleblog.com/2021/12/more-efficient-in-context-learning-with.html) -And for Pytorch DeepSpeed has built one as well: [DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale](https://arxiv.org/abs/2201.05596), [Mixture of Experts](https://www.deepspeed.ai/tutorials/mixture-of-experts/) - blog posts: [1](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/), [2](https://www.microsoft.com/en-us/research/publication/scalable-and-efficient-moe-training-for-multitask-multilingual-models/) and specific deployment with large transformer-based natural language generation models: [blog post](https://www.deepspeed.ai/news/2021/12/09/deepspeed-moe-nlg.html), [Megatron-Deepspeed branch](Thttps://github.com/microsoft/Megatron-DeepSpeed/tree/moe-training). +And for Pytorch DeepSpeed has built one as well: [DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale](https://arxiv.org/abs/2201.05596), [Mixture of Experts](https://www.deepspeed.ai/tutorials/mixture-of-experts/) - blog posts: [1](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/), [2](https://www.microsoft.com/en-us/research/publication/scalable-and-efficient-moe-training-for-multitask-multilingual-models/) and specific deployment with large transformer-based natural language generation models: [blog post](https://www.deepspeed.ai/2021/12/09/deepspeed-moe-nlg.html), [Megatron-Deepspeed branch](https://github.com/microsoft/Megatron-DeepSpeed/tree/moe-training). ## Using PyTorch native attention and Flash Attention diff --git a/docs/source/en/philosophy.md b/docs/source/en/philosophy.md index cad1e2ccdc8c..628cb39bbb33 100644 --- a/docs/source/en/philosophy.md +++ b/docs/source/en/philosophy.md @@ -64,7 +64,7 @@ A few other goals: The library is built around three types of classes for each model: -- **Model classes** can be PyTorch models ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras models ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) or JAX/Flax models ([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen.html)) that work with the pretrained weights provided in the library. 
+- **Model classes** can be PyTorch models ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras models ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) or JAX/Flax models ([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)) that work with the pretrained weights provided in the library. - **Configuration classes** store the hyperparameters required to build a model (such as the number of layers and hidden size). You don't always need to instantiate these yourself. In particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model). - **Preprocessing classes** convert the raw data into a format accepted by the model. A [tokenizer](main_classes/tokenizer) stores the vocabulary for each model and provide methods for encoding and decoding strings in a list of token embedding indices to be fed to a model. [Image processors](main_classes/image_processor) preprocess vision inputs, [feature extractors](main_classes/feature_extractor) preprocess audio inputs, and a [processor](main_classes/processors) handles multimodal inputs. diff --git a/docs/source/en/sagemaker.md b/docs/source/en/sagemaker.md index f0a5a5f9c114..579caa499c2f 100644 --- a/docs/source/en/sagemaker.md +++ b/docs/source/en/sagemaker.md @@ -26,4 +26,3 @@ The documentation has been moved to [hf.co/docs/sagemaker](https://huggingface.c - [Train Hugging Face models on Amazon SageMaker with the SageMaker Python SDK](https://huggingface.co/docs/sagemaker/train) - [Deploy Hugging Face models to Amazon SageMaker with the SageMaker Python SDK](https://huggingface.co/docs/sagemaker/inference) -- [Frequently Asked Questions](https://huggingface.co/docs/sagemaker/faq) diff --git a/docs/source/es/create_a_model.md b/docs/source/es/create_a_model.md index 04014a7b6a70..5d6349370539 100644 --- a/docs/source/es/create_a_model.md +++ b/docs/source/es/create_a_model.md @@ -109,7 +109,7 @@ También puedes guardar los archivos de configuración como un diccionario; o in ## Modelo -El siguiente paso será crear un [modelo](main_classes/models). El modelo, al que a veces también nos referimos como arquitectura, es el encargado de definir cada capa y qué operaciones se realizan. Los atributos como `num_hidden_layers` de la configuración se usan para definir la arquitectura. Todos los modelos comparten una clase base, [`PreTrainedModel`], y algunos métodos comunes que se pueden usar para redimensionar los _embeddings_ o para recortar cabezas de auto-atención (también llamadas _self-attention heads_). Además, todos los modelos son subclases de [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) o [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module), lo que significa que son compatibles con su respectivo framework. +El siguiente paso será crear un [modelo](main_classes/models). El modelo, al que a veces también nos referimos como arquitectura, es el encargado de definir cada capa y qué operaciones se realizan. Los atributos como `num_hidden_layers` de la configuración se usan para definir la arquitectura. 
Todos los modelos comparten una clase base, [`PreTrainedModel`], y algunos métodos comunes que se pueden usar para redimensionar los _embeddings_ o para recortar cabezas de auto-atención (también llamadas _self-attention heads_). Además, todos los modelos son subclases de [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) o [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html), lo que significa que son compatibles con su respectivo framework. diff --git a/docs/source/es/sagemaker.md b/docs/source/es/sagemaker.md index a874aefe76f6..9bc5b7410841 100644 --- a/docs/source/es/sagemaker.md +++ b/docs/source/es/sagemaker.md @@ -26,4 +26,3 @@ La documentación ha sido trasladada a [hf.co/docs/sagemaker](https://huggingfac - [Entrenar modelos de Hugging Face en Amazon SageMaker con SageMaker Python SDK](https://huggingface.co/docs/sagemaker/train) - [Desplegar modelos de Hugging Face en Amazon SageMaker con SageMaker Python SDK](https://huggingface.co/docs/sagemaker/inference) -- [Preguntas Frecuentes](https://huggingface.co/docs/sagemaker/faq) diff --git a/docs/source/it/create_a_model.md b/docs/source/it/create_a_model.md index c32040d7d389..75055beb9271 100644 --- a/docs/source/it/create_a_model.md +++ b/docs/source/it/create_a_model.md @@ -109,7 +109,7 @@ Puoi anche salvare il file di configurazione come dizionario oppure come la diff ## Modello -Il prossimo passo e di creare [modello](main_classes/models). Il modello - vagamente riferito anche come architettura - definisce cosa ogni strato deve fare e quali operazioni stanno succedendo. Attributi come `num_hidden_layers` provenienti dalla configurazione sono usati per definire l'architettura. Ogni modello condivide la classe base [`PreTrainedModel`] e alcuni metodi comuni come il ridimensionamento degli input embeddings e la soppressione delle self-attention heads . Inoltre, tutti i modelli sono la sottoclasse di [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) o [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module). Cio significa che i modelli sono compatibili con l'uso di ciascun di framework. +Il prossimo passo e di creare [modello](main_classes/models). Il modello - vagamente riferito anche come architettura - definisce cosa ogni strato deve fare e quali operazioni stanno succedendo. Attributi come `num_hidden_layers` provenienti dalla configurazione sono usati per definire l'architettura. Ogni modello condivide la classe base [`PreTrainedModel`] e alcuni metodi comuni come il ridimensionamento degli input embeddings e la soppressione delle self-attention heads . Inoltre, tutti i modelli sono la sottoclasse di [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) o [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html). Cio significa che i modelli sono compatibili con l'uso di ciascun di framework. 
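The configuration-to-model flow described in these pages can be sketched in a few lines; DistilBERT is used here purely as an example, and the hyperparameter values are arbitrary.

```python
import torch
from transformers import DistilBertConfig, DistilBertModel

# Architecture hyperparameters live in the configuration object.
config = DistilBertConfig(n_layers=4, n_heads=8, dim=256, hidden_dim=1024)

# Building the model from the config creates the architecture with randomly
# initialized weights; the result is an ordinary torch.nn.Module, so the usual
# PyTorch workflow (to(), eval(), state_dict(), ...) applies.
model = DistilBertModel(config)

print(isinstance(model, torch.nn.Module))  # True
print(model.config.n_layers)               # 4
```

Loading pretrained weights instead is a one-liner, `DistilBertModel.from_pretrained("distilbert-base-uncased")`, in which case the matching configuration is resolved automatically.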
diff --git a/docs/source/ja/create_a_model.md b/docs/source/ja/create_a_model.md index d39ceba528d7..086108733419 100644 --- a/docs/source/ja/create_a_model.md +++ b/docs/source/ja/create_a_model.md @@ -114,7 +114,7 @@ Once you are satisfied with your model configuration, you can save it with [`Pre 次のステップは、[モデル](main_classes/models)を作成することです。モデル(アーキテクチャとも緩く言われることがあります)は、各レイヤーが何をしているか、どの操作が行われているかを定義します。構成からの `num_hidden_layers` のような属性はアーキテクチャを定義するために使用されます。 すべてのモデルは [`PreTrainedModel`] をベースクラスとし、入力埋め込みのリサイズやセルフアテンションヘッドのプルーニングなど、共通のメソッドがいくつかあります。 -さらに、すべてのモデルは [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)、または [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module) のいずれかのサブクラスでもあります。つまり、モデルはそれぞれのフレームワークの使用法と互換性があります。 +さらに、すべてのモデルは [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)、または [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) のいずれかのサブクラスでもあります。つまり、モデルはそれぞれのフレームワークの使用法と互換性があります。 diff --git a/docs/source/ja/main_classes/processors.md b/docs/source/ja/main_classes/processors.md index bd459758aa17..63b94af6ea43 100644 --- a/docs/source/ja/main_classes/processors.md +++ b/docs/source/ja/main_classes/processors.md @@ -86,7 +86,7 @@ QQP、QNLI、RTE、WNLI。 テストセットにはゴールドラベルが付いているため、評価はテストセットで行われますのでご了承ください。 -これらのプロセッサを使用する例は、[run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/text-classification/run_xnli.py) スクリプトに示されています。 +これらのプロセッサを使用する例は、[run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_xnli.py) スクリプトに示されています。 ## SQuAD diff --git a/docs/source/ja/perf_train_gpu_many.md b/docs/source/ja/perf_train_gpu_many.md index adc25c04c6cd..fd7713c49369 100644 --- a/docs/source/ja/perf_train_gpu_many.md +++ b/docs/source/ja/perf_train_gpu_many.md @@ -242,7 +242,7 @@ ZeROがモデルの重みを分割する方法に注意を払うと、これは Implementations: -- [DeepSpeed](https://www.deepspeed.ai/features/#the-zero-redundancy-optimizer) ZeRO-DP stages 1+2+3 +- [DeepSpeed](https://www.deepspeed.ai/tutorials/zero/) ZeRO-DP stages 1+2+3 - [`transformers` integration](main_classes/trainer#trainer-integrations) @@ -360,7 +360,7 @@ by [@anton-l](https://github.com/anton-l)。 SageMakerは、より効率的な処理のためにTPとDPを組み合わせて使用します。 代替名: -- [DeepSpeed](https://github.com/microsoft/DeepSpeed)はこれを「テンソルスライシング」と呼びます。詳細は[DeepSpeedの特徴](https://www.deepspeed.ai/features/#model-parallelism)をご覧ください。 +- [DeepSpeed](https://github.com/microsoft/DeepSpeed)はこれを「テンソルスライシング」と呼びます。詳細は[DeepSpeedの特徴](https://www.deepspeed.ai/training/#model-parallelism)をご覧ください。 実装例: - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM)には、モデル固有の内部実装があります。 diff --git a/docs/source/ja/perf_train_gpu_one.md b/docs/source/ja/perf_train_gpu_one.md index 1361fbf1c629..773ecbfc7703 100644 --- a/docs/source/ja/perf_train_gpu_one.md +++ b/docs/source/ja/perf_train_gpu_one.md @@ -323,7 +323,7 @@ training_args = TrainingArguments(torch_compile=True, **default_args) **推論専用バックエンド**: * `dynamo.optimize("ofi")` - Torchscriptの`optimize_for_inference`を使用します。 [詳細はこちら](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html) -* `dynamo.optimize("fx2trt")` - Nvidia TensorRTを使用した推論の最適化にNvidia TensorRTを使用します。 [詳細はこちら](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst) +* 
`dynamo.optimize("fx2trt")` - Nvidia TensorRTを使用した推論の最適化にNvidia TensorRTを使用します。 [詳細はこちら](https://pytorch.org/TensorRT/tutorials/getting_started_with_fx_path.html) * `dynamo.optimize("onnxrt")` - CPU/GPUでの推論にONNX Runtimeを使用します。 [詳細はこちら](https://onnxruntime.ai/) * `dynamo.optimize("ipex")` - CPUでの推論にIPEXを使用します。 [詳細はこちら](https://github.com/intel/intel-extension-for-pytorch) @@ -411,7 +411,7 @@ PyTorchの[pipとcondaビルド](https://pytorch.org/get-started/locally/#start- - [Switch Transformers: シンプルで効率的なスパース性を備えたトリリオンパラメータモデルへのスケーリング](https://arxiv.org/abs/2101.03961) - [GLaM: Generalist Language Model (GLaM)](https://ai.googleblog.com/2021/12/more-efficient-in-context-learning-with.html) -PytorchにはDeepSpeedが構築したものもあります: [DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale](https://arxiv.org/abs/2201.05596)、[Mixture of Experts](https://www.deepspeed.ai/tutorials/mixture-of-experts/) - ブログ記事: [1](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/)、[2](https://www.microsoft.com/en-us/research/publication/scalable-and-efficient-moe-training-for-multitask-multilingual-models/)、大規模なTransformerベースの自然言語生成モデルの具体的な展開については、[ブログ記事](https://www.deepspeed.ai/news/2021/12/09/deepspeed-moe-nlg.html)、[Megatron-Deepspeedブランチ](Thttps://github.com/microsoft/Megatron-DeepSpeed/tree/moe-training)を参照してください。 +PytorchにはDeepSpeedが構築したものもあります: [DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale](https://arxiv.org/abs/2201.05596)、[Mixture of Experts](https://www.deepspeed.ai/tutorials/mixture-of-experts/) - ブログ記事: [1](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/)、[2](https://www.microsoft.com/en-us/research/publication/scalable-and-efficient-moe-training-for-multitask-multilingual-models/)、大規模なTransformerベースの自然言語生成モデルの具体的な展開については、[ブログ記事](https://www.deepspeed.ai/2021/12/09/deepspeed-moe-nlg.html)、[Megatron-Deepspeedブランチ](https://github.com/microsoft/Megatron-DeepSpeed/tree/moe-training)を参照してください。 ## PyTorchネイティブアテンションとFlash Attentionの使用 diff --git a/docs/source/ja/philosophy.md b/docs/source/ja/philosophy.md index 3e359aa4a51d..3edef0bd2add 100644 --- a/docs/source/ja/philosophy.md +++ b/docs/source/ja/philosophy.md @@ -56,7 +56,7 @@ rendered properly in your Markdown viewer. 
このライブラリは、各モデルについて次の3つのタイプのクラスを中心に構築されています: -- **モデルクラス**は、ライブラリで提供される事前トレーニング済みの重みと互換性のあるPyTorchモデル([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module))、Kerasモデル([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model))またはJAX/Flaxモデル([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen.html))を使用できます。 +- **モデルクラス**は、ライブラリで提供される事前トレーニング済みの重みと互換性のあるPyTorchモデル([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module))、Kerasモデル([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model))またはJAX/Flaxモデル([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html))を使用できます。 - **構成クラス**は、モデルを構築するために必要なハイパーパラメータを格納します(層の数や隠れ層のサイズなど)。これらを自分でインスタンス化する必要はありません。特に、変更を加えずに事前トレーニング済みモデルを使用している場合、モデルを作成すると自動的に構成がインスタンス化されるようになります(これはモデルの一部です)。 - **前処理クラス**は、生データをモデルが受け入れる形式に変換します。[トークナイザ](main_classes/tokenizer)は各モデルの語彙を保存し、文字列をトークン埋め込みのインデックスのリストにエンコードおよびデコードするためのメソッドを提供します。[イメージプロセッサ](main_classes/image_processor)はビジョン入力を前処理し、[特徴抽出器](main_classes/feature_extractor)はオーディオ入力を前処理し、[プロセッサ](main_classes/processors)はマルチモーダル入力を処理します。 diff --git a/docs/source/ko/create_a_model.md b/docs/source/ko/create_a_model.md index 8c7be3291e24..62a118563f1c 100644 --- a/docs/source/ko/create_a_model.md +++ b/docs/source/ko/create_a_model.md @@ -110,7 +110,7 @@ configuration 파일을 딕셔너리로 저장하거나 사용자 정의 configu ## 모델[[model]] -다음 단계는 [모델(model)](main_classes/models)을 만드는 것입니다. 느슨하게 아키텍처라고도 불리는 모델은 각 계층이 수행하는 동작과 발생하는 작업을 정의합니다. configuration의 `num_hidden_layers`와 같은 속성은 아키텍처를 정의하는 데 사용됩니다. 모든 모델은 기본 클래스 [`PreTrainedModel`]과 입력 임베딩 크기 조정 및 셀프 어텐션 헤드 가지 치기와 같은 몇 가지 일반적인 메소드를 공유합니다. 또한 모든 모델은 [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 또는 [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module)의 서브클래스이기도 합니다. 즉, 모델은 각 프레임워크의 사용법과 호환됩니다. +다음 단계는 [모델(model)](main_classes/models)을 만드는 것입니다. 느슨하게 아키텍처라고도 불리는 모델은 각 계층이 수행하는 동작과 발생하는 작업을 정의합니다. configuration의 `num_hidden_layers`와 같은 속성은 아키텍처를 정의하는 데 사용됩니다. 모든 모델은 기본 클래스 [`PreTrainedModel`]과 입력 임베딩 크기 조정 및 셀프 어텐션 헤드 가지 치기와 같은 몇 가지 일반적인 메소드를 공유합니다. 또한 모든 모델은 [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 또는 [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)의 서브클래스이기도 합니다. 즉, 모델은 각 프레임워크의 사용법과 호환됩니다. diff --git a/docs/source/ko/perf_train_gpu_many.md b/docs/source/ko/perf_train_gpu_many.md index d5e1f83239b6..9d80fd65727d 100644 --- a/docs/source/ko/perf_train_gpu_many.md +++ b/docs/source/ko/perf_train_gpu_many.md @@ -240,7 +240,7 @@ ZeRO가 모델 가중치를 분할하는 방식을 자세히 살펴보면, 텐 구현: -- [DeepSpeed](https://www.deepspeed.ai/features/#the-zero-redundancy-optimizer)는 1단계 + 2단계 + 3단계의 ZeRO-DP를 제공합니다. +- [DeepSpeed](https://www.deepspeed.ai/tutorials/zero/)는 1단계 + 2단계 + 3단계의 ZeRO-DP를 제공합니다. - [Fairscale](https://github.com/facebookresearch/fairscale/#optimizer-state-sharding-zero)은 1단계 + 2단계 + 3단계의 ZeRO-DP를 제공합니다. - [`transformers` 통합](main_classes/trainer#trainer-integrations) @@ -362,7 +362,7 @@ Megatron 논문의 표기법을 따라 행렬의 점곱 부분을 `Y = GeLU(XA)` SageMaker는 더 효율적인 처리를 위해 TP와 DP를 결합합니다. 대체 이름: -- DeepSpeed는 이를 [텐서 슬라이싱](https://www.deepspeed.ai/features/#model-parallelism)이라고 부릅니다. +- DeepSpeed는 이를 [텐서 슬라이싱](https://www.deepspeed.ai/training/#model-parallelism)이라고 부릅니다. 
구현: - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM)은 내부 구현을 가지고 있으므로 모델에 매우 특화되어 있습니다. diff --git a/docs/source/ko/philosophy.md b/docs/source/ko/philosophy.md index 94b6c46f60e2..e303709a11b8 100644 --- a/docs/source/ko/philosophy.md +++ b/docs/source/ko/philosophy.md @@ -54,7 +54,7 @@ rendered properly in your Markdown viewer. 이 라이브러리는 각 모델에 대해 세 가지 유형의 클래스를 기반으로 구축되었습니다: -- **모델 클래스**는 라이브러리에서 제공하는 사전 훈련된 가중치와 함께 작동하는 PyTorch 모델([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras 모델([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)), JAX/Flax 모델([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen.html))일 수 있습니다. +- **모델 클래스**는 라이브러리에서 제공하는 사전 훈련된 가중치와 함께 작동하는 PyTorch 모델([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras 모델([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)), JAX/Flax 모델([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html))일 수 있습니다. - **구성 클래스**는 모델을 구축하는 데 필요한 하이퍼파라미터(예: 레이어 수 및 은닉 크기)를 저장합니다. 구성 클래스를 직접 인스턴스화할 필요는 없습니다. 특히, 수정 없이 고 사전 학습된 모델을 사용하는 경우 모델을 생성하면 모델의 일부인 구성을 자동으로 인스턴스화됩니다. - **전처리 클래스**는 원시 데이터를 모델이 수용하는 형식으로 변환합니다. [Tokenizer](main_classes/tokenizer)는 각 모델의 어휘를 저장하고, 문자열을 토큰 임베딩 인덱스 리스트로 인코딩하고 디코딩하기 위한 메소드를 제공합니다. [Image processors](main_classes/image_processor)는 비전 입력을 전처리하고, [feature extractors](main_classes/feature_extractor)는 오디오 입력을 전처리하며, [processor](main_classes/processors)는 멀티모달 입력을 처리합니다. diff --git a/docs/source/ko/sagemaker.md b/docs/source/ko/sagemaker.md index f612435d3c1a..18aafc28a161 100644 --- a/docs/source/ko/sagemaker.md +++ b/docs/source/ko/sagemaker.md @@ -26,4 +26,3 @@ rendered properly in your Markdown viewer. - [Train Hugging Face models on Amazon SageMaker with the SageMaker Python SDK](https://huggingface.co/docs/sagemaker/train) - [Deploy Hugging Face models to Amazon SageMaker with the SageMaker Python SDK](https://huggingface.co/docs/sagemaker/inference) -- [Frequently Asked Questions](https://huggingface.co/docs/sagemaker/faq) diff --git a/docs/source/pt/create_a_model.md b/docs/source/pt/create_a_model.md index 8c53752d6cf8..fd1e9c8f39ad 100644 --- a/docs/source/pt/create_a_model.md +++ b/docs/source/pt/create_a_model.md @@ -109,7 +109,7 @@ Você pode também salvar seu arquivo de configurações como um dicionário ou ## Modelo -O próximo passo é criar um [model](main_classes/models). O modelo - também vagamente referido como arquitetura - define o que cada camada está fazendo e quais operações estão acontecendo. Atributos como `num_hidden_layers` das configurações são utilizados para definir a arquitetura. Todo modelo compartilha a classe base [`PreTrainedModel`] e alguns métodos em comum como redimensionar o tamanho dos embeddings de entrada e podar as 'self-attention heads'. Além disso, todos os modelos também são subclasses de [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) ou [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module). Isso significa que os modelos são compatíveis com cada respectivo uso de framework. +O próximo passo é criar um [model](main_classes/models). O modelo - também vagamente referido como arquitetura - define o que cada camada está fazendo e quais operações estão acontecendo. 
Atributos como `num_hidden_layers` das configurações são utilizados para definir a arquitetura. Todo modelo compartilha a classe base [`PreTrainedModel`] e alguns métodos em comum como redimensionar o tamanho dos embeddings de entrada e podar as 'self-attention heads'. Além disso, todos os modelos também são subclasses de [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) ou [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html). Isso significa que os modelos são compatíveis com cada respectivo uso de framework. diff --git a/docs/source/zh/create_a_model.md b/docs/source/zh/create_a_model.md index b934708074d3..9b36d5397626 100644 --- a/docs/source/zh/create_a_model.md +++ b/docs/source/zh/create_a_model.md @@ -110,7 +110,7 @@ DistilBertConfig { ## 模型 -接下来,创建一个[模型](main_classes/models)。模型,也可泛指架构,定义了每一层网络的行为以及进行的操作。配置中的 `num_hidden_layers` 等属性用于定义架构。每个模型都共享基类 [`PreTrainedModel`] 和一些常用方法,例如调整输入嵌入的大小和修剪自注意力头。此外,所有模型都是 [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 或 [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module) 的子类。这意味着模型与各自框架的用法兼容。 +接下来,创建一个[模型](main_classes/models)。模型,也可泛指架构,定义了每一层网络的行为以及进行的操作。配置中的 `num_hidden_layers` 等属性用于定义架构。每个模型都共享基类 [`PreTrainedModel`] 和一些常用方法,例如调整输入嵌入的大小和修剪自注意力头。此外,所有模型都是 [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 或 [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) 的子类。这意味着模型与各自框架的用法兼容。 diff --git a/src/transformers/models/albert/modeling_flax_albert.py b/src/transformers/models/albert/modeling_flax_albert.py index 55fd9d5a4c91..ba054cb17932 100644 --- a/src/transformers/models/albert/modeling_flax_albert.py +++ b/src/transformers/models/albert/modeling_flax_albert.py @@ -86,9 +86,10 @@ class FlaxAlbertForPreTrainingOutput(ModelOutput): This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/beit/modeling_flax_beit.py b/src/transformers/models/beit/modeling_flax_beit.py index 0f0dc809e680..c1da64d263a2 100644 --- a/src/transformers/models/beit/modeling_flax_beit.py +++ b/src/transformers/models/beit/modeling_flax_beit.py @@ -69,9 +69,10 @@ class FlaxBeitModelOutputWithPooling(FlaxBaseModelOutputWithPooling): This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/bert/modeling_flax_bert.py b/src/transformers/models/bert/modeling_flax_bert.py index bb2af0e0602a..d99b908a0738 100644 --- a/src/transformers/models/bert/modeling_flax_bert.py +++ b/src/transformers/models/bert/modeling_flax_bert.py @@ -93,9 +93,10 @@ class FlaxBertForPreTrainingOutput(ModelOutput): This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/big_bird/modeling_flax_big_bird.py b/src/transformers/models/big_bird/modeling_flax_big_bird.py index c6d8b7c1612e..94eabdec451d 100644 --- a/src/transformers/models/big_bird/modeling_flax_big_bird.py +++ b/src/transformers/models/big_bird/modeling_flax_big_bird.py @@ -122,9 +122,10 @@ class FlaxBigBirdForQuestionAnsweringModelOutput(ModelOutput): This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/clip/modeling_flax_clip.py b/src/transformers/models/clip/modeling_flax_clip.py index 5aeaa5d960a7..bae7097a8c9d 100644 --- a/src/transformers/models/clip/modeling_flax_clip.py +++ b/src/transformers/models/clip/modeling_flax_clip.py @@ -43,9 +43,10 @@ This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/distilbert/modeling_flax_distilbert.py b/src/transformers/models/distilbert/modeling_flax_distilbert.py index 24e2c7e3987e..3ba34eb9b202 100644 --- a/src/transformers/models/distilbert/modeling_flax_distilbert.py +++ b/src/transformers/models/distilbert/modeling_flax_distilbert.py @@ -48,9 +48,10 @@ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/regnet/modeling_flax_regnet.py b/src/transformers/models/regnet/modeling_flax_regnet.py index 9fef1868d60a..fc4258257bdb 100644 --- a/src/transformers/models/regnet/modeling_flax_regnet.py +++ b/src/transformers/models/regnet/modeling_flax_regnet.py @@ -47,9 +47,10 @@ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/resnet/modeling_flax_resnet.py b/src/transformers/models/resnet/modeling_flax_resnet.py index c76da498710d..07c07e95115b 100644 --- a/src/transformers/models/resnet/modeling_flax_resnet.py +++ b/src/transformers/models/resnet/modeling_flax_resnet.py @@ -42,9 +42,10 @@ This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/roberta/modeling_flax_roberta.py b/src/transformers/models/roberta/modeling_flax_roberta.py index 6bc72f12b407..9cace4118182 100644 --- a/src/transformers/models/roberta/modeling_flax_roberta.py +++ b/src/transformers/models/roberta/modeling_flax_roberta.py @@ -78,9 +78,10 @@ def create_position_ids_from_input_ids(input_ids, padding_idx): This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py index e98897993742..7fc73e530db4 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py @@ -80,9 +80,10 @@ def create_position_ids_from_input_ids(input_ids, padding_idx): This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. 
Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/roformer/modeling_flax_roformer.py b/src/transformers/models/roformer/modeling_flax_roformer.py index d95a4d73832e..cb7c2e4bb313 100644 --- a/src/transformers/models/roformer/modeling_flax_roformer.py +++ b/src/transformers/models/roformer/modeling_flax_roformer.py @@ -59,9 +59,10 @@ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py index 12453fde9812..f38b6b931f5a 100644 --- a/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py @@ -52,9 +52,10 @@ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it + as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/vit/modeling_flax_vit.py b/src/transformers/models/vit/modeling_flax_vit.py index 1ab2671efd75..586c8b62f6da 100644 --- a/src/transformers/models/vit/modeling_flax_vit.py +++ b/src/transformers/models/vit/modeling_flax_vit.py @@ -38,9 +38,10 @@ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. 
Finally, this model supports inherent JAX features such as: diff --git a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py index fb03c390f6f4..e197add6d75d 100644 --- a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py @@ -88,9 +88,10 @@ def create_position_ids_from_input_ids(input_ids, padding_idx): This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. Finally, this model supports inherent JAX features such as: diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py index 63b5d83d308a..6cccf46eeb62 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py @@ -63,7 +63,7 @@ generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) - This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module + This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: diff --git a/templates/adding_a_new_model/open_model_proposals/ADD_BIG_BIRD.md b/templates/adding_a_new_model/open_model_proposals/ADD_BIG_BIRD.md index 9c45c5b07f9d..be10dadc0beb 100644 --- a/templates/adding_a_new_model/open_model_proposals/ADD_BIG_BIRD.md +++ b/templates/adding_a_new_model/open_model_proposals/ADD_BIG_BIRD.md @@ -7,7 +7,7 @@ Begin: 12.02.2020 Estimated End: 19.03.2020 -Contributor: [Vasudev](https://github.com/vasudevgupta7) +Contributor: [Vasudev](https://github.com/thevasudevgupta) Adding a new model is often difficult and requires an in-depth knowledge of the 🤗 Transformers library and ideally also of the model's original From da7ea9a4e337eb2eed204090fe38198418c01134 Mon Sep 17 00:00:00 2001 From: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Date: Tue, 7 Nov 2023 10:04:23 +0000 Subject: [PATCH 093/268] [Whisper] Block language/task args for English-only (#27322) * [Whisper] Block language/task args for English-only * Update src/transformers/models/whisper/modeling_whisper.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../models/whisper/modeling_whisper.py | 16 ++++++++ ..._pipelines_automatic_speech_recognition.py | 38 +++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index a107adf74e16..ad54d51b73f3 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -1841,6 +1841,22 @@ def generate( else: generation_config.return_timestamps = False + if is_multilingual is not None: + if not hasattr(generation_config, "is_multilingual"): + raise ValueError( + "The generation config is outdated and is thus not compatible with the `is_multilingual` argument " + "to `generate`. Please update the generation config as per the instructions " + "https://github.com/huggingface/transformers/issues/25084#issuecomment-1664398224" + ) + generation_config.is_multilingual = is_multilingual + + if hasattr(generation_config, "is_multilingual") and not generation_config.is_multilingual: + if task is not None or language is not None: + raise ValueError( + "Cannot specify `task` or `language` for an English-only model. If the model is intended to be " + "multilingual, pass `is_multilingual=True` to generate, or update the generation config." + ) + if language is not None: if not hasattr(generation_config, "lang_to_id"): raise ValueError( diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py index 0343c32939d0..ea62198e2e13 100644 --- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py +++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py @@ -852,6 +852,44 @@ def test_simple_whisper_translation(self): output_3 = speech_translator(filename) self.assertEqual(output_3, {"text": " Un uomo ha detto all'universo, Sir, esiste."}) + @slow + @require_torch + def test_whisper_language(self): + speech_recognizer = pipeline( + task="automatic-speech-recognition", + model="openai/whisper-tiny.en", + framework="pt", + ) + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + filename = ds[0]["file"] + + # 1. 
English-only model compatible with no language argument + output = speech_recognizer(filename) + self.assertEqual( + output, + {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."}, + ) + + # 2. English-only Whisper does not accept the language argument + with self.assertRaisesRegex( + ValueError, + "Cannot specify `task` or `langauge` for an English-only model. If the model is intended to be multilingual, " + "pass `is_multilingual=True` to generate, or update the generation config.", + ): + _ = speech_recognizer(filename, generate_kwargs={"language": "en"}) + + # 3. Multilingual model accepts language argument + speech_recognizer = pipeline( + task="automatic-speech-recognition", + model="openai/whisper-tiny", + framework="pt", + ) + output = speech_recognizer(filename, generate_kwargs={"language": "en"}) + self.assertEqual( + output, + {"text": " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel."}, + ) + @slow @require_torch @require_torchaudio From 26d8d5f211a6b1d4755f9221dba4568b00a80d4d Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:21:04 +0100 Subject: [PATCH 094/268] Fix autoawq docker image (#27339) * Update Dockerfile * Update docker/transformers-all-latest-gpu/Dockerfile --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index e2056caed4cc..0557faffec9e 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -56,7 +56,7 @@ RUN python3 -m pip install --no-cache-dir auto-gptq --extra-index-url https://hu RUN python3 -m pip install --no-cache-dir einops # Add autoawq for quantization testing -RUN python3 -m pip install --no-cache-dir autoawq +RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.1.6/autoawq-0.1.6+cu118-cp38-cp38-linux_x86_64.whl # For bettertransformer + gptq RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum From 90b4adc1f1111f42eada62ea611895646aaee6b6 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 7 Nov 2023 12:08:28 +0000 Subject: [PATCH 095/268] Generate: skip tests on unsupported models instead of passing (#27265) --- tests/generation/test_utils.py | 62 +++++++++++++--------------------- 1 file changed, 24 insertions(+), 38 deletions(-) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 7e2f242c6fd6..7531502be289 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -749,8 +749,7 @@ def test_greedy_generate_dict_outputs_use_cache(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() if not hasattr(config, "use_cache"): - # only relevant if model has "use_cache" - return + self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True @@ -983,8 +982,7 @@ def test_beam_search_generate_dict_outputs_use_cache(self): config.forced_eos_token_id = None if not hasattr(config, "use_cache"): - # only relevant if model has "use_cache" - return + self.skipTest("This model doesn't support caching") model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: @@ -1420,13 +1418,13 @@ def test_contrastive_generate(self): for model_class in 
self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): - return + self.skipTest("Won't fix: old model with different cache format") config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): - return + self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True @@ -1441,14 +1439,14 @@ def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): - return + self.skipTest("Won't fix: old model with different cache format") # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): - return + self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True @@ -1472,18 +1470,16 @@ def test_contrastive_generate_dict_outputs_use_cache(self): def test_contrastive_generate_low_memory(self): # Check that choosing 'low_memory' does not change the model output for model_class in self.all_generative_model_classes: - # won't fix: FSMT, Reformer, gptbigcode, and speech2text have a different cache variable type (and format). - if any( - model_name in model_class.__name__.lower() - for model_name in ["fsmt", "reformer", "gptbigcode", "speech2text"] - ): - return + if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer", "speech2text"]): + self.skipTest("Won't fix: old model with different cache format") + if any(model_name in model_class.__name__.lower() for model_name in ["gptbigcode"]): + self.skipTest("TODO: fix me") config, input_ids, attention_mask, max_length = self._get_input_ids_and_config(batch_size=1) # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): - return + self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True @@ -1510,8 +1506,6 @@ def test_contrastive_generate_low_memory(self): ) self.assertListEqual(low_output.tolist(), high_output.tolist()) - return - @slow # TODO(Joao): remove this. Some models (e.g. data2vec, xcom, roberta) have an error rate between 1 and 10%. def test_assisted_decoding_matches_greedy_search(self): # This test ensures that the assisted generation does not introduce output changes over greedy search. @@ -1522,15 +1516,13 @@ def test_assisted_decoding_matches_greedy_search(self): # - assisted_decoding does not support `batch_size > 1` for model_class in self.all_generative_model_classes: - # won't fix: FSMT and Reformer have a different cache variable type (and format). 
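# Illustration, not part of this patch: a bare `return` makes a test count as PASSED even though
# nothing was verified, whereas `self.skipTest(...)` surfaces it as SKIPPED in the report.
# Minimal self-contained sketch with plain unittest:
import unittest


class SkipVsReturnExample(unittest.TestCase):
    def test_with_return(self):
        return  # reported as passed, silently hiding that no assertion ran

    def test_with_skiptest(self):
        self.skipTest("This model doesn't support caching")  # reported as skipped


if __name__ == "__main__":
    unittest.main()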
if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): - return - # may fix in the future: the following models fail with assisted decoding, and need model-specific fixes + self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in ["bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet"] ): - return + self.skipTest("May fix in the future: need model-specific fixes") # This for loop is a naive and temporary effort to make the test less flaky. failed = 0 @@ -1540,7 +1532,7 @@ def test_assisted_decoding_matches_greedy_search(self): # NOTE: assisted generation only works with cache on at the moment. if not hasattr(config, "use_cache"): - return + self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True @@ -1587,24 +1579,21 @@ def test_assisted_decoding_matches_greedy_search(self): def test_assisted_decoding_sample(self): # Seeded assisted decoding will not match sample for the same seed, as the forward pass does not return the # exact same logits (the forward pass of the main model, now with several tokens at once, has causal masking). - for model_class in self.all_generative_model_classes: - # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): - return - # may fix in the future: the following models fail with assisted decoding, and need model-specific fixes + self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in ["bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t"] ): - return + self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. if not hasattr(config, "use_cache"): - return + self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True @@ -1716,7 +1705,7 @@ def test_past_key_values_format(self): # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): - return + self.skipTest("This model doesn't support caching") model = model_class(config).to(torch_device) if "use_cache" not in inputs: @@ -1725,7 +1714,7 @@ def test_past_key_values_format(self): # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: - return + self.skipTest("This model doesn't return `past_key_values`") num_hidden_layers = ( getattr(config, "decoder_layers", None) @@ -1832,18 +1821,15 @@ def test_generate_from_inputs_embeds_decoder_only(self): def test_generate_continue_from_past_key_values(self): # Tests that we can continue generating from past key values, returned from a previous `generate` call for model_class in self.all_generative_model_classes: - # won't fix: old models with unique inputs/caches/others if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]): - return - # may fix in the future: needs modeling or test input preparation fixes for compatibility + self.skipTest("Won't fix: old model with unique inputs/caches/other") if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): - return + self.skipTest("TODO: needs modeling or test input preparation fixes for compatibility") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() - # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): - return + self.skipTest("This model doesn't support caching") # Let's make it always: # 1. use cache (for obvious reasons) @@ -1862,10 +1848,10 @@ def test_generate_continue_from_past_key_values(self): model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 model.generation_config.forced_eos_token_id = None - # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) + # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format) outputs = model(**inputs) if "past_key_values" not in outputs: - return + self.skipTest("This model doesn't return `past_key_values`") # Traditional way of generating text, with `return_dict_in_generate` to return the past key values outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4, return_dict_in_generate=True) From 606d90845ff149ee08629edab95b576f788243a0 Mon Sep 17 00:00:00 2001 From: Xabier de Zuazo Date: Tue, 7 Nov 2023 13:39:42 +0100 Subject: [PATCH 096/268] Fix Whisper Conversion Script: Correct decoder_attention_heads and _download function (#26834) * Fix error in convert_openai_to_hf.py: "_download() missing 1 required positional argument: root" * Fix error in convert_openai_to_hf.py: "TypeError: byte indices must be integers or slices, not str" * Fix decoder_attention_heads value in convert_openai_to_hf.py. Correct the assignment for `decoder_attention_heads` in the conversion script for the Whisper model. * Black reformat convert_openai_to_hf.py file. * Fix Whisper model configuration defaults (for Tiny). - Correct encoder/decoder layers and attention heads count. - Update model width (`d_model`) to 384. * Add docstring to the convert_openai_to_hf.py script with a doctest * Add shebang and +x permission to the convert_openai_to_hf.py * convert_openai_to_hf.py: reuse the read model_bytes in the _download() function * Move convert_openai_to_hf.py doctest example to whisper.md * whisper.md: Add an inference example to the Conversion section. 
* whisper.md: remove `model.config.forced_decoder_ids` from examples (deprecated) * whisper.md: Remove "## Format Conversion" section; not used by users * whisper.md: Use librispeech_asr_dummy dataset and load_dataset() --- docs/source/en/model_doc/whisper.md | 36 +++++++++++++++++++ .../models/whisper/configuration_whisper.py | 20 +++++------ .../models/whisper/convert_openai_to_hf.py | 14 +++++--- 3 files changed, 55 insertions(+), 15 deletions(-) mode change 100644 => 100755 src/transformers/models/whisper/convert_openai_to_hf.py diff --git a/docs/source/en/model_doc/whisper.md b/docs/source/en/model_doc/whisper.md index 4ea7e943813b..15f9e91137be 100644 --- a/docs/source/en/model_doc/whisper.md +++ b/docs/source/en/model_doc/whisper.md @@ -34,6 +34,42 @@ The original code can be found [here](https://github.com/openai/whisper). - Inference is currently only implemented for short-form i.e. audio is pre-segmented into <=30s segments. Long-form (including timestamps) will be implemented in a future release. - One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted ID's back into text. +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). +The original code can be found [here](https://github.com/openai/whisper). + +## Inference + +Here is a step-by-step guide to transcribing an audio sample using a pre-trained Whisper model: + +```python +>>> from datasets import load_dataset +>>> from transformers import WhisperProcessor, WhisperForConditionalGeneration + +>>> # Select an audio file and read it: +>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") +>>> audio_sample = ds[0]["audio"] +>>> waveform = audio_sample["array"] +>>> sampling_rate = audio_sample["sampling_rate"] + +>>> # Load the Whisper model in Hugging Face format: +>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") +>>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + +>>> # Use the model and processor to transcribe the audio: +>>> input_features = processor( +... waveform, sampling_rate=sampling_rate, return_tensors="pt" +... ).input_features + +>>> # Generate token ids +>>> predicted_ids = model.generate(input_features) + +>>> # Decode token ids to text +>>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) + +>>> transcription[0] +' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' +``` + ## WhisperConfig [[autodoc]] WhisperConfig diff --git a/src/transformers/models/whisper/configuration_whisper.py b/src/transformers/models/whisper/configuration_whisper.py index a8bbc9718f11..6ff5e529b196 100644 --- a/src/transformers/models/whisper/configuration_whisper.py +++ b/src/transformers/models/whisper/configuration_whisper.py @@ -77,13 +77,13 @@ class WhisperConfig(PretrainedConfig): num_mel_bins (`int`, *optional*, defaults to 80): Number of mel features used per input features. Should correspond to the value used in the `WhisperProcessor` class. - encoder_layers (`int`, *optional*, defaults to 6): + encoder_layers (`int`, *optional*, defaults to 4): Number of encoder layers. - decoder_layers (`int`, *optional*, defaults to 6): + decoder_layers (`int`, *optional*, defaults to 4): Number of decoder layers. 
- encoder_attention_heads (`int`, *optional*, defaults to 4): + encoder_attention_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. - decoder_attention_heads (`int`, *optional*, defaults to 4): + decoder_attention_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer decoder. encoder_ffn_dim (`int`, *optional*, defaults to 1536): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. @@ -106,7 +106,7 @@ class WhisperConfig(PretrainedConfig): activation_function (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. - d_model (`int`, *optional*, defaults to 256): + d_model (`int`, *optional*, defaults to 384): Dimensionality of the layers. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. @@ -197,10 +197,10 @@ def __init__( self, vocab_size=51865, num_mel_bins=80, - encoder_layers=6, - encoder_attention_heads=4, - decoder_layers=6, - decoder_attention_heads=4, + encoder_layers=4, + encoder_attention_heads=6, + decoder_layers=4, + decoder_attention_heads=6, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, @@ -209,7 +209,7 @@ def __init__( use_cache=True, is_encoder_decoder=True, activation_function="gelu", - d_model=256, + d_model=384, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, diff --git a/src/transformers/models/whisper/convert_openai_to_hf.py b/src/transformers/models/whisper/convert_openai_to_hf.py old mode 100644 new mode 100755 index 3e7d42634bad..6eb7e0f233c8 --- a/src/transformers/models/whisper/convert_openai_to_hf.py +++ b/src/transformers/models/whisper/convert_openai_to_hf.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python +"""Converts a Whisper model in OpenAI format to Hugging Face format.""" # Copyright 2022 The HuggingFace Inc. team and the OpenAI team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +16,7 @@ import argparse import hashlib +import io import os import urllib import warnings @@ -90,7 +93,7 @@ def make_linear_from_emb(emb): return lin_layer -def _download(url: str, root: str) -> bytes: +def _download(url: str, root: str) -> io.BytesIO: os.makedirs(root, exist_ok=True) filename = os.path.basename(url) @@ -103,7 +106,7 @@ def _download(url: str, root: str) -> bytes: if os.path.isfile(download_target): model_bytes = open(download_target, "rb").read() if hashlib.sha256(model_bytes).hexdigest() == expected_sha256: - return model_bytes + return torch.load(io.BytesIO(model_bytes)) else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") @@ -125,12 +128,13 @@ def _download(url: str, root: str) -> bytes: "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) - return model_bytes + return torch.load(io.BytesIO(model_bytes)) def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): if ".pt" not in checkpoint_path: - original_checkpoint = _download(_MODELS[checkpoint_path]) + root = os.path.dirname(pytorch_dump_folder_path) or "." 
+ original_checkpoint = _download(_MODELS[checkpoint_path], root) else: original_checkpoint = torch.load(checkpoint_path, map_location="cpu") dimensions = original_checkpoint["dims"] @@ -151,7 +155,7 @@ def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], - decoder_attention_heads=dimensions["n_text_state"], + decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], ) From 0ded281557f88add8733aa536763fae61207b382 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 7 Nov 2023 19:24:01 +0530 Subject: [PATCH 097/268] [`FA2`] Add flash attention for `GPT-Neo` (#26486) * added flash attention for gpt-neo * small change Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * readme updated * . * changes * removed padding_mask * Update src/transformers/models/gpt_neo/modeling_gpt_neo.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docs/source/en/model_doc/gpt_neo.md | 40 +++ .../models/gpt_neo/modeling_gpt_neo.py | 253 ++++++++++++++++-- 2 files changed, 269 insertions(+), 24 deletions(-) diff --git a/docs/source/en/model_doc/gpt_neo.md b/docs/source/en/model_doc/gpt_neo.md index 6a598ef6a961..fb2385bc73db 100644 --- a/docs/source/en/model_doc/gpt_neo.md +++ b/docs/source/en/model_doc/gpt_neo.md @@ -54,6 +54,46 @@ The `generate()` method can be used to generate text using GPT Neo model. >>> gen_text = tokenizer.batch_decode(gen_tokens)[0] ``` +## Combining GPT-Neo and Flash Attention 2 + +First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature. + +```bash +pip install -U flash-attn --no-build-isolation +``` + +Make also sure that you have a hardware that is compatible with Flash-Attention 2. Read more about it in the official documentation of flash-attn repository. Make also sure to load your model in half-precision (e.g. `torch.float16``) + +To load and run a model using Flash Attention 2, refer to the snippet below: + +```python +>>> import torch +>>> from transformers import AutoModelForCausalLM, AutoTokenizer +>>> device = "cuda" # the device to load the model onto + +>>> model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B", torch_dtype=torch.float16, use_flash_attention_2=True) +>>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B") + +>>> prompt = "def hello_world():" + +>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device) +>>> model.to(device) + +>>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True) +>>> tokenizer.batch_decode(generated_ids)[0] +"def hello_world():\n >>> run_script("hello.py")\n >>> exit(0)\n<|endoftext|>" +``` + +### Expected speedups + +Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using `EleutherAI/gpt-neo-2.7B` checkpoint and the Flash Attention 2 version of the model. 
+Note that for GPT-Neo it is not possible to train / run on very long context as the max [position embeddings](https://huggingface.co/EleutherAI/gpt-neo-2.7B/blob/main/config.json#L58) is limited to 2048 - but this is applicable to all gpt-neo models and not specific to FA-2
+
+<!-- expected speedup diagram: native vs. Flash Attention 2 inference time (image not reproduced here) -->
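As a rough way to reproduce such a comparison locally, one could time generation with and without Flash Attention 2. This is only a sketch and not part of the patch: the prompt, batch size and token budget are arbitrary assumptions, and it needs a CUDA GPU with enough memory for the 2.7B checkpoint.

```python
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "EleutherAI/gpt-neo-2.7B"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
inputs = tokenizer(["def hello_world():"] * 4, return_tensors="pt").to("cuda")

for use_fa2 in (False, True):
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint, torch_dtype=torch.float16, use_flash_attention_2=use_fa2
    ).to("cuda")
    torch.cuda.synchronize()
    start = time.perf_counter()
    model.generate(**inputs, max_new_tokens=64, do_sample=False)
    torch.cuda.synchronize()
    print("flash_attention_2" if use_fa2 else "eager", f"{time.perf_counter() - start:.2f}s")
    del model
    torch.cuda.empty_cache()
```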
+ + ## Resources - [Text classification task guide](../tasks/sequence_classification) diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 90ca265a8220..aa1f1295157d 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -19,11 +19,13 @@ from typing import Optional, Tuple, Union import torch +import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, @@ -34,10 +36,28 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_torch_fx_available, + logging, +) from .configuration_gpt_neo import GPTNeoConfig +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. +# It means that the function will not be traced through and simply appear as a node in the graph. +if is_torch_fx_available(): + _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) + + logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "GPTNeoConfig" @@ -50,6 +70,19 @@ _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B" +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path): """Load tf checkpoints in a pytorch model""" try: @@ -133,6 +166,7 @@ def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path): class GPTNeoSelfAttention(nn.Module): def __init__(self, config, attention_type): super().__init__() + self.config = config max_positions = config.max_position_embeddings bias = torch.tril(torch.ones((max_positions, max_positions), dtype=bool)).view( @@ -150,6 +184,7 @@ def __init__(self, config, attention_type): self.attn_dropout = nn.Dropout(float(config.attention_dropout)) self.resid_dropout = nn.Dropout(float(config.resid_dropout)) + self.is_causal = True self.embed_dim = config.hidden_size self.num_heads = config.num_heads @@ -253,6 +288,185 @@ def forward( return outputs # a, present, (attentions) +class GPTNeoFlashAttention2(GPTNeoSelfAttention): + """ + GPTNeo flash attention module. This module inherits from `GPTNeoSelfAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. 
+ """ + + def forward( + self, + hidden_states, + attention_mask=None, + layer_past=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + bsz, _, _ = hidden_states.size() + + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + query_length = query.shape[2] + tgt_len = key.shape[2] + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim) + key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + + attn_dropout = self.config.attention_dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + + if query.dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query = query.to(target_dtype) + key = key.to(target_dtype) + value = value.to(target_dtype) + + attn_output = self._flash_attention_forward( + query, key, value, attention_mask, query_length, dropout=attn_dropout, softmax_scale=1.0 + ) + + attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim) + attn_output = self.out_proj(attn_weights_reshaped) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights_reshaped,) + + return outputs + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. 
+ + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + class GPTNeoAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() @@ -261,7 +475,11 @@ def __init__(self, config, layer_id=0): self.attention_type = self.attention_layers[layer_id] if self.attention_type in ["global", "local"]: - self.attention = GPTNeoSelfAttention(config, self.attention_type) + self.attention = ( + GPTNeoSelfAttention(config, self.attention_type) + if not getattr(config, "_flash_attn_2_enabled", False) + else GPTNeoFlashAttention2(config, self.attention_type) + ) else: raise NotImplementedError( "Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: " @@ -364,6 +582,7 @@ class GPTNeoPreTrainedModel(PreTrainedModel): supports_gradient_checkpointing = True _no_split_modules = ["GPTNeoBlock"] _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @@ -524,10 +743,8 @@ def forward( self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) - batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] - batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") @@ -546,26 +763,6 @@ def forward( position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) - # Attention mask. - if attention_mask is not None: - if batch_size <= 0: - raise ValueError("batch_size has to be defined and > 0") - attention_mask = attention_mask.view(batch_size, -1) - # We create a 3D attention mask from a 2D tensor mask. - # Sizes are [batch_size, 1, 1, to_seq_length] - # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] - # this attention mask is more simple than the triangular masking of causal attention - # used in OpenAI GPT, we just need to prepare the broadcast dimension here. - attention_mask = attention_mask[:, None, None, :] - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and the dtype's smallest value for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility - attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min - # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_heads x N x N @@ -577,6 +774,14 @@ def forward( position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds + # Attention mask. 
+ if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_length) + if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds From 88832c01c8a962b653874c4ce4ed8df5783ac5cd Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Tue, 7 Nov 2023 15:07:55 +0100 Subject: [PATCH 098/268] [`Whisper`] Add conversion script for the tokenizer (#27338) * draft * updates * full conversion taken from `https://gist.github.com/xenova/a452a6474428de0182b17605a98631ee` * psuh * nits * updates * more nits * Add co author Co-authored-by: Joshua Lochner * fixup * cleanup * styling * add proper path * update * nits * don't push the exit * clean * update whisper doc * don't error out if tiktoken is not here * make sure we are BC with conversion * nit * Update docs/source/en/model_doc/whisper.md Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * merge and update * update markdwon * Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> --------- Co-authored-by: Joshua Lochner Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docs/source/en/model_doc/whisper.md | 9 +- .../models/whisper/convert_openai_to_hf.py | 119 +++++++++++++++++- .../models/whisper/tokenization_whisper.py | 4 +- 3 files changed, 128 insertions(+), 4 deletions(-) diff --git a/docs/source/en/model_doc/whisper.md b/docs/source/en/model_doc/whisper.md index 15f9e91137be..8d73a5655fdf 100644 --- a/docs/source/en/model_doc/whisper.md +++ b/docs/source/en/model_doc/whisper.md @@ -34,8 +34,13 @@ The original code can be found [here](https://github.com/openai/whisper). - Inference is currently only implemented for short-form i.e. audio is pre-segmented into <=30s segments. Long-form (including timestamps) will be implemented in a future release. - One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted ID's back into text. -This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). -The original code can be found [here](https://github.com/openai/whisper). +- To convert the tokenizer, we recommend using the following: + +```bash +python src/transformers/models/whisper/convert_openai_to_hf.py --checkpoint_path "" --pytorch_dump_folder_path "Arthur/whisper-3" --convert_tokenizer True --whisper_version 3 --multilingual True +``` +Here the `whisper_version` will set the number of languages to `100` to account for `cantonese` which was added in `whisper-large-v3`. 
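A quick sanity check of the converted tokenizer could look as follows. This is a sketch rather than part of the patch: `"Arthur/whisper-3"` is simply the example `--pytorch_dump_folder_path` used above, and `<|yue|>` is the Cantonese language token that the 100-language vocabulary is expected to contain.

```python
from transformers import WhisperTokenizer

tokenizer = WhisperTokenizer.from_pretrained("Arthur/whisper-3")  # folder written by the conversion script
print(len(tokenizer))                              # vocabulary size including the added special tokens
print(tokenizer.convert_tokens_to_ids("<|yue|>"))  # should resolve to a real id rather than the unk token
```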
+ ## Inference diff --git a/src/transformers/models/whisper/convert_openai_to_hf.py b/src/transformers/models/whisper/convert_openai_to_hf.py index 6eb7e0f233c8..1d016b598439 100755 --- a/src/transformers/models/whisper/convert_openai_to_hf.py +++ b/src/transformers/models/whisper/convert_openai_to_hf.py @@ -17,7 +17,9 @@ import argparse import hashlib import io +import json import os +import tempfile import urllib import warnings @@ -25,7 +27,9 @@ from torch import nn from tqdm import tqdm -from transformers import WhisperConfig, WhisperForConditionalGeneration +from transformers import WhisperConfig, WhisperForConditionalGeneration, WhisperTokenizer +from transformers.models.whisper.tokenization_whisper import LANGUAGES, bytes_to_unicode +from transformers.utils.import_utils import _is_package_available _MODELS = { @@ -41,6 +45,11 @@ "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } +_TOKENIZERS = { + "multilingual": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/multilingual.tiktoken", + "english": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/gpt2.tiktoken", +} + def remove_ignore_keys_(state_dict): ignore_keys = ["layers", "blocks"] @@ -178,11 +187,119 @@ def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): model.save_pretrained(pytorch_dump_folder_path) +# Adapted from https://github.com/openai/tiktoken/issues/60#issuecomment-1499977960 +def _bpe(mergeable_ranks, token: bytes, max_rank=None) -> list[bytes]: + parts = [bytes([b]) for b in token] + while True: + min_idx = None + min_rank = None + for i, pair in enumerate(zip(parts[:-1], parts[1:])): + rank = mergeable_ranks.get(pair[0] + pair[1]) + if rank is not None and (min_rank is None or rank < min_rank): + min_idx = i + min_rank = rank + if min_rank is None or (max_rank is not None and min_rank >= max_rank): + break + assert min_idx is not None + parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2 :] + return parts + + +def convert_tiktoken_bpe_to_hf(tiktoken_url: str): + bpe_ranks = load_tiktoken_bpe(tiktoken_url) + byte_encoder = bytes_to_unicode() + + def token_bytes_to_string(b): + return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")]) + + merges = [] + vocab = {} + for token, rank in bpe_ranks.items(): + vocab[token_bytes_to_string(token)] = rank + if len(token) == 1: + continue + merged = tuple(_bpe(bpe_ranks, token, max_rank=rank)) + if len(merged) == 2: # account for empty token + merges.append(" ".join(map(token_bytes_to_string, merged))) + return vocab, merges + + +def convert_tiktoken_to_hf( + pytorch_dump_folder_path: str, multilingual: bool = True, num_languages: int = 100, time_precision=0.02 +) -> WhisperTokenizer: + # requires whisper, unless we use the path to the tiktoken file + tiktoken_tokenizer_path = _TOKENIZERS["multilingual" if multilingual else "english"] + start_of_transcript = ["<|endoftext|>", "<|startoftranscript|>"] + control_tokens = [ + "<|translate|>", + "<|transcribe|>", + "<|startoflm|>", + "<|startofprev|>", + "<|nocaptions|>", + "<|notimestamps|>", + ] + # these are special tokens, not normalized + language_tokens = [f"<|{k}|>" for k in list(LANGUAGES)[:num_languages]] + # These are not special but normalized + timestamp_tokens = [("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)] + + vocab, merges = convert_tiktoken_bpe_to_hf(tiktoken_tokenizer_path) + + with 
tempfile.TemporaryDirectory() as tmpdirname: + vocab_file = f"{tmpdirname}/vocab.json" + merge_file = f"{tmpdirname}/merges.txt" + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens in merges: + writer.write(bpe_tokens + "\n") + + hf_tokenizer = WhisperTokenizer(vocab_file, merge_file) + + hf_tokenizer.add_tokens(start_of_transcript + language_tokens + control_tokens, special_tokens=True) + hf_tokenizer.add_tokens(timestamp_tokens, special_tokens=False) + hf_tokenizer.save_pretrained(pytorch_dump_folder_path) + + if __name__ == "__main__": parser = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") + parser.add_argument( + "--convert_tokenizer", + type=bool, + default=False, + help="Whether or not the tokenizer should be converted along with the model.", + ) + parser.add_argument( + "--whisper_version", + type=int, + default=2, + help="Version of the whisper release", + ) + parser.add_argument( + "--multilingual", + type=bool, + default="store_true", + help="Whether or not the model is multilingual or english only", + ) args = parser.parse_args() + if args.convert_tokenizer: + try: + if not _is_package_available("tiktoken"): + raise """`tiktoken` is not installed, use `pip install tiktoken` to convert the tokenizer""" + except Exception: + pass + else: + from tiktoken.load import load_tiktoken_bpe + + NUM_LANGUAGES_PER_RELEASE = {1: 99, 2: 99, 3: 100} + convert_tiktoken_to_hf( + args.pytorch_dump_folder_path, args.multilingual, NUM_LANGUAGES_PER_RELEASE[args.whisper_version] + ) + convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path) diff --git a/src/transformers/models/whisper/tokenization_whisper.py b/src/transformers/models/whisper/tokenization_whisper.py index 3fa1fe2755c2..a54103ccef8f 100644 --- a/src/transformers/models/whisper/tokenization_whisper.py +++ b/src/transformers/models/whisper/tokenization_whisper.py @@ -191,6 +191,7 @@ def get_pairs(word): "ba": "bashkir", "jw": "javanese", "su": "sundanese", + "yue": "cantonese", } # language code lookup by name, with a few language aliases @@ -207,6 +208,7 @@ def get_pairs(word): "moldovan": "ro", "sinhalese": "si", "castilian": "es", + "mandarin": "zh", } TASK_IDS = ["translate", "transcribe"] @@ -1206,7 +1208,7 @@ def _combine_tokens_into_words( if language is None: language = "english" - if language in {"chinese", "japanese", "thai", "lao", "myanmar"}: + if language in {"chinese", "japanese", "thai", "lao", "myanmar", "cantonese"}: # These languages don't typically use spaces. words, word_tokens, token_indices = _split_tokens_on_unicode(tokenizer, tokens) else: From 9459d821d15575943bcacedcc32835c9459bf39b Mon Sep 17 00:00:00 2001 From: Chi Date: Tue, 7 Nov 2023 21:27:48 +0530 Subject: [PATCH 099/268] Remove a redundant variable. (#27288) * Removed the redundant SiLUActivation class and now use nn.functional.silu directly. * I apologize for adding torch.functional.silu. I have replaced it with nn.SiLU. 
* Remove redundant variable in feature_extraction file --- src/transformers/pipelines/feature_extraction.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/transformers/pipelines/feature_extraction.py b/src/transformers/pipelines/feature_extraction.py index b8b5eafeb760..5fc6a128e7ea 100644 --- a/src/transformers/pipelines/feature_extraction.py +++ b/src/transformers/pipelines/feature_extraction.py @@ -77,8 +77,7 @@ def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_ten return preprocess_params, {}, postprocess_params def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]: - return_tensors = self.framework - model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs) + model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs) return model_inputs def _forward(self, model_inputs): From 8c91f15ae576a5f33559aa243218199697195279 Mon Sep 17 00:00:00 2001 From: Folco Bertini Baldassini <46280006+folbaeni@users.noreply.github.com> Date: Tue, 7 Nov 2023 17:26:15 +0100 Subject: [PATCH 100/268] Resolve AttributeError by utilizing device calculation at the start of the forward function (#27347) This commit addresses the 'NoneType' object AttributeError within the IdeficsModel forward function. Previously, the 'device' attribute was accessed directly from input_ids, resulting in a potential 'NoneType' error. Now, the device is properly calculated at the beginning of the forward function and utilized consistently throughout, ensuring the 'image_hidden_states' are derived from the correct device. This modification enables smoother processing and compatibility, ensuring the correct device attribution for 'image_encoder_embeddings' in the IdeficsModel forward pass. --- src/transformers/models/idefics/modeling_idefics.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index aba0b43f695b..f7881ddd39ed 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -1161,7 +1161,6 @@ def forward( position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) elif position_ids is None: - device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) @@ -1186,7 +1185,7 @@ def forward( elif image_encoder_embeddings is not None: batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size() - image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=input_ids.device) + image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device) image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size) if self.config.use_resampler: From cc9f27bb1e80bd45a9c47481e719641aab3bbcd8 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 7 Nov 2023 22:54:43 +0530 Subject: [PATCH 101/268] Remove padding_masks from `gpt_bigcode`. 
(#27348) Update modeling_gpt_bigcode.py --- .../models/gpt_bigcode/modeling_gpt_bigcode.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index 14db9efb4bca..533c471aa95f 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -235,16 +235,10 @@ def forward( encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, - **kwargs, ) -> Union[ Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], ]: - if "padding_mask" in kwargs: - logger.warning_once( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - if encoder_hidden_states is not None: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( @@ -308,19 +302,10 @@ def forward( encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, - **kwargs, ) -> Union[ Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], ]: - if "padding_mask" in kwargs: - logger.warning_once( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - - # overwrite attention_mask with padding_mask - attention_mask = kwargs.pop("padding_mask") - if encoder_hidden_states is not None: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( From 8f840edd31d933230bc191d4a8b5aa5c00b5a4a0 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Tue, 7 Nov 2023 18:43:26 +0100 Subject: [PATCH 102/268] [`Whisper`] Nit converting the tokenizer (#27349) * `nospeech` instead of `nocaption` for the no speech token * oups --- src/transformers/models/whisper/convert_openai_to_hf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/whisper/convert_openai_to_hf.py b/src/transformers/models/whisper/convert_openai_to_hf.py index 1d016b598439..0db555c4aab2 100755 --- a/src/transformers/models/whisper/convert_openai_to_hf.py +++ b/src/transformers/models/whisper/convert_openai_to_hf.py @@ -235,7 +235,7 @@ def convert_tiktoken_to_hf( "<|transcribe|>", "<|startoflm|>", "<|startofprev|>", - "<|nocaptions|>", + "<|nospeech|>", "<|notimestamps|>", ] # these are special tokens, not normalized From ac5d4cf6de24b4f7fa92996e92d1d71dd5411a6a Mon Sep 17 00:00:00 2001 From: Yoach Lacombe <52246514+ylacombe@users.noreply.github.com> Date: Tue, 7 Nov 2023 18:32:00 +0000 Subject: [PATCH 103/268] FIx Bark batching feature (#27271) * fix bark batching * make style * add tests and make style --- src/transformers/models/bark/modeling_bark.py | 65 +++++++++++++++---- tests/models/bark/test_modeling_bark.py | 31 +++++++++ 2 files changed, 82 insertions(+), 14 deletions(-) diff --git a/src/transformers/models/bark/modeling_bark.py b/src/transformers/models/bark/modeling_bark.py index 9b3870e82505..2c04f15c0456 100644 --- a/src/transformers/models/bark/modeling_bark.py +++ b/src/transformers/models/bark/modeling_bark.py @@ -909,8 +909,9 @@ def generate( coarse_generation_config: BarkCoarseGenerationConfig = None, codebook_size: int = 1024, history_prompt: 
Optional[Dict[str, torch.Tensor]] = None, + return_output_lengths: Optional[bool] = None, **kwargs, - ) -> torch.LongTensor: + ) -> Union[torch.LongTensor, Tuple[torch.LongTensor, torch.LongTensor]]: """ Generates coarse acoustics tokens from input text semantic tokens and an additional optional `Bark` speaker prompt. @@ -926,8 +927,14 @@ def generate( Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. + return_output_lengths (`bool`, *optional*): + Whether or not to return the output lengths. Useful when batching. Returns: - torch.LongTensor: Output coarse acoustics tokens. + By default: + torch.LongTensor: Output coarse acoustics tokens. + If `return_output_lengths=True`: + `Tuple(torch.Tensor, torch.Tensor): The output coarse acoustics tokens, and the length of each sample + of the batch. """ if semantic_generation_config is None: @@ -954,13 +961,13 @@ def generate( ) max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) - # beware, depends on the seq_len of the longest sequence of the batch. - # Also, the seq_len might be one token too long because of an added - # pad_token as compared to Bark original implementation. - max_generated_len = np.floor( - semantic_output.shape[1] * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks + output_lengths = (semantic_output != coarse_generation_config.coarse_semantic_pad_token).sum(1) + output_lengths = torch.floor( + output_lengths * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks ) - max_generated_len = int(round(max_generated_len * coarse_generation_config.n_coarse_codebooks)) + output_lengths = torch.round(output_lengths * coarse_generation_config.n_coarse_codebooks).int() + + max_generated_len = torch.max(output_lengths).item() batch_size = semantic_output.shape[0] @@ -1026,6 +1033,9 @@ def generate( coarse_output = x_coarse[:, len_coarse_history:] + if return_output_lengths: + return coarse_output, output_lengths + return coarse_output @@ -1502,13 +1512,21 @@ def enable_cpu_offload(self, gpu_id: Optional[int] = 0): # We'll offload the last model manually. self.codec_model_hook = hook - def codec_decode(self, fine_output): + def codec_decode(self, fine_output, output_lengths=None): """Turn quantized audio codes into audio array using encodec.""" fine_output = fine_output.transpose(0, 1) emb = self.codec_model.quantizer.decode(fine_output) - out = self.codec_model.decoder(emb) - audio_arr = out.squeeze(1) # squeeze the codebook dimension + + if output_lengths is not None: + # encodec uses LSTMs which behaves differently with appended padding + # decoding with encodec takes around 0.1% of the total generation time + # to keep generation quality, we break batching + out = [sample[:, :l].unsqueeze(0) for (sample, l) in zip(emb, output_lengths)] + audio_arr = [self.codec_model.decoder(sample).squeeze() for sample in out] + else: + out = self.codec_model.decoder(emb) + audio_arr = out.squeeze(1) # squeeze the codebook dimension return audio_arr @@ -1517,6 +1535,7 @@ def generate( self, input_ids: Optional[torch.Tensor] = None, history_prompt: Optional[Dict[str, torch.Tensor]] = None, + return_output_lengths: Optional[bool] = None, **kwargs, ) -> torch.LongTensor: """ @@ -1535,9 +1554,15 @@ def generate( semantic, coarse and fine respectively. It has the priority over the keywords without a prefix. 
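# Illustration, not part of this patch; the checkpoint, prompts and voice preset are arbitrary
# assumptions. Batched generation that uses the returned lengths to trim the padded waveforms:
from transformers import AutoProcessor, BarkModel

processor = AutoProcessor.from_pretrained("suno/bark-small")
model = BarkModel.from_pretrained("suno/bark-small")

inputs = processor(["Hello world", "A somewhat longer second prompt"], voice_preset="v2/en_speaker_6")
audio, lengths = model.generate(**inputs, return_output_lengths=True)
trimmed = [audio[i, : lengths[i]] for i in range(audio.shape[0])]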
This means you can, for example, specify a generation strategy for all sub-models except one. + return_output_lengths (`bool`, *optional*): + Whether or not to return the waveform lengths. Useful when batching. Returns: - torch.LongTensor: Output generated audio. - + By default: + - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. + When `return_output_lengths=True`: + Returns a tuple made of: + - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. + - **output_lengths** (`torch.Tensor` of shape (batch_size)): The length of each waveform in the batch Example: ```python @@ -1603,9 +1628,16 @@ def generate( semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, codebook_size=self.generation_config.codebook_size, + return_output_lengths=return_output_lengths, **kwargs_coarse, ) + output_lengths = None + if return_output_lengths: + coarse_output, output_lengths = coarse_output + # (batch_size, seq_len*coarse_codebooks) -> (batch_size, seq_len) + output_lengths = output_lengths // coarse_generation_config.n_coarse_codebooks + # 3. "generate" from the fine model output = self.fine_acoustics.generate( coarse_output, @@ -1625,10 +1657,15 @@ def generate( self.codec_model = self.codec_model.to(self.device) # 4. Decode the output and generate audio array - audio = self.codec_decode(output) + audio = self.codec_decode(output, output_lengths) if getattr(self, "codec_model_hook", None) is not None: # Offload codec_model to CPU self.codec_model_hook.offload() + if return_output_lengths: + output_lengths = [len(sample) for sample in audio] + audio = nn.utils.rnn.pad_sequence(audio, batch_first=True, padding_value=0) + return audio, output_lengths + return audio diff --git a/tests/models/bark/test_modeling_bark.py b/tests/models/bark/test_modeling_bark.py index 4186a72628e3..bf13203ecd40 100644 --- a/tests/models/bark/test_modeling_bark.py +++ b/tests/models/bark/test_modeling_bark.py @@ -1067,6 +1067,37 @@ def test_generate_end_to_end_with_args(self): self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6) self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4) + @slow + def test_generate_batching(self): + args = {"do_sample": False, "temperature": None} + + s1 = "I love HuggingFace" + s2 = "In the light of the moon, a little egg lay on a leaf" + voice_preset = "en_speaker_6" + input_ids = self.processor([s1, s2], voice_preset=voice_preset).to(torch_device) + + # generate in batch + outputs, audio_lengths = self.model.generate(**input_ids, **args, return_output_lengths=True) + + # generate one-by-one + s1 = self.processor(s1, voice_preset=voice_preset).to(torch_device) + s2 = self.processor(s2, voice_preset=voice_preset).to(torch_device) + output1 = self.model.generate(**s1, **args) + output2 = self.model.generate(**s2, **args) + + # up until the coarse acoustic model (included), results are the same + # the fine acoustic model introduces small differences + # first verify if same length (should be the same because it's decided in the coarse model) + self.assertEqual(tuple(audio_lengths), (output1.shape[1], output2.shape[1])) + + # then assert almost equal + self.assertTrue(torch.allclose(outputs[0, : audio_lengths[0]], output1.squeeze(), atol=2e-3)) + self.assertTrue(torch.allclose(outputs[1, : audio_lengths[1]], output2.squeeze(), atol=2e-3)) + + # now test single input with return_output_lengths = True + outputs, _ 
= self.model.generate(**s1, **args, return_output_lengths=True) + self.assertTrue((outputs == output1).all().item()) + @slow def test_generate_end_to_end_with_sub_models_args(self): input_ids = self.inputs From 7e1eff7600085814eac65876d4d8a0e38c2f6ccc Mon Sep 17 00:00:00 2001 From: Plemeur <37846989+Plemeur@users.noreply.github.com> Date: Wed, 8 Nov 2023 06:40:00 +0900 Subject: [PATCH 104/268] Allow scheduler parameters (#26480) * Allow for scheduler kwargs * Formatting * Arguments checks, passing the tests * Black failed somehow --------- Co-authored-by: Pierre --- src/transformers/optimization.py | 14 +++++++++++++- src/transformers/trainer.py | 1 + src/transformers/training_args.py | 10 ++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/transformers/optimization.py b/src/transformers/optimization.py index 5734b6e9cd58..124813b22abb 100644 --- a/src/transformers/optimization.py +++ b/src/transformers/optimization.py @@ -337,6 +337,7 @@ def get_scheduler( optimizer: Optimizer, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, + scheduler_specific_kwargs: Optional[dict] = None, ): """ Unified API to get any scheduler from its name. @@ -352,6 +353,9 @@ def get_scheduler( num_training_steps (`int``, *optional*): The number of training steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. + scheduler_specific_kwargs (`dict`, *optional*): + Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler + parameters will cause the scheduler function to raise a TypeError. """ name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] @@ -372,7 +376,15 @@ def get_scheduler( if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") - return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) + if scheduler_specific_kwargs is None: + scheduler_specific_kwargs = {} + + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + **scheduler_specific_kwargs, + ) class AdamW(Optimizer): diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 7af453249269..40159d816348 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1137,6 +1137,7 @@ def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optim optimizer=self.optimizer if optimizer is None else optimizer, num_warmup_steps=self.args.get_warmup_steps(num_training_steps), num_training_steps=num_training_steps, + **self.args.lr_scheduler_kwargs, ) self._created_lr_scheduler = True return self.lr_scheduler diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index aaedc83528a9..7ec6e56ff505 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -238,6 +238,8 @@ class TrainingArguments: when all data is exhausted lr_scheduler_type (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. + lr_scheduler_kwargs ('dict', *optional*, defaults to {}): + The extra arguments for the lr_scheduler. See the documentation of each scheduler for possible values. 
warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): @@ -729,6 +731,14 @@ class TrainingArguments: default="linear", metadata={"help": "The scheduler type to use."}, ) + lr_scheduler_kwargs: Optional[Dict] = field( + default_factory=dict, + metadata={ + "help": ( + "Extra parameters for the lr_scheduler such as {'num_cycles': 1} for the cosine with hard restarts" + ) + }, + ) warmup_ratio: float = field( default=0.0, metadata={"help": "Linear warmup over warmup_ratio fraction of total steps."} ) From f213d5dd8cea1eb31d9b44dbdf268e4265a6d338 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B9=9D=E6=98=AF=E5=90=A6=E9=9A=8F=E6=84=8F=E7=9A=84?= =?UTF-8?q?=E7=A7=B0=E5=91=BC?= <34758215+ZouJiu1@users.noreply.github.com> Date: Wed, 8 Nov 2023 07:31:51 +0800 Subject: [PATCH 105/268] translate the en tokenizer_summary.md to Chinese (#27291) * translate the en tokenizer_summary.md to Chinese * revise WordPiece * add to source/zh/_toctree.yml --- docs/source/zh/_toctree.yml | 2 + docs/source/zh/tokenizer_summary.md | 234 ++++++++++++++++++++++++++++ 2 files changed, 236 insertions(+) create mode 100644 docs/source/zh/tokenizer_summary.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 05ed5de31d99..e12364ac8de4 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -41,4 +41,6 @@ - sections: - local: task_summary title: 🤗Transformers能做什么 + - local: tokenizer_summary + title: 分词器的摘要 title: 概念指南 \ No newline at end of file diff --git a/docs/source/zh/tokenizer_summary.md b/docs/source/zh/tokenizer_summary.md new file mode 100644 index 000000000000..d3a4cf7a3305 --- /dev/null +++ b/docs/source/zh/tokenizer_summary.md @@ -0,0 +1,234 @@ + + +# 分词器的摘要 +[[open-in-colab]] + +在这个页面,我们来仔细研究分词的知识。 + + +正如我们在[the preprocessing tutorial](preprocessing)所看到的那样,对文本进行分词就是将一段文本分割成很多单词或者子单词, +这些单词或者子单词然后会通过一个查询表格被转换到id,将单词或者子单词转换到id是很直截了当的,也就是一个简单的映射, +所以这么来看,我们主要关注将一段文本分割成很多单词或者很多子单词(像:对一段文本进行分词),更加准确的来说,我们将关注 +在🤗 Transformers内用到的三种主要类型的分词器:[Byte-Pair Encoding (BPE)](#byte-pair-encoding), [WordPiece](#wordpiece), +and [SentencePiece](#sentencepiece),并且给出了示例,哪个模型用到了哪种类型的分词器。 + +注意到在每个模型的主页,你可以查看文档上相关的分词器,就可以知道预训练模型使用了哪种类型的分词器。 +举个例子,如果我们查看[`BertTokenizer`],我们就能看到模型使用了[WordPiece](#wordpiece)。 + +## 介绍 +将一段文本分词到小块是一个比它看起来更加困难的任务,并且有很多方式来实现分词,举个例子,让我们看看这个句子 +`"Don't you love 🤗 Transformers? 
We sure do."` + + + +对这段文本分词的一个简单方式,就是使用空格来分词,得到的结果是: + +``` +["Don't", "you", "love", "🤗", "Transformers?", "We", "sure", "do."] +``` + +上面的分词是一个明智的开始,但是如果我们查看token `"Transformers?"` 和 `"do."`,我们可以观察到标点符号附在单词`"Transformer"` +和 `"do"`的后面,这并不是最理想的情况。我们应该将标点符号考虑进来,这样一个模型就没必要学习一个单词和每个可能跟在后面的 +标点符号的不同的组合,这么组合的话,模型需要学习的组合的数量会急剧上升。将标点符号也考虑进来,对范例文本进行分词的结果就是: + +``` +["Don", "'", "t", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] +``` + +分词的结果更好了,然而,这么做也是不好的,分词怎么处理单词`"Don't"`,`"Don't"`的含义是`"do not"`,所以这么分词`["Do", "n't"]` +会更好。现在开始事情就开始变得复杂起来了,部分的原因是每个模型都有它自己的分词类型。依赖于我们应用在文本分词上的规则, +相同的文本会产生不同的分词输出。用在训练数据上的分词规则,被用来对输入做分词操作,一个预训练模型才会正确的执行。 + +[spaCy](https://spacy.io/) and [Moses](http://www.statmt.org/moses/?n=Development.GetStarted) 是两个受欢迎的基于规则的 +分词器。将这两个分词器应用在示例文本上,*spaCy* 和 *Moses*会输出类似下面的结果: + +``` +["Do", "n't", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] +``` + +可见上面的分词使用到了空格和标点符号的分词方式,以及基于规则的分词方式。空格和标点符号分词以及基于规则的分词都是单词分词的例子。 +不那么严格的来说,单词分词的定义就是将句子分割到很多单词。然而将文本分割到更小的块是符合直觉的,当处理大型文本语料库时,上面的 +分词方法会导致很多问题。在这种情况下,空格和标点符号分词通常会产生一个非常大的词典(使用到的所有不重复的单词和tokens的集合)。 +像:[Transformer XL](model_doc/transformerxl)使用空格和标点符号分词,结果会产生一个大小是267,735的词典! + +这么大的一个词典容量,迫使模型有着一个巨大的embedding矩阵,以及巨大的输入和输出层,这会增加内存使用量,也会提高时间复杂度。通常 +情况下,transformers模型几乎没有词典容量大于50,000的,特别是只在一种语言上预训练的模型。 + +所以如果简单的空格和标点符号分词让人不满意,为什么不简单的对字符分词? + + + +尽管字符分词是非常简单的,并且能极大的减少内存使用,降低时间复杂度,但是这样做会让模型很难学到有意义的输入表达。像: +比起学到单词`"today"`的一个有意义的上下文独立的表达,学到字母`"t"`的一个有意义的上下文独立的表达是相当困难的。因此, +字符分词经常会伴随着性能的下降。所以为了获得最好的结果,transformers模型在单词级别分词和字符级别分词之间使用了一个折中的方案 +被称作**子词**分词。 + +## 子词分词 + + + +子词分词算法依赖这样的原则:频繁使用的单词不应该被分割成更小的子词,但是很少使用的单词应该被分解到有意义的子词。举个例子: +`"annoyingly"`能被看作一个很少使用的单词,能被分解成`"annoying"`和`"ly"`。`"annoying"`和`"ly"`作为独立地子词,出现 +的次数都很频繁,而且与此同时单词`"annoyingly"`的含义可以通过组合`"annoying"`和`"ly"`的含义来获得。在粘合和胶水语言上, +像Turkish语言,这么做是相当有用的,在这样的语言里,通过线性组合子词,大多数情况下你能形成任意长的复杂的单词。 + +子词分词允许模型有一个合理的词典大小,而且能学到有意义的上下文独立地表达。除此以外,子词分词可以让模型处理以前从来没见过的单词, +方式是通过分解这些单词到已知的子词,举个例子:[`~transformers.BertTokenizer`]对句子`"I have a new GPU!"`分词的结果如下: + +```py +>>> from transformers import BertTokenizer + +>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") +>>> tokenizer.tokenize("I have a new GPU!") +["i", "have", "a", "new", "gp", "##u", "!"] +``` + +因为我们正在考虑不区分大小写的模型,句子首先被转换成小写字母形式。我们可以见到单词`["i", "have", "a", "new"]`在分词器 +的词典内,但是这个单词`"gpu"`不在词典内。所以,分词器将`"gpu"`分割成已知的子词`["gp" and "##u"]`。`"##"`意味着剩下的 +token应该附着在前面那个token的后面,不带空格的附着(分词的解码或者反向)。 + +另外一个例子,[`~transformers.XLNetTokenizer`]对前面的文本例子分词结果如下: + +```py +>>> from transformers import XLNetTokenizer + +>>> tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased") +>>> tokenizer.tokenize("Don't you love 🤗 Transformers? 
We sure do.") +["▁Don", "'", "t", "▁you", "▁love", "▁", "🤗", "▁", "Transform", "ers", "?", "▁We", "▁sure", "▁do", "."] +``` + +当我们查看[SentencePiece](#sentencepiece)时会回过头来解释这些`"▁"`符号的含义。正如你能见到的,很少使用的单词 +`"Transformers"`能被分割到更加频繁使用的子词`"Transform"`和`"ers"`。 + +现在让我们来看看不同的子词分割算法是怎么工作的,注意到所有的这些分词算法依赖于某些训练的方式,这些训练通常在语料库上完成, +相应的模型也是在这个语料库上训练的。 + + + +### Byte-Pair Encoding (BPE) + +Byte-Pair Encoding (BPE)来自于[Neural Machine Translation of Rare Words with Subword Units (Sennrich et +al., 2015)](https://arxiv.org/abs/1508.07909)。BPE依赖于一个预分词器,这个预分词器会将训练数据分割成单词。预分词可以是简单的 +空格分词,像::[GPT-2](model_doc/gpt2),[RoBERTa](model_doc/roberta)。更加先进的预分词方式包括了基于规则的分词,像: [XLM](model_doc/xlm),[FlauBERT](model_doc/flaubert),FlauBERT在大多数语言使用了Moses,或者[GPT](model_doc/gpt),GPT +使用了Spacy和ftfy,统计了训练语料库中每个单词的频次。 + +在预分词以后,生成了单词的集合,也确定了训练数据中每个单词出现的频次。下一步,BPE产生了一个基础词典,包含了集合中所有的符号, +BPE学习融合的规则-组合基础词典中的两个符号来形成一个新的符号。BPE会一直学习直到词典的大小满足了期望的词典大小的要求。注意到 +期望的词典大小是一个超参数,在训练这个分词器以前就需要人为指定。 + +举个例子,让我们假设在预分词以后,下面的单词集合以及他们的频次都已经确定好了: + +``` +("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5) +``` + +所以,基础的词典是`["b", "g", "h", "n", "p", "s", "u"]`。将所有单词分割成基础词典内的符号,就可以获得: + +``` +("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5) +``` +BPE接着会统计每个可能的符号对的频次,然后挑出出现最频繁的的符号对,在上面的例子中,`"h"`跟了`"u"`出现了10 + 5 = 15次 +(10次是出现了10次`"hug"`,5次是出现了5次`"hugs"`)。然而,最频繁的符号对是`"u"`后面跟了个`"g"`,总共出现了10 + 5 + 5 += 20次。因此,分词器学到的第一个融合规则是组合所有的`"u"`后面跟了个`"g"`符号。下一步,`"ug"`被加入到了词典内。单词的集合 +就变成了: + +``` +("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5) +``` + +BPE接着会统计出下一个最普遍的出现频次最大的符号对。也就是`"u"`后面跟了个`"n"`,出现了16次。`"u"`,`"n"`被融合成了`"un"`。 +也被加入到了词典中,再下一个出现频次最大的符号对是`"h"`后面跟了个`"ug"`,出现了15次。又一次这个符号对被融合成了`"hug"`, +也被加入到了词典中。 + +在当前这步,词典是`["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]`,我们的单词集合则是: + +``` +("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5) +``` + +假设,the Byte-Pair Encoding在这个时候停止训练,学到的融合规则并应用到其他新的单词上(只要这些新单词不包括不在基础词典内的符号 +就行)。举个例子,单词`"bug"`会被分词到`["b", "ug"]`,但是`"mug"`会被分词到`["", "ug"]`,因为符号`"m"`不在基础词典内。 +通常来看的话,单个字母像`"m"`不会被`""`符号替换掉,因为训练数据通常包括了每个字母,每个字母至少出现了一次,但是在特殊的符号 +中也可能发生像emojis。 + +就像之前提到的那样,词典的大小,举个例子,基础词典的大小 + 融合的数量,是一个需要配置的超参数。举个例子:[GPT](model_doc/gpt) +的词典大小是40,478,因为GPT有着478个基础词典内的字符,在40,000次融合以后选择了停止训练。 + +#### Byte-level BPE + +一个包含了所有可能的基础字符的基础字典可能会非常大,如果考虑将所有的unicode字符作为基础字符。为了拥有一个更好的基础词典,[GPT-2](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf)使用了字节 +作为基础词典,这是一个非常聪明的技巧,迫使基础词典是256大小,而且确保了所有基础字符包含在这个词典内。使用了其他的规则 +来处理标点符号,这个GPT2的分词器能对每个文本进行分词,不需要使用到符号。[GPT-2](model_doc/gpt)有一个大小是50,257 +的词典,对应到256字节的基础tokens,一个特殊的文本结束token,这些符号经过了50,000次融合学习。 + + + +### WordPiece + +WordPiece是子词分词算法,被用在[BERT](model_doc/bert),[DistilBERT](model_doc/distilbert),和[Electra](model_doc/electra)。 +这个算法发布在[Japanese and Korean +Voice Search (Schuster et al., 2012)](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) +和BPE非常相似。WordPiece首先初始化一个词典,这个词典包含了出现在训练数据中的每个字符,然后递进的学习一个给定数量的融合规则。和BPE相比较, +WordPiece不会选择出现频次最大的符号对,而是选择了加入到字典以后能最大化训练数据似然值的符号对。 + +所以这到底意味着什么?参考前面的例子,最大化训练数据的似然值,等价于找到一个符号对,它们的概率除以这个符号对中第一个符号的概率, +接着除以第二个符号的概率,在所有的符号对中商最大。像:如果`"ug"`的概率除以`"u"`除以`"g"`的概率的商,比其他任何符号对更大, +这个时候才能融合`"u"`和`"g"`。直觉上,WordPiece,和BPE有点点不同,WordPiece是评估融合两个符号会失去的量,来确保这么做是值得的。 + + + +### Unigram + +Unigram是一个子词分词器算法,介绍见[Subword Regularization: Improving Neural Network Translation +Models with Multiple Subword Candidates (Kudo, 
2018)](https://arxiv.org/pdf/1804.10959.pdf)。和BPE或者WordPiece相比较 +,Unigram使用大量的符号来初始化它的基础字典,然后逐渐的精简每个符号来获得一个更小的词典。举例来看基础词典能够对应所有的预分词 +的单词以及最常见的子字符串。Unigram没有直接用在任何transformers的任何模型中,但是和[SentencePiece](#sentencepiece)一起联合使用。 + +在每个训练的步骤,Unigram算法在当前词典的训练数据上定义了一个损失函数(经常定义为log似然函数的),还定义了一个unigram语言模型。 +然后,对词典内的每个符号,算法会计算如果这个符号从词典内移除,总的损失会升高多少。Unigram然后会移除百分之p的符号,这些符号的loss +升高是最低的(p通常是10%或者20%),像:这些在训练数据上对总的损失影响最小的符号。重复这个过程,直到词典已经达到了期望的大小。 +为了任何单词都能被分词,Unigram算法总是保留基础的字符。 + +因为Unigram不是基于融合规则(和BPE以及WordPiece相比较),在训练以后算法有几种方式来分词,如果一个训练好的Unigram分词器 +的词典是这个: + +``` +["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"], +``` +`"hugs"`可以被分词成`["hug", "s"]`, `["h", "ug", "s"]`或者`["h", "u", "g", "s"]`。所以选择哪一个呢?Unigram在保存 +词典的时候还会保存训练语料库内每个token的概率,所以在训练以后可以计算每个可能的分词结果的概率。实际上算法简单的选择概率 +最大的那个分词结果,但是也会提供概率来根据分词结果的概率来采样一个可能的分词结果。 + +分词器在损失函数上训练,这些损失函数定义了这些概率。假设训练数据包含了这些单词 $x_{1}$, $\dots$, $x_{N}$,一个单词$x_{i}$ +的所有可能的分词结果的集合定义为$S(x_{i})$,然后总的损失就可以定义为: + +$$\mathcal{L} = -\sum_{i=1}^{N} \log \left ( \sum_{x \in S(x_{i})} p(x) \right )$$ + + + +### SentencePiece +目前为止描述的所有分词算法都有相同的问题:它们都假设输入的文本使用空格来分开单词。然而,不是所有的语言都使用空格来分开单词。 +一个可能的解决方案是使用某种语言特定的预分词器。像:[XLM](model_doc/xlm)使用了一个特定的中文、日语和Thai的预分词器。 +为了更加广泛的解决这个问题,[SentencePiece: A simple and language independent subword tokenizer and +detokenizer for Neural Text Processing (Kudo et al., 2018)](https://arxiv.org/pdf/1808.06226.pdf) +将输入文本看作一个原始的输入流,因此使用的符合集合中也包括了空格。SentencePiece然后会使用BPE或者unigram算法来产生合适的 +词典。 + +举例来说,[`XLNetTokenizer`]使用了SentencePiece,这也是为什么上面的例子中`"▁"`符号包含在词典内。SentencePiece解码是非常容易的,因为所有的tokens能被concatenate起来,然后将`"▁"`替换成空格。 + +库内所有使用了SentencePiece的transformers模型,会和unigram组合起来使用,像:使用了SentencePiece的模型是[ALBERT](model_doc/albert), +[XLNet](model_doc/xlnet),[Marian](model_doc/marian),和[T5](model_doc/t5)。 From e2647450511e44ad9020d7d5dd455b2851850538 Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Tue, 7 Nov 2023 17:34:33 -0600 Subject: [PATCH 106/268] translate model_sharing.md and llm_tutorial.md to chinese (#27283) * translate model_sharing.md * translate llm_tutorial.md to chiense * update wrong translation * update _torctree.yml * update typos * update --- docs/source/zh/_toctree.yml | 4 + docs/source/zh/llm_tutorial.md | 269 ++++++++++++++++++++++++++++++++ docs/source/zh/model_sharing.md | 238 ++++++++++++++++++++++++++++ 3 files changed, 511 insertions(+) create mode 100644 docs/source/zh/llm_tutorial.md create mode 100644 docs/source/zh/model_sharing.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index e12364ac8de4..a0ee896f0982 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -21,8 +21,12 @@ title: 使用🤗Accelerate进行分布式训练 - local: peft title: 使用🤗 PEFT加载和训练adapters + - local: model_sharing + title: 分享您的模型 - local: transformers_agents title: agents教程 + - local: llm_tutorial + title: 使用LLMs进行生成 title: 教程 - sections: - local: fast_tokenizers diff --git a/docs/source/zh/llm_tutorial.md b/docs/source/zh/llm_tutorial.md new file mode 100644 index 000000000000..47a6742c8974 --- /dev/null +++ b/docs/source/zh/llm_tutorial.md @@ -0,0 +1,269 @@ + + + +## 使用LLMs进行生成 + +[[open-in-colab]] + +LLMs,即大语言模型,是文本生成背后的关键组成部分。简单来说,它们包含经过大规模预训练的transformer模型,用于根据给定的输入文本预测下一个词(或更准确地说,下一个`token`)。由于它们一次只预测一个`token`,因此除了调用模型之外,您需要执行更复杂的操作来生成新的句子——您需要进行自回归生成。 + +自回归生成是在给定一些初始输入,通过迭代调用模型及其自身的生成输出来生成文本的推理过程,。在🤗 Transformers中,这由[`~generation.GenerationMixin.generate`]方法处理,所有具有生成能力的模型都可以使用该方法。 + +本教程将向您展示如何: + +* 使用LLM生成文本 +* 避免常见的陷阱 +* 帮助您充分利用LLM下一步指导 + +在开始之前,请确保已安装所有必要的库: + + 
+```bash +pip install transformers bitsandbytes>=0.39.0 -q +``` + + +## 生成文本 + +一个用于[因果语言建模](tasks/language_modeling)训练的语言模型,将文本`tokens`序列作为输入,并返回下一个`token`的概率分布。 + + + +
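为了更直观地说明这一点,下面给出一个简化的示意代码(假设已安装 `transformers` 和 `torch`,这里用 `gpt2` 仅作为轻量示例模型),展示一次前向传递如何得到下一个 `token` 的概率分布:

```py
# 示意代码:单次前向传递得到下一个 token 的概率分布(gpt2 仅作示例)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("A list of colors: red,", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # 形状为 (batch_size, seq_len, vocab_size)

# 取最后一个位置的 logits,经过 softmax 即为下一个 token 的概率分布
next_token_probs = torch.softmax(logits[:, -1, :], dim=-1)
```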
+ +
"LLM的前向传递"
+
+ +使用LLM进行自回归生成的一个关键方面是如何从这个概率分布中选择下一个`token`。这个步骤可以随意进行,只要最终得到下一个迭代的`token`。这意味着可以简单的从概率分布中选择最可能的`token`,也可以复杂的在对结果分布进行采样之前应用多种变换,这取决于你的需求。 + + +
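下面用一个简化的示意代码对比这两种选择方式:贪婪地取概率最大的 `token`,或者先对 logits 做温度缩放等变换再按概率采样(其中的 logits 仅为随机生成的示例张量):

```py
# 示意代码:从下一个 token 的分布中选择 token 的两种常见方式
import torch

vocab_size = 50257
logits = torch.randn(1, vocab_size)  # 假设这是最后一个位置的 logits(仅为随机示例)

# 方式一:贪婪解码,直接取概率最大的 token
greedy_token = torch.argmax(logits, dim=-1)

# 方式二:采样,先做温度缩放等变换,再按概率随机抽取
temperature = 0.7
probs = torch.softmax(logits / temperature, dim=-1)
sampled_token = torch.multinomial(probs, num_samples=1)
```

文档后文介绍的 `do_sample=True` 参数对应的就是这里的采样方式。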
+ +
"自回归生成迭代地从概率分布中选择下一个token以生成文本"
+
+ +上述过程是迭代重复的,直到达到某个停止条件。理想情况下,停止条件由模型决定,该模型应学会在何时输出一个结束序列(`EOS`)标记。如果不是这种情况,生成将在达到某个预定义的最大长度时停止。 + +正确设置`token`选择步骤和停止条件对于让你的模型按照预期的方式执行任务至关重要。这就是为什么我们为每个模型都有一个[~generation.GenerationConfig]文件,它包含一个效果不错的默认生成参数配置,并与您模型一起加载。 + +让我们谈谈代码! + + + +如果您对基本的LLM使用感兴趣,我们高级的[`Pipeline`](pipeline_tutorial)接口是一个很好的起点。然而,LLMs通常需要像`quantization`和`token选择步骤的精细控制`等高级功能,这最好通过[`~generation.GenerationMixin.generate`]来完成。使用LLM进行自回归生成也是资源密集型的操作,应该在GPU上执行以获得足够的吞吐量。 + + + +首先,您需要加载模型。 + +```py +>>> from transformers import AutoModelForCausalLM + +>>> model = AutoModelForCausalLM.from_pretrained( +... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True +... ) +``` + +您将会注意到在`from_pretrained`调用中的两个标志: + +- `device_map`确保模型被移动到您的GPU(s)上 +- `load_in_4bit`应用[4位动态量化](main_classes/quantization)来极大地减少资源需求 + +还有其他方式来初始化一个模型,但这是一个开始使用LLM很好的起点。 + +接下来,你需要使用一个[tokenizer](tokenizer_summary)来预处理你的文本输入。 + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", padding_side="left") +>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda") +``` + +`model_inputs`变量保存着分词后的文本输入以及注意力掩码。尽管[`~generation.GenerationMixin.generate`]在未传递注意力掩码时会尽其所能推断出注意力掩码,但建议尽可能传递它以获得最佳结果。 + +在对输入进行分词后,可以调用[`~generation.GenerationMixin.generate`]方法来返回生成的`tokens`。生成的`tokens`应该在打印之前转换为文本。 + +```py +>>> generated_ids = model.generate(**model_inputs) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] +'A list of colors: red, blue, green, yellow, orange, purple, pink,' +``` + +最后,您不需要一次处理一个序列!您可以批量输入,这将在小延迟和低内存成本下显著提高吞吐量。您只需要确保正确地填充您的输入(详见下文)。 + +```py +>>> tokenizer.pad_token = tokenizer.eos_token # Most LLMs don't have a pad token by default +>>> model_inputs = tokenizer( +... ["A list of colors: red, blue", "Portugal is"], return_tensors="pt", padding=True +... ).to("cuda") +>>> generated_ids = model.generate(**model_inputs) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) +['A list of colors: red, blue, green, yellow, orange, purple, pink,', +'Portugal is a country in southwestern Europe, on the Iber'] +``` + +就是这样!在几行代码中,您就可以利用LLM的强大功能。 + + +## 常见陷阱 + +有许多[生成策略](generation_strategies),有时默认值可能不适合您的用例。如果您的输出与您期望的结果不匹配,我们已经创建了一个最常见的陷阱列表以及如何避免它们。 + +```py +>>> from transformers import AutoModelForCausalLM, AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") +>>> tokenizer.pad_token = tokenizer.eos_token # Most LLMs don't have a pad token by default +>>> model = AutoModelForCausalLM.from_pretrained( +... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True +... 
) +``` + +### 生成的输出太短/太长 + +如果在[`~generation.GenerationConfig`]文件中没有指定,`generate`默认返回20个tokens。我们强烈建议在您的`generate`调用中手动设置`max_new_tokens`以控制它可以返回的最大新tokens数量。请注意,LLMs(更准确地说,仅[解码器模型](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt))也将输入提示作为输出的一部分返回。 + +```py +>>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda") + +>>> # By default, the output will contain up to 20 tokens +>>> generated_ids = model.generate(**model_inputs) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] +'A sequence of numbers: 1, 2, 3, 4, 5' + +>>> # Setting `max_new_tokens` allows you to control the maximum length +>>> generated_ids = model.generate(**model_inputs, max_new_tokens=50) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] +'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,' +``` + +### 错误的生成模式 + +默认情况下,除非在[`~generation.GenerationConfig`]文件中指定,否则`generate`会在每个迭代中选择最可能的token(贪婪解码)。对于您的任务,这可能是不理想的;像聊天机器人或写作文章这样的创造性任务受益于采样。另一方面,像音频转录或翻译这样的基于输入的任务受益于贪婪解码。通过将`do_sample=True`启用采样,您可以在这篇[博客文章](https://huggingface.co/blog/how-to-generate)中了解更多关于这个话题的信息。 + +```py +>>> # Set seed or reproducibility -- you don't need this unless you want full reproducibility +>>> from transformers import set_seed +>>> set_seed(42) + +>>> model_inputs = tokenizer(["I am a cat."], return_tensors="pt").to("cuda") + +>>> # LLM + greedy decoding = repetitive, boring output +>>> generated_ids = model.generate(**model_inputs) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] +'I am a cat. I am a cat. I am a cat. I am a cat' + +>>> # With sampling, the output becomes more creative! +>>> generated_ids = model.generate(**model_inputs, do_sample=True) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] +'I am a cat. Specifically, I am an indoor-only cat. I' +``` + +### 错误的填充位置 + +LLMs是[仅解码器](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt)架构,意味着它们会持续迭代您的输入提示。如果您的输入长度不相同,则需要对它们进行填充。由于LLMs没有接受过从`pad tokens`继续训练,因此您的输入需要左填充。确保在生成时不要忘记传递注意力掩码! + +```py +>>> # The tokenizer initialized above has right-padding active by default: the 1st sequence, +>>> # which is shorter, has padding on the right side. Generation fails to capture the logic. +>>> model_inputs = tokenizer( +... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt" +... ).to("cuda") +>>> generated_ids = model.generate(**model_inputs) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] +'1, 2, 33333333333' + +>>> # With left-padding, it works as expected! +>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", padding_side="left") +>>> tokenizer.pad_token = tokenizer.eos_token # Most LLMs don't have a pad token by default +>>> model_inputs = tokenizer( +... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt" +... ).to("cuda") +>>> generated_ids = model.generate(**model_inputs) +>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] +'1, 2, 3, 4, 5, 6,' +``` + +### 错误的提示 + +一些模型和任务期望某种输入提示格式才能正常工作。当未应用此格式时,您将获得悄然的性能下降:模型能工作,但不如预期提示那样好。有关提示的更多信息,包括哪些模型和任务需要小心,可在[指南](tasks/prompting)中找到。让我们看一个使用[聊天模板](chat_templating)的聊天LLM示例: + +```python +>>> tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha") +>>> model = AutoModelForCausalLM.from_pretrained( +... "HuggingFaceH4/zephyr-7b-alpha", device_map="auto", load_in_4bit=True +... ) +>>> set_seed(0) +>>> prompt = """How many helicopters can a human eat in one sitting? 
Reply as a thug.""" +>>> model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda") +>>> input_length = model_inputs.input_ids.shape[1] +>>> generated_ids = model.generate(**model_inputs, max_new_tokens=20) +>>> print(tokenizer.batch_decode(generated_ids[:, input_length:], skip_special_tokens=True)[0]) +"I'm not a thug, but i can tell you that a human cannot eat" +>>> # Oh no, it did not follow our instruction to reply as a thug! Let's see what happens when we write +>>> # a better prompt and use the right template for this model (through `tokenizer.apply_chat_template`) + +>>> set_seed(0) +>>> messages = [ +... { +... "role": "system", +... "content": "You are a friendly chatbot who always responds in the style of a thug", +... }, +... {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, +... ] +>>> model_inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to("cuda") +>>> input_length = model_inputs.shape[1] +>>> generated_ids = model.generate(model_inputs, do_sample=True, max_new_tokens=20) +>>> print(tokenizer.batch_decode(generated_ids[:, input_length:], skip_special_tokens=True)[0]) +'None, you thug. How bout you try to focus on more useful questions?' +>>> # As we can see, it followed a proper thug style 😎 +``` + +## 更多资源 + +虽然自回归生成过程相对简单,但要充分利用LLM可能是一个具有挑战性的任务,因为很多组件复杂且密切关联。以下是帮助您深入了解LLM使用和理解的下一步: + +### 高级生成用法 + +1. [指南](generation_strategies),介绍如何控制不同的生成方法、如何设置生成配置文件以及如何进行输出流式传输; +2. [指南](chat_templating),介绍聊天LLMs的提示模板; +3. [指南](tasks/prompting),介绍如何充分利用提示设计; +4. API参考文档,包括[`~generation.GenerationConfig`]、[`~generation.GenerationMixin.generate`]和[与生成相关的类](internal/generation_utils)。 + +### LLM排行榜 + +1. [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), 侧重于开源模型的质量; +2. [Open LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard), 侧重于LLM的吞吐量. + +### 延迟、吞吐量和内存利用率 + +1. [指南](llm_tutorial_optimization),如何优化LLMs以提高速度和内存利用; +2. [指南](main_classes/quantization), 关于`quantization`,如bitsandbytes和autogptq的指南,教您如何大幅降低内存需求。 + +### 相关库 + +1. [`text-generation-inference`](https://github.com/huggingface/text-generation-inference), 一个面向生产的LLM服务器; +2. [`optimum`](https://github.com/huggingface/optimum), 一个🤗 Transformers的扩展,优化特定硬件设备的性能 diff --git a/docs/source/zh/model_sharing.md b/docs/source/zh/model_sharing.md new file mode 100644 index 000000000000..fbea41a90398 --- /dev/null +++ b/docs/source/zh/model_sharing.md @@ -0,0 +1,238 @@ + + +# 分享模型 + +最后两个教程展示了如何使用PyTorch、Keras和 🤗 Accelerate进行分布式设置来微调模型。下一步是将您的模型与社区分享!在Hugging Face,我们相信公开分享知识和资源,能实现人工智能的普及化,让每个人都能受益。我们鼓励您将您的模型与社区分享,以帮助他人节省时间和精力。 + +在本教程中,您将学习两种在[Model Hub](https://huggingface.co/models)上共享训练好的或微调的模型的方法: + +- 通过编程将文件推送到Hub。 +- 使用Web界面将文件拖放到Hub。 + + + + + +要与社区共享模型,您需要在[huggingface.co](https://huggingface.co/join)上拥有一个帐户。您还可以加入现有的组织或创建一个新的组织。 + + + +## 仓库功能 + +Model Hub上的每个仓库都像是一个典型的GitHub仓库。我们的仓库提供版本控制、提交历史记录以及可视化差异的能力。 + +Model Hub的内置版本控制基于git和[git-lfs](https://git-lfs.github.com/)。换句话说,您可以将一个模型视为一个仓库,从而实现更好的访问控制和可扩展性。版本控制允许使用*修订*方法来固定特定版本的模型,可以使用提交哈希值、标签或分支来标记。 + +因此,您可以通过`revision`参数加载特定的模型版本: + +```py +>>> model = AutoModel.from_pretrained( +... "julien-c/EsperBERTo-small", revision="v2.0.1" # tag name, or branch name, or commit hash +... 
) +``` + +文件也可以轻松地在仓库中编辑,您可以查看提交历史记录以及差异: +![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png) + +## 设置 + +在将模型共享到Hub之前,您需要拥有Hugging Face的凭证。如果您有访问终端的权限,请在安装🤗 Transformers的虚拟环境中运行以下命令。这将在您的Hugging Face缓存文件夹(默认为`~/.cache/`)中存储您的`access token`: + + +```bash +huggingface-cli login +``` + +如果您正在使用像Jupyter或Colaboratory这样的`notebook`,请确保您已安装了[`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library)库。该库允许您以编程方式与Hub进行交互。 + +```bash +pip install huggingface_hub +``` +然后使用`notebook_login`登录到Hub,并按照[这里](https://huggingface.co/settings/token)的链接生成一个token进行登录: + + +```py +>>> from huggingface_hub import notebook_login + +>>> notebook_login() +``` + +## 转换模型适用于所有框架 + +为确保您的模型可以被使用不同框架的人使用,我们建议您将PyTorch和TensorFlow `checkpoints`都转换并上传。如果您跳过此步骤,用户仍然可以从其他框架加载您的模型,但速度会变慢,因为🤗 Transformers需要实时转换`checkpoints`。 + +为另一个框架转换`checkpoints`很容易。确保您已安装PyTorch和TensorFlow(请参阅[此处](installation)的安装说明),然后在其他框架中找到适合您任务的特定模型。 + + + + +指定`from_tf=True`将checkpoint从TensorFlow转换为PyTorch。 + +```py +>>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) +>>> pt_model.save_pretrained("path/to/awesome-name-you-picked") +``` + + + +指定`from_pt=True`将checkpoint从PyTorch转换为TensorFlow。 + +```py +>>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True) +``` + +然后,您可以使用新的checkpoint保存您的新TensorFlow模型: + +```py +>>> tf_model.save_pretrained("path/to/awesome-name-you-picked") +``` + + + +如果模型在Flax中可用,您还可以将PyTorch checkpoint转换为Flax: + +```py +>>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( +... "path/to/awesome-name-you-picked", from_pt=True +... ) +``` + + + +## 在训练过程中推送模型 + + + + + +将模型分享到Hub就像添加一个额外的参数或回调函数一样简单。请记住,在[微调教程](training)中,`TrainingArguments`类是您指定超参数和附加训练选项的地方。其中一项训练选项包括直接将模型推送到Hub的能力。在您的`TrainingArguments`中设置`push_to_hub=True`: + + +```py +>>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True) +``` + +像往常一样将您的训练参数传递给[`Trainer`]: + +```py +>>> trainer = Trainer( +... model=model, +... args=training_args, +... train_dataset=small_train_dataset, +... eval_dataset=small_eval_dataset, +... compute_metrics=compute_metrics, +... ) +``` + +在您微调完模型后,在[`Trainer`]上调用[`~transformers.Trainer.push_to_hub`]将训练好的模型推送到Hub。🤗 Transformers甚至会自动将训练超参数、训练结果和框架版本添加到你的模型卡片中! + +```py +>>> trainer.push_to_hub() +``` + + + +使用[`PushToHubCallback`]将模型分享到Hub。在[`PushToHubCallback`]函数中,添加以下内容: + +- 一个用于存储模型的输出目录。 +- 一个tokenizer。 +- `hub_model_id`,即您的Hub用户名和模型名称。 + + +```py +>>> from transformers import PushToHubCallback + +>>> push_to_hub_callback = PushToHubCallback( +... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" +... 
) +``` + +将回调函数添加到 [`fit`](https://keras.io/api/models/model_training_apis/)中,然后🤗 Transformers 会将训练好的模型推送到 Hub: + +```py +>>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) +``` + + + +## 使用`push_to_hub`功能 + +您可以直接在您的模型上调用`push_to_hub`来将其上传到Hub。 + +在`push_to_hub`中指定你的模型名称: + +```py +>>> pt_model.push_to_hub("my-awesome-model") +``` + +这会在您的用户名下创建一个名为`my-awesome-model`的仓库。用户现在可以使用`from_pretrained`函数加载您的模型: + +```py +>>> from transformers import AutoModel + +>>> model = AutoModel.from_pretrained("your_username/my-awesome-model") +``` + +如果您属于一个组织,并希望将您的模型推送到组织名称下,只需将其添加到`repo_id`中: + +```py +>>> pt_model.push_to_hub("my-awesome-org/my-awesome-model") +``` + +`push_to_hub`函数还可以用于向模型仓库添加其他文件。例如,向模型仓库中添加一个`tokenizer`: + +```py +>>> tokenizer.push_to_hub("my-awesome-model") +``` + +或者,您可能希望将您的微调后的PyTorch模型的TensorFlow版本添加进去: + +```py +>>> tf_model.push_to_hub("my-awesome-model") +``` +现在,当您导航到您的Hugging Face个人资料时,您应该看到您新创建的模型仓库。点击**文件**选项卡将显示您已上传到仓库的所有文件。 + +有关如何创建和上传文件到仓库的更多详细信息,请参考Hub文档[这里](https://huggingface.co/docs/hub/how-to-upstream)。 + + +## 使用Web界面上传 + +喜欢无代码方法的用户可以通过Hugging Face的Web界面上传模型。访问[huggingface.co/new](https://huggingface.co/new)创建一个新的仓库: + +![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png) + +从这里开始,添加一些关于您的模型的信息: + +- 选择仓库的**所有者**。这可以是您本人或者您所属的任何组织。 +- 为您的项目选择一个名称,该名称也将成为仓库的名称。 +- 选择您的模型是公开还是私有。 +- 指定您的模型的许可证使用情况。 + +现在点击**文件**选项卡,然后点击**添加文件**按钮将一个新文件上传到你的仓库。接着拖放一个文件进行上传,并添加提交信息。 + +![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png) + +## 添加模型卡片 + +为了确保用户了解您的模型的能力、限制、潜在偏差和伦理考虑,请在仓库中添加一个模型卡片。模型卡片在`README.md`文件中定义。你可以通过以下方式添加模型卡片: + +* 手动创建并上传一个`README.md`文件。 +* 在你的模型仓库中点击**编辑模型卡片**按钮。 + +可以参考DistilBert的[模型卡片](https://huggingface.co/distilbert-base-uncased)来了解模型卡片应该包含的信息类型。有关您可以在`README.md`文件中控制的更多选项的细节,例如模型的碳足迹或小部件示例,请参考文档[这里](https://huggingface.co/docs/hub/models-cards)。 \ No newline at end of file From be74b2ead69df1849ec62ac5c86c7d5dee663448 Mon Sep 17 00:00:00 2001 From: Yoach Lacombe <52246514+ylacombe@users.noreply.github.com> Date: Wed, 8 Nov 2023 07:39:37 +0000 Subject: [PATCH 107/268] Add numpy alternative to FE using torchaudio (#26339) * add audio_utils usage in the FE of SpeechToText * clean unecessary parameters of AudioSpectrogramTransformer FE * add audio_utils usage in AST * add serialization tests and function to FEs * make style * remove use_torchaudio and move to_dict to FE * test audio_utils usage * make style and fix import (remove torchaudio dependency import) * fix torch dependency for jax and tensor tests * fix typo * clean tests with suggestions * add lines to test if is_speech_availble is False --- src/transformers/__init__.py | 27 ++------ src/transformers/feature_extraction_utils.py | 11 +-- .../audio_spectrogram_transformer/__init__.py | 21 ++---- ...xtraction_audio_spectrogram_transformer.py | 68 ++++++++++++++----- .../models/speech_to_text/__init__.py | 19 +----- .../feature_extraction_speech_to_text.py | 52 +++++++++++--- ...xtraction_audio_spectrogram_transformer.py | 49 ++++++++++++- .../test_feature_extraction_speech_to_text.py | 53 +++++++++++++-- .../test_processor_speech_to_text.py | 6 +- 9 files changed, 208 insertions(+), 98 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 97cc4e578c74..4e98a717f028 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -146,6 +146,7 @@ 
"models.audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", + "ASTFeatureExtractor", ], "models.auto": [ "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", @@ -535,6 +536,7 @@ "models.speech_to_text": [ "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig", + "Speech2TextFeatureExtractor", "Speech2TextProcessor", ], "models.speech_to_text_2": [ @@ -913,20 +915,6 @@ else: _import_structure["convert_slow_tokenizer"] = ["SLOW_TO_FAST_CONVERTERS", "convert_slow_tokenizer"] -# Speech-specific objects -try: - if not is_speech_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from .utils import dummy_speech_objects - - _import_structure["utils.dummy_speech_objects"] = [ - name for name in dir(dummy_speech_objects) if not name.startswith("_") - ] -else: - _import_structure["models.audio_spectrogram_transformer"].append("ASTFeatureExtractor") - _import_structure["models.speech_to_text"].append("Speech2TextFeatureExtractor") - # Tensorflow-text-specific objects try: if not is_tensorflow_text_available(): @@ -4352,6 +4340,7 @@ from .models.audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, + ASTFeatureExtractor, ) from .models.auto import ( ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -4722,6 +4711,7 @@ from .models.speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig, + Speech2TextFeatureExtractor, Speech2TextProcessor, ) from .models.speech_to_text_2 import ( @@ -5067,15 +5057,6 @@ else: from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer - try: - if not is_speech_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - from .utils.dummy_speech_objects import * - else: - from .models.audio_spectrogram_transformer import ASTFeatureExtractor - from .models.speech_to_text import Speech2TextFeatureExtractor - try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() diff --git a/src/transformers/feature_extraction_utils.py b/src/transformers/feature_extraction_utils.py index b626ff3dd717..fe1f7a78c93f 100644 --- a/src/transformers/feature_extraction_utils.py +++ b/src/transformers/feature_extraction_utils.py @@ -584,14 +584,15 @@ def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrain def to_dict(self) -> Dict[str, Any]: """ - Serializes this instance to a Python dictionary. - - Returns: - `Dict[str, Any]`: Dictionary of all the attributes that make up this feature extractor instance. + Serializes this instance to a Python dictionary. Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) output["feature_extractor_type"] = self.__class__.__name__ - + if "mel_filters" in output: + del output["mel_filters"] + if "window" in output: + del output["window"] return output @classmethod diff --git a/src/transformers/models/audio_spectrogram_transformer/__init__.py b/src/transformers/models/audio_spectrogram_transformer/__init__.py index 9aa42423cf5f..2b48fe07311c 100644 --- a/src/transformers/models/audio_spectrogram_transformer/__init__.py +++ b/src/transformers/models/audio_spectrogram_transformer/__init__.py @@ -13,14 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", - ] + ], + "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"], } try: @@ -36,19 +37,13 @@ "ASTPreTrainedModel", ] -try: - if not is_speech_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) + from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor try: if not is_torch_available(): @@ -63,14 +58,6 @@ ASTPreTrainedModel, ) - try: - if not is_speech_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor - else: import sys diff --git a/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py b/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py index 786548fd2336..2bd122b4098c 100644 --- a/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +++ b/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py @@ -19,12 +19,18 @@ from typing import List, Optional, Union import numpy as np -import torch -import torchaudio.compliance.kaldi as ta_kaldi +from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature -from ...utils import TensorType, logging +from ...utils import TensorType, is_speech_available, is_torch_available, logging + + +if is_speech_available(): + import torchaudio.compliance.kaldi as ta_kaldi + +if is_torch_available(): + import torch logger = logging.get_logger(__name__) @@ -37,8 +43,8 @@ class ASTFeatureExtractor(SequenceFeatureExtractor): This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. - This class extracts mel-filter bank features from raw speech using TorchAudio, pads/truncates them to a fixed - length and normalizes them using a mean and standard deviation. + This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy + otherwise, pads/truncates them to a fixed length and normalizes them using a mean and standard deviation. 
Args: feature_size (`int`, *optional*, defaults to 1): @@ -83,6 +89,21 @@ def __init__( self.std = std self.return_attention_mask = return_attention_mask + if not is_speech_available(): + mel_filters = mel_filter_bank( + num_frequency_bins=256, + num_mel_filters=self.num_mel_bins, + min_frequency=20, + max_frequency=sampling_rate // 2, + sampling_rate=sampling_rate, + norm=None, + mel_scale="kaldi", + triangularize_in_mel_space=True, + ) + + self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) + self.window = window_function(400, "hann", periodic=False) + def _extract_fbank_features( self, waveform: np.ndarray, @@ -93,17 +114,32 @@ def _extract_fbank_features( and hence the waveform should not be normalized before feature extraction. """ # waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers - waveform = torch.from_numpy(waveform).unsqueeze(0) - fbank = ta_kaldi.fbank( - waveform, - htk_compat=True, - sample_frequency=self.sampling_rate, - use_energy=False, - window_type="hanning", - num_mel_bins=self.num_mel_bins, - dither=0.0, - frame_shift=10, - ) + if is_speech_available(): + waveform = torch.from_numpy(waveform).unsqueeze(0) + fbank = ta_kaldi.fbank( + waveform, + sample_frequency=self.sampling_rate, + window_type="hanning", + num_mel_bins=self.num_mel_bins, + ) + else: + waveform = np.squeeze(waveform) + fbank = spectrogram( + waveform, + self.window, + frame_length=400, + hop_length=160, + fft_length=512, + power=2.0, + center=False, + preemphasis=0.97, + mel_filters=self.mel_filters, + log_mel="log", + mel_floor=1.192092955078125e-07, + remove_dc_offset=True, + ).T + + fbank = torch.from_numpy(fbank) n_frames = fbank.shape[0] difference = max_length - n_frames diff --git a/src/transformers/models/speech_to_text/__init__.py b/src/transformers/models/speech_to_text/__init__.py index 45a91c2b4962..3194f99931a4 100644 --- a/src/transformers/models/speech_to_text/__init__.py +++ b/src/transformers/models/speech_to_text/__init__.py @@ -17,7 +17,6 @@ OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, - is_speech_available, is_tf_available, is_torch_available, ) @@ -25,6 +24,7 @@ _import_structure = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], + "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"], "processing_speech_to_text": ["Speech2TextProcessor"], } @@ -36,14 +36,6 @@ else: _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"] -try: - if not is_speech_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"] - try: if not is_tf_available(): raise OptionalDependencyNotAvailable() @@ -73,6 +65,7 @@ if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig + from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor from .processing_speech_to_text import Speech2TextProcessor try: @@ -83,14 +76,6 @@ else: from .tokenization_speech_to_text import Speech2TextTokenizer - try: - if not is_speech_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor - try: if not is_tf_available(): raise OptionalDependencyNotAvailable() diff --git 
a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py index 0d5b077c9387..193f2dda0946 100644 --- a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py +++ b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py @@ -19,14 +19,17 @@ from typing import List, Optional, Union import numpy as np -import torch -import torchaudio.compliance.kaldi as ta_kaldi +from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature -from ...utils import PaddingStrategy, TensorType, logging +from ...utils import PaddingStrategy, TensorType, is_speech_available, logging +if is_speech_available(): + import torch + import torchaudio.compliance.kaldi as ta_kaldi + logger = logging.get_logger(__name__) @@ -37,8 +40,8 @@ class Speech2TextFeatureExtractor(SequenceFeatureExtractor): This feature extractor inherits from [`Speech2TextFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. - This class extracts mel-filter bank features from raw speech using TorchAudio and applies utterance-level cepstral - mean and variance normalization to the extracted features. + This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy + otherwise, and applies utterance-level cepstral mean and variance normalization to the extracted features. Args: feature_size (`int`, *optional*, defaults to 80): @@ -77,6 +80,21 @@ def __init__( self.normalize_vars = normalize_vars self.return_attention_mask = True + if not is_speech_available(): + mel_filters = mel_filter_bank( + num_frequency_bins=256, + num_mel_filters=self.num_mel_bins, + min_frequency=20, + max_frequency=sampling_rate // 2, + sampling_rate=sampling_rate, + norm=None, + mel_scale="kaldi", + triangularize_in_mel_space=True, + ) + + self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) + self.window = window_function(400, "povey", periodic=False) + def _extract_fbank_features( self, waveform: np.ndarray, @@ -86,9 +104,27 @@ def _extract_fbank_features( and hence the waveform should not be normalized before feature extraction. 
""" waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers - waveform = torch.from_numpy(waveform).unsqueeze(0) - features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate) - return features.numpy() + if is_speech_available(): + waveform = torch.from_numpy(waveform).unsqueeze(0) + features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate) + features = features.numpy() + else: + waveform = np.squeeze(waveform) + features = spectrogram( + waveform, + self.window, + frame_length=400, + hop_length=160, + fft_length=512, + power=2.0, + center=False, + preemphasis=0.97, + mel_filters=self.mel_filters, + log_mel="log", + mel_floor=1.192092955078125e-07, + remove_dc_offset=True, + ).T + return features @staticmethod def utterance_cmvn( diff --git a/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py b/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py index 85d696479c2c..ac6cd5eb1fbc 100644 --- a/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py +++ b/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py @@ -15,13 +15,15 @@ import itertools +import os import random +import tempfile import unittest import numpy as np from transformers import ASTFeatureExtractor -from transformers.testing_utils import require_torch, require_torchaudio +from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin @@ -173,3 +175,48 @@ def test_integration(self): input_values = feature_extractor(input_speech, return_tensors="pt").input_values self.assertEquals(input_values.shape, (1, 1024, 128)) self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4)) + + def test_feat_extract_from_and_save_pretrained(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] + check_json_file_has_correct_format(saved_file) + feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + self.assertDictEqual(dict_first, dict_second) + + def test_feat_extract_to_json_file(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + json_file_path = os.path.join(tmpdirname, "feat_extract.json") + feat_extract_first.to_json_file(json_file_path) + feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + self.assertEqual(dict_first, dict_second) + + +# exact same tests than before, except that we simulate that torchaudio is not available +@require_torch +@unittest.mock.patch( + "transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer.is_speech_available", + lambda: False, +) +class ASTFeatureExtractionWithoutTorchaudioTest(ASTFeatureExtractionTest): + def test_using_audio_utils(self): + # Tests that it uses audio_utils 
instead of torchaudio + feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + + self.assertTrue(hasattr(feat_extract, "window")) + self.assertTrue(hasattr(feat_extract, "mel_filters")) + + from transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer import ( + is_speech_available, + ) + + self.assertFalse(is_speech_available()) diff --git a/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py b/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py index d8929c4ef0d2..f652d09ffca5 100644 --- a/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py +++ b/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py @@ -15,20 +15,19 @@ import itertools +import os import random +import tempfile import unittest import numpy as np -from transformers import is_speech_available -from transformers.testing_utils import require_torch, require_torchaudio +from transformers import Speech2TextFeatureExtractor +from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin -if is_speech_available(): - from transformers import Speech2TextFeatureExtractor - global_rng = random.Random() @@ -105,7 +104,7 @@ def _flatten(list_of_lists): @require_torch @require_torchaudio class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): - feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None + feature_extraction_class = Speech2TextFeatureExtractor def setUp(self): self.feat_extract_tester = Speech2TextFeatureExtractionTester(self) @@ -280,3 +279,45 @@ def test_integration(self): input_features = feature_extractor(input_speech, return_tensors="pt").input_features self.assertEquals(input_features.shape, (1, 584, 24)) self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4)) + + def test_feat_extract_from_and_save_pretrained(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] + check_json_file_has_correct_format(saved_file) + feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + self.assertDictEqual(dict_first, dict_second) + + def test_feat_extract_to_json_file(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + json_file_path = os.path.join(tmpdirname, "feat_extract.json") + feat_extract_first.to_json_file(json_file_path) + feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + self.assertEqual(dict_first, dict_second) + + +# exact same tests than before, except that we simulate that torchaudio is not available +@require_torch +@unittest.mock.patch( + "transformers.models.speech_to_text.feature_extraction_speech_to_text.is_speech_available", lambda: False +) +class Speech2TextFeatureExtractionWithoutTorchaudioTest(Speech2TextFeatureExtractionTest): + def test_using_audio_utils(self): + # Tests that it uses audio_utils instead of torchaudio + feat_extract = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + + self.assertTrue(hasattr(feat_extract, "window")) + self.assertTrue(hasattr(feat_extract, "mel_filters")) + + from transformers.models.speech_to_text.feature_extraction_speech_to_text import is_speech_available + + self.assertFalse(is_speech_available()) diff --git a/tests/models/speech_to_text/test_processor_speech_to_text.py b/tests/models/speech_to_text/test_processor_speech_to_text.py index 9b8b3ccf66b2..923ba29d1a87 100644 --- a/tests/models/speech_to_text/test_processor_speech_to_text.py +++ b/tests/models/speech_to_text/test_processor_speech_to_text.py @@ -18,7 +18,7 @@ from pathlib import Path from shutil import copyfile -from transformers import Speech2TextTokenizer, is_speech_available +from transformers import Speech2TextFeatureExtractor, Speech2TextProcessor, Speech2TextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, require_torchaudio from transformers.utils import FEATURE_EXTRACTOR_NAME @@ -26,10 +26,6 @@ from .test_feature_extraction_speech_to_text import floats_list -if is_speech_available(): - from transformers import Speech2TextFeatureExtractor, Speech2TextProcessor - - SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model") From b6dbfee0a21d333447b47887dbe2cb87720ebfd0 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Wed, 8 Nov 2023 16:27:37 +0800 Subject: [PATCH 108/268] moving example of benchmarking to legacy dir (#27337) move example of benchmarking to legacy --- examples/{pytorch => legacy}/benchmarking/README.md | 0 examples/{pytorch => legacy}/benchmarking/plot_csv_file.py | 0 examples/{pytorch => legacy}/benchmarking/requirements.txt | 0 examples/{pytorch => legacy}/benchmarking/run_benchmark.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename examples/{pytorch => legacy}/benchmarking/README.md (100%) rename examples/{pytorch => legacy}/benchmarking/plot_csv_file.py (100%) rename examples/{pytorch => legacy}/benchmarking/requirements.txt (100%) rename examples/{pytorch => legacy}/benchmarking/run_benchmark.py (100%) mode change 100755 => 100644 diff --git a/examples/pytorch/benchmarking/README.md b/examples/legacy/benchmarking/README.md similarity index 100% rename from examples/pytorch/benchmarking/README.md rename to examples/legacy/benchmarking/README.md diff --git a/examples/pytorch/benchmarking/plot_csv_file.py b/examples/legacy/benchmarking/plot_csv_file.py similarity index 100% rename from examples/pytorch/benchmarking/plot_csv_file.py rename to examples/legacy/benchmarking/plot_csv_file.py diff --git a/examples/pytorch/benchmarking/requirements.txt b/examples/legacy/benchmarking/requirements.txt similarity index 100% rename from examples/pytorch/benchmarking/requirements.txt rename to examples/legacy/benchmarking/requirements.txt diff --git a/examples/pytorch/benchmarking/run_benchmark.py b/examples/legacy/benchmarking/run_benchmark.py old mode 100755 new mode 100644 similarity index 100% rename from examples/pytorch/benchmarking/run_benchmark.py rename to examples/legacy/benchmarking/run_benchmark.py From efa57cb234eeebe831f241fe2bb97da567e3ab05 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Wed, 8 Nov 2023 07:45:21 -0500 Subject: [PATCH 109/268] Fix example tests from failing (#27353) * Fix example tests from failing * CHange thresh --- examples/pytorch/test_accelerate_examples.py | 5 ++--- 1 
file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/pytorch/test_accelerate_examples.py b/examples/pytorch/test_accelerate_examples.py index d5e20d820e82..22dd7dea27ae 100644 --- a/examples/pytorch/test_accelerate_examples.py +++ b/examples/pytorch/test_accelerate_examples.py @@ -21,7 +21,6 @@ import shutil import sys import tempfile -import unittest from unittest import mock from accelerate.utils import write_basic_config @@ -89,6 +88,7 @@ def test_run_glue_no_trainer(self): --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 + --num_warmup_steps=2 --checkpointing_steps epoch --with_tracking """.split() @@ -177,7 +177,6 @@ def test_run_ner_no_trainer(self): self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer"))) - @unittest.skip(reason="Fix me @muellerzr") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) def test_run_squad_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() @@ -332,6 +331,6 @@ def test_run_image_classification_no_trainer(self): run_command(self._launch_args + testargs) result = get_results(tmp_dir) # The base model scores a 25% - self.assertGreaterEqual(result["eval_accuracy"], 0.6) + self.assertGreaterEqual(result["eval_accuracy"], 0.4) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer"))) From 5ef650b0aeb8ce3a9491db9f39591264dbff4145 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 8 Nov 2023 14:14:45 +0100 Subject: [PATCH 110/268] Fix `Kosmos-2` device issue (#27346) * fix * fix * fix --------- Co-authored-by: ydshieh --- src/transformers/models/kosmos2/modeling_kosmos2.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 781813db4570..797523493eb2 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -1149,7 +1149,9 @@ def forward_embedding( inputs_embeds = self.embed_tokens(input_ids) if image_embeds is not None: - inputs_embeds[img_input_mask.to(dtype=torch.bool)] = image_embeds.view(-1, image_embeds.size(-1)) + inputs_embeds[img_input_mask.to(dtype=torch.bool)] = image_embeds.to(inputs_embeds.device).view( + -1, image_embeds.size(-1) + ) inputs_embeds = inputs_embeds * self.embed_scale @@ -1338,7 +1340,7 @@ class Kosmos2PreTrainedModel(PreTrainedModel): config_class = Kosmos2Config supports_gradient_checkpointing = True - _no_split_modules = ["Kosmos2VisionEncoderLayer, Kosmos2TextBlock"] + _no_split_modules = ["Kosmos2VisionEncoderLayer", "Kosmos2TextBlock"] def _init_weights(self, module): """Initialize the weights""" From f16ff0f07e3867db5feda00a661572a190f404e6 Mon Sep 17 00:00:00 2001 From: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Date: Wed, 8 Nov 2023 13:26:02 +0000 Subject: [PATCH 111/268] MusicGen Update (#27084) * [MusicGen] Add stereo model * safe serialization * Update src/transformers/models/musicgen/modeling_musicgen.py * split over 2 lines * fix slow tests on cuda --- docs/source/en/model_doc/musicgen.md | 5 + .../models/musicgen/configuration_musicgen.py | 9 ++ .../musicgen/convert_musicgen_transformers.py | 50 ++++++-- .../models/musicgen/modeling_musicgen.py | 88 ++++++++++--- .../models/musicgen/test_modeling_musicgen.py | 120 ++++++++++++++++++ 5 files 
changed, 244 insertions(+), 28 deletions(-) diff --git a/docs/source/en/model_doc/musicgen.md b/docs/source/en/model_doc/musicgen.md index 40c48382734c..d9e84294852d 100644 --- a/docs/source/en/model_doc/musicgen.md +++ b/docs/source/en/model_doc/musicgen.md @@ -57,6 +57,11 @@ Generation is limited by the sinusoidal positional embeddings to 30 second input than 30 seconds of audio (1503 tokens), and input audio passed by Audio-Prompted Generation contributes to this limit so, given an input of 20 seconds of audio, MusicGen cannot generate more than 10 seconds of additional audio. +Transformers supports both mono (1-channel) and stereo (2-channel) variants of MusicGen. The mono channel versions +generate a single set of codebooks. The stereo versions generate 2 sets of codebooks, 1 for each channel (left/right), +and each set of codebooks is decoded independently through the audio compression model. The audio streams for each +channel are combined to give the final stereo output. + ### Unconditional Generation The inputs for unconditional (or 'null') generation can be obtained through the method diff --git a/src/transformers/models/musicgen/configuration_musicgen.py b/src/transformers/models/musicgen/configuration_musicgen.py index 03371e10446c..e954181242ed 100644 --- a/src/transformers/models/musicgen/configuration_musicgen.py +++ b/src/transformers/models/musicgen/configuration_musicgen.py @@ -75,6 +75,9 @@ class MusicgenDecoderConfig(PretrainedConfig): The number of parallel codebooks forwarded to the model. tie_word_embeddings(`bool`, *optional*, defaults to `False`): Whether input and output word embeddings should be tied. + audio_channels (`int`, *optional*, defaults to 1 + Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate + audio stream for the left/right output channels. Mono models generate a single audio stream output. 
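A quick way to exercise the new `audio_channels` option is to load one of the stereo checkpoints this patch converts and tests. A minimal sketch, assuming the `facebook/musicgen-stereo-small` checkpoint named in the conversion script and integration tests is available on the Hub; the prompt and generation settings are illustrative only, and the interleaving convention in the comments follows the `build_delay_pattern_mask` / `generate` changes further down in this diff:

```python
from transformers import AutoProcessor, MusicgenForConditionalGeneration

processor = AutoProcessor.from_pretrained("facebook/musicgen-stereo-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-stereo-small")

# Stereo doubles the codebooks: even-indexed codebooks carry the left channel and
# odd-indexed ones the right, and each set is decoded separately before the two
# waveforms are concatenated on the channel dimension.
print(model.config.decoder.audio_channels, model.decoder.num_codebooks)  # 2, 8

inputs = processor(text=["80s music with punchy drums"], padding=True, return_tensors="pt")
audio = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
print(audio.shape)  # (batch_size, 2, num_samples) -- one waveform per stereo channel
```

With a mono checkpoint the same call returns a single-channel waveform, so downstream code only needs to branch on `audio.shape[1]`.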
""" model_type = "musicgen_decoder" keys_to_ignore_at_inference = ["past_key_values"] @@ -96,6 +99,7 @@ def __init__( initializer_factor=0.02, scale_embedding=False, num_codebooks=4, + audio_channels=1, pad_token_id=2048, bos_token_id=2048, eos_token_id=None, @@ -117,6 +121,11 @@ def __init__( self.use_cache = use_cache self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.num_codebooks = num_codebooks + + if audio_channels not in [1, 2]: + raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.") + self.audio_channels = audio_channels + super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, diff --git a/src/transformers/models/musicgen/convert_musicgen_transformers.py b/src/transformers/models/musicgen/convert_musicgen_transformers.py index 517f0099d0cd..d4b61046e5ea 100644 --- a/src/transformers/models/musicgen/convert_musicgen_transformers.py +++ b/src/transformers/models/musicgen/convert_musicgen_transformers.py @@ -88,32 +88,48 @@ def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig: - if checkpoint == "small": + if checkpoint == "small" or checkpoint == "facebook/musicgen-stereo-small": # default config values hidden_size = 1024 num_hidden_layers = 24 num_attention_heads = 16 - elif checkpoint == "medium": + elif checkpoint == "medium" or checkpoint == "facebook/musicgen-stereo-medium": hidden_size = 1536 num_hidden_layers = 48 num_attention_heads = 24 - elif checkpoint == "large": + elif checkpoint == "large" or checkpoint == "facebook/musicgen-stereo-large": hidden_size = 2048 num_hidden_layers = 48 num_attention_heads = 32 else: - raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.") + raise ValueError( + "Checkpoint should be one of `['small', 'medium', 'large']` for the mono checkpoints, " + "or `['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` " + f"for the stereo checkpoints, got {checkpoint}." 
+ ) + + if "stereo" in checkpoint: + audio_channels = 2 + num_codebooks = 8 + else: + audio_channels = 1 + num_codebooks = 4 + config = MusicgenDecoderConfig( hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, + num_codebooks=num_codebooks, + audio_channels=audio_channels, ) return config @torch.no_grad() -def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"): +def convert_musicgen_checkpoint( + checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", safe_serialization=False +): fairseq_model = MusicGen.get_pretrained(checkpoint, device=device) decoder_config = decoder_config_from_checkpoint(checkpoint) @@ -146,18 +162,20 @@ def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=No model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict) # check we can do a forward pass - input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1) - decoder_input_ids = input_ids.reshape(2 * 4, -1) + input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1) + decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1) with torch.no_grad(): logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits - if logits.shape != (8, 1, 2048): + if logits.shape != (2 * decoder_config.num_codebooks, 1, 2048): raise ValueError("Incorrect shape for logits") # now construct the processor tokenizer = AutoTokenizer.from_pretrained("t5-base") - feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left") + feature_extractor = AutoFeatureExtractor.from_pretrained( + "facebook/encodec_32khz", padding_side="left", feature_size=decoder_config.audio_channels + ) processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) @@ -173,12 +191,12 @@ def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=No if pytorch_dump_folder is not None: Path(pytorch_dump_folder).mkdir(exist_ok=True) logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}") - model.save_pretrained(pytorch_dump_folder) + model.save_pretrained(pytorch_dump_folder, safe_serialization=safe_serialization) processor.save_pretrained(pytorch_dump_folder) if repo_id: logger.info(f"Pushing model {checkpoint} to {repo_id}") - model.push_to_hub(repo_id) + model.push_to_hub(repo_id, safe_serialization=safe_serialization) processor.push_to_hub(repo_id) @@ -189,7 +207,10 @@ def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=No "--checkpoint", default="small", type=str, - help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", + help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: " + "`['small', 'medium', 'large']` for the mono checkpoints, or " + "`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` " + "for the stereo checkpoints.", ) parser.add_argument( "--pytorch_dump_folder", @@ -204,6 +225,11 @@ def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=No parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." 
) + parser.add_argument( + "--safe_serialization", + action="store_true", + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).", + ) args = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub) diff --git a/src/transformers/models/musicgen/modeling_musicgen.py b/src/transformers/models/musicgen/modeling_musicgen.py index 2a015fc0321f..584b29e62313 100644 --- a/src/transformers/models/musicgen/modeling_musicgen.py +++ b/src/transformers/models/musicgen/modeling_musicgen.py @@ -1077,21 +1077,33 @@ def build_delay_pattern_mask(self, input_ids: torch.LongTensor, pad_token_id: in torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 ) + channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks # we only apply the mask if we have a large enough seq len - otherwise we return as is - if max_length < 2 * num_codebooks - 1: + if max_length < 2 * channel_codebooks - 1: return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1) # fill the shifted ids with the prompt entries, offset by the codebook idx - for codebook in range(num_codebooks): - input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook] + for codebook in range(channel_codebooks): + if self.config.audio_channels == 1: + # mono channel - loop over the codebooks one-by-one + input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook] + else: + # left/right channels are interleaved in the generated codebooks, so handle one then the other + input_ids_shifted[:, 2 * codebook, codebook : seq_len + codebook] = input_ids[:, 2 * codebook] + input_ids_shifted[:, 2 * codebook + 1, codebook : seq_len + codebook] = input_ids[:, 2 * codebook + 1] # construct a pattern mask that indicates the positions of padding tokens for each codebook # first fill the upper triangular part (the EOS padding) delay_pattern = torch.triu( - torch.ones((num_codebooks, max_length), dtype=torch.bool), diagonal=max_length - num_codebooks + 1 + torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1 ) # then fill the lower triangular part (the BOS padding) - delay_pattern = delay_pattern + torch.tril(torch.ones((num_codebooks, max_length), dtype=torch.bool)) + delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool)) + + if self.config.audio_channels == 2: + # for left/right channel we need to duplicate every row of the pattern mask in an interleaved fashion + delay_pattern = delay_pattern.repeat_interleave(2, dim=0) + mask = ~delay_pattern.to(input_ids.device) input_ids = mask * input_ids_shifted + ~mask * pad_token_id @@ -1856,6 +1868,11 @@ def forward( f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is " "disabled by setting `chunk_length=None` in the audio encoder." ) + + if self.config.audio_channels == 2 and audio_codes.shape[2] == self.decoder.num_codebooks // 2: + # mono input through encodec that we convert to stereo + audio_codes = audio_codes.repeat_interleave(2, dim=2) + decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len) # Decode @@ -2074,12 +2091,42 @@ def _prepare_audio_encoder_kwargs_for_generation( # 3. 
make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.audio_encoder.main_input_name encoder_kwargs["return_dict"] = True - encoder_kwargs[model_input_name] = input_values - audio_encoder_outputs = encoder.encode(**encoder_kwargs) + if self.decoder.config.audio_channels == 1: + encoder_kwargs[model_input_name] = input_values + audio_encoder_outputs = encoder.encode(**encoder_kwargs) + audio_codes = audio_encoder_outputs.audio_codes + audio_scales = audio_encoder_outputs.audio_scales - audio_codes = audio_encoder_outputs.audio_codes - frames, bsz, codebooks, seq_len = audio_codes.shape + frames, bsz, codebooks, seq_len = audio_codes.shape + + else: + if input_values.shape[1] != 2: + raise ValueError( + f"Expected stereo audio (2-channels) but example has {input_values.shape[1]} channel." + ) + + encoder_kwargs[model_input_name] = input_values[:, :1, :] + audio_encoder_outputs_left = encoder.encode(**encoder_kwargs) + audio_codes_left = audio_encoder_outputs_left.audio_codes + audio_scales_left = audio_encoder_outputs_left.audio_scales + + encoder_kwargs[model_input_name] = input_values[:, 1:, :] + audio_encoder_outputs_right = encoder.encode(**encoder_kwargs) + audio_codes_right = audio_encoder_outputs_right.audio_codes + audio_scales_right = audio_encoder_outputs_right.audio_scales + + frames, bsz, codebooks, seq_len = audio_codes_left.shape + # copy alternating left/right channel codes into stereo codebook + audio_codes = audio_codes_left.new_ones((frames, bsz, 2 * codebooks, seq_len)) + + audio_codes[:, :, ::2, :] = audio_codes_left + audio_codes[:, :, 1::2, :] = audio_codes_right + + if audio_scales_left != [None] or audio_scales_right != [None]: + audio_scales = torch.stack([audio_scales_left, audio_scales_right], dim=1) + else: + audio_scales = [None] * bsz if frames != 1: raise ValueError( @@ -2090,7 +2137,7 @@ def _prepare_audio_encoder_kwargs_for_generation( decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len) model_kwargs["decoder_input_ids"] = decoder_input_ids - model_kwargs["audio_scales"] = audio_encoder_outputs.audio_scales + model_kwargs["audio_scales"] = audio_scales return model_kwargs def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): @@ -2433,16 +2480,25 @@ def generate( if audio_scales is None: audio_scales = [None] * batch_size - output_values = self.audio_encoder.decode( - output_ids, - audio_scales=audio_scales, - ) + if self.decoder.config.audio_channels == 1: + output_values = self.audio_encoder.decode( + output_ids, + audio_scales=audio_scales, + ).audio_values + else: + codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales) + output_values_left = codec_outputs_left.audio_values + + codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales) + output_values_right = codec_outputs_right.audio_values + + output_values = torch.cat([output_values_left, output_values_right], dim=1) if generation_config.return_dict_in_generate: - outputs.sequences = output_values.audio_values + outputs.sequences = output_values return outputs else: - return output_values.audio_values + return output_values def get_unconditional_inputs(self, num_samples=1): """ diff --git a/tests/models/musicgen/test_modeling_musicgen.py b/tests/models/musicgen/test_modeling_musicgen.py index 2cd662bfe576..5e1d9ccdf298 100644 --- a/tests/models/musicgen/test_modeling_musicgen.py +++ 
b/tests/models/musicgen/test_modeling_musicgen.py @@ -379,6 +379,27 @@ def test_sample_generate_dict_output(self): self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) + def test_greedy_generate_stereo_outputs(self): + for model_class in self.greedy_sample_model_classes: + config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() + config.audio_channels = 2 + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + max_length=max_length, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) + self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) + + self.assertNotIn(config.pad_token_id, output_generate) + def prepare_musicgen_inputs_dict( config, @@ -1102,6 +1123,29 @@ def test_generate_fp16(self): input_dict["input_ids"], attention_mask=input_dict["attention_mask"], do_sample=True, max_new_tokens=10 ) + def test_greedy_generate_stereo_outputs(self): + for model_class in self.greedy_sample_model_classes: + config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() + config.audio_channels = 2 + + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + decoder_input_ids=decoder_input_ids, + max_length=max_length, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) + self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) + + self.assertNotIn(config.pad_token_id, output_generate) + def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000): """Produces a series of 'bip bip' sounds at a given frequency.""" @@ -1357,3 +1401,79 @@ def test_generate_text_audio_prompt(self): output_values.shape == (2, 1, 36480) ) # input values take shape 32000 and we generate from there self.assertTrue(torch.allclose(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES, atol=1e-4)) + + +@require_torch +class MusicgenStereoIntegrationTests(unittest.TestCase): + @cached_property + def model(self): + return MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-stereo-small").to(torch_device) + + @cached_property + def processor(self): + return MusicgenProcessor.from_pretrained("facebook/musicgen-stereo-small") + + @slow + def test_generate_unconditional_greedy(self): + model = self.model + + # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same + unconditional_inputs = model.get_unconditional_inputs(num_samples=1) + unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device) + + output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=12) + + # fmt: off + EXPECTED_VALUES_LEFT = torch.tensor( + [ + 0.0017, 0.0004, 0.0004, 0.0005, 0.0002, 0.0002, -0.0002, -0.0013, + -0.0010, -0.0015, -0.0018, -0.0032, -0.0060, -0.0082, -0.0096, -0.0099, + ] + ) + EXPECTED_VALUES_RIGHT = torch.tensor( + [ + 0.0038, 0.0028, 0.0031, 0.0032, 0.0031, 0.0032, 0.0030, 
0.0019, + 0.0021, 0.0015, 0.0009, -0.0008, -0.0040, -0.0067, -0.0087, -0.0096, + ] + ) + # fmt: on + + # (bsz, channels, seq_len) + self.assertTrue(output_values.shape == (1, 2, 5760)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, atol=1e-4)) + self.assertTrue(torch.allclose(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_RIGHT, atol=1e-4)) + + @slow + def test_generate_text_audio_prompt(self): + model = self.model + processor = self.processor + + # create stereo inputs + audio = [get_bip_bip(duration=0.5)[None, :].repeat(2, 0), get_bip_bip(duration=1.0)[None, :].repeat(2, 0)] + text = ["80s music", "Club techno"] + + inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") + inputs = place_dict_on_device(inputs, device=torch_device) + + output_values = model.generate(**inputs, do_sample=False, guidance_scale=3.0, max_new_tokens=12) + + # fmt: off + EXPECTED_VALUES_LEFT = torch.tensor( + [ + 0.2535, 0.2008, 0.1471, 0.0896, 0.0306, -0.0200, -0.0501, -0.0728, + -0.0832, -0.0856, -0.0867, -0.0884, -0.0864, -0.0866, -0.0744, -0.0430, + ] + ) + EXPECTED_VALUES_RIGHT = torch.tensor( + [ + 0.1695, 0.1213, 0.0732, 0.0239, -0.0264, -0.0705, -0.0935, -0.1103, + -0.1163, -0.1139, -0.1104, -0.1082, -0.1027, -0.1004, -0.0900, -0.0614, + ] + ) + # fmt: on + + # (bsz, channels, seq_len) + self.assertTrue(output_values.shape == (2, 2, 37760)) + # input values take shape 32000 and we generate from there - we check the last (generated) values + self.assertTrue(torch.allclose(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES_LEFT, atol=1e-4)) + self.assertTrue(torch.allclose(output_values[0, 1, -16:].cpu(), EXPECTED_VALUES_RIGHT, atol=1e-4)) From eb30a49b2028f2411514c10e432792ad581fc08b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mert=20Yan=C4=B1k?= Date: Wed, 8 Nov 2023 16:35:20 +0300 Subject: [PATCH 112/268] Translate index.md to Turkish (#27093) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add index.md for tukish language * Fix index.md (huggingface/transformers#27088) * Add 'tr' to additional files * Update docs/source/tr/_toctree.yml Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update index.md --------- Co-authored-by: Mert Yanık Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- .github/workflows/build_documentation.yml | 2 +- .github/workflows/build_pr_documentation.yml | 2 +- docs/source/tr/_toctree.yml | 4 + docs/source/tr/index.md | 295 +++++++++++++++++++ 4 files changed, 301 insertions(+), 2 deletions(-) create mode 100644 docs/source/tr/_toctree.yml create mode 100644 docs/source/tr/index.md diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml index 0dfa16f19332..99f0f15230a0 100644 --- a/.github/workflows/build_documentation.yml +++ b/.github/workflows/build_documentation.yml @@ -15,7 +15,7 @@ jobs: commit_sha: ${{ github.sha }} package: transformers notebook_folder: transformers_doc - languages: de en es fr hi it ko pt zh ja te + languages: de en es fr hi it ko pt tr zh ja te secrets: token: ${{ secrets.HUGGINGFACE_PUSH }} hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml index 2fddc9531013..f6fa4c8d537c 100644 --- a/.github/workflows/build_pr_documentation.yml +++ b/.github/workflows/build_pr_documentation.yml @@ -14,4 +14,4 @@ jobs: commit_sha: ${{ github.event.pull_request.head.sha }} 
pr_number: ${{ github.event.number }} package: transformers - languages: de en es fr hi it ko pt zh ja te + languages: de en es fr hi it ko pt tr zh ja te diff --git a/docs/source/tr/_toctree.yml b/docs/source/tr/_toctree.yml new file mode 100644 index 000000000000..8401da6e4eb0 --- /dev/null +++ b/docs/source/tr/_toctree.yml @@ -0,0 +1,4 @@ +- sections: + - local: index + title: 🤗 Transformers + title: Get started \ No newline at end of file diff --git a/docs/source/tr/index.md b/docs/source/tr/index.md new file mode 100644 index 000000000000..1b2c665e169d --- /dev/null +++ b/docs/source/tr/index.md @@ -0,0 +1,295 @@ + + +# 🤗 Transformers + +[PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) ve [JAX](https://jax.readthedocs.io/en/latest/) için son teknoloji makine öğrenimi. + +🤗 Transformers, güncel önceden eğitilmiş (pretrained) modelleri indirmenizi ve eğitmenizi kolaylaştıran API'ler ve araçlar sunar. Önceden eğitilmiş modeller kullanarak, hesaplama maliyetlerinizi ve karbon ayak izinizi azaltabilir, ve sıfırdan bir modeli eğitmek için gereken zaman ve kaynaklardan tasarruf edebilirsiniz. Bu modeller farklı modalitelerde ortak görevleri destekler. Örneğin: + +📝 **Doğal Dil İşleme**: metin sınıflandırma, adlandırılmış varlık tanıma, soru cevaplama, dil modelleme, özetleme, çeviri, çoktan seçmeli ve metin oluşturma.
+🖼️ **Bilgisayarlı Görü**: görüntü sınıflandırma, nesne tespiti ve bölümleme (segmentation).
+🗣️ **Ses**: otomatik konuşma tanıma ve ses sınıflandırma.
+🐙 **Çoklu Model**: tablo soru cevaplama, optik karakter tanıma, taranmış belgelerden bilgi çıkarma, video sınıflandırma ve görsel soru cevaplama. + +🤗 Transformers, PyTorch, TensorFlow ve JAX arasında çerçeve (framework) uyumluluğu sağlar. Bu, bir modelin yaşam döngüsünün her aşamasında farklı bir çerçeve kullanma esnekliği sunar; bir çerçevede üç satır kodla bir modeli eğitebilir ve başka bir çerçevede tahminleme için kullanabilirsiniz. Modeller ayrıca üretim ortamlarında kullanılmak üzere ONNX ve TorchScript gibi bir formata aktarılabilir. + +Büyüyen topluluğa [Hub](https://huggingface.co/models), [Forum](https://discuss.huggingface.co/) veya [Discord](https://discord.com/invite/JfAtkvEtRb) üzerinden katılabilirsiniz! + +## Hugging Face ekibinden özel destek arıyorsanız + + + HuggingFace Uzman Hızlandırma Programı + + +## İçindekiler + +Dokümantasyon, beş bölüme ayrılmıştır: + +- **BAŞLARKEN**, kütüphanenin hızlı bir turunu ve çalışmaya başlamak için kurulum talimatlarını sağlar. +- **ÖĞRETİCİLER**, başlangıç yapmak için harika bir yerdir. Bu bölüm, kütüphane kullanmaya başlamak için ihtiyacınız olan temel becerileri kazanmanıza yardımcı olacaktır. +- **NASIL YAPILIR KILAVUZLARI**, önceden eğitilmiş bir modele dil modellemesi için ince ayar (fine-tuning) yapmak veya özel bir model yazmak, ve paylaşmak gibi belirli bir hedefe nasıl ulaşılacağını gösterir. +- **KAVRAMSAL REHBERLER**, modellerin, görevlerin ve 🤗 Transformers tasarım felsefesinin temel kavramları ve fikirleri hakkında daha fazla tartışma ve açıklama sunar. +- **API** tüm sınıfları (class) ve fonksiyonları (functions) açıklar: + + - **ANA SINIFLAR**, yapılandırma, model, tokenizer ve pipeline gibi en önemli sınıfları (classes) ayrıntılandırır. + - **MODELLER**, kütüphanede kullanılan her modelle ilgili sınıfları ve fonksiyonları detaylı olarak inceler. + - **DAHİLİ YARDIMCILAR**, kullanılan yardımcı sınıfları ve fonksiyonları detaylı olarak inceler. + +## Desteklenen Modeller ve Çerçeveler + +Aşağıdaki tablo, her bir model için kütüphanede yer alan mevcut desteği temsil etmektedir. Her bir model için bir Python tokenizer'ına ("slow" olarak adlandırılır) sahip olup olmadıkları, 🤗 Tokenizers kütüphanesi tarafından desteklenen hızlı bir tokenizer'a sahip olup olmadıkları, Jax (Flax aracılığıyla), PyTorch ve/veya TensorFlow'da destek olup olmadıklarını göstermektedir. 
+ + + +| Model | PyTorch support | TensorFlow support | Flax Support | +|:------------------------------------------------------------------------:|:---------------:|:------------------:|:------------:| +| [ALBERT](model_doc/albert) | ✅ | ✅ | ✅ | +| [ALIGN](model_doc/align) | ✅ | ❌ | ❌ | +| [AltCLIP](model_doc/altclip) | ✅ | ❌ | ❌ | +| [Audio Spectrogram Transformer](model_doc/audio-spectrogram-transformer) | ✅ | ❌ | ❌ | +| [Autoformer](model_doc/autoformer) | ✅ | ❌ | ❌ | +| [Bark](model_doc/bark) | ✅ | ❌ | ❌ | +| [BART](model_doc/bart) | ✅ | ✅ | ✅ | +| [BARThez](model_doc/barthez) | ✅ | ✅ | ✅ | +| [BARTpho](model_doc/bartpho) | ✅ | ✅ | ✅ | +| [BEiT](model_doc/beit) | ✅ | ❌ | ✅ | +| [BERT](model_doc/bert) | ✅ | ✅ | ✅ | +| [Bert Generation](model_doc/bert-generation) | ✅ | ❌ | ❌ | +| [BertJapanese](model_doc/bert-japanese) | ✅ | ✅ | ✅ | +| [BERTweet](model_doc/bertweet) | ✅ | ✅ | ✅ | +| [BigBird](model_doc/big_bird) | ✅ | ❌ | ✅ | +| [BigBird-Pegasus](model_doc/bigbird_pegasus) | ✅ | ❌ | ❌ | +| [BioGpt](model_doc/biogpt) | ✅ | ❌ | ❌ | +| [BiT](model_doc/bit) | ✅ | ❌ | ❌ | +| [Blenderbot](model_doc/blenderbot) | ✅ | ✅ | ✅ | +| [BlenderbotSmall](model_doc/blenderbot-small) | ✅ | ✅ | ✅ | +| [BLIP](model_doc/blip) | ✅ | ✅ | ❌ | +| [BLIP-2](model_doc/blip-2) | ✅ | ❌ | ❌ | +| [BLOOM](model_doc/bloom) | ✅ | ❌ | ✅ | +| [BORT](model_doc/bort) | ✅ | ✅ | ✅ | +| [BridgeTower](model_doc/bridgetower) | ✅ | ❌ | ❌ | +| [BROS](model_doc/bros) | ✅ | ❌ | ❌ | +| [ByT5](model_doc/byt5) | ✅ | ✅ | ✅ | +| [CamemBERT](model_doc/camembert) | ✅ | ✅ | ❌ | +| [CANINE](model_doc/canine) | ✅ | ❌ | ❌ | +| [Chinese-CLIP](model_doc/chinese_clip) | ✅ | ❌ | ❌ | +| [CLAP](model_doc/clap) | ✅ | ❌ | ❌ | +| [CLIP](model_doc/clip) | ✅ | ✅ | ✅ | +| [CLIPSeg](model_doc/clipseg) | ✅ | ❌ | ❌ | +| [CodeGen](model_doc/codegen) | ✅ | ❌ | ❌ | +| [CodeLlama](model_doc/code_llama) | ✅ | ❌ | ❌ | +| [Conditional DETR](model_doc/conditional_detr) | ✅ | ❌ | ❌ | +| [ConvBERT](model_doc/convbert) | ✅ | ✅ | ❌ | +| [ConvNeXT](model_doc/convnext) | ✅ | ✅ | ❌ | +| [ConvNeXTV2](model_doc/convnextv2) | ✅ | ❌ | ❌ | +| [CPM](model_doc/cpm) | ✅ | ✅ | ✅ | +| [CPM-Ant](model_doc/cpmant) | ✅ | ❌ | ❌ | +| [CTRL](model_doc/ctrl) | ✅ | ✅ | ❌ | +| [CvT](model_doc/cvt) | ✅ | ✅ | ❌ | +| [Data2VecAudio](model_doc/data2vec) | ✅ | ❌ | ❌ | +| [Data2VecText](model_doc/data2vec) | ✅ | ❌ | ❌ | +| [Data2VecVision](model_doc/data2vec) | ✅ | ✅ | ❌ | +| [DeBERTa](model_doc/deberta) | ✅ | ✅ | ❌ | +| [DeBERTa-v2](model_doc/deberta-v2) | ✅ | ✅ | ❌ | +| [Decision Transformer](model_doc/decision_transformer) | ✅ | ❌ | ❌ | +| [Deformable DETR](model_doc/deformable_detr) | ✅ | ❌ | ❌ | +| [DeiT](model_doc/deit) | ✅ | ✅ | ❌ | +| [DePlot](model_doc/deplot) | ✅ | ❌ | ❌ | +| [DETA](model_doc/deta) | ✅ | ❌ | ❌ | +| [DETR](model_doc/detr) | ✅ | ❌ | ❌ | +| [DialoGPT](model_doc/dialogpt) | ✅ | ✅ | ✅ | +| [DiNAT](model_doc/dinat) | ✅ | ❌ | ❌ | +| [DINOv2](model_doc/dinov2) | ✅ | ❌ | ❌ | +| [DistilBERT](model_doc/distilbert) | ✅ | ✅ | ✅ | +| [DiT](model_doc/dit) | ✅ | ❌ | ✅ | +| [DonutSwin](model_doc/donut) | ✅ | ❌ | ❌ | +| [DPR](model_doc/dpr) | ✅ | ✅ | ❌ | +| [DPT](model_doc/dpt) | ✅ | ❌ | ❌ | +| [EfficientFormer](model_doc/efficientformer) | ✅ | ✅ | ❌ | +| [EfficientNet](model_doc/efficientnet) | ✅ | ❌ | ❌ | +| [ELECTRA](model_doc/electra) | ✅ | ✅ | ✅ | +| [EnCodec](model_doc/encodec) | ✅ | ❌ | ❌ | +| [Encoder decoder](model_doc/encoder-decoder) | ✅ | ✅ | ✅ | +| [ERNIE](model_doc/ernie) | ✅ | ❌ | ❌ | +| [ErnieM](model_doc/ernie_m) | ✅ | ❌ | ❌ | +| [ESM](model_doc/esm) | ✅ | ✅ | ❌ | +| 
[FairSeq Machine-Translation](model_doc/fsmt) | ✅ | ❌ | ❌ | +| [Falcon](model_doc/falcon) | ✅ | ❌ | ❌ | +| [FLAN-T5](model_doc/flan-t5) | ✅ | ✅ | ✅ | +| [FLAN-UL2](model_doc/flan-ul2) | ✅ | ✅ | ✅ | +| [FlauBERT](model_doc/flaubert) | ✅ | ✅ | ❌ | +| [FLAVA](model_doc/flava) | ✅ | ❌ | ❌ | +| [FNet](model_doc/fnet) | ✅ | ❌ | ❌ | +| [FocalNet](model_doc/focalnet) | ✅ | ❌ | ❌ | +| [Funnel Transformer](model_doc/funnel) | ✅ | ✅ | ❌ | +| [Fuyu](model_doc/fuyu) | ✅ | ❌ | ❌ | +| [GIT](model_doc/git) | ✅ | ❌ | ❌ | +| [GLPN](model_doc/glpn) | ✅ | ❌ | ❌ | +| [GPT Neo](model_doc/gpt_neo) | ✅ | ❌ | ✅ | +| [GPT NeoX](model_doc/gpt_neox) | ✅ | ❌ | ❌ | +| [GPT NeoX Japanese](model_doc/gpt_neox_japanese) | ✅ | ❌ | ❌ | +| [GPT-J](model_doc/gptj) | ✅ | ✅ | ✅ | +| [GPT-Sw3](model_doc/gpt-sw3) | ✅ | ✅ | ✅ | +| [GPTBigCode](model_doc/gpt_bigcode) | ✅ | ❌ | ❌ | +| [GPTSAN-japanese](model_doc/gptsan-japanese) | ✅ | ❌ | ❌ | +| [Graphormer](model_doc/graphormer) | ✅ | ❌ | ❌ | +| [GroupViT](model_doc/groupvit) | ✅ | ✅ | ❌ | +| [HerBERT](model_doc/herbert) | ✅ | ✅ | ✅ | +| [Hubert](model_doc/hubert) | ✅ | ✅ | ❌ | +| [I-BERT](model_doc/ibert) | ✅ | ❌ | ❌ | +| [IDEFICS](model_doc/idefics) | ✅ | ❌ | ❌ | +| [ImageGPT](model_doc/imagegpt) | ✅ | ❌ | ❌ | +| [Informer](model_doc/informer) | ✅ | ❌ | ❌ | +| [InstructBLIP](model_doc/instructblip) | ✅ | ❌ | ❌ | +| [Jukebox](model_doc/jukebox) | ✅ | ❌ | ❌ | +| [LayoutLM](model_doc/layoutlm) | ✅ | ✅ | ❌ | +| [LayoutLMv2](model_doc/layoutlmv2) | ✅ | ❌ | ❌ | +| [LayoutLMv3](model_doc/layoutlmv3) | ✅ | ✅ | ❌ | +| [LayoutXLM](model_doc/layoutxlm) | ✅ | ❌ | ❌ | +| [LED](model_doc/led) | ✅ | ✅ | ❌ | +| [LeViT](model_doc/levit) | ✅ | ❌ | ❌ | +| [LiLT](model_doc/lilt) | ✅ | ❌ | ❌ | +| [LLaMA](model_doc/llama) | ✅ | ❌ | ❌ | +| [Llama2](model_doc/llama2) | ✅ | ❌ | ❌ | +| [Longformer](model_doc/longformer) | ✅ | ✅ | ❌ | +| [LongT5](model_doc/longt5) | ✅ | ❌ | ✅ | +| [LUKE](model_doc/luke) | ✅ | ❌ | ❌ | +| [LXMERT](model_doc/lxmert) | ✅ | ✅ | ❌ | +| [M-CTC-T](model_doc/mctct) | ✅ | ❌ | ❌ | +| [M2M100](model_doc/m2m_100) | ✅ | ❌ | ❌ | +| [Marian](model_doc/marian) | ✅ | ✅ | ✅ | +| [MarkupLM](model_doc/markuplm) | ✅ | ❌ | ❌ | +| [Mask2Former](model_doc/mask2former) | ✅ | ❌ | ❌ | +| [MaskFormer](model_doc/maskformer) | ✅ | ❌ | ❌ | +| [MatCha](model_doc/matcha) | ✅ | ❌ | ❌ | +| [mBART](model_doc/mbart) | ✅ | ✅ | ✅ | +| [mBART-50](model_doc/mbart50) | ✅ | ✅ | ✅ | +| [MEGA](model_doc/mega) | ✅ | ❌ | ❌ | +| [Megatron-BERT](model_doc/megatron-bert) | ✅ | ❌ | ❌ | +| [Megatron-GPT2](model_doc/megatron_gpt2) | ✅ | ✅ | ✅ | +| [MGP-STR](model_doc/mgp-str) | ✅ | ❌ | ❌ | +| [Mistral](model_doc/mistral) | ✅ | ❌ | ❌ | +| [mLUKE](model_doc/mluke) | ✅ | ❌ | ❌ | +| [MMS](model_doc/mms) | ✅ | ✅ | ✅ | +| [MobileBERT](model_doc/mobilebert) | ✅ | ✅ | ❌ | +| [MobileNetV1](model_doc/mobilenet_v1) | ✅ | ❌ | ❌ | +| [MobileNetV2](model_doc/mobilenet_v2) | ✅ | ❌ | ❌ | +| [MobileViT](model_doc/mobilevit) | ✅ | ✅ | ❌ | +| [MobileViTV2](model_doc/mobilevitv2) | ✅ | ❌ | ❌ | +| [MPNet](model_doc/mpnet) | ✅ | ✅ | ❌ | +| [MPT](model_doc/mpt) | ✅ | ❌ | ❌ | +| [MRA](model_doc/mra) | ✅ | ❌ | ❌ | +| [MT5](model_doc/mt5) | ✅ | ✅ | ✅ | +| [MusicGen](model_doc/musicgen) | ✅ | ❌ | ❌ | +| [MVP](model_doc/mvp) | ✅ | ❌ | ❌ | +| [NAT](model_doc/nat) | ✅ | ❌ | ❌ | +| [Nezha](model_doc/nezha) | ✅ | ❌ | ❌ | +| [NLLB](model_doc/nllb) | ✅ | ❌ | ❌ | +| [NLLB-MOE](model_doc/nllb-moe) | ✅ | ❌ | ❌ | +| [Nougat](model_doc/nougat) | ✅ | ✅ | ✅ | +| [Nyströmformer](model_doc/nystromformer) | ✅ | ❌ | ❌ | +| [OneFormer](model_doc/oneformer) | ✅ | ❌ | ❌ | 
+| [OpenAI GPT](model_doc/openai-gpt) | ✅ | ✅ | ❌ | +| [OpenAI GPT-2](model_doc/gpt2) | ✅ | ✅ | ✅ | +| [OpenLlama](model_doc/open-llama) | ✅ | ❌ | ❌ | +| [OPT](model_doc/opt) | ✅ | ✅ | ✅ | +| [OWL-ViT](model_doc/owlvit) | ✅ | ❌ | ❌ | +| [OWLv2](model_doc/owlv2) | ✅ | ❌ | ❌ | +| [Pegasus](model_doc/pegasus) | ✅ | ✅ | ✅ | +| [PEGASUS-X](model_doc/pegasus_x) | ✅ | ❌ | ❌ | +| [Perceiver](model_doc/perceiver) | ✅ | ❌ | ❌ | +| [Persimmon](model_doc/persimmon) | ✅ | ❌ | ❌ | +| [PhoBERT](model_doc/phobert) | ✅ | ✅ | ✅ | +| [Pix2Struct](model_doc/pix2struct) | ✅ | ❌ | ❌ | +| [PLBart](model_doc/plbart) | ✅ | ❌ | ❌ | +| [PoolFormer](model_doc/poolformer) | ✅ | ❌ | ❌ | +| [Pop2Piano](model_doc/pop2piano) | ✅ | ❌ | ❌ | +| [ProphetNet](model_doc/prophetnet) | ✅ | ❌ | ❌ | +| [PVT](model_doc/pvt) | ✅ | ❌ | ❌ | +| [QDQBert](model_doc/qdqbert) | ✅ | ❌ | ❌ | +| [RAG](model_doc/rag) | ✅ | ✅ | ❌ | +| [REALM](model_doc/realm) | ✅ | ❌ | ❌ | +| [Reformer](model_doc/reformer) | ✅ | ❌ | ❌ | +| [RegNet](model_doc/regnet) | ✅ | ✅ | ✅ | +| [RemBERT](model_doc/rembert) | ✅ | ✅ | ❌ | +| [ResNet](model_doc/resnet) | ✅ | ✅ | ✅ | +| [RetriBERT](model_doc/retribert) | ✅ | ❌ | ❌ | +| [RoBERTa](model_doc/roberta) | ✅ | ✅ | ✅ | +| [RoBERTa-PreLayerNorm](model_doc/roberta-prelayernorm) | ✅ | ✅ | ✅ | +| [RoCBert](model_doc/roc_bert) | ✅ | ❌ | ❌ | +| [RoFormer](model_doc/roformer) | ✅ | ✅ | ✅ | +| [RWKV](model_doc/rwkv) | ✅ | ❌ | ❌ | +| [SAM](model_doc/sam) | ✅ | ✅ | ❌ | +| [SeamlessM4T](model_doc/seamless_m4t) | ✅ | ❌ | ❌ | +| [SegFormer](model_doc/segformer) | ✅ | ✅ | ❌ | +| [SEW](model_doc/sew) | ✅ | ❌ | ❌ | +| [SEW-D](model_doc/sew-d) | ✅ | ❌ | ❌ | +| [Speech Encoder decoder](model_doc/speech-encoder-decoder) | ✅ | ❌ | ✅ | +| [Speech2Text](model_doc/speech_to_text) | ✅ | ✅ | ❌ | +| [SpeechT5](model_doc/speecht5) | ✅ | ❌ | ❌ | +| [Splinter](model_doc/splinter) | ✅ | ❌ | ❌ | +| [SqueezeBERT](model_doc/squeezebert) | ✅ | ❌ | ❌ | +| [SwiftFormer](model_doc/swiftformer) | ✅ | ❌ | ❌ | +| [Swin Transformer](model_doc/swin) | ✅ | ✅ | ❌ | +| [Swin Transformer V2](model_doc/swinv2) | ✅ | ❌ | ❌ | +| [Swin2SR](model_doc/swin2sr) | ✅ | ❌ | ❌ | +| [SwitchTransformers](model_doc/switch_transformers) | ✅ | ❌ | ❌ | +| [T5](model_doc/t5) | ✅ | ✅ | ✅ | +| [T5v1.1](model_doc/t5v1.1) | ✅ | ✅ | ✅ | +| [Table Transformer](model_doc/table-transformer) | ✅ | ❌ | ❌ | +| [TAPAS](model_doc/tapas) | ✅ | ✅ | ❌ | +| [TAPEX](model_doc/tapex) | ✅ | ✅ | ✅ | +| [Time Series Transformer](model_doc/time_series_transformer) | ✅ | ❌ | ❌ | +| [TimeSformer](model_doc/timesformer) | ✅ | ❌ | ❌ | +| [Trajectory Transformer](model_doc/trajectory_transformer) | ✅ | ❌ | ❌ | +| [Transformer-XL](model_doc/transfo-xl) | ✅ | ✅ | ❌ | +| [TrOCR](model_doc/trocr) | ✅ | ❌ | ❌ | +| [TVLT](model_doc/tvlt) | ✅ | ❌ | ❌ | +| [UL2](model_doc/ul2) | ✅ | ✅ | ✅ | +| [UMT5](model_doc/umt5) | ✅ | ❌ | ❌ | +| [UniSpeech](model_doc/unispeech) | ✅ | ❌ | ❌ | +| [UniSpeechSat](model_doc/unispeech-sat) | ✅ | ❌ | ❌ | +| [UPerNet](model_doc/upernet) | ✅ | ❌ | ❌ | +| [VAN](model_doc/van) | ✅ | ❌ | ❌ | +| [VideoMAE](model_doc/videomae) | ✅ | ❌ | ❌ | +| [ViLT](model_doc/vilt) | ✅ | ❌ | ❌ | +| [Vision Encoder decoder](model_doc/vision-encoder-decoder) | ✅ | ✅ | ✅ | +| [VisionTextDualEncoder](model_doc/vision-text-dual-encoder) | ✅ | ✅ | ✅ | +| [VisualBERT](model_doc/visual_bert) | ✅ | ❌ | ❌ | +| [ViT](model_doc/vit) | ✅ | ✅ | ✅ | +| [ViT Hybrid](model_doc/vit_hybrid) | ✅ | ❌ | ❌ | +| [VitDet](model_doc/vitdet) | ✅ | ❌ | ❌ | +| [ViTMAE](model_doc/vit_mae) | ✅ | ✅ | ❌ | +| [ViTMatte](model_doc/vitmatte) | 
✅ | ❌ | ❌ | +| [ViTMSN](model_doc/vit_msn) | ✅ | ❌ | ❌ | +| [VITS](model_doc/vits) | ✅ | ❌ | ❌ | +| [ViViT](model_doc/vivit) | ✅ | ❌ | ❌ | +| [Wav2Vec2](model_doc/wav2vec2) | ✅ | ✅ | ✅ | +| [Wav2Vec2-Conformer](model_doc/wav2vec2-conformer) | ✅ | ❌ | ❌ | +| [Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme) | ✅ | ✅ | ✅ | +| [WavLM](model_doc/wavlm) | ✅ | ❌ | ❌ | +| [Whisper](model_doc/whisper) | ✅ | ✅ | ✅ | +| [X-CLIP](model_doc/xclip) | ✅ | ❌ | ❌ | +| [X-MOD](model_doc/xmod) | ✅ | ❌ | ❌ | +| [XGLM](model_doc/xglm) | ✅ | ✅ | ✅ | +| [XLM](model_doc/xlm) | ✅ | ✅ | ❌ | +| [XLM-ProphetNet](model_doc/xlm-prophetnet) | ✅ | ❌ | ❌ | +| [XLM-RoBERTa](model_doc/xlm-roberta) | ✅ | ✅ | ✅ | +| [XLM-RoBERTa-XL](model_doc/xlm-roberta-xl) | ✅ | ❌ | ❌ | +| [XLM-V](model_doc/xlm-v) | ✅ | ✅ | ✅ | +| [XLNet](model_doc/xlnet) | ✅ | ✅ | ❌ | +| [XLS-R](model_doc/xls_r) | ✅ | ✅ | ✅ | +| [XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2) | ✅ | ✅ | ✅ | +| [YOLOS](model_doc/yolos) | ✅ | ❌ | ❌ | +| [YOSO](model_doc/yoso) | ✅ | ❌ | ❌ | + + From 845aa832b7637e158a6aa1baae40f0c218e90824 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Wed, 8 Nov 2023 09:07:32 -0500 Subject: [PATCH 113/268] Remove unused param from example script tests (#27354) Unused param --- examples/pytorch/test_accelerate_examples.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/examples/pytorch/test_accelerate_examples.py b/examples/pytorch/test_accelerate_examples.py index 22dd7dea27ae..cdc9d6dc5b56 100644 --- a/examples/pytorch/test_accelerate_examples.py +++ b/examples/pytorch/test_accelerate_examples.py @@ -28,7 +28,6 @@ from transformers.testing_utils import ( TestCasePlus, backend_device_count, - is_torch_fp16_available_on_device, run_command, slow, torch_device, @@ -93,9 +92,6 @@ def test_run_glue_no_trainer(self): --with_tracking """.split() - if is_torch_fp16_available_on_device(torch_device): - testargs.append("--fp16") - run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) @@ -325,9 +321,6 @@ def test_run_image_classification_no_trainer(self): --checkpointing_steps 1 """.split() - if is_torch_fp16_available_on_device(torch_device): - testargs.append("--fp16") - run_command(self._launch_args + testargs) result = get_results(tmp_dir) # The base model scores a 25% From 7b175cfaa780ff54c534f236fae5bcaa22214874 Mon Sep 17 00:00:00 2001 From: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Date: Wed, 8 Nov 2023 15:11:38 +0000 Subject: [PATCH 114/268] [Flax Whisper] large-v3 compatibility (#27360) --- src/transformers/models/whisper/modeling_flax_whisper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/whisper/modeling_flax_whisper.py b/src/transformers/models/whisper/modeling_flax_whisper.py index ffcaeb53ad71..14ce899ab16c 100644 --- a/src/transformers/models/whisper/modeling_flax_whisper.py +++ b/src/transformers/models/whisper/modeling_flax_whisper.py @@ -867,7 +867,7 @@ class FlaxWhisperPreTrainedModel(FlaxPreTrainedModel): def __init__( self, config: WhisperConfig, - input_shape: Tuple[int] = (1, 80, 3000), + input_shape: Tuple[int] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, @@ -875,6 +875,8 @@ def __init__( **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) + if input_shape is None: + input_shape = (1, config.num_mel_bins, 2 * config.max_source_positions) super().__init__(config, module, 
input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): From bd8f45b1674be4da875097cb5557557d80bcce46 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 8 Nov 2023 17:15:57 +0100 Subject: [PATCH 115/268] Fix tiny model script: not using `from_pt=True` (#27372) fix Co-authored-by: ydshieh --- utils/create_dummy_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/create_dummy_models.py b/utils/create_dummy_models.py index 343a66020034..5ee53ef7dab6 100644 --- a/utils/create_dummy_models.py +++ b/utils/create_dummy_models.py @@ -916,7 +916,7 @@ def build_composite_models(config_class, output_dir): model.save_pretrained(model_path) if tf_model_class is not None: - model = tf_model_class.from_pretrained(model_path, from_pt=True) + model = tf_model_class.from_pretrained(model_path) model.save_pretrained(model_path) # copy the processors @@ -1204,7 +1204,7 @@ def build(config_class, models_to_create, output_dir): ckpt = get_checkpoint_dir(output_dir, pt_arch) # Use the same weights from PyTorch. try: - model = tensorflow_arch.from_pretrained(ckpt, from_pt=True) + model = tensorflow_arch.from_pretrained(ckpt) model.save_pretrained(ckpt) except Exception as e: # Conversion may fail. Let's not create a model with different weights to avoid confusion (for now). From ef716736165cbbac433a3c4b2c3ea97246b3d6b9 Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Wed, 8 Nov 2023 10:48:46 -0600 Subject: [PATCH 116/268] translate big_models.md and performance.md to chinese (#27334) * translate performance.md * tranlsate performance.md and big_models.md * update translation * update review --- docs/source/zh/_toctree.yml | 6 ++ docs/source/zh/big_models.md | 123 ++++++++++++++++++++++++++++++++++ docs/source/zh/performance.md | 63 +++++++++++++++++ 3 files changed, 192 insertions(+) create mode 100644 docs/source/zh/big_models.md create mode 100644 docs/source/zh/performance.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index a0ee896f0982..d5226c78847c 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -42,6 +42,12 @@ - local: tflite title: 导出为 TFLite title: 开发者指南 +- sections: + - local: performance + title: 综述 + - local: big_models + title: 实例化大模型 + title: 性能和可扩展性 - sections: - local: task_summary title: 🤗Transformers能做什么 diff --git a/docs/source/zh/big_models.md b/docs/source/zh/big_models.md new file mode 100644 index 000000000000..92442ea2981f --- /dev/null +++ b/docs/source/zh/big_models.md @@ -0,0 +1,123 @@ + + +# 实例化大型模型 + +当你想使用一个非常大的预训练模型时,一个挑战是尽量减少对内存的使用。通常从PyTorch开始的工作流程如下: + +1. 用随机权重创建你的模型。 +2. 加载你的预训练权重。 +3. 将这些预训练权重放入你的随机模型中。 + +步骤1和2都需要完整版本的模型在内存中,这在大多数情况下不是问题,但如果你的模型开始达到几个GB的大小,这两个副本可能会让你超出内存的限制。更糟糕的是,如果你使用`torch.distributed`来启动分布式训练,每个进程都会加载预训练模型并将这两个副本存储在内存中。 + + + +请注意,随机创建的模型使用“空”张量进行初始化,这些张量占用内存空间但不填充它(因此随机值是给定时间内该内存块中的任何内容)。在第3步之后,对未初始化的权重执行适合模型/参数种类的随机初始化(例如正态分布),以尽可能提高速度! 
+ + + +在本指南中,我们将探讨 Transformers 提供的解决方案来处理这个问题。请注意,这是一个积极开发的领域,因此这里解释的API在将来可能会略有变化。 + +## 分片checkpoints + +自4.18.0版本起,占用空间超过10GB的模型检查点将自动分成较小的片段。在使用`model.save_pretrained(save_dir)`时,您最终会得到几个部分`checkpoints`(每个的大小都小于10GB)以及一个索引,该索引将参数名称映射到存储它们的文件。 + +您可以使用`max_shard_size`参数来控制分片之前的最大大小。为了示例的目的,我们将使用具有较小分片大小的普通大小的模型:让我们以传统的BERT模型为例。 + + +```py +from transformers import AutoModel + +model = AutoModel.from_pretrained("bert-base-cased") +``` + +如果您使用 [`PreTrainedModel.save_pretrained`](模型预训练保存) 进行保存,您将得到一个新的文件夹,其中包含两个文件:模型的配置和权重: + +```py +>>> import os +>>> import tempfile + +>>> with tempfile.TemporaryDirectory() as tmp_dir: +... model.save_pretrained(tmp_dir) +... print(sorted(os.listdir(tmp_dir))) +['config.json', 'pytorch_model.bin'] +``` + +现在让我们使用最大分片大小为200MB: + +```py +>>> with tempfile.TemporaryDirectory() as tmp_dir: +... model.save_pretrained(tmp_dir, max_shard_size="200MB") +... print(sorted(os.listdir(tmp_dir))) +['config.json', 'pytorch_model-00001-of-00003.bin', 'pytorch_model-00002-of-00003.bin', 'pytorch_model-00003-of-00003.bin', 'pytorch_model.bin.index.json'] +``` + +在模型配置文件最上方,我们可以看到三个不同的权重文件,以及一个`index.json`索引文件。这样的`checkpoint`可以使用`[~PreTrainedModel.from_pretrained]`方法完全重新加载: + +```py +>>> with tempfile.TemporaryDirectory() as tmp_dir: +... model.save_pretrained(tmp_dir, max_shard_size="200MB") +... new_model = AutoModel.from_pretrained(tmp_dir) +``` + +对于大型模型来说,这样做的主要优点是在上述工作流程的步骤2中,每个`checkpoint`的分片在前一个分片之后加载,从而将内存中的内存使用限制在模型大小加上最大分片的大小。 + +在后台,索引文件用于确定`checkpoint`中包含哪些键以及相应的权重存储在哪里。我们可以像加载任何json一样加载该索引,并获得一个字典: + +```py +>>> import json + +>>> with tempfile.TemporaryDirectory() as tmp_dir: +... model.save_pretrained(tmp_dir, max_shard_size="200MB") +... with open(os.path.join(tmp_dir, "pytorch_model.bin.index.json"), "r") as f: +... index = json.load(f) + +>>> print(index.keys()) +dict_keys(['metadata', 'weight_map']) +``` + +目前元数据仅包括模型的总大小。我们计划在将来添加其他信息: +```py +>>> index["metadata"] +{'total_size': 433245184} +``` + +权重映射是该索引的主要部分,它将每个参数的名称(通常在PyTorch模型的`state_dict`中找到)映射到存储该参数的文件: + +```py +>>> index["weight_map"] +{'embeddings.LayerNorm.bias': 'pytorch_model-00001-of-00003.bin', + 'embeddings.LayerNorm.weight': 'pytorch_model-00001-of-00003.bin', + ... +``` + +如果您想直接在模型内部加载这样的分片`checkpoint`,而不使用 [`PreTrainedModel.from_pretrained`](就像您会为完整`checkpoint`执行 `model.load_state_dict()` 一样),您应该使用 [`modeling_utils.load_sharded_checkpoint`]: + + +```py +>>> from transformers.modeling_utils import load_sharded_checkpoint + +>>> with tempfile.TemporaryDirectory() as tmp_dir: +... model.save_pretrained(tmp_dir, max_shard_size="200MB") +... 
load_sharded_checkpoint(model, tmp_dir) +``` + +## 低内存加载 + +分片`checkpoints`在上述工作流的第2步中降低了内存使用,但为了在低内存环境中使用该模型,我们建议使用基于 Accelerate 库的工具。 + +请阅读以下指南以获取更多信息:[使用 Accelerate 进行大模型加载](./main_classes/model#large-model-loading) diff --git a/docs/source/zh/performance.md b/docs/source/zh/performance.md new file mode 100644 index 000000000000..afe41c8fdd14 --- /dev/null +++ b/docs/source/zh/performance.md @@ -0,0 +1,63 @@ + + +# 性能与可扩展性 + +训练大型transformer模型并将其部署到生产环境会面临各种挑战。 +在训练过程中,模型可能需要比可用的GPU内存更多的资源,或者表现出较慢的训练速度。在部署阶段,模型可能在生产环境中难以处理所需的吞吐量。 + +本文档旨在帮助您克服这些挑战,并找到适合您使用场景的最佳设置。教程分为训练和推理部分,因为每个部分都有不同的挑战和解决方案。在每个部分中,您将找到针对不同硬件配置的单独指南,例如单GPU与多GPU用于训练或CPU与GPU用于推理。 + +将此文档作为您的起点,进一步导航到与您的情况匹配的方法。 + +## 训练 + +高效训练大型transformer模型需要使用加速器硬件,如GPU或TPU。最常见的情况是您只有一个GPU。您应用于单个GPU上提高训练效率的方法可以扩展到其他设置,如多个GPU。然而,也有一些特定于多GPU或CPU训练的技术。我们在单独的部分中介绍它们。 + +* [在单个GPU上进行高效训练的方法和工具](perf_train_gpu_one):从这里开始学习常见的方法,可以帮助优化GPU内存利用率、加快训练速度或两者兼备。 +* [多GPU训练部分](perf_train_gpu_many):探索此部分以了解适用于多GPU设置的进一步优化方法,例如数据并行、张量并行和流水线并行。 +* [CPU训练部分](perf_train_cpu):了解在CPU上的混合精度训练。 +* [在多个CPU上进行高效训练](perf_train_cpu_many):了解分布式CPU训练。 +* [使用TensorFlow在TPU上进行训练](perf_train_tpu_tf):如果您对TPU还不熟悉,请参考此部分,了解有关在TPU上进行训练和使用XLA的建议性介绍。 +* [自定义硬件进行训练](perf_hardware):在构建自己的深度学习机器时查找技巧和窍门。 +* [使用Trainer API进行超参数搜索](hpo_train) + + +## 推理 + +在生产环境中对大型模型进行高效推理可能与训练它们一样具有挑战性。在接下来的部分中,我们将详细介绍如何在CPU和单/多GPU设置上进行推理的步骤。 + +* [在单个CPU上进行推理](perf_infer_cpu) +* [在单个GPU上进行推理](perf_infer_gpu_one) +* [多GPU推理](perf_infer_gpu_one) +* [TensorFlow模型的XLA集成](tf_xla) + +## 训练和推理 + +在这里,您将找到适用于训练模型或使用它进行推理的技巧、窍门和技巧。 + +* [实例化大型模型](big_models) +* [解决性能问题](debugging) + +## 贡献 + +这份文档还远远没有完成,还有很多需要添加的内容,所以如果你有补充或更正的内容,请毫不犹豫地提交一个PR(Pull Request),或者如果你不确定,可以创建一个Issue,我们可以在那里讨论细节。 + +在做出贡献时,如果A比B更好,请尽量包含可重复的基准测试和(或)该信息来源的链接(除非它直接来自您)。 From a5bee89c9d5ec2402cb72e819860d51cc2ca35fc Mon Sep 17 00:00:00 2001 From: Yoach Lacombe <52246514+ylacombe@users.noreply.github.com> Date: Wed, 8 Nov 2023 17:06:35 +0000 Subject: [PATCH 117/268] Add Flash Attention 2 support to Bark (#27364) * change handmade attention mask to _prepare_4d_attention_mask * add flashattention2 support in Bark * add flashattention2 tests on BarkSemanticModel * make style * fix flashattention and tests + make style * fix memory leak and allow Bark to pass flash attention to sub-models * make style * Apply suggestions from code review Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> * remove unecessary code from tests + justify overriding * Update tests/models/bark/test_modeling_bark.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * make style --------- Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/models/bark/modeling_bark.py | 256 ++++++++++++++++-- tests/models/bark/test_modeling_bark.py | 119 ++++++++ 2 files changed, 355 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/bark/modeling_bark.py b/src/transformers/models/bark/modeling_bark.py index 2c04f15c0456..f8b9eab5d397 100644 --- a/src/transformers/models/bark/modeling_bark.py +++ b/src/transformers/models/bark/modeling_bark.py @@ -26,12 +26,14 @@ BarkEosPrioritizerLogitsProcessor, SuppressTokensLogitsProcessor, ) +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput from ...modeling_utils import PreTrainedModel, get_parameter_device from ...utils import ( 
add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, + is_flash_attn_2_available, logging, ) from ..auto import AutoModel @@ -49,6 +51,11 @@ ) +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + logger = logging.get_logger(__name__) @@ -62,6 +69,19 @@ ] +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + class BarkSelfAttention(nn.Module): # adapted from GPTNeoSelfAttention and Bark code # BarkSelfAttention can have two attention type, i.e full attention or causal attention @@ -187,6 +207,177 @@ def forward( return outputs +class BarkSelfFlashAttention2(BarkSelfAttention): + """ + Bark flash attention module. This module inherits from `BarkSelfAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def _split_heads(self, tensor, num_heads, attn_head_size): + """ + Splits hidden_size dim into attn_head_size and num_heads + """ + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(new_shape) + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim - (batch, seq_length, head, head_features) + return tensor + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden_size + """ + # re-assemble all head outputs side by side + # (batch, seq_len, num_heads, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) + tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) + return tensor + + def forward( + self, + hidden_states, + attention_mask=None, + past_key_values=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + batch_size, query_len, _ = hidden_states.size() + + # calculate query, key, values for all heads in batch and move head forward to be the batch dim + query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if past_key_values is not None: + # (batch, head, seq_length, head_features) -> (batch, seq_length, head, head_features) + past_key = past_key_values[0].transpose(1, 2) + past_value = past_key_values[1].transpose(1, 2) + # and merge on seq_length + key = torch.cat((past_key, key), dim=1) + value = torch.cat((past_value, value), dim=1) + + if use_cache is True: + # (batch, head, seq_length, head_features) + present = (key.transpose(1, 2), value.transpose(1, 2)) + else: + present = None + + attn_output = self._flash_attention_forward(query, key, value, attention_mask, query_len, dropout=self.dropout) + + attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) + 
attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + attn_weights = None + outputs += (attn_weights,) + + return outputs + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +BARK_ATTENTION_CLASSES = { + "default": BarkSelfAttention, + "flash_attention_2": BarkSelfFlashAttention2, +} + + class BarkLayerNorm(nn.Module): """LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False.""" @@ -229,7 +420,8 @@ def __init__(self, config, is_causal=False): self.layernorm_1 = nn.LayerNorm(config.hidden_size) self.layernorm_2 = nn.LayerNorm(config.hidden_size) - self.attn = BarkSelfAttention(config, is_causal=is_causal) + attn_type = "flash_attention_2" if getattr(config, "_flash_attn_2_enabled", False) else "default" + self.attn = BARK_ATTENTION_CLASSES[attn_type](config, is_causal=is_causal) self.mlp = BarkMLP(config) @@ -277,6 +469,7 @@ class BarkPreTrainedModel(PreTrainedModel): config_class = BarkConfig supports_gradient_checkpointing = False + _supports_flash_attn_2 = True def _init_weights(self, module): """Initialize the weights.""" @@ -596,21 +789,13 @@ def forward( if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") - attention_mask = attention_mask.view(batch_size, -1) - # We create a 3D attention mask from a 2D tensor mask. - # Sizes are [batch_size, 1, 1, to_seq_length] - # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] - # this attention mask is more simple than the triangular masking of causal attention - # used in OpenAI GPT, we just need to prepare the broadcast dimension here. - attention_mask = attention_mask[:, None, None, :] - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and the dtype's smallest value for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. 
-            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
-            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+            if getattr(self.config, "_flash_attn_2_enabled", False):
+                attention_mask = attention_mask if 0 in attention_mask else None
+            else:
+                attention_mask = attention_mask.view(batch_size, -1)
+                # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length]
+                # from_seq_length is 1 to easily broadcast
+                attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1)

         # Prepare head mask if needed
         # 1.0 in head_mask indicate we keep the head
@@ -1233,10 +1418,12 @@ def forward(
         if attention_mask is not None:
             if batch_size <= 0:
                 raise ValueError("batch_size has to be defined and > 0")
-            attention_mask = attention_mask.view(batch_size, -1)
-            attention_mask = attention_mask[:, None, None, :]
-            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
-            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+            if getattr(self.config, "_flash_attn_2_enabled", False):
+                attention_mask = attention_mask if 0 in attention_mask else None
+            else:
+                # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length]
+                # from_seq_length is 1 to easily broadcast
+                attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1)

         head_mask = self.get_head_mask(head_mask, self.config.num_layers)

@@ -1669,3 +1856,32 @@ def generate(
             return audio, output_lengths

         return audio
+
+    @classmethod
+    def _check_and_enable_flash_attn_2(
+        cls, config, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None
+    ):
+        """
+        `_check_and_enable_flash_attn_2` originally doesn't expand flash attention enabling to the model
+        sub-configurations. We override the original method to make sure that Bark sub-models use Flash Attention
+        if necessary.
+
+        If you don't know about Flash Attention, check out the official repository of flash attention:
+        https://github.com/Dao-AILab/flash-attention
+
+        To use Flash Attention 1.0 you can do it directly via the `BetterTransformer` API; have a look at this
+        specific section of the documentation to learn more about it:
+        https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#decoder-models
+
+        The method checks if the current setup is compatible with Flash Attention as it requires the model to be in
+        half precision and not run on CPU.
+ + If all checks pass, the method will create an attribute in the config `_flash_attn_2_enabled` so that the model + can initialize the correct attention module + """ + config = super()._check_and_enable_flash_attn_2(config, torch_dtype, device_map) + + config.semantic_config._flash_attn_2_enabled = getattr(config, "_flash_attn_2_enabled", False) + config.coarse_acoustics_config._flash_attn_2_enabled = getattr(config, "_flash_attn_2_enabled", False) + config.fine_acoustics_config._flash_attn_2_enabled = getattr(config, "_flash_attn_2_enabled", False) + return config diff --git a/tests/models/bark/test_modeling_bark.py b/tests/models/bark/test_modeling_bark.py index bf13203ecd40..a8545ad3c0a5 100644 --- a/tests/models/bark/test_modeling_bark.py +++ b/tests/models/bark/test_modeling_bark.py @@ -20,6 +20,8 @@ import tempfile import unittest +from pytest import mark + from transformers import ( BarkCoarseConfig, BarkConfig, @@ -33,6 +35,7 @@ BarkSemanticGenerationConfig, ) from transformers.testing_utils import ( + require_flash_attn, require_torch, require_torch_fp16, require_torch_gpu, @@ -872,6 +875,122 @@ def test_resize_embeddings_untied(self): # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) + @require_flash_attn + @require_torch_gpu + @mark.flash_attn_test + @slow + def test_flash_attn_2_inference(self): + for model_class in self.all_model_classes: + if not model_class._supports_flash_attn_2: + return + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model_fa = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True + ) + model_fa.to(torch_device) + + model = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False + ) + model.to(torch_device) + + dummy_input = inputs_dict["input_ids"][:1] + if dummy_input.dtype in [torch.float32, torch.float16]: + dummy_input = dummy_input.to(torch.bfloat16) + + dummy_attention_mask = inputs_dict.get("attention_mask", None) + + if dummy_attention_mask is not None: + dummy_attention_mask = dummy_attention_mask[:1] + dummy_attention_mask[:, 1:] = 1 + dummy_attention_mask[:, :1] = 0 + + outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) + outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) + + logits = outputs.hidden_states[-1] + logits_fa = outputs_fa.hidden_states[-1] + + assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) + + other_inputs = {"output_hidden_states": True} + if dummy_attention_mask is not None: + other_inputs["attention_mask"] = dummy_attention_mask + + outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) + outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) + + logits = outputs.hidden_states[-1] + logits_fa = outputs_fa.hidden_states[-1] + + assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) + + # check with inference + dropout + model.train() + _ = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) + + @require_flash_attn + @require_torch_gpu + @mark.flash_attn_test + @slow + def test_flash_attn_2_inference_padding_right(self): + for model_class in self.all_model_classes: + if not model_class._supports_flash_attn_2: + return + 
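# For reference, a minimal end-user sketch of what these tests exercise: loading Bark with Flash
# Attention 2 enabled. Half precision and a non-CPU device are required (see the
# `_check_and_enable_flash_attn_2` override above, which also propagates the flag to the three
# sub-model configs). The `suno/bark-small` checkpoint and the `AutoProcessor` front-end are
# illustrative assumptions, not part of this diff.
import torch
from transformers import AutoProcessor, BarkModel

processor = AutoProcessor.from_pretrained("suno/bark-small")
model = BarkModel.from_pretrained(
    "suno/bark-small", torch_dtype=torch.float16, use_flash_attention_2=True
).to("cuda")

inputs = processor("Hello, my dog is cute").to("cuda")
audio = model.generate(**inputs)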
+ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model_fa = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True + ) + model_fa.to(torch_device) + + model = model_class.from_pretrained( + tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False + ) + model.to(torch_device) + + dummy_input = inputs_dict["input_ids"][:1] + if dummy_input.dtype in [torch.float32, torch.float16]: + dummy_input = dummy_input.to(torch.bfloat16) + + dummy_attention_mask = inputs_dict.get("attention_mask", None) + + if dummy_attention_mask is not None: + dummy_attention_mask = dummy_attention_mask[:1] + dummy_attention_mask[:, :-1] = 1 + dummy_attention_mask[:, -1:] = 0 + + outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) + outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) + + logits = outputs.hidden_states[-1] + logits_fa = outputs_fa.hidden_states[-1] + + assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) + + other_inputs = { + "output_hidden_states": True, + } + if dummy_attention_mask is not None: + other_inputs["attention_mask"] = dummy_attention_mask + + outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) + outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) + + logits = outputs.hidden_states[-1] + logits_fa = outputs_fa.hidden_states[-1] + + assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) + @require_torch class BarkModelIntegrationTests(unittest.TestCase): From 0e402e1478ff8c9db876eb7b3a708c1477cec2fa Mon Sep 17 00:00:00 2001 From: Sergii Dymchenko Date: Wed, 8 Nov 2023 11:58:36 -0800 Subject: [PATCH 118/268] Update deprecated `torch.range` in `test_modeling_ibert.py` (#27355) * Update deprecated torch.range * Remove comment --- tests/models/ibert/test_modeling_ibert.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/models/ibert/test_modeling_ibert.py b/tests/models/ibert/test_modeling_ibert.py index 096a55169a00..b552cb75a5a6 100644 --- a/tests/models/ibert/test_modeling_ibert.py +++ b/tests/models/ibert/test_modeling_ibert.py @@ -519,7 +519,7 @@ def test_int_gelu(self): gelu_q = IntGELU(quant_mode=True) gelu_dq = nn.GELU() - x_int = torch.range(-10000, 10000, 1) + x_int = torch.arange(-10000, 10001, 1) x_scaling_factor = torch.tensor(0.001) x = x_int * x_scaling_factor @@ -534,7 +534,7 @@ def test_int_gelu(self): self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4)) def test_force_dequant_gelu(self): - x_int = torch.range(-10000, 10000, 1) + x_int = torch.arange(-10000, 10001, 1) x_scaling_factor = torch.tensor(0.001) x = x_int * x_scaling_factor @@ -565,7 +565,6 @@ def test_int_softmax(self): softmax_q = IntSoftmax(output_bit, quant_mode=True) softmax_dq = nn.Softmax() - # x_int = torch.range(-10000, 10000, 1) def _test(array): x_int = torch.tensor(array) x_scaling_factor = torch.tensor(0.1) From ced9fd86f55ebb6b656c273f6e23f8ba50652f83 Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Wed, 8 Nov 2023 16:04:06 -0600 Subject: [PATCH 119/268] translate debugging.md to chinese (#27374) * update * update --- docs/source/zh/_toctree.yml | 2 + docs/source/zh/debugging.md | 308 ++++++++++++++++++++++++++++++++++++ 2 files changed, 310 insertions(+) create mode 
100644 docs/source/zh/debugging.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index d5226c78847c..5f2fa5a172af 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -47,6 +47,8 @@ title: 综述 - local: big_models title: 实例化大模型 + - local: debugging + title: 问题定位及解决 title: 性能和可扩展性 - sections: - local: task_summary diff --git a/docs/source/zh/debugging.md b/docs/source/zh/debugging.md new file mode 100644 index 000000000000..77746a694fce --- /dev/null +++ b/docs/source/zh/debugging.md @@ -0,0 +1,308 @@ + + +# 调试 + +## 多GPU网络问题调试 + +当使用`DistributedDataParallel`和多个GPU进行训练或推理时,如果遇到进程和(或)节点之间的互联问题,您可以使用以下脚本来诊断网络问题。 + +```bash +wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py +``` + +例如,要测试两个GPU之间的互联,请执行以下操作: + +```bash +python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py +``` + +如果两个进程能够相互通信并分配GPU内存,它们各自将打印出 "OK" 状态。 + +对于更多的GPU或节点,可以根据脚本中的参数进行调整。 + +在诊断脚本内部,您将找到更多详细信息,甚至有关如何在SLURM环境中运行它的说明。 + +另一种级别的调试是添加 `NCCL_DEBUG=INFO` 环境变量,如下所示: + + +```bash +NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py +``` + +这将产生大量与NCCL相关的调试信息,如果发现有问题报告,您可以在线搜索以获取相关信息。或者,如果您不确定如何解释输出,可以在`issue`中分享日志文件。 + + +## 下溢和上溢检测 + + + +目前,此功能仅适用于PyTorch。 + + + + + +对于多GPU训练,它需要使用DDP(`torch.distributed.launch`)。 + + + + + +此功能可以与任何基于`nn.Module`的模型一起使用。 + + + +如果您开始发现`loss=NaN`或模型因激活值或权重中的`inf`或`nan`而出现一些异常行为,就需要发现第一个下溢或上溢发生的地方以及导致它的原因。幸运的是,您可以通过激活一个特殊模块来自动进行检测。 + +如果您正在使用[`Trainer`],只需把以下内容: + + +```bash +--debug underflow_overflow +``` + +添加到常规命令行参数中,或在创建[`TrainingArguments`]对象时传递 `debug="underflow_overflow"`。 + +如果您正在使用自己的训练循环或其他Trainer,您可以通过以下方式实现相同的功能: + +```python +from transformers.debug_utils import DebugUnderflowOverflow + +debug_overflow = DebugUnderflowOverflow(model) +``` + +[`debug_utils.DebugUnderflowOverflow`] 将`hooks`插入模型,紧跟在每次前向调用之后,进而测试输入和输出变量,以及相应模块的权重。一旦在激活值或权重的至少一个元素中检测到`inf`或`nan`,程序将执行`assert`并打印报告,就像这样(这是在`google/mt5-small`下使用fp16混合精度捕获的): + +``` +Detected inf/nan during batch_number=0 +Last 21 forward frames: +abs min abs max metadata + encoder.block.1.layer.1.DenseReluDense.dropout Dropout +0.00e+00 2.57e+02 input[0] +0.00e+00 2.85e+02 output +[...] 
+ encoder.block.2.layer.0 T5LayerSelfAttention +6.78e-04 3.15e+03 input[0] +2.65e-04 3.42e+03 output[0] + None output[1] +2.25e-01 1.00e+04 output[2] + encoder.block.2.layer.1.layer_norm T5LayerNorm +8.69e-02 4.18e-01 weight +2.65e-04 3.42e+03 input[0] +1.79e-06 4.65e+00 output + encoder.block.2.layer.1.DenseReluDense.wi_0 Linear +2.17e-07 4.50e+00 weight +1.79e-06 4.65e+00 input[0] +2.68e-06 3.70e+01 output + encoder.block.2.layer.1.DenseReluDense.wi_1 Linear +8.08e-07 2.66e+01 weight +1.79e-06 4.65e+00 input[0] +1.27e-04 2.37e+02 output + encoder.block.2.layer.1.DenseReluDense.dropout Dropout +0.00e+00 8.76e+03 input[0] +0.00e+00 9.74e+03 output + encoder.block.2.layer.1.DenseReluDense.wo Linear +1.01e-06 6.44e+00 weight +0.00e+00 9.74e+03 input[0] +3.18e-04 6.27e+04 output + encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense +1.79e-06 4.65e+00 input[0] +3.18e-04 6.27e+04 output + encoder.block.2.layer.1.dropout Dropout +3.18e-04 6.27e+04 input[0] +0.00e+00 inf output +``` + +由于篇幅原因,示例输出中间的部分已经被缩减。 + +第二列显示了绝对最大元素的值,因此,如果您仔细查看最后`frame`,输入和输出都在`1e4`的范围内。因此,在使用fp16混合精度进行训练时,最后一步发生了溢出(因为在`fp16`下,在`inf`之前的最大数字是`64e3`)。为了避免在`fp16`下发生溢出,激活值必须保持低于`1e4`,因为`1e4 * 1e4 = 1e8`,因此任何具有大激活值的矩阵乘法都会导致数值溢出。 + +在跟踪的开始处,您可以发现问题发生在哪个批次(这里的`Detected inf/nan during batch_number=0`表示问题发生在第一个批次)。 + +每个报告的`frame`都以声明相应模块的层信息为开头,说明这一`frame`是为哪个模块报告的。如果只看这个`frame`: + +``` + encoder.block.2.layer.1.layer_norm T5LayerNorm +8.69e-02 4.18e-01 weight +2.65e-04 3.42e+03 input[0] +1.79e-06 4.65e+00 output +``` + +在这里,`encoder.block.2.layer.1.layer_norm` 表示它是编码器的第二个块中第一层的`layer norm`。而 `forward` 的具体调用是 `T5LayerNorm`。 + +让我们看看该报告的最后几个`frame`: + +``` +Detected inf/nan during batch_number=0 +Last 21 forward frames: +abs min abs max metadata +[...] + encoder.block.2.layer.1.DenseReluDense.wi_0 Linear +2.17e-07 4.50e+00 weight +1.79e-06 4.65e+00 input[0] +2.68e-06 3.70e+01 output + encoder.block.2.layer.1.DenseReluDense.wi_1 Linear +8.08e-07 2.66e+01 weight +1.79e-06 4.65e+00 input[0] +1.27e-04 2.37e+02 output + encoder.block.2.layer.1.DenseReluDense.wo Linear +1.01e-06 6.44e+00 weight +0.00e+00 9.74e+03 input[0] +3.18e-04 6.27e+04 output + encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense +1.79e-06 4.65e+00 input[0] +3.18e-04 6.27e+04 output + encoder.block.2.layer.1.dropout Dropout +3.18e-04 6.27e+04 input[0] +0.00e+00 inf output +``` + +最后一个`frame`报告了`Dropout.forward`函数,第一个条目是唯一的输入,第二个条目是唯一的输出。您可以看到,它是从`DenseReluDense`类内的属性`dropout`中调用的。我们可以看到它发生在第2个块的第1层,也就是在第一个批次期间。最后,绝对最大的输入元素值为`6.27e+04`,输出也是`inf`。 + +您可以在这里看到,`T5DenseGatedGeluDense.forward`产生了输出激活值,其绝对最大值约为62.7K,非常接近fp16的上限64K。在下一个`frame`中,我们有`Dropout`对权重进行重新归一化,之后将某些元素归零,将绝对最大值推到了64K以上,导致溢出(`inf`)。 + +正如你所看到的,我们需要查看前面的`frame`, 从那里fp16数字开始变得非常大。 + +让我们将报告与`models/t5/modeling_t5.py`中的代码匹配: + +```python +class T5DenseGatedGeluDense(nn.Module): + def __init__(self, config): + super().__init__() + self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) + self.dropout = nn.Dropout(config.dropout_rate) + self.gelu_act = ACT2FN["gelu_new"] + + def forward(self, hidden_states): + hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) + hidden_linear = self.wi_1(hidden_states) + hidden_states = hidden_gelu * hidden_linear + hidden_states = self.dropout(hidden_states) + hidden_states = self.wo(hidden_states) + return hidden_states +``` + +现在很容易看到`dropout`调用,以及所有之前的调用。 + +由于检测是在前向`hook`中进行的,这些报告将立即在每个`forward`返回后打印出来。 + 
+回到完整的报告,要采取措施并解决问题,我们需要往回看几个`frame`,在那里数字开始上升,并且最有可能切换到fp32模式以便在乘法或求和时数字不会溢出。当然,可能还有其他解决方案。例如,如果启用了`amp`,我们可以在将原始`forward`移到`helper wrapper`中后,暂时关闭它,如下所示: + +```python +def _forward(self, hidden_states): + hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) + hidden_linear = self.wi_1(hidden_states) + hidden_states = hidden_gelu * hidden_linear + hidden_states = self.dropout(hidden_states) + hidden_states = self.wo(hidden_states) + return hidden_states + + +import torch + + +def forward(self, hidden_states): + if torch.is_autocast_enabled(): + with torch.cuda.amp.autocast(enabled=False): + return self._forward(hidden_states) + else: + return self._forward(hidden_states) +``` + +由于自动检测器仅报告完整`frame`的输入和输出,一旦知道在哪里查找,您可能还希望分析特定`forward`函数的中间阶段。在这种情况下,您可以使用`detect_overflow`辅助函数将检测器放到希望的位置,例如: + +```python +from debug_utils import detect_overflow + + +class T5LayerFF(nn.Module): + [...] + + def forward(self, hidden_states): + forwarded_states = self.layer_norm(hidden_states) + detect_overflow(forwarded_states, "after layer_norm") + forwarded_states = self.DenseReluDense(forwarded_states) + detect_overflow(forwarded_states, "after DenseReluDense") + return hidden_states + self.dropout(forwarded_states) +``` + +可以看到,我们添加了2个检测器,现在我们可以跟踪是否在`forwarded_states`中间的某个地方检测到了`inf`或`nan`。 + +实际上,检测器已经报告了这些,因为上面示例中的每个调用都是一个`nn.Module`,但假设如果您有一些本地的直接计算,这就是您将如何执行的方式。 + +此外,如果您在自己的代码中实例化调试器,您可以调整从其默认打印的`frame`数,例如: + +```python +from transformers.debug_utils import DebugUnderflowOverflow + +debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) +``` + +### 特定批次的绝对最小值和最大值跟踪 + +当关闭下溢/上溢检测功能, 同样的调试类可以用于批处理跟踪。 + +假设您想要监视给定批次的每个`forward`调用的所有成分的绝对最小值和最大值,并且仅对批次1和3执行此操作,您可以这样实例化这个类: + +```python +debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) +``` + +现在,完整的批次1和3将以与下溢/上溢检测器相同的格式进行跟踪。 + +批次从0开始计数。 + +如果您知道程序在某个批次编号之后开始出现问题,那么您可以直接快进到该区域。以下是一个截取的配置示例输出: + +``` + *** Starting batch number=1 *** +abs min abs max metadata + shared Embedding +1.01e-06 7.92e+02 weight +0.00e+00 2.47e+04 input[0] +5.36e-05 7.92e+02 output +[...] + decoder.dropout Dropout +1.60e-07 2.27e+01 input[0] +0.00e+00 2.52e+01 output + decoder T5Stack + not a tensor output + lm_head Linear +1.01e-06 7.92e+02 weight +0.00e+00 1.11e+00 input[0] +6.06e-02 8.39e+01 output + T5ForConditionalGeneration + not a tensor output + + *** Starting batch number=3 *** +abs min abs max metadata + shared Embedding +1.01e-06 7.92e+02 weight +0.00e+00 2.78e+04 input[0] +5.36e-05 7.92e+02 output +[...] 
+``` + +在这里,您将获得大量的`frame`被`dump` - 与您的模型中的前向调用一样多,它有可能符合也可能不符合您的要求,但有时对于调试目的来说,它可能比正常的调试器更容易使用。例如,如果问题开始发生在批次号150上,您可以`dump`批次149和150的跟踪,并比较数字开始发散的地方。 + +你还可以使用以下命令指定停止训练的批次号: + +```python +debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3) +``` From 7ecd229ba475dbf78040f368ae86c86bba875442 Mon Sep 17 00:00:00 2001 From: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:47:24 +0530 Subject: [PATCH 120/268] Smangrul/fix failing ds ci tests (#27358) * fix failing DeepSpeed CI tests due to `safetensors` being default * debug * remove debug statements * resolve comments * Update test_deepspeed.py --- tests/deepspeed/test_deepspeed.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py index 5e8c0d162903..9daad85b02f0 100644 --- a/tests/deepspeed/test_deepspeed.py +++ b/tests/deepspeed/test_deepspeed.py @@ -48,7 +48,7 @@ slow, ) from transformers.trainer_utils import get_last_checkpoint, set_seed -from transformers.utils import WEIGHTS_NAME, is_torch_bf16_gpu_available +from transformers.utils import SAFE_WEIGHTS_NAME, is_torch_bf16_gpu_available if is_torch_available(): @@ -565,8 +565,7 @@ def test_gradient_accumulation(self, stage, dtype): def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage, dtype): # adapted from TrainerIntegrationCommon.check_saved_checkpoints - - file_list = [WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"] + file_list = [SAFE_WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"] if stage == ZERO2: ds_file_list = ["mp_rank_00_model_states.pt"] @@ -581,7 +580,6 @@ def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage, dtyp for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found") - # common files for filename in file_list: path = os.path.join(checkpoint, filename) From 085ea7e56c2d109dacb662d8a0a05c29bb2ed825 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Thu, 9 Nov 2023 10:15:10 +0100 Subject: [PATCH 121/268] [`CodeLlamaTokenizer`] Nit, update __init__ to make sure the AddedTokens are not normalized because they are special (#27359) * make sure tokens are properly initialized for codellama slow * add m ore pretrained models * style * test more tokenizers checkpoints --- .../models/code_llama/tokenization_code_llama.py | 6 +++--- tests/models/code_llama/test_tokenization_code_llama.py | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/code_llama/tokenization_code_llama.py b/src/transformers/models/code_llama/tokenization_code_llama.py index 165aa3634a4c..db280bbc1561 100644 --- a/src/transformers/models/code_llama/tokenization_code_llama.py +++ b/src/transformers/models/code_llama/tokenization_code_llama.py @@ -149,9 +149,9 @@ def __init__( ): requires_backends(self, "protobuf") self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs - bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token - eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token - unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + bos_token = AddedToken(bos_token, 
normalized=False, special=True) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token self.use_default_system_prompt = use_default_system_prompt # mark tokens special to skip them diff --git a/tests/models/code_llama/test_tokenization_code_llama.py b/tests/models/code_llama/test_tokenization_code_llama.py index 7c3d89a8dd58..7452990ba755 100644 --- a/tests/models/code_llama/test_tokenization_code_llama.py +++ b/tests/models/code_llama/test_tokenization_code_llama.py @@ -150,6 +150,8 @@ def test_save_pretrained(self): self.tokenizers_list = [ (self.rust_tokenizer_class, "hf-internal-testing/llama-code-tokenizer", {}), (self.tokenizer_class, "hf-internal-testing/llama-code-tokenizer", {}), + (self.tokenizer_class, "codellama/CodeLlama-34b-Instruct-hf", {}), + (self.rust_tokenizer_class, "codellama/CodeLlama-34b-Instruct-hf", {}), ] for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): From e9adb0c9cf9f2e4017615ab64f4d2f364339136e Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Thu, 9 Nov 2023 04:44:36 -0500 Subject: [PATCH 122/268] Change thresh in test (#27378) Change thresh --- examples/pytorch/test_accelerate_examples.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/test_accelerate_examples.py b/examples/pytorch/test_accelerate_examples.py index cdc9d6dc5b56..e2d9d090611f 100644 --- a/examples/pytorch/test_accelerate_examples.py +++ b/examples/pytorch/test_accelerate_examples.py @@ -169,7 +169,7 @@ def test_run_ner_no_trainer(self): run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) - self.assertLess(result["train_loss"], 0.5) + self.assertLess(result["train_loss"], 0.6) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer"))) From bc086a251699f7c22af0434a2eaa2024098ffaa0 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:50:19 +0100 Subject: [PATCH 123/268] Put doctest options back to `pyproject.toml` (#27366) fix Co-authored-by: ydshieh --- pyproject.toml | 6 +++--- setup.cfg | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) delete mode 100644 setup.cfg diff --git a/pyproject.toml b/pyproject.toml index c55795eb1378..37bc1d7efe16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,6 +18,6 @@ line-length = 119 lines-after-imports = 2 known-first-party = ["transformers"] -# This is ignored, maybe because of the header? 
If someone finds a fix, we can uncomment and remove setup.cfg -# [tool.pytest] -# doctest_optionflags="NUMBER NORMALIZE_WHITESPACE ELLIPSIS" +[tool.pytest.ini_options] +doctest_optionflags="NUMBER NORMALIZE_WHITESPACE ELLIPSIS" +doctest_glob="**/*.md" diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index ffe8973dd21c..000000000000 --- a/setup.cfg +++ /dev/null @@ -1,3 +0,0 @@ -[tool:pytest] -doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS -doctest_glob=**/*.md \ No newline at end of file From 9999b739685c51bba888cfd672f0f2d1c4de92d2 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:03:37 +0000 Subject: [PATCH 124/268] Skip failing cache call tests (#27393) * Skip failing cache call tests * Fixup --- tests/models/auto/test_modeling_auto.py | 3 +++ tests/models/auto/test_tokenization_auto.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/tests/models/auto/test_modeling_auto.py b/tests/models/auto/test_modeling_auto.py index 347cabd38a28..55bc3f3999ff 100644 --- a/tests/models/auto/test_modeling_auto.py +++ b/tests/models/auto/test_modeling_auto.py @@ -482,6 +482,9 @@ def test_model_from_flax_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_flax=True` to load this model"): _ = AutoModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + @unittest.skip( + "Currently failing with new huggingface_hub release. See: https://github.com/huggingface/transformers/pull/27389" + ) def test_cached_model_has_minimum_calls_to_head(self): # Make sure we have cached the model. _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") diff --git a/tests/models/auto/test_tokenization_auto.py b/tests/models/auto/test_tokenization_auto.py index a3a776083893..40dc99cd1368 100644 --- a/tests/models/auto/test_tokenization_auto.py +++ b/tests/models/auto/test_tokenization_auto.py @@ -419,6 +419,9 @@ def test_revision_not_found(self): ): _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") + @unittest.skip( + "Currently failing with new huggingface_hub release. See: https://github.com/huggingface/transformers/pull/27389" + ) def test_cached_tokenizer_has_minimum_calls_to_head(self): # Make sure we have cached the tokenizer. 
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") From c5d7754b1140aaad95ad36c5d9ddfda5abda19f6 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Thu, 9 Nov 2023 19:34:13 +0800 Subject: [PATCH 125/268] device-agnostic deepspeed testing (#27342) --- tests/deepspeed/test_deepspeed.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py index 9daad85b02f0..2352cf522f29 100644 --- a/tests/deepspeed/test_deepspeed.py +++ b/tests/deepspeed/test_deepspeed.py @@ -38,17 +38,18 @@ CaptureStderr, LoggingLevel, TestCasePlus, + backend_device_count, execute_subprocess_async, - get_gpu_count, mockenv_context, require_deepspeed, require_optuna, - require_torch_gpu, - require_torch_multi_gpu, + require_torch_accelerator, + require_torch_multi_accelerator, slow, + torch_device, ) from transformers.trainer_utils import get_last_checkpoint, set_seed -from transformers.utils import SAFE_WEIGHTS_NAME, is_torch_bf16_gpu_available +from transformers.utils import SAFE_WEIGHTS_NAME, is_torch_bf16_available_on_device if is_torch_available(): @@ -125,7 +126,7 @@ def get_launcher(distributed=False): # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) - num_gpus = min(2, get_gpu_count()) if distributed else 1 + num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split() @@ -145,7 +146,7 @@ def get_launcher(distributed=False): schedulers = [HF_SCHEDULER, DS_SCHEDULER] stages = [ZERO2, ZERO3] -if is_torch_bf16_gpu_available(): +if is_torch_bf16_available_on_device(torch_device): dtypes = [FP16, BF16] else: dtypes = [FP16] @@ -165,7 +166,7 @@ def parameterized_custom_name_func(func, param_num, param): @require_deepspeed -@require_torch_gpu +@require_torch_accelerator class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon): """ Testing non-Trainer DeepSpeed integration @@ -273,7 +274,7 @@ def get_config_dict(self, stage): @require_deepspeed -@require_torch_gpu +@require_torch_accelerator class TrainerIntegrationDeepSpeed(TrainerIntegrationDeepSpeedWithCustomConfig, TrainerIntegrationCommon): """ @@ -875,7 +876,7 @@ def get_dataset(): @slow @require_deepspeed -@require_torch_gpu +@require_torch_accelerator class TestDeepSpeedWithLauncher(TestCasePlus): """This class is for testing via an external script - can do multiple gpus""" @@ -896,7 +897,7 @@ class TestDeepSpeedWithLauncher(TestCasePlus): # @parameterized.expand(params, name_func=parameterized_custom_name_func) - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_basic_distributed(self, stage, dtype): self.run_and_check(stage=stage, dtype=dtype, distributed=True) @@ -927,7 +928,7 @@ def test_fp32_non_distributed(self, stage, dtype): ) @parameterized.expand(params, name_func=parameterized_custom_name_func) - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_fp32_distributed(self, stage, dtype): # real model needs too much GPU memory under stage2+fp32, so using tiny random model here - # therefore no quality checks, just basic completion checks are done @@ -968,9 +969,9 @@ def test_resume_train_not_from_ds_checkpoint(self, stage, dtype): self.do_checks(output_dir, do_train=do_train, do_eval=do_eval) 
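# A minimal sketch of the device-agnostic pattern this commit applies throughout the file, using
# only helpers that the updated imports above bring in (backend_device_count, torch_device,
# is_torch_bf16_available_on_device); exact usage elsewhere in the suite may differ.
from transformers.testing_utils import backend_device_count, torch_device
from transformers.utils import is_torch_bf16_available_on_device

# was: min(2, get_gpu_count()) -- now counts devices on whatever backend torch_device points to
num_devices = min(2, backend_device_count(torch_device))

# was: is_torch_bf16_gpu_available() -- now checks bf16 support on the active device
dtypes = ["fp16", "bf16"] if is_torch_bf16_available_on_device(torch_device) else ["fp16"]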
@parameterized.expand(["bf16", "fp16", "fp32"]) - @require_torch_multi_gpu + @require_torch_multi_accelerator def test_inference(self, dtype): - if dtype == "bf16" and not is_torch_bf16_gpu_available(): + if dtype == "bf16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest("test requires bfloat16 hardware support") # this is just inference, so no optimizer should be loaded From 791ec370d1f20e331c4267e95cb3edbb1dc0cfdc Mon Sep 17 00:00:00 2001 From: Dave Berenbaum Date: Thu, 9 Nov 2023 07:19:31 -0500 Subject: [PATCH 126/268] Adds dvclive callback (#27352) * dvclive trainer callback * style fixes * dvclive link fixes --- docs/source/en/main_classes/callback.md | 4 + docs/source/ja/main_classes/callback.md | 4 + examples/pytorch/README.md | 1 + src/transformers/__init__.py | 2 + src/transformers/integrations/__init__.py | 4 + .../integrations/integration_utils.py | 101 +++++++++++++++++- src/transformers/training_args.py | 8 +- 7 files changed, 119 insertions(+), 5 deletions(-) diff --git a/docs/source/en/main_classes/callback.md b/docs/source/en/main_classes/callback.md index 87bf0d63af1f..bc7323f5911e 100644 --- a/docs/source/en/main_classes/callback.md +++ b/docs/source/en/main_classes/callback.md @@ -44,6 +44,7 @@ By default, `TrainingArguments.report_to` is set to `"all"`, so a [`Trainer`] wi - [`~integrations.ClearMLCallback`] if [clearml](https://github.com/allegroai/clearml) is installed. - [`~integrations.DagsHubCallback`] if [dagshub](https://dagshub.com/) is installed. - [`~integrations.FlyteCallback`] if [flyte](https://flyte.org/) is installed. +- [`~integrations.DVCLiveCallback`] if [dvclive](https://dvc.org/doc/dvclive) is installed. If a package is installed but you don't wish to use the accompanying integration, you can change `TrainingArguments.report_to` to a list of just those integrations you want to use (e.g. `["azure_ml", "wandb"]`). @@ -88,6 +89,9 @@ Here is the list of the available [`TrainerCallback`] in the library: [[autodoc]] integrations.FlyteCallback +[[autodoc]] integrations.DVCLiveCallback + - setup + ## TrainerCallback [[autodoc]] TrainerCallback diff --git a/docs/source/ja/main_classes/callback.md b/docs/source/ja/main_classes/callback.md index 75938bb6a45b..3ea4938841e3 100644 --- a/docs/source/ja/main_classes/callback.md +++ b/docs/source/ja/main_classes/callback.md @@ -45,6 +45,7 @@ rendered properly in your Markdown viewer. - [`~integrations.ClearMLCallback`] [clearml](https://github.com/allegroai/clearml) がインストールされている場合。 - [`~integrations.DagsHubCallback`] [dagshub](https://dagshub.com/) がインストールされている場合。 - [`~integrations.FlyteCallback`] [flyte](https://flyte.org/) がインストールされている場合。 +- [`~integrations.DVCLiveCallback`] [dvclive](https://www.dvc.org/doc/dvclive) がインストールされている場合。 パッケージがインストールされているが、付随する統合を使用したくない場合は、`TrainingArguments.report_to` を、使用したい統合のみのリストに変更できます (例: `["azure_ml", "wandb"]`) 。 @@ -88,6 +89,9 @@ rendered properly in your Markdown viewer. [[autodoc]] integrations.FlyteCallback +[[autodoc]] integrations.DVCLiveCallback + - setup + ## TrainerCallback [[autodoc]] TrainerCallback diff --git a/examples/pytorch/README.md b/examples/pytorch/README.md index 2a00db08d2ae..fd98a8e9180d 100644 --- a/examples/pytorch/README.md +++ b/examples/pytorch/README.md @@ -201,6 +201,7 @@ You can easily log and monitor your runs code. 
The following are currently suppo * [Comet ML](https://www.comet.ml/docs/python-sdk/huggingface/) * [Neptune](https://docs.neptune.ai/integrations-and-supported-tools/model-training/hugging-face) * [ClearML](https://clear.ml/docs/latest/docs/getting_started/ds/ds_first_steps) +* [DVCLive](https://dvc.org/doc/dvclive/ml-frameworks/huggingface) ### Weights & Biases diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 4e98a717f028..05f9bc7796da 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -108,6 +108,7 @@ "integrations": [ "is_clearml_available", "is_comet_available", + "is_dvclive_available", "is_neptune_available", "is_optuna_available", "is_ray_available", @@ -4300,6 +4301,7 @@ from .integrations import ( is_clearml_available, is_comet_available, + is_dvclive_available, is_neptune_available, is_optuna_available, is_ray_available, diff --git a/src/transformers/integrations/__init__.py b/src/transformers/integrations/__init__.py index 7596555984a3..427b5e00000f 100644 --- a/src/transformers/integrations/__init__.py +++ b/src/transformers/integrations/__init__.py @@ -44,6 +44,7 @@ "CodeCarbonCallback", "CometCallback", "DagsHubCallback", + "DVCLiveCallback", "FlyteCallback", "MLflowCallback", "NeptuneCallback", @@ -58,6 +59,7 @@ "is_codecarbon_available", "is_comet_available", "is_dagshub_available", + "is_dvclive_available", "is_flyte_deck_standard_available", "is_flytekit_available", "is_mlflow_available", @@ -105,6 +107,7 @@ CodeCarbonCallback, CometCallback, DagsHubCallback, + DVCLiveCallback, FlyteCallback, MLflowCallback, NeptuneCallback, @@ -119,6 +122,7 @@ is_codecarbon_available, is_comet_available, is_dagshub_available, + is_dvclive_available, is_flyte_deck_standard_available, is_flytekit_available, is_mlflow_available, diff --git a/src/transformers/integrations/integration_utils.py b/src/transformers/integrations/integration_utils.py index 5911d3419349..5bddb24ed326 100644 --- a/src/transformers/integrations/integration_utils.py +++ b/src/transformers/integrations/integration_utils.py @@ -26,7 +26,7 @@ import tempfile from dataclasses import asdict from pathlib import Path -from typing import TYPE_CHECKING, Dict, Optional +from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Union import numpy as np @@ -152,6 +152,10 @@ def is_flyte_deck_standard_available(): return importlib.util.find_spec("flytekitplugins.deck") is not None +def is_dvclive_available(): + return importlib.util.find_spec("dvclive") is not None + + def hp_params(trial): if is_optuna_available(): import optuna @@ -541,6 +545,8 @@ def get_available_reporting_integrations(): integrations.append("comet_ml") if is_dagshub_available(): integrations.append("dagshub") + if is_dvclive_available(): + integrations.append("dvclive") if is_mlflow_available(): integrations.append("mlflow") if is_neptune_available(): @@ -1605,6 +1611,98 @@ def on_train_end(self, args, state, control, **kwargs): Deck("Log History", TableRenderer().to_html(log_history_df)) +class DVCLiveCallback(TrainerCallback): + """ + A [`TrainerCallback`] that sends the logs to [DVCLive](https://www.dvc.org/doc/dvclive). + + Use the environment variables below in `setup` to configure the integration. To customize this callback beyond + those environment variables, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). + + Args: + live (`dvclive.Live`, *optional*, defaults to `None`): + Optional Live instance. If None, a new instance will be created using **kwargs. 
+        log_model (Union[Literal["all"], bool], *optional*, defaults to `None`):
+            Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True`,
+            the final checkpoint is logged at the end of training. If set to `"all"`, the entire
+            [`TrainingArguments`]'s `output_dir` is logged at each checkpoint.
+    """
+
+    def __init__(
+        self,
+        live: Optional[Any] = None,
+        log_model: Optional[Union[Literal["all"], bool]] = None,
+        **kwargs,
+    ):
+        if not is_dvclive_available():
+            raise RuntimeError("DVCLiveCallback requires dvclive to be installed. Run `pip install dvclive`.")
+        from dvclive import Live
+
+        self._log_model = log_model
+
+        self._initialized = False
+        self.live = None
+        if isinstance(live, Live):
+            self.live = live
+            self._initialized = True
+        elif live is not None:
+            raise RuntimeError(f"Found class {live.__class__} for live, expected dvclive.Live")
+
+    def setup(self, args, state, model):
+        """
+        Setup the optional DVCLive integration. To customize this callback beyond the environment variables below, see
+        [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface).
+
+        Environment:
+        - **HF_DVCLIVE_LOG_MODEL** (`str`, *optional*):
+            Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True` or
+            *1*, the final checkpoint is logged at the end of training. If set to `all`, the entire
+            [`TrainingArguments`]'s `output_dir` is logged at each checkpoint.
+        """
+        from dvclive import Live
+
+        self._initialized = True
+        if self._log_model is not None:
+            log_model_env = os.getenv("HF_DVCLIVE_LOG_MODEL")
+            if log_model_env.upper() in ENV_VARS_TRUE_VALUES:
+                self._log_model = True
+            elif log_model_env.lower() == "all":
+                self._log_model = "all"
+        if state.is_world_process_zero:
+            if not self.live:
+                self.live = Live()
+            self.live.log_params(args.to_dict())
+
+    def on_train_begin(self, args, state, control, model=None, **kwargs):
+        if not self._initialized:
+            self.setup(args, state, model)
+
+    def on_log(self, args, state, control, model=None, logs=None, **kwargs):
+        if not self._initialized:
+            self.setup(args, state, model)
+        if state.is_world_process_zero:
+            from dvclive.utils import standardize_metric_name
+
+            for key, value in logs.items():
+                self.live.log_metric(standardize_metric_name(key, "dvclive.huggingface"), value)
+            self.live.next_step()
+
+    def on_save(self, args, state, control, **kwargs):
+        if self._log_model == "all" and self._initialized and state.is_world_process_zero:
+            self.live.log_artifact(args.output_dir)
+
+    def on_train_end(self, args, state, control, **kwargs):
+        if self._initialized and state.is_world_process_zero:
+            from transformers.trainer import Trainer
+
+            if self._log_model is True:
+                fake_trainer = Trainer(args=args, model=kwargs.get("model"), tokenizer=kwargs.get("tokenizer"))
+                name = "best" if args.load_best_model_at_end else "last"
+                output_dir = os.path.join(args.output_dir, name)
+                fake_trainer.save_model(output_dir)
+                self.live.log_artifact(output_dir, name=name, type="model", copy=True)
+            self.live.end()
+
+
 INTEGRATION_TO_CALLBACK = {
     "azure_ml": AzureMLCallback,
     "comet_ml": CometCallback,
@@ -1616,6 +1714,7 @@ def on_train_end(self, args, state, control, **kwargs):
     "clearml": ClearMLCallback,
     "dagshub": DagsHubCallback,
     "flyte": FlyteCallback,
+    "dvclive": DVCLiveCallback,
 }


diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 7ec6e56ff505..32446f0ab201 100644
--- a/src/transformers/training_args.py
+++
b/src/transformers/training_args.py @@ -509,7 +509,7 @@ class TrainingArguments: instance of `Dataset`. report_to (`str` or `List[str]`, *optional*, defaults to `"all"`): The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, - `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"flyte"`, `"mlflow"`, `"neptune"`, + `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, `"neptune"`, `"tensorboard"`, and `"wandb"`. Use `"all"` to report to all integrations installed, `"none"` for no integrations. ddp_find_unused_parameters (`bool`, *optional*): @@ -2391,9 +2391,9 @@ def set_logging( and lets the application set the level. report_to (`str` or `List[str]`, *optional*, defaults to `"all"`): The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, - `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"flyte"`, `"mlflow"`, `"neptune"`, - `"tensorboard"`, and `"wandb"`. Use `"all"` to report to all integrations installed, `"none"` for no - integrations. + `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, + `"neptune"`, `"tensorboard"`, and `"wandb"`. Use `"all"` to report to all integrations installed, + `"none"` for no integrations. first_step (`bool`, *optional*, defaults to `False`): Whether to log and evaluate the first `global_step` or not. nan_inf_filter (`bool`, *optional*, defaults to `True`): From 3258ff93304078b9e27d752e6c19d3813f664855 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 9 Nov 2023 13:32:54 +0100 Subject: [PATCH 127/268] use `pytest.mark` directly (#27390) fix Co-authored-by: ydshieh --- tests/models/bark/test_modeling_bark.py | 6 +++--- tests/models/distilbert/test_modeling_distilbert.py | 6 +++--- tests/models/llama/test_modeling_llama.py | 4 ++-- tests/models/mistral/test_modeling_mistral.py | 6 +++--- tests/models/whisper/test_modeling_whisper.py | 6 +++--- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/models/bark/test_modeling_bark.py b/tests/models/bark/test_modeling_bark.py index a8545ad3c0a5..4a71f9f723ca 100644 --- a/tests/models/bark/test_modeling_bark.py +++ b/tests/models/bark/test_modeling_bark.py @@ -20,7 +20,7 @@ import tempfile import unittest -from pytest import mark +import pytest from transformers import ( BarkCoarseConfig, @@ -877,7 +877,7 @@ def test_resize_embeddings_untied(self): @require_flash_attn @require_torch_gpu - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference(self): for model_class in self.all_model_classes: @@ -936,7 +936,7 @@ def test_flash_attn_2_inference(self): @require_flash_attn @require_torch_gpu - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): for model_class in self.all_model_classes: diff --git a/tests/models/distilbert/test_modeling_distilbert.py b/tests/models/distilbert/test_modeling_distilbert.py index b6d3c0f57aad..8194c4285916 100644 --- a/tests/models/distilbert/test_modeling_distilbert.py +++ b/tests/models/distilbert/test_modeling_distilbert.py @@ -16,7 +16,7 @@ import tempfile import unittest -from pytest import mark +import pytest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device @@ -290,7 +290,7 @@ def test_torchscript_device_change(self): # Because 
DistilBertForMultipleChoice requires inputs with different shapes we need to override this test. @require_flash_attn @require_torch_accelerator - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference(self): import torch @@ -344,7 +344,7 @@ def test_flash_attn_2_inference(self): # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test. @require_flash_attn @require_torch_accelerator - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): import torch diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index 4d6b363e4a75..a646e5ab7a5c 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -17,8 +17,8 @@ import unittest +import pytest from parameterized import parameterized -from pytest import mark from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import ( @@ -385,7 +385,7 @@ def test_model_rope_scaling(self, scaling_type): @require_flash_attn @require_torch_gpu - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): """ diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index 4bcb722c144e..2989f4030465 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -19,7 +19,7 @@ import tempfile import unittest -from pytest import mark +import pytest from transformers import AutoTokenizer, MistralConfig, is_torch_available from transformers.testing_utils import ( @@ -369,7 +369,7 @@ def test_past_key_values_format(self): @require_flash_attn @require_torch_gpu - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): import torch @@ -403,7 +403,7 @@ def test_flash_attn_2_generate_padding_right(self): @require_flash_attn @require_torch_gpu - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): import torch diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 05d48786148e..22290bab6691 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -21,7 +21,7 @@ import unittest import numpy as np -from pytest import mark +import pytest import transformers from transformers import WhisperConfig @@ -800,7 +800,7 @@ def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_ @require_flash_attn @require_torch_gpu - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference(self): import torch @@ -845,7 +845,7 @@ def test_flash_attn_2_inference(self): @require_flash_attn @require_torch_gpu - @mark.flash_attn_test + @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): import torch From cf2a3f37bfac2142f1d081760f787c9db263f895 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 9 Nov 2023 15:47:46 +0100 Subject: [PATCH 128/268] Fix fuyu checkpoint repo in `FuyuConfig` (#27399) fix Co-authored-by: ydshieh --- src/transformers/models/fuyu/configuration_fuyu.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/fuyu/configuration_fuyu.py 
b/src/transformers/models/fuyu/configuration_fuyu.py index d3c447469c71..c385ad06130c 100644 --- a/src/transformers/models/fuyu/configuration_fuyu.py +++ b/src/transformers/models/fuyu/configuration_fuyu.py @@ -22,7 +22,7 @@ logger = logging.get_logger(__name__) FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "adept/fuyu-8b-base": "https://huggingface.co/adept/fuyu-8b-base/resolve/main/config.json", + "adept/fuyu-8b": "https://huggingface.co/adept/fuyu-8b/resolve/main/config.json", } @@ -31,7 +31,7 @@ class FuyuConfig(PretrainedConfig): This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate an Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the - [adept/fuyu-8b-base](https://huggingface.co/adept/fuyu-8b-base). + [adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. From c5037b459e117b9286c611092f38663f6cb763b0 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Thu, 9 Nov 2023 10:20:12 -0500 Subject: [PATCH 129/268] Use editable install for git deps (#27404) * Use editable install * Full command --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 6dda676126c1..4104fd477f33 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -283,7 +283,7 @@ def job_name(self): "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]", "pip install -U --upgrade-strategy eager tensorflow_probability", - "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", + "pip install -U -e --upgrade-strategy eager git+https://github.com/huggingface/accelerate@main#egg=accelerate", ], marker="is_pt_tf_cross_test", pytest_options={"rA": None, "durations": 0}, From c8b6052ff681e3ca8dab168dfd524b9fbbceb5bd Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 9 Nov 2023 18:52:29 +0100 Subject: [PATCH 130/268] Final fix of the accelerate installation issue (#27408) * fix * [test-all] commit * fix * [test-all] commit * [test-all] commit * fix * fix * fix * fix * fix --------- Co-authored-by: ydshieh --- .circleci/create_circleci_config.py | 9 +++++---- examples/pytorch/test_accelerate_examples.py | 3 +++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 4104fd477f33..ba766dc1b3d4 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -283,7 +283,7 @@ def job_name(self): "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]", "pip install -U --upgrade-strategy eager tensorflow_probability", - "pip install -U -e --upgrade-strategy eager git+https://github.com/huggingface/accelerate@main#egg=accelerate", + "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate", ], marker="is_pt_tf_cross_test", pytest_options={"rA": None, "durations": 0}, @@ -297,7 
+297,7 @@ def job_name(self): "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng", "pip install -U --upgrade-strategy eager --upgrade pip", "pip install -U --upgrade-strategy eager .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]", - "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", + "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate", ], marker="is_pt_flax_cross_test", pytest_options={"rA": None, "durations": 0}, @@ -310,7 +310,7 @@ def job_name(self): "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]", - "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", + "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate", ], parallelism=1, pytest_num_workers=6, @@ -403,6 +403,7 @@ def job_name(self): "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager .[sklearn,torch,sentencepiece,testing,torch-speech]", "pip install -U --upgrade-strategy eager -r examples/pytorch/_tests_requirements.txt", + "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate", ], ) @@ -510,7 +511,7 @@ def job_name(self): "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time ffmpeg", "pip install --upgrade --upgrade-strategy eager pip", "pip install -U --upgrade-strategy eager -e .[dev]", - "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate", + "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate", "pip install --upgrade --upgrade-strategy eager pytest pytest-sugar", "pip install -U --upgrade-strategy eager natten", "find -name __pycache__ -delete", diff --git a/examples/pytorch/test_accelerate_examples.py b/examples/pytorch/test_accelerate_examples.py index e2d9d090611f..837e84e3320a 100644 --- a/examples/pytorch/test_accelerate_examples.py +++ b/examples/pytorch/test_accelerate_examples.py @@ -21,6 +21,7 @@ import shutil import sys import tempfile +import unittest from unittest import mock from accelerate.utils import write_basic_config @@ -98,6 +99,7 @@ def test_run_glue_no_trainer(self): self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer"))) + @unittest.skip("Zach is working on this.") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) def test_run_clm_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() @@ -125,6 +127,7 @@ def test_run_clm_no_trainer(self): self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer"))) + @unittest.skip("Zach is working on this.") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) def test_run_mlm_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() From e38348ae8fa54f60e4110546e33d496808620850 Mon Sep 17 00:00:00 2001 From: Lucain Date: Thu, 9 Nov 2023 18:53:26 +0100 Subject: [PATCH 131/268] Fix RequestCounter to make it more future-proof (#27406) * Fix RequestCounter to make it more future-proof * code quality --- src/transformers/testing_utils.py | 51 ++++++++++++--------- 
tests/models/auto/test_modeling_auto.py | 15 +++--- tests/models/auto/test_modeling_tf_auto.py | 12 ++--- tests/models/auto/test_tokenization_auto.py | 9 ++-- tests/pipelines/test_pipelines_common.py | 6 +-- 5 files changed, 48 insertions(+), 45 deletions(-) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 815a13c9e96d..eb21cbac2303 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -29,14 +29,15 @@ import tempfile import time import unittest +from collections import defaultdict from collections.abc import Mapping from io import StringIO from pathlib import Path from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union from unittest import mock +from unittest.mock import patch -import huggingface_hub -import requests +import urllib3 from transformers import logging as transformers_logging @@ -1983,32 +1984,40 @@ def run_command(command: List[str], return_stdout=False): class RequestCounter: """ Helper class that will count all requests made online. + + Might not be robust if urllib3 changes its logging format but should be good enough for us. + + Usage: + ```py + with RequestCounter() as counter: + _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") + assert counter["GET"] == 0 + assert counter["HEAD"] == 1 + assert counter.total_calls == 1 + ``` """ def __enter__(self): - self.head_request_count = 0 - self.get_request_count = 0 - self.other_request_count = 0 - - # Mock `get_session` to count HTTP calls. - self.old_get_session = huggingface_hub.utils._http.get_session - self.session = requests.Session() - self.session.request = self.new_request - huggingface_hub.utils._http.get_session = lambda: self.session + self._counter = defaultdict(int) + self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug) + self.mock = self.patcher.start() return self - def __exit__(self, *args, **kwargs): - huggingface_hub.utils._http.get_session = self.old_get_session + def __exit__(self, *args, **kwargs) -> None: + for call in self.mock.call_args_list: + log = call.args[0] % call.args[1:] + for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"): + if method in log: + self._counter[method] += 1 + break + self.patcher.stop() - def new_request(self, method, **kwargs): - if method == "GET": - self.get_request_count += 1 - elif method == "HEAD": - self.head_request_count += 1 - else: - self.other_request_count += 1 + def __getitem__(self, key: str) -> int: + return self._counter[key] - return requests.request(method=method, **kwargs) + @property + def total_calls(self) -> int: + return sum(self._counter.values()) def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): diff --git a/tests/models/auto/test_modeling_auto.py b/tests/models/auto/test_modeling_auto.py index 55bc3f3999ff..41f52517483c 100644 --- a/tests/models/auto/test_modeling_auto.py +++ b/tests/models/auto/test_modeling_auto.py @@ -482,25 +482,22 @@ def test_model_from_flax_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_flax=True` to load this model"): _ = AutoModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") - @unittest.skip( - "Currently failing with new huggingface_hub release. See: https://github.com/huggingface/transformers/pull/27389" - ) def test_cached_model_has_minimum_calls_to_head(self): # Make sure we have cached the model. 
_ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") - self.assertEqual(counter.get_request_count, 0) - self.assertEqual(counter.head_request_count, 1) - self.assertEqual(counter.other_request_count, 0) + self.assertEqual(counter["GET"], 0) + self.assertEqual(counter["HEAD"], 1) + self.assertEqual(counter.total_calls, 1) # With a sharded checkpoint _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") with RequestCounter() as counter: _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") - self.assertEqual(counter.get_request_count, 0) - self.assertEqual(counter.head_request_count, 1) - self.assertEqual(counter.other_request_count, 0) + self.assertEqual(counter["GET"], 0) + self.assertEqual(counter["HEAD"], 1) + self.assertEqual(counter.total_calls, 1) def test_attr_not_existing(self): from transformers.models.auto.auto_factory import _LazyAutoMapping diff --git a/tests/models/auto/test_modeling_tf_auto.py b/tests/models/auto/test_modeling_tf_auto.py index 2f6fe476158f..537d48a57e48 100644 --- a/tests/models/auto/test_modeling_tf_auto.py +++ b/tests/models/auto/test_modeling_tf_auto.py @@ -301,14 +301,14 @@ def test_cached_model_has_minimum_calls_to_head(self): _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") - self.assertEqual(counter.get_request_count, 0) - self.assertEqual(counter.head_request_count, 1) - self.assertEqual(counter.other_request_count, 0) + self.assertEqual(counter["GET"], 0) + self.assertEqual(counter["HEAD"], 1) + self.assertEqual(counter.total_calls, 1) # With a sharded checkpoint _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") with RequestCounter() as counter: _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") - self.assertEqual(counter.get_request_count, 0) - self.assertEqual(counter.head_request_count, 1) - self.assertEqual(counter.other_request_count, 0) + self.assertEqual(counter["GET"], 0) + self.assertEqual(counter["HEAD"], 1) + self.assertEqual(counter.total_calls, 1) diff --git a/tests/models/auto/test_tokenization_auto.py b/tests/models/auto/test_tokenization_auto.py index 40dc99cd1368..597c995b6e32 100644 --- a/tests/models/auto/test_tokenization_auto.py +++ b/tests/models/auto/test_tokenization_auto.py @@ -419,14 +419,11 @@ def test_revision_not_found(self): ): _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") - @unittest.skip( - "Currently failing with new huggingface_hub release. See: https://github.com/huggingface/transformers/pull/27389" - ) def test_cached_tokenizer_has_minimum_calls_to_head(self): # Make sure we have cached the tokenizer. 
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") - self.assertEqual(counter.get_request_count, 0) - self.assertEqual(counter.head_request_count, 1) - self.assertEqual(counter.other_request_count, 0) + self.assertEqual(counter["GET"], 0) + self.assertEqual(counter["HEAD"], 1) + self.assertEqual(counter.total_calls, 1) diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 56467bdc4b8b..e760d2790146 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -763,9 +763,9 @@ def test_cached_pipeline_has_minimum_calls_to_head(self): _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") - self.assertEqual(counter.get_request_count, 0) - self.assertEqual(counter.head_request_count, 1) - self.assertEqual(counter.other_request_count, 0) + self.assertEqual(counter["GET"], 0) + self.assertEqual(counter["HEAD"], 1) + self.assertEqual(counter.total_calls, 1) @require_torch def test_chunk_pipeline_batching_single_file(self): From 51a98c40eee1e2825b4ebe1f4b279133b9ea8426 Mon Sep 17 00:00:00 2001 From: Yoach Lacombe <52246514+ylacombe@users.noreply.github.com> Date: Thu, 9 Nov 2023 18:35:42 +0000 Subject: [PATCH 132/268] remove failing tests and clean FE files (#27414) * remove failing tests and clean FE files * remove same similar text from tvlt --- .../pop2piano/feature_extraction_pop2piano.py | 14 -------- .../feature_extraction_seamless_m4t.py | 16 --------- .../whisper/feature_extraction_whisper.py | 16 +-------- .../tvlt/test_feature_extraction_tvlt.py | 34 +------------------ 4 files changed, 2 insertions(+), 78 deletions(-) diff --git a/src/transformers/models/pop2piano/feature_extraction_pop2piano.py b/src/transformers/models/pop2piano/feature_extraction_pop2piano.py index 3c14085c21aa..0568fb74efba 100644 --- a/src/transformers/models/pop2piano/feature_extraction_pop2piano.py +++ b/src/transformers/models/pop2piano/feature_extraction_pop2piano.py @@ -14,7 +14,6 @@ # limitations under the License. """ Feature extractor class for Pop2Piano""" -import copy import warnings from typing import List, Optional, Union @@ -448,16 +447,3 @@ def __call__( ) return output - - def to_dict(self): - """ - Serializes this instance to a Python dictionary. - - Returns: - `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. - """ - output = copy.deepcopy(self.__dict__) - output["feature_extractor_type"] = self.__class__.__name__ - if "mel_filters" in output: - del output["mel_filters"] - return output diff --git a/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py b/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py index 852be5c73a1d..13bb687dd595 100644 --- a/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py @@ -16,7 +16,6 @@ Feature extractor class for SeamlessM4T """ -import copy from typing import List, Optional, Union import numpy as np @@ -288,18 +287,3 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs - - def to_dict(self): - """ - Serializes this instance to a Python dictionary. 
- - Returns: - `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. - """ - output = copy.deepcopy(self.__dict__) - output["feature_extractor_type"] = self.__class__.__name__ - if "mel_filters" in output: - del output["mel_filters"] - if "window" in output: - del output["window"] - return output diff --git a/src/transformers/models/whisper/feature_extraction_whisper.py b/src/transformers/models/whisper/feature_extraction_whisper.py index 70eb8bd94e76..b6c171ce932f 100644 --- a/src/transformers/models/whisper/feature_extraction_whisper.py +++ b/src/transformers/models/whisper/feature_extraction_whisper.py @@ -15,8 +15,7 @@ """ Feature extractor class for Whisper """ -import copy -from typing import Any, Dict, List, Optional, Union +from typing import List, Optional, Union import numpy as np @@ -262,16 +261,3 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs - - def to_dict(self) -> Dict[str, Any]: - """ - Serializes this instance to a Python dictionary. - - Returns: - `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. - """ - output = copy.deepcopy(self.__dict__) - output["feature_extractor_type"] = self.__class__.__name__ - if "mel_filters" in output: - del output["mel_filters"] - return output diff --git a/tests/models/tvlt/test_feature_extraction_tvlt.py b/tests/models/tvlt/test_feature_extraction_tvlt.py index 166f31021cde..e2d8c624b0b7 100644 --- a/tests/models/tvlt/test_feature_extraction_tvlt.py +++ b/tests/models/tvlt/test_feature_extraction_tvlt.py @@ -15,15 +15,13 @@ """ Testing suite for the TVLT feature extraction. """ import itertools -import os import random -import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available -from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio +from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin @@ -123,36 +121,6 @@ def test_feat_extract_properties(self): self.assertTrue(hasattr(feature_extractor, "chunk_length")) self.assertTrue(hasattr(feature_extractor, "sampling_rate")) - def test_feat_extract_from_and_save_pretrained(self): - feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) - - with tempfile.TemporaryDirectory() as tmpdirname: - saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] - check_json_file_has_correct_format(saved_file) - feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) - - dict_first = feat_extract_first.to_dict() - dict_second = feat_extract_second.to_dict() - mel_1 = dict_first.pop("mel_filters") - mel_2 = dict_second.pop("mel_filters") - self.assertTrue(np.allclose(mel_1, mel_2)) - self.assertEqual(dict_first, dict_second) - - def test_feat_extract_to_json_file(self): - feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) - - with tempfile.TemporaryDirectory() as tmpdirname: - json_file_path = os.path.join(tmpdirname, "feat_extract.json") - feat_extract_first.to_json_file(json_file_path) - feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) - - dict_first = feat_extract_first.to_dict() - dict_second = feat_extract_second.to_dict() - mel_1 = dict_first.pop("mel_filters") - mel_2 = 
dict_second.pop("mel_filters") - self.assertTrue(np.allclose(mel_1, mel_2)) - self.assertEqual(dict_first, dict_second) - def test_call(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) From 740cd935900dea019109a5ad5bc083128105b207 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 9 Nov 2023 21:39:03 +0100 Subject: [PATCH 133/268] Fix `Owlv2` checkpoint name and a default value in `Owlv2VisionConfig` (#27402) * fix * fix * fix * fix * fix * fix * fix --------- Co-authored-by: ydshieh --- .../models/owlv2/configuration_owlv2.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/transformers/models/owlv2/configuration_owlv2.py b/src/transformers/models/owlv2/configuration_owlv2.py index 54611c4daf64..4b96cf1f142c 100644 --- a/src/transformers/models/owlv2/configuration_owlv2.py +++ b/src/transformers/models/owlv2/configuration_owlv2.py @@ -32,13 +32,13 @@ } -# Copied from transformers.models.owlvit.configuration_owlvit.OwlViTTextConfig with OwlViT->Owlv2, owlvit-base-patch-16->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2 +# Copied from transformers.models.owlvit.configuration_owlvit.OwlViTTextConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2 class Owlv2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`Owlv2TextModel`]. It is used to instantiate an Owlv2 text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Owlv2 - [google/owlv2-base-patch32](https://huggingface.co/google/owlv2-base-patch32) architecture. + [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. @@ -83,10 +83,10 @@ class Owlv2TextConfig(PretrainedConfig): ```python >>> from transformers import Owlv2TextConfig, Owlv2TextModel - >>> # Initializing a Owlv2TextModel with google/owlv2-base-patch32 style configuration + >>> # Initializing a Owlv2TextModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2TextConfig() - >>> # Initializing a Owlv2TextConfig from the google/owlv2-base-patch32 style configuration + >>> # Initializing a Owlv2TextConfig from the google/owlv2-base-patch16 style configuration >>> model = Owlv2TextModel(configuration) >>> # Accessing the model configuration @@ -145,13 +145,13 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) -# Copied from transformers.models.owlvit.configuration_owlvit.OwlViTVisionConfig with OwlViT->Owlv2, owlvit-base-patch-32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2 +# Copied from transformers.models.owlvit.configuration_owlvit.OwlViTVisionConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2, 32->16 class Owlv2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`Owlv2VisionModel`]. It is used to instantiate an OWLv2 image encoder according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 - [google/owlv2-base-patch32](https://huggingface.co/google/owlv2-base-patch32) architecture. + [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. @@ -169,7 +169,7 @@ class Owlv2VisionConfig(PretrainedConfig): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. - patch_size (`int`, *optional*, defaults to 32): + patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, @@ -189,10 +189,10 @@ class Owlv2VisionConfig(PretrainedConfig): ```python >>> from transformers import Owlv2VisionConfig, Owlv2VisionModel - >>> # Initializing a Owlv2VisionModel with google/owlv2-base-patch32 style configuration + >>> # Initializing a Owlv2VisionModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2VisionConfig() - >>> # Initializing a Owlv2VisionModel model from the google/owlv2-base-patch32 style configuration + >>> # Initializing a Owlv2VisionModel model from the google/owlv2-base-patch16 style configuration >>> model = Owlv2VisionModel(configuration) >>> # Accessing the model configuration @@ -209,7 +209,7 @@ def __init__( num_attention_heads=12, num_channels=3, image_size=768, - patch_size=32, + patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, @@ -251,13 +251,13 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) -# Copied from transformers.models.owlvit.configuration_owlvit.OwlViTConfig with OwlViT->Owlv2, owlvit-base-patch-32->owlv2-base-patch32, owlvit->owlv2, OWL-ViT->OWLv2 +# Copied from transformers.models.owlvit.configuration_owlvit.OwlViTConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2 class Owlv2Config(PretrainedConfig): r""" [`Owlv2Config`] is the configuration class to store the configuration of an [`Owlv2Model`]. It is used to instantiate an OWLv2 model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 - [google/owlv2-base-patch32](https://huggingface.co/google/owlv2-base-patch32) architecture. + [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. 
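A minimal sketch of the corrected Owlv2 defaults described in the configuration change above, assuming `Owlv2VisionConfig` is importable from `transformers` as in this patch; the snippet is illustrative and not part of the diff itself:

```python
# Illustrative only: after this fix the vision defaults mirror google/owlv2-base-patch16.
from transformers import Owlv2VisionConfig

config = Owlv2VisionConfig()
print(config.patch_size)  # expected: 16 (the default was previously 32)
```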
From cf32c941350cb296e4c2c9e26a9274291d515e90 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 9 Nov 2023 22:01:06 +0100 Subject: [PATCH 134/268] Run all tests if `circleci/create_circleci_config.py` is modified (#27413) * fix * fix --------- Co-authored-by: ydshieh --- utils/tests_fetcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index a60d6a558dcb..728a76bde008 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -948,7 +948,7 @@ def infer_tests_to_run( print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}") # Grab the corresponding test files: - if "setup.py" in modified_files: + if any(x in modified_files for x in ["setup.py", ".circleci/create_circleci_config.py"]): test_files_to_run = ["tests", "examples"] repo_utils_launch = True # in order to trigger pipeline tests even if no code change at all From 184f60dcec6f7f664687a9e211e8d2216052b05d Mon Sep 17 00:00:00 2001 From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com> Date: Fri, 10 Nov 2023 19:05:15 +0800 Subject: [PATCH 135/268] add attention_mask and position_ids in assisted model (#26892) * add attention_mask and position_ids in assisted model * fix bug * fix attention mask * fix attention_mask * check assist inputs * check assist input ids length * fix assist model type * set assist attention mask device --- src/transformers/generation/utils.py | 90 ++++++++++++++-------------- 1 file changed, 46 insertions(+), 44 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 69cbc373e5f7..4dbfc367064b 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -4488,11 +4488,6 @@ def assisted_decoding( else: num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens - # check if assistant model accepts encoder_outputs - assistant_accepts_encoder_outputs = "encoder_outputs" in set( - inspect.signature(assistant_model.forward).parameters.keys() - ) - # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() @@ -4535,15 +4530,6 @@ def assisted_decoding( # other auxiliary variables max_len = stopping_criteria[0].max_length - assistant_kv_indexing = ( - 1 - if "bloom" in assistant_model.__class__.__name__.lower() - or ( - assistant_model.config.architectures is not None - and "bloom" in assistant_model.config.architectures[0].lower() - ) - else 0 - ) this_peer_finished = False # used by synced_gpus only while True: @@ -4564,42 +4550,28 @@ def assisted_decoding( # `.generate()` call if we decide to add `past_key_values` as a possible output of generate, as we # need access to the assistant cache to secure strong speedups. candidate_input_ids = input_ids + assistant_attention_mask = model_kwargs.get("attention_mask", None) + assistant_decoder_attention_mask = model_kwargs.get("decoder_attention_mask", None) + assistant_encoder_outputs = (model_kwargs.get("assistant_encoder_outputs", None),) for _ in range(int(num_assistant_tokens)): # 1.1. 
use the assistant model to obtain the next candidate logits - if "assistant_past_key_values" in model_kwargs: - prev_seq_len = model_kwargs["assistant_past_key_values"][0][assistant_kv_indexing].shape[-2] - # `new_token_len` can be 1 or 2 (next token in assistant + last token picked by the larger model) - new_token_len = candidate_input_ids.shape[1] - prev_seq_len - assist_inputs = candidate_input_ids[:, -new_token_len:] - # TODO (joao): make it compatible with models that use unconventional fwd pass logic, like blip2 - if assistant_model.config.is_encoder_decoder: - assistant_model_outputs = assistant_model( - decoder_input_ids=assist_inputs, - past_key_values=model_kwargs["assistant_past_key_values"], - encoder_outputs=model_kwargs["assistant_encoder_outputs"], - ) - else: - encoder_kwargs = {} - - if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: - encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] - - assistant_model_outputs = assistant_model( - assist_inputs, past_key_values=model_kwargs["assistant_past_key_values"], **encoder_kwargs - ) - else: + assistant_inputs = assistant_model.prepare_inputs_for_generation( + candidate_input_ids, + attention_mask=assistant_attention_mask, + decoder_attention_mask=assistant_decoder_attention_mask, + encoder_outputs=assistant_encoder_outputs, + past_key_values=model_kwargs.get("assistant_past_key_values", None), + ) + if assistant_inputs.get("past_key_values", None) is not None: if assistant_model.config.is_encoder_decoder: - assistant_model_outputs = assistant_model( - decoder_input_ids=candidate_input_ids, - encoder_outputs=model_kwargs["assistant_encoder_outputs"], - ) + input_ids_len = assistant_inputs["decoder_input_ids"].shape[-1] else: - encoder_kwargs = {} + input_ids_len = assistant_inputs["input_ids"].shape[-1] - if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: - encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] + if input_ids_len not in (1, 2): + raise ValueError("The length of the input ids in assistant inputs should be 1 or 2") - assistant_model_outputs = assistant_model(candidate_input_ids, **encoder_kwargs) + assistant_model_outputs = assistant_model(**assistant_inputs) # 1.2. greedily select the next candidate token model_kwargs["assistant_past_key_values"] = assistant_model_outputs.past_key_values @@ -4607,8 +4579,31 @@ def assisted_decoding( assistant_model_outputs.logits[:, -1, :] = logits_processor( candidate_input_ids, assistant_model_outputs.logits[:, -1, :] ) + new_token = assistant_model_outputs.logits[:, -1, :].argmax(dim=-1) candidate_input_ids = torch.cat((candidate_input_ids, new_token[:, None]), dim=-1) + if assistant_model.config.is_encoder_decoder and assistant_decoder_attention_mask is not None: + assistant_decoder_attention_mask = torch.cat( + ( + assistant_decoder_attention_mask, + torch.ones( + [1, 1], + dtype=assistant_decoder_attention_mask.dtype, + device=assistant_decoder_attention_mask.device, + ), + ), + dim=-1, + ) + elif not assistant_model.config.is_encoder_decoder and assistant_attention_mask is not None: + assistant_attention_mask = torch.cat( + ( + assistant_attention_mask, + torch.ones( + [1, 1], dtype=assistant_attention_mask.dtype, device=assistant_attention_mask.device + ), + ), + dim=-1, + ) # 1.3. 
stop assistant generation on EOS if eos_token_id_tensor is not None: @@ -4744,6 +4739,13 @@ def assisted_decoding( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) + # Update attention_mask for the assistant's next round of generations + if n_matches > 0 and model_kwargs.get("attention_mask", None) is not None: + attention_mask = model_kwargs["attention_mask"] + model_kwargs["attention_mask"] = torch.cat( + [attention_mask, attention_mask.new_ones((attention_mask.shape[0], n_matches))], dim=-1 + ) + # if eos_token was found in one sentence, set sentence to finished if eos_token_id_tensor is not None: unfinished_sequences = unfinished_sequences.mul( From fd685cfd593e1e254f7fbbe9ee91aa679fa51199 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Fri, 10 Nov 2023 13:45:00 +0100 Subject: [PATCH 136/268] [`Quantization`] Add str to enum conversion for AWQ (#27320) * add str to enum conversion * fixup * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/utils/quantization_config.py | 11 +++++++++++ tests/quantization/autoawq/test_awq.py | 7 +++++++ 2 files changed, 18 insertions(+) diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index 34f7cb799a72..222ba68a6dc1 100644 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -44,6 +44,16 @@ class AWQLinearVersion(str, Enum): GEMM = "gemm" GEMV = "gemv" + @staticmethod + def from_str(version: str): + version = version.lower() + if version == "gemm": + return AWQLinearVersion.GEMM + elif version == "gemv": + return AWQLinearVersion.GEMV + else: + raise ValueError(f"Unknown AWQLinearVersion {version}") + class AwqBackendPackingMethod(str, Enum): AUTOAWQ = "autoawq" @@ -566,6 +576,7 @@ def post_init(self): f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and {AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}" ) + self.version = AWQLinearVersion.from_str(self.version) if self.version not in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV]: raise ValueError( f"Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV] - not recognized version {self.version}" diff --git a/tests/quantization/autoawq/test_awq.py b/tests/quantization/autoawq/test_awq.py index 2b3622d82321..1f1b51b77831 100644 --- a/tests/quantization/autoawq/test_awq.py +++ b/tests/quantization/autoawq/test_awq.py @@ -47,6 +47,13 @@ def test_wrong_backend(self): with self.assertRaises(ValueError): AwqConfig(bits=4, backend="") + # These should work fine + _ = AwqConfig(bits=4, version="GEMM") + _ = AwqConfig(bits=4, version="gemm") + + with self.assertRaises(ValueError): + AwqConfig(bits=4, backend="unexisting-backend") + # LLMAWQ does not work on a T4 with self.assertRaises(ValueError): AwqConfig(bits=4, backend="llm-awq") From 9dd58c53dd420550142bb30ed4861670eabb0cc0 Mon Sep 17 00:00:00 2001 From: Yoach Lacombe <52246514+ylacombe@users.noreply.github.com> Date: Fri, 10 Nov 2023 13:40:30 +0000 Subject: [PATCH 137/268] update Bark FA2 docs (#27400) * update Bark FA2 docs * update benchmark section * Update bark.md * Apply suggestions from code review Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> * rephrase --------- Co-authored-by: Sanchit 
Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> --- docs/source/en/model_doc/bark.md | 58 ++++++++++++++++++++++------ docs/source/en/perf_infer_gpu_one.md | 2 +- 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/docs/source/en/model_doc/bark.md b/docs/source/en/model_doc/bark.md index 0d9127d917d2..2160159bd783 100644 --- a/docs/source/en/model_doc/bark.md +++ b/docs/source/en/model_doc/bark.md @@ -44,7 +44,19 @@ device = "cuda" if torch.cuda.is_available() else "cpu" model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16).to(device) ``` -#### Using 🤗 Better Transformer +#### Using CPU offload + +As mentioned above, Bark is made up of 4 sub-models, which are called up sequentially during audio generation. In other words, while one sub-model is in use, the other sub-models are idle. + +If you're using a CUDA device, a simple solution to benefit from an 80% reduction in memory footprint is to offload the submodels from GPU to CPU when they're idle. This operation is called *CPU offloading*. You can use it with one line of code as follows: + +```python +model.enable_cpu_offload() +``` + +Note that 🤗 Accelerate must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/accelerate/basic_tutorials/install) + +#### Using Better Transformer Better Transformer is an 🤗 Optimum feature that performs kernel fusion under the hood. You can gain 20% to 30% in speed with zero performance degradation. It only requires one line of code to export the model to 🤗 Better Transformer: @@ -54,21 +66,46 @@ model = model.to_bettertransformer() Note that 🤗 Optimum must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/optimum/installation) -#### Using CPU offload +#### Using Flash Attention 2 -As mentioned above, Bark is made up of 4 sub-models, which are called up sequentially during audio generation. In other words, while one sub-model is in use, the other sub-models are idle. +Flash Attention 2 is an even faster, optimized version of the previous optimization. + +##### Installation + +First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [above](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer). -If you're using a CUDA device, a simple solution to benefit from an 80% reduction in memory footprint is to offload the GPU's submodels when they're idle. This operation is called CPU offloading. You can use it with one line of code. +Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2: + +```bash +pip install -U flash-attn --no-build-isolation +``` + + +##### Usage + +To load a model using Flash Attention 2, we can pass the `use_flash_attention_2` flag to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). We'll also load the model in half-precision (e.g. 
`torch.float16`), since it results in almost no degradation to audio quality but significantly lower memory usage and faster inference: ```python -model.enable_cpu_offload() +model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16, use_flash_attention_2=True).to(device) ``` -Note that 🤗 Accelerate must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/accelerate/basic_tutorials/install) +##### Performance comparison + + +The following diagram shows the latency for the native attention implementation (no optimisation) against Better Transformer and Flash Attention 2. In all cases, we generate 400 semantic tokens on a 40GB A100 GPU with PyTorch 2.1. Flash Attention 2 is also consistently faster than Better Transformer, and its performance improves even more as batch sizes increase: + +
+ +
+ +To put this into perspective, on an NVIDIA A100 and when generating 400 semantic tokens with a batch size of 16, you can get 17 times the [throughput](https://huggingface.co/blog/optimizing-bark#throughput) and still be 2 seconds faster than generating sentences one by one with the native model implementation. In other words, all the samples will be generated 17 times faster. + +At batch size 8, on an NVIDIA A100, Flash Attention 2 is also 10% faster than Better Transformer, and at batch size 16, 25%. + #### Combining optimization techniques -You can combine optimization techniques, and use CPU offload, half-precision and 🤗 Better Transformer all at once. +You can combine optimization techniques, and use CPU offload, half-precision and Flash Attention 2 (or 🤗 Better Transformer) all at once. ```python from transformers import BarkModel @@ -76,11 +113,8 @@ import torch device = "cuda" if torch.cuda.is_available() else "cpu" -# load in fp16 -model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16).to(device) - -# convert to bettertransformer -model = BetterTransformer.transform(model, keep_original_model=False) +# load in fp16 and use Flash Attention 2 +model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16, use_flash_attention_2=True).to(device) # enable CPU offload model.enable_cpu_offload() diff --git a/docs/source/en/perf_infer_gpu_one.md b/docs/source/en/perf_infer_gpu_one.md index 06e91c550226..ba339c1a3068 100644 --- a/docs/source/en/perf_infer_gpu_one.md +++ b/docs/source/en/perf_infer_gpu_one.md @@ -36,7 +36,7 @@ FlashAttention-2 is experimental and may change considerably in future versions. 1. additionally parallelizing the attention computation over sequence length 2. partitioning the work between GPU threads to reduce communication and shared memory reads/writes between them -FlashAttention-2 supports inference with Llama, Mistral, and Falcon models. You can request to add FlashAttention-2 support for another model by opening a GitHub Issue or Pull Request. +FlashAttention-2 supports inference with Llama, Mistral, Falcon and Bark models. You can request to add FlashAttention-2 support for another model by opening a GitHub Issue or Pull Request. 
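For reference, a loading sketch analogous to the Bark example above, applied to one of the other supported architectures; the checkpoint name, GPU availability, and an installed `flash-attn` package are assumptions rather than part of this patch:

```python
# Illustrative sketch only: load a supported decoder-only model with Flash Attention 2.
# Assumes a FlashAttention-2-compatible GPU and `pip install -U flash-attn --no-build-isolation`.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",  # example checkpoint; any supported Llama/Mistral/Falcon model
    torch_dtype=torch.float16,    # half precision is recommended alongside Flash Attention 2
    use_flash_attention_2=True,
).to("cuda")
```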
Before you begin, make sure you have FlashAttention-2 installed (see the [installation](https://github.com/Dao-AILab/flash-attention?tab=readme-ov-file#installation-and-features) guide for more details about prerequisites): From 7e9f10ac94c626780cf9e17485e73aec2c644bf2 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Fri, 10 Nov 2023 19:19:10 +0530 Subject: [PATCH 138/268] Add CLVP (#24745) * init commit * attention arch done except rotary emb * rotary emb done * text encoder working * outputs matching * arch first pass done * make commands done, tests and docs remaining * all tests passed, only docs remaining * docs done * doc-builder fix * convert script removed(not relevant) * minor comments done * added ckpt conversion script * tokenizer done * very minor fix of index.md 2 * mostly make fixup related * all done except fe and rotary emb * very small change * removed unidecode dependency * style changes * tokenizer removed require_backends * added require_inflect to tokenizer tests * removed VOCAB_FILES in tokenizer test * inflect dependency removed * added rotary pos emb cache and simplified the apply method * style * little doc change * more comments * feature extractor added * added processor * auto-regressive config added * added CLVPConditioningEncoder * comments done except the test one * weights added successfull(NOT tested) * tokenizer fix with numbers * generate outputs matching * almost tests passing Integ tests not written * Integ tests added * major CUDA error fixed * docs done * rebase and multiple fixes * fixed rebase overwrites * generate code simplified and tests for AutoRegressive model added * minor changes * refectored gpt2 code in clvp file * weights done and all code refactored * mostly done except the fast_tokenizer * doc test fix * config file's doc fixes * more config fix * more comments * tokenizer comments mostly done * modeling file mostly refactored and can load modules * ClvpEncoder tested * ClvpDecoder, ClvpModel and ClvpForCausalLM tested * integration and all tests passed * more fixes * docs almost done * ckpt conversion refectored * style and some failing tests fix * comments * temporary output fix but test_assisted_decoding_matches_greedy_search test fails * majority changes done * use_cache outputs same now! Along with the asisted_greedy_decoding test fix * more comments * more comments * prepare_inputs_for_generation fixed and _prepare_model_inputs added * style fix * clvp.md change * moved clvpconditionalencoder norms * add model to new index * added tokenizer input_ids_with_special_tokens * small fix * config mostly done * added config-tester and changed conversion script * more comments * comments * style fix * some comments * tokenizer changed back to prev state * small commnets * added output hidden states for the main model * style fix * comments * small change * revert small change * . 
* Update clvp.md * Update test_modeling_clvp.py * :) * some minor change * new fixes * remove to_dict from FE --- README.md | 1 + README_es.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + docs/source/en/model_doc/clvp.md | 126 ++ src/transformers/__init__.py | 38 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 1 + .../models/auto/processing_auto.py | 1 + .../models/auto/tokenization_auto.py | 1 + src/transformers/models/clvp/__init__.py | 83 + .../models/clvp/configuration_clvp.py | 457 ++++ .../models/clvp/convert_clvp_to_hf.py | 234 ++ .../models/clvp/feature_extraction_clvp.py | 238 ++ src/transformers/models/clvp/modeling_clvp.py | 1945 +++++++++++++++++ .../models/clvp/number_normalizer.py | 238 ++ .../models/clvp/processing_clvp.py | 90 + .../models/clvp/tokenization_clvp.py | 379 ++++ src/transformers/utils/dummy_pt_objects.py | 45 + tests/models/clvp/__init__.py | 0 .../clvp/test_feature_extraction_clvp.py | 237 ++ tests/models/clvp/test_modeling_clvp.py | 640 ++++++ tests/models/clvp/test_processor_clvp.py | 136 ++ tests/models/clvp/test_tokenization_clvp.py | 312 +++ utils/check_repo.py | 2 + 32 files changed, 5218 insertions(+) create mode 100644 docs/source/en/model_doc/clvp.md create mode 100644 src/transformers/models/clvp/__init__.py create mode 100644 src/transformers/models/clvp/configuration_clvp.py create mode 100644 src/transformers/models/clvp/convert_clvp_to_hf.py create mode 100644 src/transformers/models/clvp/feature_extraction_clvp.py create mode 100644 src/transformers/models/clvp/modeling_clvp.py create mode 100644 src/transformers/models/clvp/number_normalizer.py create mode 100644 src/transformers/models/clvp/processing_clvp.py create mode 100644 src/transformers/models/clvp/tokenization_clvp.py create mode 100644 tests/models/clvp/__init__.py create mode 100644 tests/models/clvp/test_feature_extraction_clvp.py create mode 100644 tests/models/clvp/test_modeling_clvp.py create mode 100644 tests/models/clvp/test_processor_clvp.py create mode 100644 tests/models/clvp/test_tokenization_clvp.py diff --git a/README.md b/README.md index 307d1bf9cb8e..7fed44a6d900 100644 --- a/README.md +++ b/README.md @@ -321,6 +321,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker. +1. 
**[CLVP](https://huggingface.co/docs/transformers/main/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. 1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (from MetaAI) released with the paper [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) by Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve. 1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. diff --git a/README_es.md b/README_es.md index 48e9ed3db0ed..172bd5569151 100644 --- a/README_es.md +++ b/README_es.md @@ -296,6 +296,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker. +1. **[CLVP](https://huggingface.co/docs/transformers/main/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. 1. 
**[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (from MetaAI) released with the paper [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) by Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve. 1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. diff --git a/README_hd.md b/README_hd.md index 920aff460e0a..3616e1a84032 100644 --- a/README_hd.md +++ b/README_hd.md @@ -270,6 +270,7 @@ conda install -c huggingface transformers 1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (LAION-AI से) Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov. द्वाराअनुसंधान पत्र [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) के साथ जारी किया गया 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (OpenAI से) साथ वाला पेपर [लर्निंग ट्रांसफरेबल विजुअल मॉडल फ्रॉम नेचुरल लैंग्वेज सुपरविजन](https://arxiv.org /abs/2103.00020) एलेक रैडफोर्ड, जोंग वूक किम, क्रिस हैलासी, आदित्य रमेश, गेब्रियल गोह, संध्या अग्रवाल, गिरीश शास्त्री, अमांडा एस्केल, पामेला मिश्किन, जैक क्लार्क, ग्रेचेन क्रुएगर, इल्या सुत्स्केवर द्वारा। 1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker. +1. **[CLVP](https://huggingface.co/docs/transformers/main/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (सेल्सफोर्स से) साथ में पेपर [प्रोग्राम सिंथेसिस के लिए एक संवादात्मक प्रतिमान](https://arxiv.org/abs/2203.13474) एरिक निजकैंप, बो पैंग, हिरोआकी हयाशी, लिफू तू, हुआन वांग, यिंगबो झोउ, सिल्वियो सावरेस, कैमिंग जिओंग रिलीज। 1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (MetaAI से) Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve. द्वाराअनुसंधान पत्र [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) के साथ जारी किया गया 1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (माइक्रोसॉफ्ट रिसर्च एशिया से) कागज के साथ [फास्ट ट्रेनिंग कन्वर्जेंस के लिए सशर्त डीईटीआर](https://arxiv. 
org/abs/2108.06152) डेपू मेंग, ज़ियाओकांग चेन, ज़ेजिया फैन, गैंग ज़ेंग, होउकियांग ली, युहुई युआन, लेई सन, जिंगडोंग वांग द्वारा। diff --git a/README_ja.md b/README_ja.md index 6079d40a2429..60b46e917629 100644 --- a/README_ja.md +++ b/README_ja.md @@ -330,6 +330,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (LAION-AI から) Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov. から公開された研究論文 [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (OpenAI から) Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever から公開された研究論文: [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (University of Göttingen から) Timo Lüddecke and Alexander Ecker から公開された研究論文: [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) +1. **[CLVP](https://huggingface.co/docs/transformers/main/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (Salesforce から) Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong から公開された研究論文: [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (MetaAI から) Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve. から公開された研究論文 [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) 1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (Microsoft Research Asia から) Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang から公開された研究論文: [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) diff --git a/README_ko.md b/README_ko.md index b5c0031b178a..583b8cce1bc3 100644 --- a/README_ko.md +++ b/README_ko.md @@ -245,6 +245,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (LAION-AI 에서 제공)은 Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.의 [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687)논문과 함께 발표했습니다. 1. 
**[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (OpenAI 에서) Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 의 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 논문과 함께 발표했습니다. 1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (University of Göttingen 에서) Timo Lüddecke and Alexander Ecker 의 [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) 논문과 함께 발표했습니다. +1. **[CLVP](https://huggingface.co/docs/transformers/main/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (Salesforce 에서) Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 의 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 논문과 함께 발표했습니다. 1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (MetaAI 에서 제공)은 Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve.의 [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)논문과 함께 발표했습니다. 1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (Microsoft Research Asia 에서) Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 의 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 논문과 함께 발표했습니다. diff --git a/README_zh-hans.md b/README_zh-hans.md index 3fc5fd3b83f7..d9581112a872 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -269,6 +269,7 @@ conda install -c huggingface transformers 1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (来自 LAION-AI) 伴随论文 [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) 由 Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov 发布。 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。 1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (来自 University of Göttingen) 伴随论文 [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) 由 Timo Lüddecke and Alexander Ecker 发布。 +1. **[CLVP](https://huggingface.co/docs/transformers/main/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 1. 
**[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (来自 Salesforce) 伴随论文 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 由 Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 发布。 1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (来自 MetaAI) 伴随论文 [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) 由 Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve 发布。 1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 8b4ebed36386..77384e8d17c7 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -281,6 +281,7 @@ conda install -c huggingface transformers 1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker. +1. **[CLVP](https://huggingface.co/docs/transformers/main/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. 1. 
**[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (from MetaAI) released with the paper [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) by Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve. 1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 4d434b7a18c1..c07502402ccd 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -663,6 +663,8 @@ title: CLIP - local: model_doc/clipseg title: CLIPSeg + - local: model_doc/clvp + title: CLVP - local: model_doc/data2vec title: Data2Vec - local: model_doc/deplot diff --git a/docs/source/en/index.md b/docs/source/en/index.md index 8aa372391d19..f32c5eccdb9f 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -92,6 +92,7 @@ Flax), PyTorch, and/or TensorFlow. | [CLAP](model_doc/clap) | ✅ | ❌ | ❌ | | [CLIP](model_doc/clip) | ✅ | ✅ | ✅ | | [CLIPSeg](model_doc/clipseg) | ✅ | ❌ | ❌ | +| [CLVP](model_doc/clvp) | ✅ | ❌ | ❌ | | [CodeGen](model_doc/codegen) | ✅ | ❌ | ❌ | | [CodeLlama](model_doc/code_llama) | ✅ | ❌ | ❌ | | [Conditional DETR](model_doc/conditional_detr) | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/clvp.md b/docs/source/en/model_doc/clvp.md new file mode 100644 index 000000000000..a30269faf9ca --- /dev/null +++ b/docs/source/en/model_doc/clvp.md @@ -0,0 +1,126 @@ + + +# CLVP + +## Overview + +The CLVP (Contrastive Language-Voice Pretrained Transformer) model was proposed in [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. + +The abstract from the paper is the following: + +*In recent years, the field of image generation has been revolutionized by the application of autoregressive transformers and DDPMs. These approaches model the process of image generation as a step-wise probabilistic processes and leverage large amounts of compute and data to learn the image distribution. This methodology of improving performance need not be confined to images. This paper describes a way to apply advances in the image generative domain to speech synthesis. The result is TorToise - an expressive, multi-voice text-to-speech system.* + + +This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). +The original code can be found [here](https://github.com/neonbjb/tortoise-tts). + + +## Usage tips + +1. CLVP is an integral part of the Tortoise TTS model. +2. CLVP can be used to compare different generated speech candidates with the provided text, and the best speech tokens are forwarded to the diffusion model. +3. The use of the [`ClvpModelForConditionalGeneration.generate()`] method is strongly recommended for tortoise usage. +4. Note that the CLVP model expects the audio to be sampled at 22.05 kHz contrary to other audio models which expects 16 kHz. 
+ + +## Brief Explanation: + +- The [`ClvpTokenizer`] tokenizes the text input, and the [`ClvpFeatureExtractor`] extracts the log mel-spectrogram from the desired audio. +- [`ClvpConditioningEncoder`] takes those text tokens and audio representations and converts them into embeddings conditioned on the text and audio. +- The [`ClvpForCausalLM`] uses those embeddings to generate multiple speech candidates. +- Each speech candidate is passed through the speech encoder ([`ClvpEncoder`]) which converts them into a vector representation, and the text encoder ([`ClvpEncoder`]) converts the text tokens into the same latent space. +- At the end, we compare each speech vector with the text vector to see which speech vector is most similar to the text vector. +- [`ClvpModelForConditionalGeneration.generate()`] compresses all of the logic described above into a single method. + + +Example : + +```python +>>> import datasets +>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration + +>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library). +>>> text = "This is an example text." + +>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") +>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) +>>> sample = ds[0]["audio"] + +>>> # Define processor and model. +>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") +>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") + +>>> # Generate processor output and model output. +>>> processor_output = processor(raw_speech=sample["array"], sampling_rate=sample["sampling_rate"], text=text, return_tensors="pt") +>>> generated_output = model.generate(**processor_output) +``` + + +## ClvpConfig + +[[autodoc]] ClvpConfig + - from_sub_model_configs + +## ClvpEncoderConfig + +[[autodoc]] ClvpEncoderConfig + +## ClvpDecoderConfig + +[[autodoc]] ClvpDecoderConfig + +## ClvpTokenizer + +[[autodoc]] ClvpTokenizer + - save_vocabulary + +## ClvpFeatureExtractor + +[[autodoc]] ClvpFeatureExtractor + - __call__ + +## ClvpProcessor + +[[autodoc]] ClvpProcessor + - __call__ + - decode + - batch_decode + +## ClvpModelForConditionalGeneration + +[[autodoc]] ClvpModelForConditionalGeneration + - forward + - generate + - get_text_features + - get_speech_features + +## ClvpForCausalLM + +[[autodoc]] ClvpForCausalLM + +## ClvpModel + +[[autodoc]] ClvpModel + +## ClvpEncoder + +[[autodoc]] ClvpEncoder + +## ClvpDecoder + +[[autodoc]] ClvpDecoder + diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 05f9bc7796da..eb27314cb62a 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -256,6 +256,15 @@ "CLIPSegTextConfig", "CLIPSegVisionConfig", ], + "models.clvp": [ + "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ClvpConfig", + "ClvpDecoderConfig", + "ClvpEncoderConfig", + "ClvpFeatureExtractor", + "ClvpProcessor", + "ClvpTokenizer", + ], "models.code_llama": [], "models.codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenTokenizer"], "models.conditional_detr": ["CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig"], @@ -1458,6 +1467,17 @@ "CLIPSegVisionModel", ] ) + _import_structure["models.clvp"].extend( + [ + "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ClvpDecoder", + "ClvpEncoder", + "ClvpForCausalLM", + "ClvpModel", + "ClvpModelForConditionalGeneration", + "ClvpPreTrainedModel", + ] + ) 
_import_structure["models.codegen"].extend( [ "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -4446,6 +4466,15 @@ CLIPSegTextConfig, CLIPSegVisionConfig, ) + from .models.clvp import ( + CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP, + ClvpConfig, + ClvpDecoderConfig, + ClvpEncoderConfig, + ClvpFeatureExtractor, + ClvpProcessor, + ClvpTokenizer, + ) from .models.codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenTokenizer from .models.conditional_detr import CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer @@ -5516,6 +5545,15 @@ CLIPSegTextModel, CLIPSegVisionModel, ) + from .models.clvp import ( + CLVP_PRETRAINED_MODEL_ARCHIVE_LIST, + ClvpDecoder, + ClvpEncoder, + ClvpForCausalLM, + ClvpModel, + ClvpModelForConditionalGeneration, + ClvpPreTrainedModel, + ) from .models.codegen import ( CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST, CodeGenForCausalLM, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 81e71500a5cc..46e275e3f320 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -46,6 +46,7 @@ clap, clip, clipseg, + clvp, code_llama, codegen, conditional_detr, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index c3baabea56a2..2c27892260aa 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -57,6 +57,7 @@ ("clap", "ClapConfig"), ("clip", "CLIPConfig"), ("clipseg", "CLIPSegConfig"), + ("clvp", "ClvpConfig"), ("code_llama", "LlamaConfig"), ("codegen", "CodeGenConfig"), ("conditional_detr", "ConditionalDetrConfig"), @@ -276,6 +277,7 @@ ("clap", "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST"), ("clip", "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("clipseg", "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("clvp", "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("codegen", "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("conditional_detr", "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("convbert", "CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -481,6 +483,7 @@ ("clap", "CLAP"), ("clip", "CLIP"), ("clipseg", "CLIPSeg"), + ("clvp", "CLVP"), ("code_llama", "CodeLlama"), ("codegen", "CodeGen"), ("conditional_detr", "Conditional DETR"), diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 41c1639fbdc8..2c2699502642 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -44,6 +44,7 @@ ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), + ("clvp", "ClvpFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 5387809ca483..f4f8eab9967c 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -55,6 +55,7 @@ ("clap", "ClapModel"), ("clip", "CLIPModel"), ("clipseg", "CLIPSegModel"), + ("clvp", "ClvpModelForConditionalGeneration"), ("code_llama", "LlamaModel"), ("codegen", "CodeGenModel"), ("conditional_detr", "ConditionalDetrModel"), diff --git a/src/transformers/models/auto/processing_auto.py 
b/src/transformers/models/auto/processing_auto.py index c0b4f49893b9..84f7ba3be5bf 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -53,6 +53,7 @@ ("clap", "ClapProcessor"), ("clip", "CLIPProcessor"), ("clipseg", "CLIPSegProcessor"), + ("clvp", "ClvpProcessor"), ("flava", "FlavaProcessor"), ("fuyu", "FuyuProcessor"), ("git", "GitProcessor"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 80d2581882ce..6f983b97810b 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -121,6 +121,7 @@ "CLIPTokenizerFast" if is_tokenizers_available() else None, ), ), + ("clvp", ("ClvpTokenizer", None)), ( "code_llama", ( diff --git a/src/transformers/models/clvp/__init__.py b/src/transformers/models/clvp/__init__.py new file mode 100644 index 000000000000..fb88e24171c3 --- /dev/null +++ b/src/transformers/models/clvp/__init__.py @@ -0,0 +1,83 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "configuration_clvp": [ + "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ClvpConfig", + "ClvpDecoderConfig", + "ClvpEncoderConfig", + ], + "feature_extraction_clvp": ["ClvpFeatureExtractor"], + "processing_clvp": ["ClvpProcessor"], + "tokenization_clvp": ["ClvpTokenizer"], +} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_clvp"] = [ + "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "ClvpModelForConditionalGeneration", + "ClvpForCausalLM", + "ClvpModel", + "ClvpPreTrainedModel", + "ClvpEncoder", + "ClvpDecoder", + ] + + +if TYPE_CHECKING: + from .configuration_clvp import ( + CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP, + ClvpConfig, + ClvpDecoderConfig, + ClvpEncoderConfig, + ) + from .feature_extraction_clvp import ClvpFeatureExtractor + from .processing_clvp import ClvpProcessor + from .tokenization_clvp import ClvpTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_clvp import ( + CLVP_PRETRAINED_MODEL_ARCHIVE_LIST, + ClvpDecoder, + ClvpEncoder, + ClvpForCausalLM, + ClvpModel, + ClvpModelForConditionalGeneration, + ClvpPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/clvp/configuration_clvp.py b/src/transformers/models/clvp/configuration_clvp.py new file mode 100644 index 000000000000..3d20b5c16d5d --- /dev/null +++ b/src/transformers/models/clvp/configuration_clvp.py @@ -0,0 +1,457 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" CLVP model configuration""" + + +import os +from typing import TYPE_CHECKING, Union + + +if TYPE_CHECKING: + pass + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "susnato/clvp_dev": "https://huggingface.co/susnato/clvp_dev/resolve/main/config.json", +} + + +class ClvpEncoderConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ClvpEncoder`]. It is used to instantiate a CLVP + text or CLVP speech encoder according to the specified arguments. Instantiating a configuration with the defaults + will yield a similar configuration to that of the encoder of the CLVP + [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 256): + Vocabulary size of the CLVP Encoder model. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 1536): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + projection_dim (`int`, *optional*, defaults to 768): + Dimensionality of the projection vector. + num_hidden_layers (`int`, *optional*, defaults to 20): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the feed-forward layers in [`ClvpEncoderMLP`]. + use_rotary_embedding (`bool`, *optional*, defaults to `True`): + Whether to use rotary_embedding or not. + use_attention_bias (`bool`, *optional*, defaults to `False`): + Whether to use bias in Query, Key and Value layers during self attention. + summary_type (`str`, *optional*, defaults to `"mean"`): + What strategy to use to get pooler_output from the last_hidden_state. `"last"`, `"first"`, `"mean"` and + `"cls_index"` are supported. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization + testing). 
+ bos_token_id (`int`, *optional*, defaults to 255): + Beginning of sequence token id. + eos_token_id (`int`, *optional*, defaults to 0): + End of sequence token id. + + Example: + + ```python + >>> from transformers import ClvpEncoderConfig, ClvpEncoder + + >>> # Initializing a ClvpEncoderConfig with susnato/clvp_dev style configuration + >>> encoder_configuration = ClvpEncoderConfig() + + >>> # Initializing a ClvpEncoder (with random weights) from the susnato/clvp_dev style configuration + >>> model = ClvpEncoder(encoder_configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "clvp_encoder" + + def __init__( + self, + vocab_size=256, + hidden_size=768, + intermediate_size=1536, + projection_dim=768, + num_hidden_layers=20, + num_attention_heads=12, + hidden_act="gelu", + layer_norm_eps=1e-5, + attention_dropout=0.1, + dropout=0.1, + use_rotary_embedding=True, + use_attention_bias=False, + summary_type="mean", + initializer_factor=1.0, + bos_token_id=255, + eos_token_id=0, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.projection_dim = projection_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.dropout = dropout + self.use_rotary_embedding = use_rotary_embedding + self.use_attention_bias = use_attention_bias + self.summary_type = summary_type + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + + super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + @classmethod + def from_pretrained( + cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str = "text_config", **kwargs + ) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # make sure to have the config_type be either "text_config" or "speech_config" + # this is to make sure that we can load only text or speech configs from the nested ClvpConfig. + if config_type not in ["text_config", "speech_config"]: + raise ValueError( + f"We can only load either 'text_config' or 'speech_config' but you are trying to load" f"{config_type}" + ) + + # get the text config dict if we are loading from ClvpConfig + if config_dict.get("model_type") == "clvp": + config_dict = config_dict[config_type] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ClvpDecoderConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ClvpDecoder`]. It is used to instantiate a CLVP + Decoder Model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Decoder part of the CLVP + [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the + documentation from [`PretrainedConfig`] for more information. + + The architecture is similar to GPT2. + + Args: + vocab_size (`int`, *optional*, defaults to 8194): + Vocabulary size of the model. + max_position_embeddings (`int`, *optional*, defaults to 608): + The maximum sequence length of mel tokens that this model might ever be used with. Similar to `n_positions` + in `GPT2Config`. + max_text_tokens (`int`, *optional*, defaults to 404): + The maximum sequence length of text tokens that this model might ever be used with. Similar to + `n_positions` in `GPT2Config`. + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the embeddings and hidden states. + num_hidden_layers (`int`, *optional*, defaults to 30): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + n_inner (`int`, *optional*): + Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `hidden_size`. + num_mel_attn_blocks (`int`, *optional*, defaults to 6): + Denotes the number of self attention layers in [`ClvpConditioningEncoder`]. + activation_function (`str`, *optional*, defaults to `"gelu_new"`): + Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. + resid_pdrop (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + embd_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the embeddings. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention. + layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): + The epsilon to use in the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + summary_type (`string`, *optional*, defaults to `"cls_index"`): + Argument used when doing sequence summary. + + Has to be one of the following options: + + - `"last"`: Take the last token hidden state (like XLNet). + - `"first"`: Take the first token hidden state (like BERT). + - `"mean"`: Take the mean of all tokens hidden states. + - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). + - `"attn"`: Not implemented now, use multi-head attention. + summary_use_proj (`bool`, *optional*, defaults to `True`): + Whether or not to add a projection after the vector extraction. + summary_activation (`str`, *optional*): + Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation. + summary_proj_to_labels (`bool`, *optional*, defaults to `True`): + Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes. + summary_first_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio to be used after the projection and activation. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + bos_token_id (`int`, *optional*, defaults to 8192): + Beginning of sequence token id, used at the start of the generation. + eos_token_id (`int`, *optional*, defaults to 8193): + End of sequence token id, used in the method + [`ClvpModelForConditionalGeneration.fix_speech_decoder_output()`] to correct decoder outputs. 
+ feature_size (`int`, *optional*, defaults to 80): + The feature dimension of the extracted mel features. This value is used in [`ClvpConditioningEncoder`]. + use_attention_bias (`bool`, *optional*, defaults to `True`): + Whether to use bias in Query, Key and Value layers during self attention. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization + testing). + decoder_fixing_codes (`list`, *optional*, defaults to `[83, 45, 45, 248]`): + These values are used in the method `fix_speech_decoder_output` to fix decoder generated outputs. + + Example: + + ```python + >>> from transformers import ClvpDecoderConfig, ClvpDecoder + + >>> # Initializing a ClvpDecoderConfig with susnato/clvp_dev style configuration + >>> decoder_configuration = ClvpDecoderConfig() + + >>> # Initializing a ClvpDecoder (with random weights) from the susnato/clvp_dev style configuration + >>> model = ClvpDecoder(decoder_configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "clvp_decoder" + + def __init__( + self, + vocab_size=8194, + max_position_embeddings=608, + max_text_tokens=404, + hidden_size=1024, + num_hidden_layers=30, + num_attention_heads=16, + n_inner=None, + num_mel_attn_blocks=6, + activation_function="gelu_new", + resid_pdrop=0.1, + embd_pdrop=0.1, + attention_dropout=0.1, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + summary_type="cls_index", + summary_use_proj=True, + summary_activation=None, + summary_proj_to_labels=True, + summary_first_dropout=0.1, + use_cache=True, + bos_token_id=8192, + eos_token_id=8193, + feature_size=80, + use_attention_bias=True, + initializer_factor=1.0, + decoder_fixing_codes=[83, 45, 45, 248], + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.max_text_tokens = max_text_tokens + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.n_inner = n_inner + self.num_mel_attn_blocks = num_mel_attn_blocks + self.activation_function = activation_function + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attention_dropout = attention_dropout + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + self.summary_type = summary_type + self.summary_use_proj = summary_use_proj + self.summary_activation = summary_activation + self.summary_first_dropout = summary_first_dropout + self.summary_proj_to_labels = summary_proj_to_labels + self.use_cache = use_cache + self.feature_size = feature_size + self.use_attention_bias = use_attention_bias + self.initializer_factor = initializer_factor + self.decoder_fixing_codes = decoder_fixing_codes + + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + + super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the speech config dict if we are loading from ClvpConfig + if config_dict.get("model_type") == "clvp": + config_dict = config_dict["decoder_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + 
logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ClvpConfig(PretrainedConfig): + r""" + [`ClvpConfig`] is the configuration class to store the configuration of a [`ClvpModelForConditionalGeneration`]. It + is used to instantiate a CLVP model according to the specified arguments, defining the text model, speech model and + decoder model configs. Instantiating a configuration with the defaults will yield a similar configuration to that + of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + text_config (`dict`, *optional*): + Dictionary of configuration options used to initialize the CLVP text encoder. + speech_config (`dict`, *optional*): + Dictionary of configuration options used to initialize CLVP speech encoder. + decoder_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`ClvpDecoderConfig`]. + projection_dim (`int`, *optional*, defaults to 768): + Dimentionality of text and speech projection layers. + logit_scale_init_value (`float`, *optional*, defaults to 2.6592): + The inital value of the *logit_scale* paramter. Default is used as per the original CLVP implementation. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization + testing). + kwargs (*optional*): + Dictionary of keyword arguments. + + Example: + + ```python + >>> from transformers import ClvpConfig, ClvpModelForConditionalGeneration + + >>> # Initializing a ClvpConfig with susnato/clvp_dev style configuration + >>> configuration = ClvpConfig() + + >>> # Initializing a ClvpModelForConditionalGeneration (with random weights) from the susnato/clvp_dev style configuration + >>> model = ClvpModelForConditionalGeneration(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # We can also initialize a CLVPConfig from a CLVPTextConfig, CLVPSpeechConfig and a CLVPAutoRegressiveConfig + >>> from transformers import ClvpEncoderConfig, ClvpDecoderConfig + + >>> # Initializing a CLVP text, CLVP speech and CLVP decoder configuration + >>> config_text = ClvpEncoderConfig() + >>> config_speech = ClvpEncoderConfig() + >>> decoder_config = ClvpDecoderConfig() + + >>> config = ClvpConfig.from_sub_model_configs(config_text, config_speech, decoder_config) + ```""" + + model_type = "clvp" + is_composition = True + + def __init__( + self, + text_config=None, + speech_config=None, + decoder_config=None, + projection_dim=768, + logit_scale_init_value=2.6592, + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + + if text_config is None: + text_config = {} + logger.info("`text_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.") + + if speech_config is None: + speech_config = {} + logger.info("`speech_config` is `None`. initializing the `ClvpEncoderConfig` with default values.") + + if decoder_config is None: + decoder_config = {} + logger.info("`decoder_config` is `None`. 
initializing the `ClvpDecoderConfig` with default values.") + + self.text_config = ClvpEncoderConfig(**text_config) + self.speech_config = ClvpEncoderConfig(**speech_config) + self.decoder_config = ClvpDecoderConfig(**decoder_config) + + self.projection_dim = projection_dim + self.logit_scale_init_value = logit_scale_init_value + self.initializer_factor = initializer_factor + + @classmethod + def from_sub_model_configs( + cls, + text_config: ClvpEncoderConfig, + speech_config: ClvpEncoderConfig, + decoder_config: ClvpDecoderConfig, + **kwargs, + ): + r""" + Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model + configuration and CLVP decoder model configuration. + + Args: + text_config (`ClvpEncoderConfig`): + Text model configuration of type [`ClvpEncoderConfig`]. + speech_config (`ClvpEncoderConfig`): + Speech model configuration of type [`ClvpEncoderConfig`]. + decoder_config (`ClvpDecoderConfig`): + Decoder model configuration of type [`ClvpDecoderConfig`]. + + Returns: + [`ClvpConfig`]: An instance of a configuration object + """ + + return cls( + text_config=text_config.to_dict(), + speech_config=speech_config.to_dict(), + decoder_config=decoder_config.to_dict(), + **kwargs, + ) diff --git a/src/transformers/models/clvp/convert_clvp_to_hf.py b/src/transformers/models/clvp/convert_clvp_to_hf.py new file mode 100644 index 000000000000..4ae6fd425497 --- /dev/null +++ b/src/transformers/models/clvp/convert_clvp_to_hf.py @@ -0,0 +1,234 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
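+#
+# Rough usage sketch (not an official recipe; both paths below are placeholders you must point at your own
+# directories). The script downloads the original Tortoise checkpoints listed in `_MODELS` into
+# `--checkpoint_path` if they are missing, converts their weights, and writes a Hugging Face model to
+# `--pytorch_dump_folder_path`:
+#
+#     python convert_clvp_to_hf.py \
+#         --checkpoint_path /path/to/original_checkpoints \
+#         --pytorch_dump_folder_path /path/to/converted_clvp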
+ +""" +Weights conversion script for CLVP +""" + +import argparse +import os + +import torch +from huggingface_hub import hf_hub_download + +from transformers import ClvpConfig, ClvpModelForConditionalGeneration + + +_MODELS = { + "clvp": "https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/clvp2.pth", + "decoder": "https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/autoregressive.pth", +} + +dim = 1024 +sub_dim = dim // 16 + +CLVP_ENCODERS_MAPPING = { + "text_transformer.transformer.attn_layers": "text_encoder_model", + "speech_transformer.transformer.attn_layers": "speech_encoder_model", + "text_transformer.transformer.norm": "text_encoder_model.final_layer_norm", + "speech_transformer.transformer.norm": "speech_encoder_model.final_layer_norm", + "to_text_latent": "text_encoder_model.projection", + "to_speech_latent": "speech_encoder_model.projection", + "text_emb": "text_encoder_model.token_embedding", + "speech_emb": "speech_encoder_model.token_embedding", + "1.wrap.net.0": "mlp.fc1", + "1.wrap.net.3": "mlp.fc2", + "1.wrap": "self_attn", + "to_out": "out_proj", + "to_q": "q_proj", + "to_k": "k_proj", + "to_v": "v_proj", + "temperature": "logit_scale", +} + +CLVP_DECODER_MAPPING = { + "conditioning_encoder.init": "conditioning_encoder.mel_conv", + "conditioning_encoder.attn": "conditioning_encoder.mel_attn_blocks", + "mel_attn_blocks": "group_norms", + ".norm.weight": ".weight", + ".norm.bias": ".bias", + "text_embedding": "conditioning_encoder.text_token_embedding", + "text_pos_embedding.emb": "conditioning_encoder.text_position_embedding", + "final_norm": "speech_decoder_model.final_norm", + "mel_head": "speech_decoder_model.lm_head", + "gpt.ln_f": "speech_decoder_model.model.decoder.layer_norm", + "mel_embedding": "speech_decoder_model.model.decoder.input_embeds_layer", + "mel_pos_embedding.emb": "speech_decoder_model.model.decoder.position_embeds_layer", + "gpt.h": "speech_decoder_model.model.decoder.layers", + "ln_1": "input_layernorm", + "ln_2": "post_attention_layernorm", +} + + +def update_index(present_index): + if present_index % 2 == 0: + return int(present_index / 2) + else: + return int((present_index - 1) / 2) + + +def convert_encoder_weights(original_weights): + converted_weights = {} + original_weights_keys = sorted(original_weights.keys()) + for original_key in original_weights_keys: + updated_key = original_key + # for input_rmsnorm.weight and post_attention_rmsnorm.weight + if "0.0.g" in updated_key: + present_index = updated_key.split(".")[4] + if int(present_index) % 2 == 0: + updated_key = updated_key.replace("0.0.g", "input_rmsnorm.weight") + else: + updated_key = updated_key.replace("0.0.g", "post_attention_rmsnorm.weight") + + if "transformer.attn_layers.layers" in updated_key: + present_index = updated_key.split(".")[4] + updated_index = update_index(int(present_index)) + updated_key = updated_key.replace( + f"transformer.attn_layers.layers.{present_index}", f"transformer.attn_layers.layers.{updated_index}" + ) + + for k, v in CLVP_ENCODERS_MAPPING.items(): + if k in updated_key: + updated_key = updated_key.replace(k, v) + + converted_weights[updated_key] = original_weights.pop(original_key) + + return converted_weights + + +def convert_decoder_weights(original_weights): + converted_weights = {} + original_weights_keys = sorted(original_weights.keys()) + for original_key in original_weights_keys: + updated_key = original_key + if len(updated_key.split(".")) > 3: + index, attr = updated_key.split(".")[2], updated_key.split(".")[-1] + 
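+            # `index` is the layer number embedded in the original checkpoint key (e.g. `gpt.h.<index>.` or
+            # `conditioning_encoder.attn.<index>.`) and `attr` is typically "weight" or "bias"; both are reused
+            # by the per-layer remappings below.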
+ # for decoder attention + if "attn.c_attn" in updated_key: + if attr == "weight": + slice1, slice2, slice3 = original_weights[updated_key].squeeze(-1).T.split(split_size=dim, dim=0) + else: + slice1, slice2, slice3 = original_weights[updated_key].split(split_size=dim, dim=0) + converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.q_proj.{attr}"] = slice1 + converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.k_proj.{attr}"] = slice2 + converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.v_proj.{attr}"] = slice3 + continue + + if "attn.c_proj" in updated_key: + converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.out_proj.{attr}"] = ( + original_weights[updated_key].squeeze(-1).T + ) + continue + + if "attn.bias" in updated_key or "attn.masked_bias" in updated_key or "text_head" in updated_key: + original_weights.pop(updated_key) + continue + + # conditional encoder attention + if "qkv" in updated_key: + if attr == "weight": + slice1, slice2, slice3 = original_weights[updated_key].squeeze(-1).split(split_size=dim, dim=0) + else: + slice1, slice2, slice3 = original_weights[updated_key].split(split_size=dim, dim=0) + + indices = torch.arange(dim) + index1, index2, index3 = ( + indices.unfold(0, sub_dim, sub_dim * 3).flatten(), + indices[sub_dim:].unfold(0, sub_dim, sub_dim * 3).flatten(), + indices[2 * sub_dim :].unfold(0, sub_dim, sub_dim * 3).flatten(), + ) + + converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.q_proj.{attr}"] = torch.concatenate( + [slice1[index1], slice2[index3], slice3[index2]], + axis=0, + ) + converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.k_proj.{attr}"] = torch.concatenate( + [slice1[index2], slice2[index1], slice3[index3]], + axis=0, + ) + converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.v_proj.{attr}"] = torch.concatenate( + [slice1[index3], slice2[index2], slice3[index1]], + axis=0, + ) + continue + + if "proj_out" in updated_key: + converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.out_proj.{attr}"] = original_weights[ + updated_key + ].squeeze(-1) + continue + + for k, v in CLVP_DECODER_MAPPING.items(): + if k in updated_key: + updated_key = updated_key.replace(k, v) + + converted_weights[updated_key] = original_weights.pop(original_key) + + return converted_weights + + +def _download(url: str, root: str): + repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}" + filename = f"{url.split('/')[-2]}/{url.split('/')[-1]}" + hf_hub_download( + repo_id=repo_id, + filename=filename, + force_filename=root, + local_dir_use_symlinks=False, + ) + + +def convert_clvp_weights(checkpoint_path, pytorch_dump_folder_path): + converted_checkpoint = {} + + for each_model_name, each_model_url in _MODELS.items(): + each_model_path = os.path.join(checkpoint_path, each_model_url.split("/")[-1]) + if not os.path.exists(each_model_path): + print(f"\n{each_model_name} was not found! 
Downloading it to {each_model_path}") + _download(url=each_model_url, root=each_model_path) + + if each_model_name == "clvp": + clvp_checkpoint = torch.load(each_model_path, map_location="cpu") + else: + decoder_checkpoint = torch.load(each_model_path, map_location="cpu") + + # Converting the weights + converted_checkpoint.update(**convert_encoder_weights(clvp_checkpoint)) + converted_checkpoint.update(**convert_decoder_weights(decoder_checkpoint)) + + config = ClvpConfig.from_pretrained("susnato/clvp_dev") + model = ClvpModelForConditionalGeneration(config) + + model.load_state_dict(converted_checkpoint, strict=True) + model.save_pretrained(pytorch_dump_folder_path) + print(f"Model saved at {pytorch_dump_folder_path}!") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # # Required parameters + parser.add_argument( + "--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)" + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + help="Path to the output PyTorch model. (Please enter full path)", + ) + args = parser.parse_args() + + convert_clvp_weights(args.checkpoint_path, args.pytorch_dump_folder_path) diff --git a/src/transformers/models/clvp/feature_extraction_clvp.py b/src/transformers/models/clvp/feature_extraction_clvp.py new file mode 100644 index 000000000000..69741a03f575 --- /dev/null +++ b/src/transformers/models/clvp/feature_extraction_clvp.py @@ -0,0 +1,238 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Feature extractor class for CLVP +""" + +from typing import List, Optional, Union + +import numpy as np + +from ...audio_utils import mel_filter_bank, spectrogram, window_function +from ...feature_extraction_sequence_utils import SequenceFeatureExtractor +from ...feature_extraction_utils import BatchFeature +from ...utils import TensorType, logging + + +logger = logging.get_logger(__name__) + + +class ClvpFeatureExtractor(SequenceFeatureExtractor): + r""" + Constructs a CLVP feature extractor. + + This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains + most of the main methods. Users should refer to this superclass for more information regarding those methods. + + This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the `Short + Time Fourier Transform` which should match pytorch's `torch.stft` equivalent. + + Args: + feature_size (`int`, *optional*, defaults to 80): + The feature dimension of the extracted features. + sampling_rate (`int`, *optional*, defaults to 22050): + The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). + default_audio_length (`int`, *optional*, defaults to 6): + The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will + automatically be set to default_audio_length * `self.sampling_rate`. 
+ hop_length (`int`, *optional*, defaults to 256): + Length of the overlaping windows for the STFT used to obtain the Mel Frequency coefficients. + chunk_length (`int`, *optional*, defaults to 30): + The maximum number of chuncks of `sampling_rate` samples used to trim and pad longer or shorter audio + sequences. + n_fft (`int`, *optional*, defaults to 1024): + Size of the Fourier transform. + padding_value (`float`, *optional*, defaults to 0.0): + Padding value used to pad the audio. Should correspond to silences. + mel_norms (`list` of length `feature_size`, *optional*): + If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each + mel-filter. + return_attention_mask (`bool`, *optional*, defaults to `False`): + Whether to return the attention mask. If left to the default, it will return the attention mask. + + [What are attention masks?](../glossary#attention-mask) + """ + + model_input_names = ["input_features", "attention_mask"] + + def __init__( + self, + feature_size=80, + sampling_rate=22050, + default_audio_length=6, + hop_length=256, + chunk_length=30, + n_fft=1024, + padding_value=0.0, + mel_norms=None, + return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask + **kwargs, + ): + super().__init__( + feature_size=feature_size, + sampling_rate=sampling_rate, + padding_value=padding_value, + return_attention_mask=return_attention_mask, + **kwargs, + ) + self.n_fft = n_fft + self.hop_length = hop_length + self.chunk_length = chunk_length + self.n_samples = chunk_length * sampling_rate + self.nb_max_frames = self.n_samples // hop_length + self.sampling_rate = sampling_rate + self.default_audio_length = default_audio_length + self.mel_norms = mel_norms + self.mel_filters = mel_filter_bank( + num_frequency_bins=1 + (n_fft // 2), + num_mel_filters=feature_size, + min_frequency=0.0, + max_frequency=8000.0, + sampling_rate=sampling_rate, + norm="slaney", + mel_scale="htk", + ) + + def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray: + """ + This method first computes the log-mel spectrogram of the provided audio then applies normalization along the + each mel-filterbank, if `mel_norms` is provided. + """ + log_spec = spectrogram( + waveform, + window_function(self.n_fft, "hann"), + frame_length=self.n_fft, + hop_length=self.hop_length, + power=2.0, + mel_filters=self.mel_filters, + log_mel=None, + ) + + log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None)) + + if self.mel_norms is not None: + log_spec = log_spec / np.array(self.mel_norms)[:, None] + + return log_spec + + def __call__( + self, + raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], + sampling_rate: Optional[int] = None, + truncation: bool = True, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_attention_mask: Optional[bool] = True, + padding: Optional[str] = "max_length", + max_length: Optional[int] = None, + **kwargs, + ) -> BatchFeature: + """ + `ClvpFeatureExtractor` is used to extract various voice specific properties such as the pitch and tone of the + voice, speaking speed, and even speaking defects like a lisp or stuttering from a sample voice or `raw_speech`. + + First the voice is padded or truncated in a way such that it becomes a waveform of `self.default_audio_length` + seconds long and then the log-mel spectrogram is extracted from it. 
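+
+        Example (a minimal sketch, not from the original documentation; it uses a synthetic waveform and the
+        development checkpoint referenced throughout this file):
+
+        ```python
+        >>> import numpy as np
+        >>> from transformers import ClvpFeatureExtractor
+
+        >>> feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")
+
+        >>> # One second of silence at the expected 22.05 kHz sampling rate.
+        >>> dummy_speech = np.zeros(22050, dtype=np.float32)
+        >>> inputs = feature_extractor(raw_speech=dummy_speech, sampling_rate=22050, return_tensors="pt")
+        >>> input_features = inputs["input_features"]  # expected layout: (batch_size, feature_size, num_frames)
+        ```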
+ + Args: + raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): + The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float + values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not + stereo, i.e. single float per timestep. + sampling_rate (`int`, *optional*): + The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass + `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition + pipeline. + truncation (`bool`, *optional*, default to `True`): + Activates truncation to cut input sequences longer than *max_length* to *max_length*. + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. + return_attention_mask (`bool`, *optional*, defaults to `True`): + Whether to return the attention mask. If left to the default, it will return the attention mask. + + [What are attention masks?](../glossary#attention-mask) + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + padding_value (`float`, defaults to 0.0): + The value that is used to fill the padding values / vectors. + max_length (`int`, *optional*): + The maximum input length of the inputs. + """ + + if sampling_rate is not None: + if sampling_rate != self.sampling_rate: + raise ValueError( + f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" + f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" + f" was sampled with {self.sampling_rate} and not {sampling_rate}." + ) + else: + logger.warning( + "It is strongly recommended to pass the `sampling_rate` argument to this function. " + "Failing to do so can result in silent errors that might be hard to debug." 
+ ) + + is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 + if is_batched_numpy and len(raw_speech.shape) > 2: + raise ValueError(f"Only mono-channel audio is supported for input to {self}") + is_batched = is_batched_numpy or ( + isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) + ) + + if is_batched: + raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] + elif not is_batched and not isinstance(raw_speech, np.ndarray): + raw_speech = np.asarray(raw_speech, dtype=np.float32) + elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): + raw_speech = raw_speech.astype(np.float32) + + # always return batch + if not is_batched: + raw_speech = [np.asarray([raw_speech]).T] + + batched_speech = BatchFeature({"input_features": raw_speech}) + + max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length + + padded_inputs = self.pad( + batched_speech, + padding=padding, + max_length=max_length, + truncation=truncation, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + # make sure list is in array format + input_features = padded_inputs.get("input_features").transpose(2, 0, 1) + + input_features = [ + self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0] + ] + + if isinstance(input_features[0], List): + padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features] + else: + padded_inputs["input_features"] = input_features + + return padded_inputs.convert_to_tensors(return_tensors) diff --git a/src/transformers/models/clvp/modeling_clvp.py b/src/transformers/models/clvp/modeling_clvp.py new file mode 100644 index 000000000000..045ac33ffb82 --- /dev/null +++ b/src/transformers/models/clvp/modeling_clvp.py @@ -0,0 +1,1945 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" PyTorch CLVP model.""" + + +import copy +import math +from dataclasses import dataclass +from typing import Dict, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...generation import GenerationConfig +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPooling, + CausalLMOutputWithCrossAttentions, +) +from ...modeling_utils import PreTrainedModel, SequenceSummary +from ...pytorch_utils import Conv1D +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_clvp import ( + ClvpConfig, + ClvpDecoderConfig, + ClvpEncoderConfig, +) + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "susnato/clvp_dev" + +CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "susnato/clvp_dev", + # See all Clvp models at https://huggingface.co/models?filter=clvp +] + + +# Copied from transformers.models.clip.modeling_clip.contrastive_loss +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) + + +# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->clvp, image_loss->speech_loss +def clvp_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + speech_loss = contrastive_loss(similarity.t()) + return (caption_loss + speech_loss) / 2.0 + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +@dataclass +class ClvpEncoderOutput(ModelOutput): + """ + Base class for CLVP encoder's outputs that contains a pooling of the last hidden states as well as a projection + output (a linear layer on top of the pooled output). + + Args: + embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`): + The embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + The hidden state of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Pooled output of the `last_hidden_state`. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of + the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + pooler_output: Optional[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class ClvpOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for speech-text similarity. + speech_ids (`torch.LongTensor`, *optional*): + speech_ids (or speech candidates) generated by the `ClvpForCausalLM` model. + logits_per_speech (`torch.FloatTensor` of shape `(speech_batch_size, text_batch_size)`): + The scaled dot product scores between `speech_embeds` and `text_embeds`. This represents the speech-text + similarity scores. + logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, speech_batch_size)`): + The scaled dot product scores between `text_embeds` and `speech_embeds`. This represents the text-speech + similarity scores. + text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of the text encoder + model. + speech_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): + The speech embeddings obtained by applying the projection layer to the pooled output of the speech encoder + model. + text_model_output (`BaseModelOutputWithPooling`): + The pooled output of the `last_hidden_state` of the text encoder Model. + speech_model_output (`BaseModelOutputWithPooling`): + The pooled output of the `last_hidden_state` of the speech encoder Model. 
+ decoder_hidden_states (`torch.FloatTensor`, *optional*): + The hidden states of the decoder model. + text_encoder_hidden_states (`torch.FloatTensor`, *optional*): + The hidden states of the text encoder model. + speech_encoder_hidden_states (`torch.FloatTensor`, *optional*): + The hidden states of the speech encoder model. + """ + + loss: Optional[torch.FloatTensor] = None + speech_ids: Optional[torch.LongTensor] = None + logits_per_speech: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + speech_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPooling = None + speech_model_output: BaseModelOutputWithPooling = None + decoder_hidden_states: torch.FloatTensor = None + text_encoder_hidden_states: torch.FloatTensor = None + speech_encoder_hidden_states: torch.FloatTensor = None + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Clvp +class ClvpRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + ClvpRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +class ClvpRotaryPositionalEmbedding(nn.Module): + """ + Rotary Position Embedding Class for CLVP. It was proposed in the paper 'ROFORMER: ENHANCED TRANSFORMER WITH ROTARY + POSITION EMBEDDING', Please see https://arxiv.org/pdf/2104.09864v1.pdf . + """ + + def __init__(self, config): + super().__init__() + dim = max(config.projection_dim // (config.num_attention_heads * 2), 32) + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) + + self.register_buffer("inv_freq", inv_freq) + self.cached_sequence_length = None + self.cached_rotary_positional_embedding = None + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + sequence_length = hidden_states.shape[1] + + if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None: + return self.cached_rotary_positional_embedding + + self.cached_sequence_length = sequence_length + time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq) + freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq) + embeddings = torch.cat((freqs, freqs), dim=-1) + + self.cached_rotary_positional_embedding = embeddings.unsqueeze(0) + return self.cached_rotary_positional_embedding + + +class ClvpSelfAttention(nn.Module): + """ + Multi-headed attention to combine Absolute and Rotary Positional Embeddings into a single Attention module. + """ + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + if hasattr(config, "max_position_embeddings"): + max_positions = config.max_position_embeddings + bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)) + bias = bias.view(1, 1, max_positions, max_positions) + self.register_buffer("bias", bias, persistent=False) + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + # Copied from transformers.models.clip.modeling_clip.CLIPAttention._shape + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.FloatTensor, + rotary_pos_emb: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + use_cache: Optional[bool] = False, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]: + # Raise error when position_ids is None but rotary_pos_emb is provided, because we need that when applying + # rotary_pos_emb to query and key states. + if rotary_pos_emb is not None and position_ids is None: + raise ValueError("`position_ids` must be provided when `rotary_pos_emb` is not None.") + + bsz, _, embed_dim = hidden_states.size() + + # get query proj + query_states = self._shape(self.q_proj(hidden_states), -1, bsz) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if past_key_value is not None: + past_key, past_value = past_key_value + key_states = torch.cat((past_key, key_states), dim=-2) + value_states = torch.cat((past_value, value_states), dim=-2) + + if use_cache is True: + present = (key_states, value_states) + else: + present = None + + if rotary_pos_emb is not None: + rotary_emb_dim = rotary_pos_emb.shape[-1] + + # Partial rotary embedding + query_rot, query_pass = ( + query_states[..., :rotary_emb_dim], + query_states[..., rotary_emb_dim:], + ) + key_rot, key_pass = ( + key_states[..., :rotary_emb_dim], + key_states[..., rotary_emb_dim:], + ) + + cos, sin = rotary_pos_emb.cos().squeeze(0), rotary_pos_emb.sin().squeeze(0) + query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) + + # [batch_size, num_heads, seq_length, head_dim] + query_states = torch.cat((query_rot, query_pass), dim=-1) + key_states = torch.cat((key_rot, key_pass), dim=-1) + + tgt_len = query_states.shape[2] + src_len = key_states.shape[2] + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_probs = 
nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + attn_output = torch.matmul(attn_probs, value_states) + + if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, present, attn_weights + + +class ClvpGatedLinearUnit(nn.Module): + """ + `ClvpGatedLinearUnit` uses the second half of the `hidden_states` to act as a gate for the first half of the + `hidden_states` which controls the flow of data from the first of the tensor. + """ + + def __init__(self, config): + super().__init__() + self.activation_fn = ACT2FN[config.hidden_act] + self.proj = nn.Linear(config.hidden_size, config.intermediate_size * 2) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1) + return hidden_states * self.activation_fn(gate) + + +class ClvpEncoderMLP(nn.Module): + """ + This MLP is used in CLVP speech or text encoder models. + """ + + def __init__(self, config): + super().__init__() + self.config = config + + self.fc1 = ClvpGatedLinearUnit(config) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout_layer = nn.Dropout(config.dropout) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.dropout_layer(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class ClvpEncoderLayer(nn.Module): + def __init__(self, config: ClvpConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.self_attn = ClvpSelfAttention(config) + self.mlp = ClvpEncoderMLP(config) + + self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.FloatTensor, + rotary_pos_emb: torch.FloatTensor, + attention_mask: torch.LongTensor, + position_ids: torch.LongTensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`): + input to the layer. + rotary_pos_emb (`torch.FloatTensor`): + rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module. + attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`): + attention mask where padding elements are indicated by very large negative values. + position_ids (`torch.LongTensor`): + Denotes position ids of the input tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + residual = hidden_states + + hidden_states = self.input_rmsnorm(hidden_states) + + attention_outputs = self.self_attn( + hidden_states=hidden_states, + rotary_pos_emb=rotary_pos_emb, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + ) + + hidden_states = attention_outputs[0] + + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.post_attention_rmsnorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attention_outputs[-1],) + + return outputs + + +# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->ClvpDecoderMLP +class ClvpDecoderMLP(nn.Module): + def __init__(self, intermediate_size, config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = Conv1D(intermediate_size, embed_dim) + self.c_proj = Conv1D(embed_dim, intermediate_size) + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(config.resid_pdrop) + + def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class ClvpDecoderLayer(nn.Module): + def __init__(self, config): + super().__init__() + hidden_size = config.hidden_size + inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size + + self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = ClvpSelfAttention(config) + self.post_attention_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + self.mlp = ClvpDecoderMLP(inner_dim, config) + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + attn_outputs = self.attn( + hidden_states, + past_key_value=past_key_value, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] + outputs = attn_outputs[1:] + # residual connection + hidden_states = attn_output + residual + + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + + return outputs + + +class ClvpConditioningEncoder(nn.Module): + """ + This class processes the log-mel spectrograms(extracted by the Feature Extractor) and text tokens(produced by the + tokenizer) as inputs for the decoder model. 
+
+ First, each log-mel spectrogram is processed into a single vector which captures valuable characteristics from
+ it; then the text tokens are converted into token embeddings and position embeddings are added afterwards.
+ Both of these vectors are concatenated and then passed to the decoder model.
+
+ The text tokens help to incorporate the "text information", while the log-mel spectrogram injects the
+ "voice characteristics" into the generated mel tokens.
+ """
+
+ def __init__(self, config: ClvpConfig):
+ super().__init__()
+
+ self.text_config = config.text_config
+ self.decoder_config = config.decoder_config
+
+ self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size)
+ self.text_position_embedding = nn.Embedding(
+ self.decoder_config.max_text_tokens, self.decoder_config.hidden_size
+ )
+
+ self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1)
+
+ # define group norms to be used before each attention layer
+ num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size)
+ self.group_norms = nn.ModuleList(
+ [
+ nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-5, affine=True)
+ for _ in range(self.decoder_config.num_mel_attn_blocks)
+ ]
+ )
+
+ # define the attention layers
+ self.mel_attn_blocks = nn.ModuleList(
+ [ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)]
+ )
+
+ self.gradient_checkpointing = False
+
+ def compute_groupnorm_groups(self, channels: int, groups: int = 32):
+ """
+ Calculates the value of `num_groups` for nn.GroupNorm. This logic is taken from the official tortoise
+ repository. Link:
+ https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
+ """
+ if channels <= 16:
+ groups = 8
+ elif channels <= 64:
+ groups = 16
+ while channels % groups != 0:
+ groups = int(groups / 2)
+
+ if groups <= 2:
+ raise ValueError(
+ f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}. "
+ "Please consider using a different `hidden_size`."
+ )
+
+ return groups
+
+ def forward(
+ self,
+ input_features: torch.FloatTensor,
+ input_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ):
+ # process text
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ # We add bos and eos input_ids in the modeling file instead of the tokenizer file to keep the logic simple
+ # This logic is specific to ClvpConditioningEncoder and not used by other modules.
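+ # For example, token ids [t1, ..., tn] become [bos, t1, ..., tn, eos]; if an attention mask was passed,
+ # it is padded with 1s below so that its length keeps matching the padded input_ids.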
+ input_ids = torch.nn.functional.pad(input_ids, (1, 0), value=self.text_config.bos_token_id) + input_ids = torch.nn.functional.pad(input_ids, (0, 1), value=self.text_config.eos_token_id) + batch_size, seq_length = input_ids.size() + inputs_embeds = self.text_token_embedding(input_ids) + # check if we need to update attention mask, if yes then pad it too + if attention_mask is not None and attention_mask.shape[1] != seq_length: + attention_mask = torch.nn.functional.pad(attention_mask, (1, 0), value=1) + attention_mask = torch.nn.functional.pad(attention_mask, (0, 1), value=1) + elif inputs_embeds is not None: + batch_size, seq_length = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + # construct attention mask if not given + if attention_mask is None: + attention_mask = torch.ones([batch_size, seq_length], dtype=torch.long, device=inputs_embeds.device) + + position_ids = attention_mask.cumsum(-1) - 1 + position_embeds = self.text_position_embedding(position_ids) + text_embeds = inputs_embeds + position_embeds + + if self.gradient_checkpointing and self.training: + # process each log-mel spectrogram into a single vector + mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features) + + for i, mel_attn_block in enumerate(self.mel_attn_blocks): + residual_mel_spec = mel_spec.transpose(1, 2) + + mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2) + mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec + mel_spec = mel_spec.transpose(1, 2) + + else: + # process each log-mel spectrogram into a single vector + mel_spec = self.mel_conv(input_features) + + for i, mel_attn_block in enumerate(self.mel_attn_blocks): + residual_mel_spec = mel_spec.transpose(1, 2) + + mel_spec = self.group_norms[i](mel_spec).transpose(1, 2) + mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec + mel_spec = mel_spec.transpose(1, 2) + + mel_spec = mel_spec[:, :, 0] + mel_spec = mel_spec.unsqueeze(1) + + # repeat if there is either (1 text vs N audios) or (N texts vs 1 audio) + if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1: + text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1) + elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1: + mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1) + # If there is N texts and M audios we will raise error since the number of text and audio must be same. + elif text_embeds.shape[0] != mel_spec.shape[0]: + raise ValueError( + f"The number of texts and number of audios must be same. " + f"Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios" + ) + + return torch.concat([mel_spec, text_embeds], dim=1) + + +class ClvpPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = ClvpConfig + base_model_prefix = "clvp" + supports_gradient_checkpointing = True + _skip_keys_device_placement = "past_key_values" + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor + if isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=factor * 0.02) + elif isinstance(module, (nn.Linear, Conv1D, nn.Conv1d)): + module.weight.data.normal_(mean=0.0, std=factor * 0.02) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, ClvpEncoderMLP): + factor = self.config.initializer_factor + in_proj_std = ( + (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + ) + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.proj.weight if getattr(module.fc1, "proj") else module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + elif isinstance(module, ClvpEncoder): + config = self.config.text_config if hasattr(self.config, "text_config") else self.config + factor = config.initializer_factor + module.projection.weight.data.normal_(mean=0.0, std=factor * (config.hidden_size**-0.5)) + elif isinstance(module, ClvpConditioningEncoder): + module.mel_conv.weight.data.normal_(mean=0.0, std=factor) + module.mel_conv.bias.data.zero_() + elif isinstance(module, ClvpForCausalLM): + for name, p in module.named_parameters(): + if name == "c_proj.weight": + p.data.normal_( + mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers)) + ) + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +CLVP_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`ClvpConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +CLVP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`): + Indicates log mel-spectrogram representations for audio returned by [`ClvpFeatureExtractor`]. + conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*): + inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`. + text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*): + inputs_embeds for the text encoder model passed in place of `input_ids`. 
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +CLVP_DECODER_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`): + Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see + `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have + their past given to this model should not be passed as `input_ids` as they have already been computed. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for + `past_key_values`. In other words, the `attention_mask` always has to have the length: + `len(past_key_values) + len(input_ids)` + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. 
+ + If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see + `past_key_values`). + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class ClvpEncoder(ClvpPreTrainedModel): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`ClvpEncoderLayer`]. + + Args: + config: ClvpConfig + """ + + def __init__(self, config: ClvpConfig): + super().__init__(config) + + self.config = config + self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size) + self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None + self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + + self.sequence_summary = SequenceSummary(config) + self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) + + self.gradient_checkpointing = False + + self.post_init() + + def get_input_embeddings(self): + return self.token_embedding + + def set_input_embeddings(self, value): + self.token_embedding = value + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + input embeddings for the model. This bypasses the model's internal embedding lookup matrix. + attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor`, *optional*): + Denotes the position ids of `input_ids`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. 
+ return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + inputs_embeds = self.token_embedding(input_ids) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + # expand attention_mask and create position_ids if needed + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = torch.utils.checkpoint.checkpoint( + encoder_layer.__call__, + hidden_states, + rotary_pos_emb, + attention_mask, + position_ids, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + rotary_pos_emb, + attention_mask, + position_ids, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + last_hidden_state = hidden_states + last_hidden_state = self.final_layer_norm(last_hidden_state) + + # take the mean over axis 1 and get pooled output + pooled_output = self.sequence_summary(last_hidden_state) + + # apply the projection layer + embeds = self.projection(pooled_output) + + if not return_dict: + return tuple( + v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None + ) + + return ClvpEncoderOutput( + embeds=embeds, + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_states, + attentions=all_attentions, + ) + + +class ClvpDecoder(ClvpPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`ClvpDecoderLayer`] + """ + + def __init__(self, config): + super().__init__(config) + + self.config = config + + self.input_embeds_layer = nn.Embedding(self.config.vocab_size, self.config.hidden_size) + self.position_embeds_layer = nn.Embedding(self.config.max_position_embeddings, self.config.hidden_size) + + self.drop = nn.Dropout(self.config.embd_pdrop) + self.layers = nn.ModuleList([ClvpDecoderLayer(self.config) for _ in range(self.config.num_hidden_layers)]) + self.layer_norm = nn.LayerNorm(self.config.hidden_size, eps=self.config.layer_norm_epsilon) + + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.input_embeds_layer + + def set_input_embeddings(self, new_embeddings): + self.input_embeds_layer = new_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + """ + for layer, heads in heads_to_prune.items(): + self.layers[layer].attn.prune_heads(heads) + + @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + input_ids.shape[0] + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + inputs_embeds.shape[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_key_values_length = 0 + past_key_values = tuple([None] * len(self.layers)) + else: + past_key_values_length = past_key_values[0][0].size(-2) + if position_ids is None: + position_ids = torch.arange( + past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) + + if inputs_embeds is None: + inputs_embeds = self.input_embeds_layer(input_ids) + position_embeds = self.position_embeds_layer(position_ids) + inputs_embeds = inputs_embeds + position_embeds + + attention_mask = 
_prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x num_attention_heads x N x N + # head_mask has shape num_hidden_layers x batch x num_attention_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + hidden_states = inputs_embeds + + if token_type_ids is not None: + token_type_embeds = self.input_embeds_layer(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_hidden_states = () if output_hidden_states else None + for i, (block, past_key_value) in enumerate(zip(self.layers, past_key_values)): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + outputs = torch.utils.checkpoint.checkpoint( + block.__call__, + hidden_states, + None, + attention_mask, + position_ids, + head_mask[i], + ) + else: + outputs = block( + hidden_states, + past_key_value=past_key_value, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask[i], + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) + + hidden_states = self.layer_norm(hidden_states) + + hidden_states = hidden_states.view(output_shape) + + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] + if v is not None + ) + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare Clvp decoder model outputting raw hidden-states without any specific head on top.", + CLVP_START_DOCSTRING, +) +class ClvpModel(ClvpPreTrainedModel): + def __init__(self, config: ClvpDecoderConfig): + super().__init__(config) + self.config = config + self.decoder = ClvpDecoder(self.config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.decoder.input_embeds_layer + + def set_input_embeddings(self, value): + self.decoder.input_embeds_layer = value + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: 
Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + hidden_states=decoder_outputs.hidden_states, + attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + "The CLVP decoder model with a language modelling head on top.", + CLVP_START_DOCSTRING, +) +class ClvpForCausalLM(ClvpPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.config = config + self.model = ClvpModel(self.config) + + self.final_norm = nn.LayerNorm(self.config.hidden_size) + self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.decoder.input_embeds_layer + + def set_input_embeddings(self, new_embeddings): + self.model.decoder.input_embeds_layer = new_embeddings + + def _prepare_model_inputs( + self, + inputs: Optional[torch.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: + """ + This function extracts the model-specific `inputs` for generation. + """ + input_name = self.main_input_name + + model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None} + + inputs_kwarg = model_kwargs.pop(input_name, None) + if inputs_kwarg is not None and inputs is not None: + raise ValueError( + f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed." + f"Make sure to either pass {inputs} or {input_name}=..." 
+ )
+ elif inputs_kwarg is not None:
+ inputs = inputs_kwarg
+
+ if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
+ model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
+ inputs, bos_token_id, model_kwargs=model_kwargs
+ )
+ inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
+
+ # Check whether conditioning_embeds are provided; if so, append the embedding of the bos_token_id at the end of the conditioning_embeds.
+ # Then subtract the positional embeddings here, because the forward pass will add them again, so we must cancel them out.
+ conditioning_embeds = model_kwargs.get("conditioning_embeds", None)
+
+ if conditioning_embeds is not None:
+ mel_start_token_embedding = self.model.decoder.input_embeds_layer(
+ torch.full(
+ (conditioning_embeds.shape[0], 1),
+ fill_value=self.config.bos_token_id,
+ device=conditioning_embeds.device,
+ )
+ )
+ mel_start_token_embedding += self.model.decoder.position_embeds_layer(
+ torch.full((conditioning_embeds.shape[0], 1), fill_value=0, device=conditioning_embeds.device)
+ )
+ conditioning_embeds = torch.concat([conditioning_embeds, mel_start_token_embedding], dim=1)
+
+ # subtract the positional_ids here
+ if hasattr(model_kwargs, "attention_mask"):
+ position_ids = model_kwargs["attention_mask"].long().cumsum(-1) - 1
+ else:
+ position_ids = torch.arange(
+ 0, conditioning_embeds.shape[1], dtype=torch.long, device=conditioning_embeds.device
+ )
+ position_ids = position_ids.unsqueeze(0).repeat(conditioning_embeds.shape[0], 1)
+
+ model_kwargs["inputs_embeds"] = conditioning_embeds - self.model.decoder.position_embeds_layer(
+ position_ids
+ )
+ model_kwargs["input_ids"] = (
+ torch.ones((model_kwargs["inputs_embeds"].shape[0], 1), dtype=torch.long, device=self.device)
+ * self.config.bos_token_id
+ )
+
+ return model_kwargs["inputs_embeds"], "inputs_embeds", model_kwargs
+
+ inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
+ return inputs, input_name, model_kwargs
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, inputs_embeds=None, conditioning_embeds=None, **kwargs
+ ):
+ input_ids_length = input_ids.shape[-1]
+ token_type_ids = kwargs.get("token_type_ids", None)
+ # only keep the last token of input_ids if past is defined in kwargs
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
+
+ attention_mask = kwargs.get("attention_mask", None)
+ position_ids = kwargs.get("position_ids", None)
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -1].unsqueeze(-1)
+ else:
+ position_ids = None
+
+ if conditioning_embeds is not None and past_key_values is not None:
+ position_ids = torch.tensor([input_ids_length], dtype=torch.long, device=input_ids.device)
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
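+ # On the first step the conditioning embeddings prepared in `_prepare_model_inputs` arrive here as
+ # `inputs_embeds`; once a cache exists, generation falls back to `input_ids` only.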
+ model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "token_type_ids": token_type_ids, + } + ) + return model_inputs + + @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + lm_logits = self.final_norm(hidden_states) + lm_logits = self.lm_head(lm_logits) + + loss = None + if labels is not None: + labels = labels.to(lm_logits.device) + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + @staticmethod + # Copied from transformers.models.gpt2.modeling_gpt2.GPT2LMHeadModel._reorder_cache + def _reorder_cache( + past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor + ) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. 
+ """ + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past_key_values + ) + + +@add_start_docstrings( + "The composite CLVP model with a text encoder, speech encoder and speech decoder model." + "The speech decoder model generates the speech_ids from the text and the text encoder and speech encoder works" + "together to filter out the best speech_ids.", + CLVP_START_DOCSTRING, +) +class ClvpModelForConditionalGeneration(ClvpPreTrainedModel): + config_class = ClvpConfig + + def __init__(self, config: ClvpConfig): + super().__init__(config) + + if not isinstance(config.text_config, ClvpEncoderConfig): + raise ValueError( + "config.text_config is expected to be of type `ClvpEncoderConfig` but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.speech_config, ClvpEncoderConfig): + raise ValueError( + "config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type" + f" {type(config.speech_config)}." + ) + + if not isinstance(config.decoder_config, ClvpDecoderConfig): + raise ValueError( + "config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type" + f" {type(config.decoder_config)}." + ) + + self.conditioning_encoder = ClvpConditioningEncoder(config) + + self.speech_decoder_model = ClvpForCausalLM(config.decoder_config) + + self.text_encoder_model = ClvpEncoder(config.text_config) + self.speech_encoder_model = ClvpEncoder(config.speech_config) + + self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) + + # Initialize weights and apply final processing + self.post_init() + + # taken from the original repo, + # link : https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/api.py#L117 + def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor: + """ + This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the + last few tokens of each sequence. + + Args: + speech_ids (`torch.LongTensor`): + This refers to the output of the decoder model. + """ + decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes + speech_ids = speech_ids[:, 1:] + if torch.isin(self.speech_decoder_model.config.eos_token_id, speech_ids): + speech_ids = torch.nn.functional.pad( + speech_ids, pad=(0, 1), value=self.speech_decoder_model.config.eos_token_id + ) + + stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0) + speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0]) + + for i, each_seq_stop_token_index in enumerate(stop_token_indices): + # This means that no stop tokens were found so the sentence was still being generated, in that case we don't need + # to apply any padding so just skip to the next sequence of tokens. 
+ if each_seq_stop_token_index.sum() == 0: + continue + + stm = each_seq_stop_token_index.argmax() + speech_ids[i, stm:] = decoder_fixing_codes[0] + if stm - 3 < speech_ids.shape[1]: + speech_ids[i, -3:] = torch.tensor( + [decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long + ) + + return speech_ids + + def get_text_features( + self, + input_ids: Optional[torch.LongTensor] = None, + text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + ) -> torch.FloatTensor: + r""" + This method can be used to extract text_embeds from a text. The text embeddings obtained by applying the + projection layer to the pooled output of the CLVP text encoder model. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + [What are input IDs?](../glossary#input-ids) + text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*): + inputs_embeds for the text encoder model passed in place of `input_ids`. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Returns: + `torch.FloatTensor` of shape `(batch_size, output_dim)`: + The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text + Model. + + Examples: + + ```python + >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration + + >>> # Define the Text + >>> text = "This is an example text." + + >>> # Define processor and model + >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") + >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") + + >>> # Generate processor output and text embeds + >>> processor_output = processor(text=text, return_tensors="pt") + >>> text_embeds = model.get_text_features(input_ids=processor_output["input_ids"]) + ``` + """ + + outputs = self.text_encoder_model( + input_ids=input_ids, + inputs_embeds=text_encoder_inputs_embeds, + attention_mask=attention_mask, + ) + + return outputs[0] + + def get_speech_features( + self, + speech_ids: Optional[torch.LongTensor] = None, + input_ids: Optional[torch.LongTensor] = None, + input_features: Optional[torch.FloatTensor] = None, + conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + generation_config: Optional[GenerationConfig] = None, + **kwargs, + ) -> torch.FloatTensor: + r""" + This method can be used to extract speech_embeds. The speech embeddings are obtained by applying the speech + model on speech_ids. If speech_ids is not present but both input_ids and input_features are given then the + decoder model will be used to first generate the speech_ids and then applying the speech model. + + Args: + speech_ids (`torch.LongTensor` of shape `(batch_size, num_speech_ids)`, *optional*): + Speech Tokens. Padding will be ignored by default should you provide it. If speech_ids are provided + then input_ids and input_features will be automatically ignored. + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Input text Tokens. Processed from the [`ClvpTokenizer`]. 
If speech_ids is not provided, then input_ids + and input_features will be used. + input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*): + Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`]. If + speech_ids is not provided, then input_ids and input_features will be used. + conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*): + inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`. + attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding speech token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + generation_config (`GenerationConfig`, *optional*): + generation config to control the generation of speech_ids if they are not provided. + + Returns: + `torch.FloatTensor` of shape `(batch_size, output_dim)`: + The speech embeddings obtained by applying the projection layer to the pooled output of the CLVP Speech + Model. + + Examples: + + ```python + >>> import datasets + >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration + + >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library) + >>> text = "This is an example text." + >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) + >>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() + + >>> # Define processor and model + >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") + >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") + + >>> # Generate processor output and model output + >>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt") + >>> speech_embeds = model.get_speech_features( + ... input_ids=processor_output["input_ids"], input_features=processor_output["input_features"] + ... ) + ``` + """ + + if speech_ids is None: + if (input_ids is None and conditioning_encoder_inputs_embeds is None) or input_features is None: + raise ValueError( + "Either speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided." 
+ ) + + if generation_config is None: + generation_config = self.generation_config + generation_config.update(**kwargs) + + conditioning_embeds = self.conditioning_encoder( + input_features=input_features, + input_ids=input_ids, + inputs_embeds=conditioning_encoder_inputs_embeds, + attention_mask=attention_mask, + ) + + speech_ids = self.speech_decoder_model.generate( + conditioning_embeds=conditioning_embeds, + generation_config=generation_config, + ) + + speech_ids = self.fix_speech_decoder_output(speech_ids[0]) + + outputs = self.speech_encoder_model( + input_ids=speech_ids, + attention_mask=attention_mask, + ) + + return outputs[0] + + @add_start_docstrings_to_model_forward(CLVP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ClvpOutput, config_class=ClvpConfig) + def forward( + self, + input_ids: torch.LongTensor = None, + input_features: torch.FloatTensor = None, + conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None, + text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + return_loss: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = False, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, ClvpOutput]: + r""" + Returns: + + Examples: + + ```python + >>> import datasets + >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration + + >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library) + >>> text = "This is an example text." + + >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) + >>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() + + >>> # Define processor and model + >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") + >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") + + >>> # processor outputs and model outputs + >>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt") + >>> outputs = model( + ... input_ids=processor_output["input_ids"], + ... input_features=processor_output["input_features"], + ... return_dict=True, + ... ) + ``` + """ + + # Use CLVP model's config for some fields (if specified) instead of those of speech & text components. 
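+ # Overall flow below: conditioning encoder -> speech decoder (token logits) -> argmax to speech_ids ->
+ # speech encoder and text encoder -> L2-normalized embeddings -> scaled cosine-similarity logits.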
+ output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + conditioning_embeds = self.conditioning_encoder( + input_features=input_features, + input_ids=input_ids, + inputs_embeds=conditioning_encoder_inputs_embeds, + attention_mask=attention_mask, + ) + + decoder_outputs = self.speech_decoder_model( + inputs_embeds=conditioning_embeds, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + speech_ids = decoder_outputs[0] + + # since we will get the embeds of shape `(batch_size, seq_len, embedding_dim)` during the forward pass + # we must convert it to tokens, to make it compaitable with speech_transformer + if speech_ids.ndim == 3: + speech_ids = speech_ids.argmax(2) + speech_ids = self.fix_speech_decoder_output(speech_ids) + + speech_outputs = self.speech_encoder_model( + input_ids=speech_ids, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_encoder_model( + input_ids=input_ids, + inputs_embeds=text_encoder_inputs_embeds, + attention_mask=attention_mask, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + speech_embeds = speech_outputs[0] + text_embeds = text_outputs[0] + + # normalized features + speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale + logits_per_speech = logits_per_text.t() + + loss = None + if return_loss: + loss = clvp_loss(logits_per_text) + + if not return_dict: + output = ( + logits_per_speech, + logits_per_text, + text_embeds, + speech_embeds, + text_outputs[2], + speech_outputs[2], + ) + if output_hidden_states: + output += ( + decoder_outputs[-1], + text_outputs[-1], + speech_outputs[-1], + ) + + return ((loss,) + output) if loss is not None else output + + return ClvpOutput( + loss=loss, + logits_per_speech=logits_per_speech, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + speech_embeds=speech_embeds, + text_model_output=text_outputs[2], + speech_model_output=speech_outputs[2], + decoder_hidden_states=decoder_outputs.hidden_states, + text_encoder_hidden_states=text_outputs.hidden_states, + speech_encoder_hidden_states=speech_outputs.hidden_states, + ) + + @torch.no_grad() + def generate( + self, + input_ids: torch.LongTensor = None, + input_features: torch.FloatTensor = None, + attention_mask: Optional[torch.LongTensor] = None, + generation_config: Optional[GenerationConfig] = None, + output_hidden_states: Optional[bool] = None, + **kwargs, + ): + """ + Generate method for `ClvpModelForConditionalGeneration`, this method calls the `generate` method of + `ClvpForCausalLM` and then uses those generated `speech_ids` to process `text_embeds` and `speech_embeds` using + `ClvpEncoder`. + + Args: + input_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Input text Tokens. Processed from the [`ClvpTokenizer`]. + input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*): + Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`]. 
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of decoder model, text encoder and speech encoder models. + + Returns: + `ClvpOutput` or tuple: A `ClvpOutput` (if `return_dict_in_generate=True` or when + `config.return_dict_in_generate=True`) or a tuple. + """ + if generation_config is None: + generation_config = self.generation_config + + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + generation_config.validate() + self._validate_model_kwargs(model_kwargs.copy()) + + conditioning_embeds = self.conditioning_encoder( + input_features=input_features, + input_ids=input_ids, + attention_mask=attention_mask, + ) + + decoder_outputs = self.speech_decoder_model.generate( + conditioning_embeds=conditioning_embeds, + generation_config=generation_config, + output_hidden_states=output_hidden_states, + return_dict=generation_config.return_dict_in_generate, + ) + if isinstance(decoder_outputs, ModelOutput): + speech_ids = decoder_outputs.sequences + speech_ids = self.fix_speech_decoder_output(speech_ids) + + speech_outputs = self.speech_encoder_model( + input_ids=speech_ids, + output_hidden_states=output_hidden_states, + return_dict=generation_config.return_dict_in_generate, + ) + text_outputs = self.text_encoder_model( + input_ids=input_ids, + attention_mask=attention_mask, + output_hidden_states=output_hidden_states, + return_dict=generation_config.return_dict_in_generate, + ) + + speech_embeds = speech_outputs[0] + text_embeds = text_outputs[0] + + # normalized features + speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale + logits_per_speech = logits_per_text.t() + + if not generation_config.return_dict_in_generate: + output = ( + speech_ids, + logits_per_speech, + logits_per_text, + text_embeds, + speech_embeds, + text_outputs[2], + speech_outputs[2], + ) + if output_hidden_states: + output += ( + decoder_outputs[-1], + text_outputs[-1], + speech_outputs[-1], + ) + + return output + + return ClvpOutput( + speech_ids=speech_ids, + logits_per_speech=logits_per_speech, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + speech_embeds=speech_embeds, + text_model_output=text_outputs[2], + speech_model_output=speech_outputs[2], + 
decoder_hidden_states=decoder_outputs.hidden_states, + text_encoder_hidden_states=text_outputs.hidden_states, + speech_encoder_hidden_states=speech_outputs.hidden_states, + ) diff --git a/src/transformers/models/clvp/number_normalizer.py b/src/transformers/models/clvp/number_normalizer.py new file mode 100644 index 000000000000..86aa087e8139 --- /dev/null +++ b/src/transformers/models/clvp/number_normalizer.py @@ -0,0 +1,238 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""English Normalizer class for CLVP.""" + + +import re + + +class EnglishNormalizer: + def __init__(self): + # List of (regular expression, replacement) pairs for abbreviations: + self._abbreviations = [ + (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) + for x in [ + ("mrs", "misess"), + ("mr", "mister"), + ("dr", "doctor"), + ("st", "saint"), + ("co", "company"), + ("jr", "junior"), + ("maj", "major"), + ("gen", "general"), + ("drs", "doctors"), + ("rev", "reverend"), + ("lt", "lieutenant"), + ("hon", "honorable"), + ("sgt", "sergeant"), + ("capt", "captain"), + ("esq", "esquire"), + ("ltd", "limited"), + ("col", "colonel"), + ("ft", "fort"), + ] + ] + + self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] + self.teens = [ + "ten", + "eleven", + "twelve", + "thirteen", + "fourteen", + "fifteen", + "sixteen", + "seventeen", + "eighteen", + "nineteen", + ] + self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"] + + def number_to_words(self, num: int) -> str: + """ + Converts numbers(`int`) to words(`str`). + + Please note that it only supports upto - "'nine hundred ninety-nine quadrillion, nine hundred ninety-nine + trillion, nine hundred ninety-nine billion, nine hundred ninety-nine million, nine hundred ninety-nine + thousand, nine hundred ninety-nine'" or `number_to_words(999_999_999_999_999_999)`. 
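        Example (illustrative; the expected outputs are derived from the branching below):

        ```python
        >>> normalizer = EnglishNormalizer()
        >>> normalizer.number_to_words(45)
        'forty-five'
        >>> normalizer.number_to_words(1234)
        'one thousand, two hundred thirty-four'
        ```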
+ """ + if num == 0: + return "zero" + elif num < 0: + return "minus " + self.number_to_words(abs(num)) + elif num < 10: + return self.ones[num] + elif num < 20: + return self.teens[num - 10] + elif num < 100: + return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "") + elif num < 1000: + return ( + self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "") + ) + elif num < 1_000_000: + return ( + self.number_to_words(num // 1000) + + " thousand" + + (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "") + ) + elif num < 1_000_000_000: + return ( + self.number_to_words(num // 1_000_000) + + " million" + + (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "") + ) + elif num < 1_000_000_000_000: + return ( + self.number_to_words(num // 1_000_000_000) + + " billion" + + (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "") + ) + elif num < 1_000_000_000_000_000: + return ( + self.number_to_words(num // 1_000_000_000_000) + + " trillion" + + (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "") + ) + elif num < 1_000_000_000_000_000_000: + return ( + self.number_to_words(num // 1_000_000_000_000_000) + + " quadrillion" + + ( + ", " + self.number_to_words(num % 1_000_000_000_000_000) + if num % 1_000_000_000_000_000 != 0 + else "" + ) + ) + else: + return "number out of range" + + def convert_to_ascii(self, text: str) -> str: + """ + Converts unicode to ascii + """ + return text.encode("ascii", "ignore").decode("utf-8") + + def _expand_dollars(self, m: str) -> str: + """ + This method is used to expand numerical dollar values into spoken words. + """ + match = m.group(1) + parts = match.split(".") + if len(parts) > 2: + return match + " dollars" # Unexpected format + + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = "dollar" if dollars == 1 else "dollars" + cent_unit = "cent" if cents == 1 else "cents" + return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit) + elif dollars: + dollar_unit = "dollar" if dollars == 1 else "dollars" + return "%s %s" % (dollars, dollar_unit) + elif cents: + cent_unit = "cent" if cents == 1 else "cents" + return "%s %s" % (cents, cent_unit) + else: + return "zero dollars" + + def _remove_commas(self, m: str) -> str: + """ + This method is used to remove commas from sentences. + """ + return m.group(1).replace(",", "") + + def _expand_decimal_point(self, m: str) -> str: + """ + This method is used to expand '.' into spoken word ' point '. + """ + return m.group(1).replace(".", " point ") + + def _expand_ordinal(self, num: str) -> str: + """ + This method is used to expand ordinals such as '1st', '2nd' into spoken words. 
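        Example (illustrative; uses the same regex that `normalize_numbers` applies below):

        ```python
        >>> import re
        >>> normalizer = EnglishNormalizer()
        >>> re.sub(r"[0-9]+(st|nd|rd|th)", normalizer._expand_ordinal, "the 4th of July")
        'the fourth of July'
        ```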
+ """ + ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"} + + num = int(num.group(0)[:-2]) + if 10 <= num % 100 and num % 100 <= 20: + suffix = "th" + else: + suffix = ordinal_suffixes.get(num % 10, "th") + return self.number_to_words(num) + suffix + + def _expand_number(self, m: str) -> str: + """ + This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository, + link : + https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86) + """ + num = int(m.group(0)) + + if num > 1000 and num < 3000: + if num == 2000: + return "two thousand" + elif num > 2000 and num < 2010: + return "two thousand " + self.number_to_words(num % 100) + elif num % 100 == 0: + return self.number_to_words(num // 100) + " hundred" + else: + return self.number_to_words(num) + else: + return self.number_to_words(num) + + def normalize_numbers(self, text: str) -> str: + """ + This method is used to normalize numbers within a text such as converting the numbers to words, removing + commas, etc. + """ + text = re.sub(re.compile(r"([0-9][0-9\,]+[0-9])"), self._remove_commas, text) + text = re.sub(re.compile(r"£([0-9\,]*[0-9]+)"), r"\1 pounds", text) + text = re.sub(re.compile(r"\$([0-9\.\,]*[0-9]+)"), self._expand_dollars, text) + text = re.sub(re.compile(r"([0-9]+\.[0-9]+)"), self._expand_decimal_point, text) + text = re.sub(re.compile(r"[0-9]+(st|nd|rd|th)"), self._expand_ordinal, text) + text = re.sub(re.compile(r"[0-9]+"), self._expand_number, text) + return text + + def expand_abbreviations(self, text: str) -> str: + """ + Expands the abbreviate words. + """ + for regex, replacement in self._abbreviations: + text = re.sub(regex, replacement, text) + return text + + def collapse_whitespace(self, text: str) -> str: + """ + Removes multiple whitespaces + """ + return re.sub(re.compile(r"\s+"), " ", text) + + def __call__(self, text): + """ + Converts text to ascii, numbers / number-like quantities to their spelt-out counterparts and expands + abbreviations + """ + + text = self.convert_to_ascii(text) + text = text.lower() + text = self.normalize_numbers(text) + text = self.expand_abbreviations(text) + text = self.collapse_whitespace(text) + text = text.replace('"', "") + + return text diff --git a/src/transformers/models/clvp/processing_clvp.py b/src/transformers/models/clvp/processing_clvp.py new file mode 100644 index 000000000000..cf4bd4de8f78 --- /dev/null +++ b/src/transformers/models/clvp/processing_clvp.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Processor class for CLVP +""" + + +from ...processing_utils import ProcessorMixin + + +class ClvpProcessor(ProcessorMixin): + r""" + Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor. + + [`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. 
See the + [`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information. + + Args: + feature_extractor (`ClvpFeatureExtractor`): + An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input. + tokenizer (`ClvpTokenizer`): + An instance of [`ClvpTokenizer`]. The tokenizer is a required input. + """ + feature_extractor_class = "ClvpFeatureExtractor" + tokenizer_class = "ClvpTokenizer" + model_input_names = [ + "input_ids", + "input_features", + "attention_mask", + ] + + def __init__(self, feature_extractor, tokenizer): + super().__init__(feature_extractor, tokenizer) + + def __call__(self, *args, **kwargs): + """ + Forwards the `audio` and `sampling_rate` arguments to [`~ClvpFeatureExtractor.__call__`] and the `text` + argument to [`~ClvpTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more + information. + """ + + raw_speech = kwargs.pop("raw_speech", None) + sampling_rate = kwargs.pop("sampling_rate", None) + text = kwargs.pop("text", None) + + if raw_speech is None and text is None: + raise ValueError("You need to specify either an `raw_speech` or `text` input to process.") + + if raw_speech is not None: + inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs) + if text is not None: + encodings = self.tokenizer(text, **kwargs) + + if text is None: + return inputs + elif raw_speech is None: + return encodings + else: + inputs["input_ids"] = encodings["input_ids"] + inputs["attention_mask"] = encodings["attention_mask"] + return inputs + + # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer + to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the + docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/clvp/tokenization_clvp.py b/src/transformers/models/clvp/tokenization_clvp.py new file mode 100644 index 000000000000..f09245f94be8 --- /dev/null +++ b/src/transformers/models/clvp/tokenization_clvp.py @@ -0,0 +1,379 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
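A note on the processor defined above: `ClvpProcessor.__call__` routes `raw_speech` and `sampling_rate` to the feature extractor and `text` to the tokenizer, then merges the two outputs into a single dict. A minimal usage sketch, assuming the `susnato/clvp_dev` checkpoint referenced throughout this PR and a placeholder waveform:

```python
import numpy as np

from transformers import ClvpProcessor  # requires a transformers build that includes this model

processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")

# One second of placeholder audio at the 22.05 kHz sampling rate used in the docstring examples above.
speech = (0.01 * np.random.randn(22050)).astype(np.float32)

inputs = processor(raw_speech=speech, sampling_rate=22050, text="This is an example text.", return_tensors="pt")

# The merged dict matches `model_input_names`: text ids and mask plus the log-mel features.
print(sorted(inputs.keys()))  # typically: ['attention_mask', 'input_features', 'input_ids']
```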
+"""Tokenization class for CLVP.""" + +import json +import os +from functools import lru_cache +from typing import List, Optional, Tuple + +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging +from .number_normalizer import EnglishNormalizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", +} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "clvp_dev": "https://huggingface.co/susnato/clvp_dev/blob/main/vocab.json", + }, + "merges_file": { + "clvp_dev": "https://huggingface.co/susnato/clvp_dev/blob/main/merges.txt", + }, +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "clvp_dev": 1024, +} + + +@lru_cache() +# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class ClvpTokenizer(PreTrainedTokenizer): + """ + Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ```python + >>> from transformers import ClvpTokenizer + + >>> tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev") + >>> tokenizer("Hello world")["input_ids"] + [62, 84, 28, 2, 179, 79] + + >>> tokenizer(" Hello world")["input_ids"] + [2, 62, 84, 28, 2, 179, 79] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). + + + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. 
See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"[STOP]"`): + The end of sequence token. + pad_token (`str`, *optional*, defaults to `"[STOP]"`): + The pad token of the sequence. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (CLVP tokenizer detect beginning of words by the preceding space). + add_bos_token (`bool`, *optional*, defaults to `False`): + Whether to add `bos_token` in front of the sequence when add_special_tokens=True. + add_eos_token (`bool`, *optional*, defaults to `False`): + Whether to add `eos_token` in end of the sequence when add_special_tokens=True. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = [ + "input_ids", + "attention_mask", + ] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + unk_token="[UNK]", + bos_token="<|endoftext|>", + eos_token="[STOP]", + pad_token="[STOP]", + add_prefix_space=False, + add_bos_token=False, + add_eos_token=False, + **kwargs, + ): + bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token + + self.add_bos_token = add_bos_token + self.add_eos_token = add_eos_token + self._normalizer = None + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + super().__init__( + errors=errors, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + add_prefix_space=add_prefix_space, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + **kwargs, + ) + + @property + def vocab_size(self): + return len(self.encoder) + + @property + def normalizer(self): + if self._normalizer is None: + self._normalizer = EnglishNormalizer() + return self._normalizer + + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe + def bpe(self, token): + if token in 
self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if not self.add_bos_token: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + text = self.normalizer(text) + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + + # if the token is "Ġ" we replace it with "[SPACE]" (if "[SPACE]" is present in the vocab), otherwise we keep the "Ġ". 
+ bpe_tokens.extend( + "[SPACE]" if bpe_token == "\u0120" and "[SPACE]" in self.encoder.keys() else bpe_token + for bpe_token in self.bpe(token).split(" ") + ) + + return bpe_tokens + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + def clean_up_tokenization(self, text): + text = "".join(text) + vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys()) + + text = text.replace("[SPACE]", " ") if "[SPACE]" in vocab_tokens else text + text = text.replace("[STOP]", " ") if "[STOP]" in vocab_tokens else text + + text = text.replace(self.unk_token, "").replace(" ", " ").replace(" ", " ") + return text + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" 
+ ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 8ce211b21f87..c39b2f86add5 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -1940,6 +1940,51 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ClvpDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpModelForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/clvp/__init__.py b/tests/models/clvp/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/clvp/test_feature_extraction_clvp.py b/tests/models/clvp/test_feature_extraction_clvp.py new file mode 100644 index 000000000000..db641eaf6145 --- /dev/null +++ b/tests/models/clvp/test_feature_extraction_clvp.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import itertools +import os +import random +import tempfile +import unittest + +import numpy as np +from datasets import Audio, load_dataset + +from transformers import ClvpFeatureExtractor +from transformers.testing_utils import check_json_file_has_correct_format, require_torch, slow +from transformers.utils.import_utils import is_torch_available + +from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin + + +if is_torch_available(): + import torch + +global_rng = random.Random() + + +# Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.floats_list +def floats_list(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + values = [] + for batch_idx in range(shape[0]): + values.append([]) + for _ in range(shape[1]): + values[-1].append(rng.random() * scale) + + return values + + +@require_torch +class ClvpFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + min_seq_length=400, + max_seq_length=2000, + feature_size=10, + hop_length=160, + chunk_length=8, + padding_value=0.0, + sampling_rate=4_000, + return_attention_mask=False, + ): + self.parent = parent + self.batch_size = batch_size + self.min_seq_length = min_seq_length + self.max_seq_length = max_seq_length + self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) + self.padding_value = padding_value + self.sampling_rate = sampling_rate + self.return_attention_mask = return_attention_mask + self.feature_size = feature_size + self.chunk_length = chunk_length + self.hop_length = hop_length + + def prepare_feat_extract_dict(self): + return { + "feature_size": self.feature_size, + "hop_length": self.hop_length, + "chunk_length": self.chunk_length, + "padding_value": self.padding_value, + "sampling_rate": self.sampling_rate, + "return_attention_mask": self.return_attention_mask, + } + + # Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester.prepare_inputs_for_common + def prepare_inputs_for_common(self, equal_length=False, numpify=False): + def _flatten(list_of_lists): + return list(itertools.chain(*list_of_lists)) + + if equal_length: + speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] + else: + # make sure that inputs increase in size + speech_inputs = [ + floats_list((x, self.feature_size)) + for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) + ] + if numpify: + speech_inputs = [np.asarray(x) for x in speech_inputs] + return speech_inputs + + +@require_torch +class ClvpFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): + feature_extraction_class = ClvpFeatureExtractor + + def setUp(self): + self.feat_extract_tester = ClvpFeatureExtractionTester(self) + + def tearDown(self): + super().tearDown() + # clean-up as much as possible GPU memory occupied by PyTorch + gc.collect() + torch.cuda.empty_cache() + + # Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_from_and_save_pretrained + def test_feat_extract_from_and_save_pretrained(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] + check_json_file_has_correct_format(saved_file) + 
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + mel_1 = feat_extract_first.mel_filters + mel_2 = feat_extract_second.mel_filters + self.assertTrue(np.allclose(mel_1, mel_2)) + self.assertEqual(dict_first, dict_second) + + # Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_to_json_file + def test_feat_extract_to_json_file(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + json_file_path = os.path.join(tmpdirname, "feat_extract.json") + feat_extract_first.to_json_file(json_file_path) + feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + mel_1 = feat_extract_first.mel_filters + mel_2 = feat_extract_second.mel_filters + self.assertTrue(np.allclose(mel_1, mel_2)) + self.assertEqual(dict_first, dict_second) + + def test_call(self): + # Tests that all call wrap to encode_plus and batch_encode_plus + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + # create three inputs of length 800, 1000, and 1200 + speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] + np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] + + # Test feature size + input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features + self.assertTrue(input_features.ndim == 3) + self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) + + # Test not batched input + encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features + self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) + + # Test batched + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test 2-D numpy arrays are batched. 
+ speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] + np_speech_inputs = np.asarray(speech_inputs) + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test truncation required + speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)] + np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] + + speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs] + np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] + + encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad + def test_double_precision_pad(self): + import torch + + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + np_speech_inputs = np.random.rand(100, 32).astype(np.float64) + py_speech_inputs = np_speech_inputs.tolist() + + for inputs in [py_speech_inputs, np_speech_inputs]: + np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") + self.assertTrue(np_processed.input_features.dtype == np.float32) + pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") + self.assertTrue(pt_processed.input_features.dtype == torch.float32) + + def _load_datasamples(self, num_samples): + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + ds = ds.cast_column("audio", Audio(sampling_rate=22050)) + # automatic decoding with librispeech + speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] + + return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] + + @slow + def test_integration(self): + # fmt: off + EXPECTED_INPUT_FEATURES = torch.tensor( + [ + 0.9271, 1.1405, 1.4419, 1.2470, 1.2438, 1.1787, 1.0595, 1.0570, 1.1070, + 1.2205, 1.2376, 1.2997, 1.1131, 1.0843, 1.0459, 1.1858, 1.2323, 1.3582, + 1.3401, 1.3770, 1.4173, 1.3381, 1.2291, 1.0854, 1.2116, 1.1873, 1.2178, + 1.2137, 1.3001, 1.4274 + ] + ) + # fmt: on + + input_speech, sr = self._load_datasamples(1) + + feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev") + input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features + self.assertEqual(input_features.shape, (1, 80, 517)) + self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4)) diff --git a/tests/models/clvp/test_modeling_clvp.py b/tests/models/clvp/test_modeling_clvp.py new file mode 100644 index 000000000000..1b3ab79034a9 --- /dev/null +++ b/tests/models/clvp/test_modeling_clvp.py @@ -0,0 +1,640 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch Clvp model. """ + + +import gc +import tempfile +import unittest + +import datasets +import numpy as np + +from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig +from transformers.testing_utils import ( + require_torch, + slow, + torch_device, +) +from transformers.utils import is_torch_available + +from ...generation.test_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ( + ModelTesterMixin, + _config_zero_init, + ids_tensor, + random_attention_mask, +) + + +if is_torch_available(): + import torch + + from transformers import ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration + from transformers.models.clvp.modeling_clvp import CLVP_PRETRAINED_MODEL_ARCHIVE_LIST + +from transformers import ClvpFeatureExtractor, ClvpTokenizer + + +class ClvpEncoderTester: + def __init__( + self, + parent, + batch_size=2, + seq_length=7, + is_training=False, + use_input_mask=True, + use_labels=True, + vocab_size=50, + hidden_size=128, + projection_dim=16, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=32, + dropout=0.1, + attention_dropout=0.1, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.projection_dim = projection_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.initializer_range = initializer_range + self.scope = scope + self.bos_token_id = vocab_size - 1 + self.eos_token_id = vocab_size - 1 + + def get_config(self): + encoder_config = ClvpEncoderConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + projection_dim=self.projection_dim, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + initializer_range=self.initializer_range, + bos_token_id=self.bos_token_id, + eos_token_id=self.eos_token_id, + ) + + return encoder_config + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + if input_mask is not None: + batch_size, seq_length = input_mask.shape + rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) + for batch_idx, start_index in enumerate(rnd_start_indices): + input_mask[batch_idx, :start_index] = 1 + input_mask[batch_idx, start_index:] = 0 + + encoder_config = self.get_config() + + 
return encoder_config, input_ids, input_mask + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + speech_config, input_ids, input_mask = config_and_inputs + inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": input_mask.to(torch_device)} + return speech_config, inputs_dict + + def create_and_check_model(self, speech_config, input_ids, input_mask): + text_config = ClvpEncoderConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + projection_dim=self.projection_dim, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + initializer_range=self.initializer_range, + ) + text_encoder_model = ClvpEncoder(config=text_config) + text_encoder_model.to(torch_device) + text_encoder_model.eval() + with torch.no_grad(): + result = text_encoder_model(input_ids, attention_mask=input_mask) + result = text_encoder_model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim)) + + # now check with speech config + speech_encoder_model = ClvpEncoder(config=speech_config) + speech_encoder_model.to(torch_device) + speech_encoder_model.eval() + with torch.no_grad(): + result = speech_encoder_model(input_ids, attention_mask=input_mask) + result = speech_encoder_model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim)) + + +@require_torch +class ClvpEncoderTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = (ClvpEncoder,) if is_torch_available() else () + test_pruning = False + test_head_masking = False + test_torchscript = False + + def setUp(self): + self.model_tester = ClvpEncoderTester(self) + self.encoder_config_tester = ConfigTester(self, config_class=ClvpEncoderConfig, hidden_size=32) + + def tearDown(self): + super().tearDown() + # clean-up as much as possible GPU memory occupied by PyTorch + gc.collect() + torch.cuda.empty_cache() + + def test_config(self): + self.encoder_config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + @unittest.skip(reason="ClvpEncoder does not output loss") + def test_training(self): + pass + + @unittest.skip(reason="ClvpEncoder does not output loss") + def test_training_gradient_checkpointing(self): + pass + + +class ClvpDecoderTester: + def __init__( + self, + parent, + batch_size=2, + seq_length=3, + is_training=False, + vocab_size=300, + max_position_embeddings=256, + max_text_tokens=256, + use_input_mask=True, + hidden_size=128, + num_hidden_layers=2, + num_attention_heads=2, + bos_token_id=97, + eos_token_id=98, + relative_attention_num_buckets=4, + relative_attention_max_distance=16, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.max_text_tokens = max_text_tokens + self.use_input_mask = use_input_mask + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.num_hidden_layers = 
num_hidden_layers + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.relative_attention_num_buckets = relative_attention_num_buckets + self.relative_attention_max_distance = relative_attention_max_distance + + def get_config(self): + decoder_config = ClvpDecoderConfig( + vocab_size=self.vocab_size, + max_position_embeddings=self.max_position_embeddings, + max_text_tokens=self.max_text_tokens, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + bos_token_id=self.bos_token_id, + eos_token_id=self.eos_token_id, + relative_attention_num_buckets=self.relative_attention_num_buckets, + relative_attention_max_distance=self.relative_attention_max_distance, + ) + + return decoder_config + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + if input_mask is not None: + batch_size, seq_length = input_mask.shape + rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) + for batch_idx, start_index in enumerate(rnd_start_indices): + input_mask[batch_idx, :start_index] = 1 + input_mask[batch_idx, start_index:] = 0 + + decoder_config = self.get_config() + + return decoder_config, input_ids, input_mask + + def create_and_check_model(self, config, input_ids, attention_mask): + model = ClvpForCausalLM(config).to(torch_device).eval() + with torch.no_grad(): + result = model(input_ids=input_ids, attention_mask=attention_mask) + + self.parent.assertEqual(result[0].shape, (self.batch_size, self.seq_length, self.vocab_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, attention_mask = config_and_inputs + inputs_dict = { + "input_ids": input_ids.to(torch_device), + "attention_mask": attention_mask.to(torch_device), + } + return config, inputs_dict + + +@require_torch +class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else () + all_generative_model_classes = (ClvpForCausalLM,) if is_torch_available() else () + + test_pruning = False + + def setUp(self): + self.model_tester = ClvpDecoderTester(self) + self.decoder_config_tester = ConfigTester(self, config_class=ClvpDecoderConfig, hidden_size=32) + + def tearDown(self): + super().tearDown() + # clean-up as much as possible GPU memory occupied by PyTorch + gc.collect() + torch.cuda.empty_cache() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + if return_labels and model_class == ClvpForCausalLM: + inputs_dict["labels"] = torch.zeros( + [self.model_tester.batch_size, self.model_tester.seq_length], device=torch_device + ).long() + + return inputs_dict + + def test_training(self): + # we will only test the ClvpForCausalLM since it outputs loss + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + model = ClvpForCausalLM(config) + model.to(torch_device) + model.train() + inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True) + loss = model(**inputs).loss + loss.backward() + + def 
test_training_gradient_checkpointing(self): + # we will only test the ClvpForCausalLM since it outputs loss + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.use_cache = False + config.return_dict = True + + model = ClvpForCausalLM(config) + model.to(torch_device) + model.gradient_checkpointing_enable() + model.train() + inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True) + + loss = model(**inputs).loss + loss.backward() + + +class ClvpModelForConditionalGenerationTester: + def __init__(self, parent, is_training=False): + self.parent = parent + self.clvp_encoder_tester = ClvpEncoderTester(parent) + self.is_training = is_training + + def get_config(self): + decoder_config = ClvpDecoderConfig( + vocab_size=50, + max_position_embeddings=30, + max_text_tokens=30, + hidden_size=128, + num_hidden_layers=1, + num_attention_heads=2, + bos_token_id=97, + eos_token_id=98, + relative_attention_num_buckets=4, + relative_attention_max_distance=16, + ) + text_config = self.clvp_encoder_tester.get_config() + speech_config = self.clvp_encoder_tester.get_config() + speech_config.vocab_size = 300 + + return ClvpConfig.from_sub_model_configs( + text_config, + speech_config, + decoder_config, + projection_dim=16, + ) + + def prepare_config_and_inputs(self): + _, input_ids, attention_mask = self.clvp_encoder_tester.prepare_config_and_inputs() + + ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) + _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() + + feature_extractor = ClvpFeatureExtractor() + input_features = feature_extractor(raw_speech=audio, sampling_rate=sr, return_tensors="pt")[ + "input_features" + ].to(torch_device) + + config = self.get_config() + + return config, input_ids, attention_mask, input_features + + def create_and_check_model(self, config, input_ids, attention_mask, input_features): + model = ClvpModelForConditionalGeneration(config).to(torch_device).eval() + with torch.no_grad(): + result = model(input_ids=input_ids, input_features=input_features, attention_mask=attention_mask) + + self.parent.assertEqual(result.logits_per_speech.shape, (2, self.clvp_encoder_tester.batch_size)) + self.parent.assertEqual(result.logits_per_text.shape, (self.clvp_encoder_tester.batch_size, 2)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, attention_mask, input_features = config_and_inputs + inputs_dict = { + "input_ids": input_ids.to(torch_device), + "attention_mask": attention_mask.to(torch_device), + "input_features": input_features.to(torch_device), + "return_loss": False, + } + return config, inputs_dict + + +@require_torch +class ClvpModelForConditionalGenerationTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = (ClvpModelForConditionalGeneration,) if is_torch_available() else () + + test_head_masking = False + test_pruning = False + test_resize_embeddings = False + test_attention_outputs = False + test_torchscript = False + + def setUp(self): + self.model_tester = ClvpModelForConditionalGenerationTester(self) + self.clvp_config_tester = ConfigTester(self, config_class=ClvpConfig, hidden_size=32) + + def tearDown(self): + super().tearDown() + # clean-up as much as possible GPU memory occupied by PyTorch + gc.collect() + torch.cuda.empty_cache() + + def test_model(self): + config_and_inputs = 
self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + # check for decoder model, text encoder model and speech encoder model hidden states + decoder_hidden_states = outputs.decoder_hidden_states + text_encoder_hidden_states = outputs.text_encoder_hidden_states + speech_encoder_hidden_states = outputs.speech_encoder_hidden_states + + # check length of the hidden states + expected_decoder_num_layers = config.decoder_config.num_hidden_layers + 1 + self.assertEqual(len(decoder_hidden_states), expected_decoder_num_layers) + + expected_speech_encoder_num_layers = config.text_config.num_hidden_layers + 1 + self.assertEqual(len(text_encoder_hidden_states), expected_speech_encoder_num_layers) + + expected_text_encoder_num_layers = config.speech_config.num_hidden_layers + 1 + self.assertEqual(len(speech_encoder_hidden_states), expected_text_encoder_num_layers) + + # check shapes of each hidden state + + # for the decoder model we will only test the dimension because the ClvpConditioningEncoder could increase + # the sequence lengths. + self.assertEqual(decoder_hidden_states[0].shape[-1], config.decoder_config.hidden_size) + + # the testing for text encoder stays standard because we just pass the text tokens here. + self.assertListEqual( + list(text_encoder_hidden_states[0].shape[-2:]), + [self.model_tester.clvp_encoder_tester.seq_length, config.text_config.hidden_size], + ) + + # for the decoder model we will only test the dimension because the fix_decoder_outputs method could increase + # the sequence lengths by adding `decoder_fixing_codes` tokens at the end. 
+ self.assertEqual(speech_encoder_hidden_states[0].shape[-1], config.speech_config.hidden_size) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + @unittest.skip(reason="Retain_grad is tested in individual model tests") + def test_retain_grad_hidden_states_attentions(self): + pass + + @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings") + def test_model_common_attributes(self): + pass + + # override as the `logit_scale` parameter initilization is different for Clvp + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if param.requires_grad: + # check if `logit_scale` is initilized as per the original implementation + if name == "logit_scale": + expected_value = np.log(1 / 0.07) + returned_value = param.data.item() + + self.assertAlmostEqual( + returned_value, + expected_value, + delta=1e-3, + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + else: + expected_range = [0.0, 1.0] + returned_range = ((param.data.mean() * 1e9).round() / 1e9).item() + + self.assertIn( + returned_range, + expected_range, + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + def test_load_speech_text_decoder_config(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # Save ClvpConfig and check if we can load ClvpEncoderConfig from it + with tempfile.TemporaryDirectory() as tmp_dir_name: + config.save_pretrained(tmp_dir_name) + encoder_config = ClvpEncoderConfig.from_pretrained(tmp_dir_name) + self.assertDictEqual(config.text_config.to_dict(), encoder_config.to_dict()) + + # Save ClvpConfig and check if we can load ClvpDecoderConfig from it + with tempfile.TemporaryDirectory() as tmp_dir_name: + config.save_pretrained(tmp_dir_name) + decoder_config = ClvpDecoderConfig.from_pretrained(tmp_dir_name) + self.assertDictEqual(config.decoder_config.to_dict(), decoder_config.to_dict()) + + @slow + def test_model_from_pretrained(self): + for model_name in CLVP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = ClvpModelForConditionalGeneration.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# Since Clvp has a lot of different models connected with each other it's better to test each of them individually along +# with a test_full_model_integration. If the model breaks in future, it could be of a great help to identify the broken part. + + +@slow +@require_torch +class ClvpIntegrationTest(unittest.TestCase): + def setUp(self): + self.text = "This is an example text." 
+ ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) + _, self.speech_samples, self.sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() + + self.model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev").to(torch_device) + self.model.eval() + tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev") + feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev") + + tokenizer_output = tokenizer(self.text, return_tensors="pt") + self.text_tokens = tokenizer_output["input_ids"].to(torch_device) + self.input_features = feature_extractor( + raw_speech=self.speech_samples, sampling_rate=self.sr, return_tensors="pt" + )["input_features"].to(torch_device) + + def tearDown(self): + super().tearDown() + # clean-up as much as possible GPU memory occupied by PyTorch + gc.collect() + torch.cuda.empty_cache() + + def test_conditional_encoder(self): + with torch.no_grad(): + conditioning_encoder_outputs = self.model.conditioning_encoder( + input_features=self.input_features, input_ids=self.text_tokens + ).to("cpu") + + self.assertEqual( + conditioning_encoder_outputs.shape, + torch.Size((self.input_features.shape[0], 18, self.model.config.decoder_config.hidden_size)), + ) + + EXPECTED_OUTPUTS = torch.tensor( + [[-0.8582, 0.5228, 1.9944], [-0.0465, -1.1017, -0.0093], [-0.0466, -0.6030, -0.1280]] + ) + + self.assertTrue(torch.allclose(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, atol=1e-4)) + + def test_decoder_model_generate(self): + autoregressive_model_output = self.model.speech_decoder_model.generate(input_ids=self.text_tokens).cpu() + + EXPECTED_OUTPUTS = torch.tensor([[147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 9, 8193]]) + + self.assertTrue(torch.allclose(autoregressive_model_output, EXPECTED_OUTPUTS)) + + def test_text_and_speech_encoder_models(self): + # check for text embeds + text_embeds = self.model.text_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu() + + # fmt: off + EXPECTED_TEXT_EMBEDS = torch.tensor( + [ 1.8060e+00, -2.7928e+00, 3.2021e+00, -1.5673e+00, 2.3284e+00, -3.2065e+00, -1.3368e+00, 2.2322e+00, + -1.7667e+00, 4.1505e-01, 2.4119e+00, -5.8133e-03, -4.6367e+00, 1.6450e-01, 6.7459e+00, 6.6292e+00, + 1.1046e+00, 3.6196e+00, -1.0496e+01, 5.4924e+00 + ] + ) + # fmt: on + + self.assertTrue(torch.allclose(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, atol=1e-4)) + + # check for speech embeds + speech_embeds = self.model.speech_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu() + + # fmt: off + EXPECTED_SPEECH_EMBEDS = torch.tensor( + [ 4.6143, -5.5784, 0.8983, -3.9665, -0.6714, -1.0665, -1.1277, 1.5619, 2.6322, -7.2008, -2.4932, 0.3265, + -1.4738, 0.1425, 5.0825, 4.1760, -5.4708, 2.1935, -6.0044, 3.9540 + ] + ) + # fmt: on + + self.assertTrue(torch.allclose(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, atol=1e-4)) + + def test_full_model_integration(self): + full_model_output = self.model.generate( + input_ids=self.text_tokens, + input_features=self.input_features, + do_sample=False, + num_beams=4, + num_return_sequences=4, + max_new_tokens=10, + ).speech_ids.cpu() + + EXPECTED_OUTPUTS = torch.tensor([[1953, 1080, 612], [1953, 1953, 612], [1953, 612, 716]]) + + self.assertTrue(torch.allclose(full_model_output[-3:, -3:], EXPECTED_OUTPUTS)) diff --git a/tests/models/clvp/test_processor_clvp.py b/tests/models/clvp/test_processor_clvp.py new file mode 100644 index 
000000000000..f751ab92d03d --- /dev/null +++ b/tests/models/clvp/test_processor_clvp.py @@ -0,0 +1,136 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import gc +import shutil +import tempfile +import unittest + +from transformers import ClvpFeatureExtractor, ClvpProcessor, ClvpTokenizer +from transformers.testing_utils import require_torch + +from .test_feature_extraction_clvp import floats_list + + +@require_torch +class ClvpProcessorTest(unittest.TestCase): + def setUp(self): + self.checkpoint = "susnato/clvp_dev" + self.tmpdirname = tempfile.mkdtemp() + + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmpdirname) + gc.collect() + + # Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.get_tokenizer with Whisper->Clvp + def get_tokenizer(self, **kwargs): + return ClvpTokenizer.from_pretrained(self.checkpoint, **kwargs) + + # Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.get_feature_extractor with Whisper->Clvp + def get_feature_extractor(self, **kwargs): + return ClvpFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) + + # Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_save_load_pretrained_default with Whisper->Clvp + def test_save_load_pretrained_default(self): + tokenizer = self.get_tokenizer() + feature_extractor = self.get_feature_extractor() + + processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + processor.save_pretrained(self.tmpdirname) + processor = ClvpProcessor.from_pretrained(self.tmpdirname) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) + self.assertIsInstance(processor.tokenizer, ClvpTokenizer) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) + self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor) + + # Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_feature_extractor with Whisper->Clvp,processor(raw_speech->processor(raw_speech=raw_speech + def test_feature_extractor(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + raw_speech = floats_list((3, 1000)) + + input_feat_extract = feature_extractor(raw_speech, return_tensors="np") + input_processor = processor(raw_speech=raw_speech, return_tensors="np") + + for key in input_feat_extract.keys(): + self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) + + # Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_tokenizer with Whisper->Clvp + def test_tokenizer(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = ClvpProcessor(tokenizer=tokenizer, 
feature_extractor=feature_extractor) + + input_str = "This is a test string" + + encoded_processor = processor(text=input_str) + + encoded_tok = tokenizer(input_str) + + for key in encoded_tok.keys(): + self.assertListEqual(encoded_tok[key], encoded_processor[key]) + + # Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_tokenizer_decode with Whisper->Clvp + def test_tokenizer_decode(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] + + decoded_processor = processor.batch_decode(predicted_ids) + decoded_tok = tokenizer.batch_decode(predicted_ids) + + self.assertListEqual(decoded_tok, decoded_processor) + + def test_save_load_pretrained_additional_features(self): + processor = ClvpProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) + processor.save_pretrained(self.tmpdirname) + + tokenizer_add_kwargs = self.get_tokenizer(pad_token="(PAD)") + feature_extractor_add_kwargs = self.get_feature_extractor(sampling_rate=16000) + + processor = ClvpProcessor.from_pretrained( + self.tmpdirname, + pad_token="(PAD)", + sampling_rate=16000, + ) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) + self.assertIsInstance(processor.tokenizer, ClvpTokenizer) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) + self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor) + + def test_model_input_names(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + self.assertListEqual( + sorted(processor.model_input_names), + sorted(set(feature_extractor.model_input_names + tokenizer.model_input_names)), + msg="`processor` and `feature_extractor` model input names do not match", + ) diff --git a/tests/models/clvp/test_tokenization_clvp.py b/tests/models/clvp/test_tokenization_clvp.py new file mode 100644 index 000000000000..b6368887595d --- /dev/null +++ b/tests/models/clvp/test_tokenization_clvp.py @@ -0,0 +1,312 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import os +import unittest +from typing import List + +from transformers import ClvpTokenizer + +from ...test_tokenization_common import TokenizerTesterMixin, slow + + +class ClvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase): + tokenizer_class = ClvpTokenizer + test_rust_tokenizer = False + from_pretrained_kwargs = {"add_prefix_space": True} + test_seq2seq = False + test_sentencepiece_ignore_case = True + + def setUp(self): + super().setUp() + + # Adapted from Sennrich et al. 
2015 and https://github.com/rsennrich/subword-nmt + vocab = [ + "l", + "o", + "w", + "e", + "r", + "s", + "t", + "i", + "d", + "n", + "\u0120", + "\u0120l", + "\u0120n", + "\u0120lo", + "\u0120low", + "er", + "\u0120lowest", + "\u0120newer", + "\u0120wider", + "", + "<|endoftext|>", + "[SPACE]", + ] + vocab_tokens = dict(zip(vocab, range(len(vocab)))) + merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] + self.special_tokens_map = {"unk_token": ""} + + self.vocab_file = os.path.join(self.tmpdirname, "vocab.json") + self.merges_file = os.path.join(self.tmpdirname, "merges.txt") + with open(self.vocab_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps(vocab_tokens) + "\n") + with open(self.merges_file, "w", encoding="utf-8") as fp: + fp.write("\n".join(merges)) + + # Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.get_tokenizer with GPT2->Clvp + def get_tokenizer(self, **kwargs): + kwargs.update(self.special_tokens_map) + return ClvpTokenizer.from_pretrained(self.tmpdirname, **kwargs) + + # Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.get_input_output_texts + def get_input_output_texts(self, tokenizer): + input_text = "lower newer" + output_text = "lower newer" + return input_text, output_text + + # Copied from transformers.tests.models.layoutxlm.test_tokenization_layoutxlm.LayoutXLMTokenizationTest.test_add_special_tokens + def test_add_special_tokens(self): + tokenizers: List[ClvpTokenizer] = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + special_token = "[SPECIAL_TOKEN]" + special_token_box = [1000, 1000, 1000, 1000] + + tokenizer.add_special_tokens({"cls_token": special_token}) + encoded_special_token = tokenizer.encode( + [special_token], boxes=[special_token_box], add_special_tokens=False + ) + self.assertEqual(len(encoded_special_token), 1) + + decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True) + self.assertTrue(special_token not in decoded) + + # Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_rust_and_python_full_tokenizers + def test_rust_and_python_full_tokenizers(self): + if not self.test_rust_tokenizer: + return + + tokenizer = self.get_tokenizer() + rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) + + sequence = "lower newer" + + # Testing tokenization + tokens = tokenizer.tokenize(sequence, add_prefix_space=True) + rust_tokens = rust_tokenizer.tokenize(sequence) + self.assertListEqual(tokens, rust_tokens) + + # Testing conversion to ids without special tokens + ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) + rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) + self.assertListEqual(ids, rust_ids) + + # Testing conversion to ids with special tokens + rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) + ids = tokenizer.encode(sequence, add_prefix_space=True) + rust_ids = rust_tokenizer.encode(sequence) + self.assertListEqual(ids, rust_ids) + + # Testing the unknown token + input_tokens = tokens + [rust_tokenizer.unk_token] + input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19] + self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) + + # Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_padding + def test_padding(self, max_length=15): + for tokenizer, 
pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + # Simple input + s = "This is a simple input" + s2 = ["This is a simple input 1", "This is a simple input 2"] + p = ("This is a simple input", "This is a pair") + p2 = [ + ("This is a simple input 1", "This is a simple input 2"), + ("This is a simple pair 1", "This is a simple pair 2"), + ] + + # Simple input tests + self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") + + # Simple input + self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") + + # Simple input + self.assertRaises( + ValueError, + tokenizer_r.batch_encode_plus, + s2, + max_length=max_length, + padding="max_length", + ) + + # Pair input + self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") + + # Pair input + self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") + + # Pair input + self.assertRaises( + ValueError, + tokenizer_r.batch_encode_plus, + p2, + max_length=max_length, + padding="max_length", + ) + + # Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_padding_if_pad_token_set_slow + def test_padding_if_pad_token_set_slow(self): + tokenizer = ClvpTokenizer.from_pretrained(self.tmpdirname, pad_token="") + + # Simple input + s = "This is a simple input" + s2 = ["This is a simple input looooooooong", "This is a simple input"] + p = ("This is a simple input", "This is a pair") + p2 = [ + ("This is a simple input loooooong", "This is a simple input"), + ("This is a simple pair loooooong", "This is a simple pair"), + ] + + pad_token_id = tokenizer.pad_token_id + + out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np") + out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np") + out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np") + out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np") + + # s + # test single string max_length padding + self.assertEqual(out_s["input_ids"].shape[-1], 30) + self.assertTrue(pad_token_id in out_s["input_ids"]) + self.assertTrue(0 in out_s["attention_mask"]) + + # s2 + # test automatic padding + self.assertEqual(out_s2["input_ids"].shape[-1], 33) + # long slice doesn't have padding + self.assertFalse(pad_token_id in out_s2["input_ids"][0]) + self.assertFalse(0 in out_s2["attention_mask"][0]) + # short slice does have padding + self.assertTrue(pad_token_id in out_s2["input_ids"][1]) + self.assertTrue(0 in out_s2["attention_mask"][1]) + + # p + # test single pair max_length padding + self.assertEqual(out_p["input_ids"].shape[-1], 60) + self.assertTrue(pad_token_id in out_p["input_ids"]) + self.assertTrue(0 in out_p["attention_mask"]) + + # p2 + # test automatic padding pair + self.assertEqual(out_p2["input_ids"].shape[-1], 52) + # long slice pair doesn't have padding + self.assertFalse(pad_token_id in out_p2["input_ids"][0]) + self.assertFalse(0 in out_p2["attention_mask"][0]) + # short slice pair does have padding + self.assertTrue(pad_token_id in out_p2["input_ids"][1]) + self.assertTrue(0 in out_p2["attention_mask"][1]) + + # Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_special_tokens_mask_input_pairs_and_bos_token + def 
test_special_tokens_mask_input_pairs_and_bos_token(self): + # TODO: change to self.get_tokenizers() when the fast version is implemented + tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)] + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + sequence_0 = "Encode this." + sequence_1 = "This one too please." + encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) + encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False) + encoded_sequence_dict = tokenizer.encode_plus( + sequence_0, + sequence_1, + add_special_tokens=True, + return_special_tokens_mask=True, + ) + encoded_sequence_w_special = encoded_sequence_dict["input_ids"] + special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] + self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) + + filtered_sequence = [ + (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) + ] + filtered_sequence = [x for x in filtered_sequence if x is not None] + self.assertEqual(encoded_sequence, filtered_sequence) + + def test_token_type_ids(self): + tokenizer = self.get_tokenizer() + seq_0 = "Test this method." + + # We want to have sequence 0 and sequence 1 are tagged + # respectively with 0 and 1 token_ids + # (regardless of whether the model use token type ids) + # We use this assumption in the QA pipeline among other place + output = tokenizer(seq_0, return_token_type_ids=True, add_special_tokens=True) + self.assertIn(0, output["token_type_ids"]) + + def test_full_tokenizer(self): + tokenizer = ClvpTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) + text = "lower newer" + bpe_tokens = ["l", "o", "w", "er", "[SPACE]", "n", "e", "w", "er"] + tokens = tokenizer.tokenize(text, add_prefix_space=False) + self.assertListEqual(tokens, bpe_tokens) + + input_tokens = tokens + [tokenizer.unk_token] + input_bpe_tokens = [0, 1, 2, 15, 21, 9, 3, 2, 15, 19] + self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) + + @slow + def test_outputs_with_numbers(self): + text = "hello and this is an example text and I have $1000. my lucky number is 12345." + tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev") + + # fmt: off + EXPECTED_OUTPUT = [62, 84, 28, 2, 53, 2,147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 2, 53, 2, 22, + 2, 148, 2, 110, 2, 40, 206, 53, 2, 134, 84, 59, 32, 9, 2, 125, 2, 25, 34, 197, 38, 2, 27, + 231, 15, 44, 2, 54, 2, 33, 100, 25, 76, 2, 40, 206, 53, 7, 2, 40, 46, 18, 2, 21, 97, 17, + 219, 2, 87, 210, 8, 19, 22, 76, 9, + ] + # fmt: on + + self.assertListEqual(tokenizer.encode(text, add_special_tokens=False), EXPECTED_OUTPUT) + + @slow + def test_tokenizer_integration(self): + sequences = [ + "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " + "general-purpose architectures (BERT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " + "Language Understanding (NLU) and Natural Language Generation (NLG) with over multiple pretrained " + "models and deep interoperability between Jax, PyTorch and TensorFlow.", + "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " + "conditioning on both left and right context in all layers.", + "The quick brown fox jumps over the lazy dog.", + ] + + # fmt: off + expected_encoding = {'input_ids': [[144, 43, 32, 87, 26, 173, 2, 5, 87, 26, 44, 70, 2, 209, 27, 2, 55, 2, 29, 38, 51, 31, 71, 8, 144, 43, 32, 87, 26, 173, 2, 53, 2, 29, 38, 51, 31, 71, 8, 29, 46, 144, 137, 49, 8, 15, 44, 33, 6, 2, 187, 35, 83, 61, 2, 20, 50, 44, 56, 8, 29, 121, 139, 66, 2, 59, 71, 60, 18, 16, 33, 34, 175, 2, 5, 15, 44, 33, 7, 2, 89, 15, 44, 33, 14, 7, 2, 37, 25, 26, 7, 2, 17, 54, 78, 25, 15, 44, 33, 7, 2, 37, 25, 111, 33, 9, 9, 9, 6, 2, 87, 2, 27, 48, 121, 56, 2, 25, 43, 20, 34, 14, 112, 2, 97, 234, 63, 53, 52, 2, 5, 27, 25, 34, 6, 2, 53, 2, 27, 48, 121, 56, 2, 25, 43, 20, 34, 14, 112, 2, 20, 50, 44, 158, 2, 5, 27, 25, 20, 6, 2, 103, 2, 253, 2, 26, 167, 78, 29, 64, 2, 29, 46, 144, 137, 49, 2, 115, 126, 25, 32, 2, 53, 2, 126, 18, 29, 2, 41, 114, 161, 44, 109, 151, 240, 2, 67, 33, 100, 50, 2, 23, 14, 37, 7, 2, 29, 38, 51, 31, 71, 2, 53, 2, 33, 50, 32, 57, 19, 25, 69, 9], [ 15, 44, 33, 2, 54, 2, 17, 61, 22, 20, 27, 49, 2, 51, 2, 29, 46, 8, 144, 137, 2, 126, 18, 29, 2, 15, 83, 22, 46, 16, 181, 56, 2, 46, 29, 175, 86, 158, 32, 2, 154, 2, 97, 25, 14, 67, 25, 49, 2, 136, 37, 33, 2, 185, 2, 23, 28, 41, 33, 70, 2, 135, 17, 60, 107, 52, 2, 47, 2, 165, 40, 2, 64, 19, 33, 2, 53, 2, 101, 104, 2, 135, 136, 37, 33, 2, 41, 2, 108, 2, 25, 88, 173, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 42, 2, 194, 91, 24, 2, 243, 190, 2, 182, 37, 2, 23, 231, 29, 32, 2, 253, 2, 42, 2, 25, 14, 39, 38, 2, 134, 20, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], # noqa: E501 + 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], # noqa: E501 + } + # fmt: on + + self.tokenizer_integration_test_util( + sequences=sequences, expected_encoding=expected_encoding, model_name="susnato/clvp_dev", padding=True + ) diff --git a/utils/check_repo.py b/utils/check_repo.py index 181905dab9f7..d740eefed019 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -207,6 +207,8 @@ "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", + "ClvpForCausalLM", + "ClvpModel", "GroupViTTextModel", "GroupViTVisionModel", "TFCLIPTextModel", From 68afca3e699cf0d8e53598e75f21508a1bcabdd5 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Fri, 10 Nov 2023 15:22:43 +0100 Subject: [PATCH 139/268] [`AttentionMaskConverter`] ]Fix-mask-inf (#27114) * fix? * actual fix * fixups * add dataclass to the attention mask converter * refine testing suite * make sure there are no overflows * update the test --- src/transformers/modeling_attn_mask_utils.py | 26 +++++++++++++++++++- tests/test_modeling_utils.py | 6 +++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_attn_mask_utils.py b/src/transformers/modeling_attn_mask_utils.py index 434b32ce7f89..9658adc55d5c 100755 --- a/src/transformers/modeling_attn_mask_utils.py +++ b/src/transformers/modeling_attn_mask_utils.py @@ -11,11 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch +@dataclass class AttentionMaskConverter: """ A utility attention mask class that allows one to: @@ -24,6 +26,21 @@ class AttentionMaskConverter: - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores + Examples: + + ```python + >>> import torch + >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter + + >>> converter = AttentionMaskConverter(True) + >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, 5) + tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38], + [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]]) + ``` + Parameters: is_causal (`bool`): Whether the attention mask should be a uni-directional (causal) or bi-directional mask. @@ -32,6 +49,9 @@ class AttentionMaskConverter: Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. """ + is_causal: bool + sliding_window: int + def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal self.sliding_window = sliding_window @@ -112,7 +132,11 @@ def to_4d( expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( attention_mask_2d.device ) - expanded_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask + if causal_4d_mask is not None: + expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min) + + # expanded_attn_mask + causal_4d_mask can cause some overflow + expanded_4d_mask = expanded_attn_mask return expanded_4d_mask diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index 8456871df620..1885fc671b02 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -1266,6 +1266,9 @@ def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3 assert mask_4d.shape == (bsz, 1, q_len, kv_len) + # make sure there are no overflows + assert mask_4d.min() != float("-inf") + context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks @@ -1341,6 +1344,9 @@ def test_2d_to_4d_causal(self): self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + # check that the mask does not overflow on causal masked tokens + self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 0), (1, 0), (1, 1)]) + def test_2d_to_4d(self): mask_converter = AttentionMaskConverter(is_causal=False) From 00dc856233a4539ded53520db6606a4b152c30be Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 10 Nov 2023 16:19:06 +0100 Subject: [PATCH 140/268] At most 2 GPUs for CI (#27435) At most 2 GPUs Co-authored-by: ydshieh --- .github/workflows/self-nightly-scheduled.yml | 1 + .github/workflows/self-past.yml | 1 + .github/workflows/self-push.yml | 1 + .github/workflows/self-scheduled.yml | 1 + 4 files changed, 4 insertions(+) diff --git 
a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index f5d96828183e..07a6197584a4 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -19,6 +19,7 @@ env: SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} TF_FORCE_GPU_ALLOW_GROWTH: true RUN_PT_TF_CROSS_TESTS: 1 + CUDA_VISIBLE_DEVICES: 0,1 jobs: setup: diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index ad624fb2581b..568b6a9b64f8 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -30,6 +30,7 @@ env: SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} TF_FORCE_GPU_ALLOW_GROWTH: true RUN_PT_TF_CROSS_TESTS: 1 + CUDA_VISIBLE_DEVICES: 0,1 jobs: setup: diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index 15035704d0ae..423b0c8c6932 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -25,6 +25,7 @@ env: PYTEST_TIMEOUT: 60 TF_FORCE_GPU_ALLOW_GROWTH: true RUN_PT_TF_CROSS_TESTS: 1 + CUDA_VISIBLE_DEVICES: 0,1 jobs: setup: diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 09ea3af0d44c..13d6a3806985 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -23,6 +23,7 @@ env: SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} TF_FORCE_GPU_ALLOW_GROWTH: true RUN_PT_TF_CROSS_TESTS: 1 + CUDA_VISIBLE_DEVICES: 0,1 jobs: setup: From e1c3ac25515839146c93427e55941de9cee3401e Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Fri, 10 Nov 2023 20:58:30 +0530 Subject: [PATCH 141/268] Add Phi-1 and Phi-1_5 (#26170) * only dir not even init * init * tokenizer removed and reference of codegen added * modeling file updated a lot remaining app_rotary_emb * conversion script done * conversion script fixed, a lot of factoring done and most tests pass * added token_clf and extractive_QA_head * integration tests pass * flash attn tests pass! * config done * more docs in modeling file * some style fix * style and others * doc test error fix * more doc fix * some attention fixes * most fixes * style and other fixes * docs fix and config * doc fix * some comments * conversion script updated * conversion script updated * Revert "conversion script updated" This reverts commit e92378c54084ec0747041b113083d1746ecb6c7f. 
* final comments * add Phi to language_modeling.md * edit phi.md file * rebase and fix * removed phi-1.5 example * changed model_type from 'phi'->'mixformer-sequential' * small change * small change * revert \small change * changed mixformer-sequential->phi * small change * added phi-1.5 example instead of phi-1 * doc test might pass now * rebase and small change * added the dropout layer * more fixes * modified .md file * very very small doc change --- README.md | 1 + README_es.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_ru.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + docs/source/en/model_doc/phi.md | 126 ++ docs/source/en/tasks/language_modeling.md | 2 +- .../en/tasks/sequence_classification.md | 2 +- docs/source/en/tasks/token_classification.md | 2 +- src/transformers/__init__.py | 20 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 4 + .../models/auto/tokenization_auto.py | 1 + src/transformers/models/phi/__init__.py | 69 ++ .../models/phi/configuration_phi.py | 179 +++ .../models/phi/convert_phi_weights_to_hf.py | 175 +++ src/transformers/models/phi/modeling_phi.py | 1072 +++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 38 + tests/models/phi/__init__.py | 0 tests/models/phi/test_modeling_phi.py | 386 ++++++ 26 files changed, 2088 insertions(+), 3 deletions(-) create mode 100644 docs/source/en/model_doc/phi.md create mode 100644 src/transformers/models/phi/__init__.py create mode 100644 src/transformers/models/phi/configuration_phi.py create mode 100644 src/transformers/models/phi/convert_phi_weights_to_hf.py create mode 100644 src/transformers/models/phi/modeling_phi.py create mode 100644 tests/models/phi/__init__.py create mode 100644 tests/models/phi/test_modeling_phi.py diff --git a/README.md b/README.md index 7fed44a6d900..12724e60a188 100644 --- a/README.md +++ b/README.md @@ -443,6 +443,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (from ADEPT) released in a [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. +1. 
**[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. diff --git a/README_es.md b/README_es.md index 172bd5569151..5cdbc27ec791 100644 --- a/README_es.md +++ b/README_es.md @@ -418,6 +418,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (from ADEPT) released with the paper [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. +1. 
**[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. diff --git a/README_hd.md b/README_hd.md index 3616e1a84032..01937532f967 100644 --- a/README_hd.md +++ b/README_hd.md @@ -392,6 +392,7 @@ conda install -c huggingface transformers 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google की ओर से) साथ में दिया गया पेपर [लंबे इनपुट सारांश के लिए ट्रांसफ़ॉर्मरों को बेहतर तरीके से एक्सटेंड करना](https://arxiv .org/abs/2208.04347) जेसन फांग, याओ झाओ, पीटर जे लियू द्वारा। 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (दीपमाइंड से) साथ में पेपर [पर्सीवर आईओ: संरचित इनपुट और आउटपुट के लिए एक सामान्य वास्तुकला] (https://arxiv.org/abs/2107.14795) एंड्रयू जेगल, सेबेस्टियन बोरग्यूड, जीन-बैप्टिस्ट अलायराक, कार्ल डोर्श, कैटलिन इओनेस्कु, डेविड द्वारा डिंग, स्कंद कोप्पुला, डैनियल ज़ोरान, एंड्रयू ब्रॉक, इवान शेलहैमर, ओलिवियर हेनाफ, मैथ्यू एम। बोट्विनिक, एंड्रयू ज़िसरमैन, ओरिओल विनियल्स, जोआओ कैरेरा द्वारा पोस्ट किया गया। 1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (ADEPT से) Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. द्वाराअनुसंधान पत्र [blog post](https://www.adept.ai/blog/persimmon-8b) के साथ जारी किया गया +1. **[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. 
**[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (VinAI Research से) कागज के साथ [PhoBERT: वियतनामी के लिए पूर्व-प्रशिक्षित भाषा मॉडल](https://www .aclweb.org/anthology/2020.findings-emnlp.92/) डैट क्वोक गुयेन और अन्ह तुआन गुयेन द्वारा पोस्ट किया गया। 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (Google से) Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. द्वाराअनुसंधान पत्र [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) के साथ जारी किया गया 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (UCLA NLP से) साथ वाला पेपर [प्रोग्राम अंडरस्टैंडिंग एंड जेनरेशन के लिए यूनिफाइड प्री-ट्रेनिंग](https://arxiv .org/abs/2103.06333) वसी उद्दीन अहमद, सैकत चक्रवर्ती, बैशाखी रे, काई-वेई चांग द्वारा। diff --git a/README_ja.md b/README_ja.md index 60b46e917629..5935da396bf1 100644 --- a/README_ja.md +++ b/README_ja.md @@ -452,6 +452,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google から) Jason Phang, Yao Zhao, and Peter J. Liu から公開された研究論文: [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind から) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira から公開された研究論文: [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (ADEPT から) Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. から公開された研究論文 [blog post](https://www.adept.ai/blog/persimmon-8b) +1. **[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (VinAI Research から) Dat Quoc Nguyen and Anh Tuan Nguyen から公開された研究論文: [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (Google から) Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. から公開された研究論文 [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) 1. 
**[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (UCLA NLP から) Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang から公開された研究論文: [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) diff --git a/README_ko.md b/README_ko.md index 583b8cce1bc3..e0c38472cc46 100644 --- a/README_ko.md +++ b/README_ko.md @@ -367,6 +367,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google 에서) Jason Phang, Yao Zhao, Peter J. Liu 의 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 논문과 함께 발표했습니다. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind 에서) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 의 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 논문과 함께 발표했습니다. 1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (ADEPT 에서 제공)은 Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani.의 [blog post](https://www.adept.ai/blog/persimmon-8b)논문과 함께 발표했습니다. +1. **[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (VinAI Research 에서) Dat Quoc Nguyen and Anh Tuan Nguyen 의 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 논문과 함께 발표했습니다. 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (Google 에서 제공)은 Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.의 [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347)논문과 함께 발표했습니다. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (UCLA NLP 에서) Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 의 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 논문과 함께 발표했습니다. diff --git a/README_ru.md b/README_ru.md index 9945b47fc25a..cf63f7c67ef9 100644 --- a/README_ru.md +++ b/README_ru.md @@ -436,6 +436,7 @@ conda install -c huggingface transformers 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. 
**[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[Persimmon](https://huggingface.co/docs/transformers/main/model_doc/persimmon)** (from ADEPT) released in a [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. +1. **[Phi](https://huggingface.co/docs/main/transformers/model_doc/phi)** (from Microsoft Research) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. diff --git a/README_zh-hans.md b/README_zh-hans.md index d9581112a872..3d84374d5561 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -391,6 +391,7 @@ conda install -c huggingface transformers 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。 1. 
**[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (来自 ADEPT) 伴随论文 [blog post](https://www.adept.ai/blog/persimmon-8b) 由 Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani 发布。 +1. **[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (来自 Google) 伴随论文 [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) 由 Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova 发布。 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (来自 UCLA NLP) 伴随论文 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 由 Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 77384e8d17c7..c095423cce15 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -403,6 +403,7 @@ conda install -c huggingface transformers 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (from ADEPT) released with the paper [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. +1. 
**[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index c07502402ccd..4e0ce88c10af 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -424,6 +424,8 @@ title: PEGASUS-X - local: model_doc/persimmon title: Persimmon + - local: model_doc/phi + title: Phi - local: model_doc/phobert title: PhoBERT - local: model_doc/plbart diff --git a/docs/source/en/index.md b/docs/source/en/index.md index f32c5eccdb9f..ae01569e970c 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -217,6 +217,7 @@ Flax), PyTorch, and/or TensorFlow. | [PEGASUS-X](model_doc/pegasus_x) | ✅ | ❌ | ❌ | | [Perceiver](model_doc/perceiver) | ✅ | ❌ | ❌ | | [Persimmon](model_doc/persimmon) | ✅ | ❌ | ❌ | +| [Phi](model_doc/phi) | ✅ | ❌ | ❌ | | [PhoBERT](model_doc/phobert) | ✅ | ✅ | ✅ | | [Pix2Struct](model_doc/pix2struct) | ✅ | ❌ | ❌ | | [PLBart](model_doc/plbart) | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/phi.md b/docs/source/en/model_doc/phi.md new file mode 100644 index 000000000000..337502ac31d6 --- /dev/null +++ b/docs/source/en/model_doc/phi.md @@ -0,0 +1,126 @@ + + +# Phi + +## Overview + +The Phi-1 model was proposed in [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li. + +The Phi-1.5 model was proposed in [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee. + +### Summary +In Phi-1 and Phi-1.5 papers, the authors showed how important the quality of the data is in training relative to the model size. 
+They selected high-quality "textbook" data along with synthetically generated data to train Phi-1, a small
+Transformer-based model with 1.3B parameters. Despite this small scale, phi-1 attains pass@1 accuracy 50.6% on HumanEval and 55.5% on MBPP.
+They followed the same strategy for Phi-1.5 and created another 1.3B parameter model whose performance on natural language tasks is
+comparable to models 5x larger and which surpasses most non-frontier LLMs. Phi-1.5 exhibits many of the traits of much larger LLMs, such as
+the ability to “think step by step” or perform some rudimentary in-context learning.
+With these two experiments the authors showed the large impact that the quality of the training data has when training machine learning models.
+
+
+The abstract from the Phi-1 paper is the following:
+
+*We introduce phi-1, a new large language model for code, with significantly smaller size than
+competing models: phi-1 is a Transformer-based model with 1.3B parameters, trained for 4 days on
+8 A100s, using a selection of “textbook quality” data from the web (6B tokens) and synthetically
+generated textbooks and exercises with GPT-3.5 (1B tokens). Despite this small scale, phi-1 attains
+pass@1 accuracy 50.6% on HumanEval and 55.5% on MBPP. It also displays surprising emergent
+properties compared to phi-1-base, our model before our finetuning stage on a dataset of coding
+exercises, and phi-1-small, a smaller model with 350M parameters trained with the same pipeline as
+phi-1 that still achieves 45% on HumanEval.*
+
+The abstract from the Phi-1.5 paper is the following:
+
+*We continue the investigation into the power of smaller Transformer-based language models as
+initiated by TinyStories – a 10 million parameter model that can produce coherent English – and
+the follow-up work on phi-1, a 1.3 billion parameter model with Python coding performance close
+to the state-of-the-art. The latter work proposed to use existing Large Language Models (LLMs) to
+generate “textbook quality” data as a way to enhance the learning process compared to traditional
+web data. We follow the “Textbooks Are All You Need” approach, focusing this time on common
+sense reasoning in natural language, and create a new 1.3 billion parameter model named phi-1.5,
+with performance on natural language tasks comparable to models 5x larger, and surpassing most
+non-frontier LLMs on more complex reasoning tasks such as grade-school mathematics and basic
+coding. More generally, phi-1.5 exhibits many of the traits of much larger LLMs, both good –such
+as the ability to “think step by step” or perform some rudimentary in-context learning– and bad,
+including hallucinations and the potential for toxic and biased generations –encouragingly though, we
+are seeing improvement on that front thanks to the absence of web data. We open-source phi-1.5 to
+promote further research on these urgent topics.*
+
+
+This model was contributed by [Susnato Dhar](https://huggingface.co/susnato).
+The original code for Phi-1 and Phi-1.5 can be found [here](https://huggingface.co/microsoft/phi-1/blob/main/modeling_mixformer_sequential.py) and [here](https://huggingface.co/microsoft/phi-1_5/blob/main/modeling_mixformer_sequential.py), respectively.
+
+
+## Usage tips
+
+- This model is quite similar to `Llama`; the main difference is in [`PhiDecoderLayer`], which runs the [`PhiAttention`] and [`PhiMLP`] layers in a parallel configuration (see the sketch below).
+- The tokenizer used for this model is identical to the [`CodeGenTokenizer`].
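+The parallel configuration mentioned in the first tip can be sketched as follows. This is a minimal, hypothetical
+illustration, not the actual [`PhiDecoderLayer`] code: it omits rotary embeddings, causal masking, dropout and caching,
+and uses plain `torch.nn` modules as stand-ins for [`PhiAttention`] and [`PhiMLP`]. The key point is that both branches
+read the same layer-normalized input and both outputs are added to the residual:
+
+```python
+import torch
+from torch import nn
+
+hidden_size = 8
+ln = nn.LayerNorm(hidden_size)
+attn = nn.MultiheadAttention(hidden_size, num_heads=2, batch_first=True)  # stand-in for PhiAttention
+mlp = nn.Sequential(nn.Linear(hidden_size, 4 * hidden_size), nn.GELU(), nn.Linear(4 * hidden_size, hidden_size))  # stand-in for PhiMLP
+
+residual = torch.randn(1, 5, hidden_size)  # (batch, seq_len, hidden_size)
+x = ln(residual)                           # a single shared layer norm feeds both branches
+attn_out, _ = attn(x, x, x)                # attention branch
+out = residual + attn_out + mlp(x)         # both branch outputs are added to the residual
+```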
+
+
+### Example:
+
+```python
+>>> from transformers import PhiForCausalLM, AutoTokenizer
+
+>>> # define the model and tokenizer.
+>>> model = PhiForCausalLM.from_pretrained("susnato/phi-1_5_dev")
+>>> tokenizer = AutoTokenizer.from_pretrained("susnato/phi-1_5_dev")
+
+>>> # feel free to change the prompt to your liking.
+>>> prompt = "If I were an AI that had just achieved"
+
+>>> # apply the tokenizer.
+>>> tokens = tokenizer(prompt, return_tensors="pt")
+
+>>> # use the model to generate new tokens.
+>>> generated_output = model.generate(**tokens, use_cache=True, max_new_tokens=10)
+
+>>> tokenizer.batch_decode(generated_output)[0]
+'If I were an AI that had just achieved a breakthrough in machine learning, I would be thrilled'
+```
+
+
+## PhiConfig
+
+[[autodoc]] PhiConfig
+
+
+
+
+## PhiModel
+
+[[autodoc]] PhiModel
+    - forward
+
+## PhiForCausalLM
+
+[[autodoc]] PhiForCausalLM
+    - forward
+    - generate
+
+## PhiForSequenceClassification
+
+[[autodoc]] PhiForSequenceClassification
+    - forward
+
+## PhiForTokenClassification
+
+[[autodoc]] PhiForTokenClassification
+    - forward
+
+
+
\ No newline at end of file
diff --git a/docs/source/en/tasks/language_modeling.md b/docs/source/en/tasks/language_modeling.md
index 9c35b7293d75..0b9c24870219 100644
--- a/docs/source/en/tasks/language_modeling.md
+++ b/docs/source/en/tasks/language_modeling.md
@@ -37,7 +37,7 @@ You can finetune other architectures for causal language modeling following the
 Choose one of the following architectures:
 
-[BART](../model_doc/bart), [BERT](../model_doc/bert), [Bert Generation](../model_doc/bert-generation), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CodeLlama](../model_doc/code_llama), [CodeGen](../model_doc/codegen), [CPM-Ant](../model_doc/cpmant), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [Falcon](../model_doc/falcon), [Fuyu](../model_doc/fuyu), [GIT](../model_doc/git), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT NeoX Japanese](../model_doc/gpt_neox_japanese), [GPT-J](../model_doc/gptj), [LLaMA](../model_doc/llama), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [MPT](../model_doc/mpt), [MusicGen](../model_doc/musicgen), [MVP](../model_doc/mvp), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), [Pegasus](../model_doc/pegasus), [Persimmon](../model_doc/persimmon), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [QDQBert](../model_doc/qdqbert), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [RWKV](../model_doc/rwkv), [Speech2Text2](../model_doc/speech_to_text_2), [Transformer-XL](../model_doc/transfo-xl), [TrOCR](../model_doc/trocr), [Whisper](../model_doc/whisper), [XGLM](../model_doc/xglm), [XLM](../model_doc/xlm), [XLM-ProphetNet](../model_doc/xlm-prophetnet), 
[XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod) +[BART](../model_doc/bart), [BERT](../model_doc/bert), [Bert Generation](../model_doc/bert-generation), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CodeLlama](../model_doc/code_llama), [CodeGen](../model_doc/codegen), [CPM-Ant](../model_doc/cpmant), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [Falcon](../model_doc/falcon), [Fuyu](../model_doc/fuyu), [GIT](../model_doc/git), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT NeoX Japanese](../model_doc/gpt_neox_japanese), [GPT-J](../model_doc/gptj), [LLaMA](../model_doc/llama), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [MPT](../model_doc/mpt), [MusicGen](../model_doc/musicgen), [MVP](../model_doc/mvp), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), [Pegasus](../model_doc/pegasus), [Persimmon](../model_doc/persimmon), [Phi](../model_doc/phi), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [QDQBert](../model_doc/qdqbert), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [RWKV](../model_doc/rwkv), [Speech2Text2](../model_doc/speech_to_text_2), [Transformer-XL](../model_doc/transfo-xl), [TrOCR](../model_doc/trocr), [Whisper](../model_doc/whisper), [XGLM](../model_doc/xglm), [XLM](../model_doc/xlm), [XLM-ProphetNet](../model_doc/xlm-prophetnet), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod) diff --git a/docs/source/en/tasks/sequence_classification.md b/docs/source/en/tasks/sequence_classification.md index 7068e7ce088e..c6daa66f362f 100644 --- a/docs/source/en/tasks/sequence_classification.md +++ b/docs/source/en/tasks/sequence_classification.md @@ -33,7 +33,7 @@ The task illustrated in this tutorial is supported by the following model archit -[ALBERT](../model_doc/albert), [BART](../model_doc/bart), [BERT](../model_doc/bert), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CANINE](../model_doc/canine), [CodeLlama](../model_doc/code_llama), [ConvBERT](../model_doc/convbert), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [DeBERTa](../model_doc/deberta), [DeBERTa-v2](../model_doc/deberta-v2), [DistilBERT](../model_doc/distilbert), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [ErnieM](../model_doc/ernie_m), [ESM](../model_doc/esm), [Falcon](../model_doc/falcon), [FlauBERT](../model_doc/flaubert), [FNet](../model_doc/fnet), [Funnel Transformer](../model_doc/funnel), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI 
GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT-J](../model_doc/gptj), [I-BERT](../model_doc/ibert), [LayoutLM](../model_doc/layoutlm), [LayoutLMv2](../model_doc/layoutlmv2), [LayoutLMv3](../model_doc/layoutlmv3), [LED](../model_doc/led), [LiLT](../model_doc/lilt), [LLaMA](../model_doc/llama), [Longformer](../model_doc/longformer), [LUKE](../model_doc/luke), [MarkupLM](../model_doc/markuplm), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [MobileBERT](../model_doc/mobilebert), [MPNet](../model_doc/mpnet), [MPT](../model_doc/mpt), [MRA](../model_doc/mra), [MT5](../model_doc/mt5), [MVP](../model_doc/mvp), [Nezha](../model_doc/nezha), [Nyströmformer](../model_doc/nystromformer), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), [Perceiver](../model_doc/perceiver), [Persimmon](../model_doc/persimmon), [PLBart](../model_doc/plbart), [QDQBert](../model_doc/qdqbert), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [SqueezeBERT](../model_doc/squeezebert), [T5](../model_doc/t5), [TAPAS](../model_doc/tapas), [Transformer-XL](../model_doc/transfo-xl), [UMT5](../model_doc/umt5), [XLM](../model_doc/xlm), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod), [YOSO](../model_doc/yoso) +[ALBERT](../model_doc/albert), [BART](../model_doc/bart), [BERT](../model_doc/bert), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CANINE](../model_doc/canine), [CodeLlama](../model_doc/code_llama), [ConvBERT](../model_doc/convbert), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [DeBERTa](../model_doc/deberta), [DeBERTa-v2](../model_doc/deberta-v2), [DistilBERT](../model_doc/distilbert), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [ErnieM](../model_doc/ernie_m), [ESM](../model_doc/esm), [Falcon](../model_doc/falcon), [FlauBERT](../model_doc/flaubert), [FNet](../model_doc/fnet), [Funnel Transformer](../model_doc/funnel), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT-J](../model_doc/gptj), [I-BERT](../model_doc/ibert), [LayoutLM](../model_doc/layoutlm), [LayoutLMv2](../model_doc/layoutlmv2), [LayoutLMv3](../model_doc/layoutlmv3), [LED](../model_doc/led), [LiLT](../model_doc/lilt), [LLaMA](../model_doc/llama), [Longformer](../model_doc/longformer), [LUKE](../model_doc/luke), [MarkupLM](../model_doc/markuplm), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [MobileBERT](../model_doc/mobilebert), [MPNet](../model_doc/mpnet), [MPT](../model_doc/mpt), [MRA](../model_doc/mra), [MT5](../model_doc/mt5), [MVP](../model_doc/mvp), [Nezha](../model_doc/nezha), [Nyströmformer](../model_doc/nystromformer), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), [Perceiver](../model_doc/perceiver), 
[Persimmon](../model_doc/persimmon), [Phi](../model_doc/phi), [PLBart](../model_doc/plbart), [QDQBert](../model_doc/qdqbert), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [SqueezeBERT](../model_doc/squeezebert), [T5](../model_doc/t5), [TAPAS](../model_doc/tapas), [Transformer-XL](../model_doc/transfo-xl), [UMT5](../model_doc/umt5), [XLM](../model_doc/xlm), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod), [YOSO](../model_doc/yoso) diff --git a/docs/source/en/tasks/token_classification.md b/docs/source/en/tasks/token_classification.md index 289f2b05896a..125af5c9d979 100644 --- a/docs/source/en/tasks/token_classification.md +++ b/docs/source/en/tasks/token_classification.md @@ -32,7 +32,7 @@ The task illustrated in this tutorial is supported by the following model archit -[ALBERT](../model_doc/albert), [BERT](../model_doc/bert), [BigBird](../model_doc/big_bird), [BioGpt](../model_doc/biogpt), [BLOOM](../model_doc/bloom), [BROS](../model_doc/bros), [CamemBERT](../model_doc/camembert), [CANINE](../model_doc/canine), [ConvBERT](../model_doc/convbert), [Data2VecText](../model_doc/data2vec-text), [DeBERTa](../model_doc/deberta), [DeBERTa-v2](../model_doc/deberta-v2), [DistilBERT](../model_doc/distilbert), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [ErnieM](../model_doc/ernie_m), [ESM](../model_doc/esm), [Falcon](../model_doc/falcon), [FlauBERT](../model_doc/flaubert), [FNet](../model_doc/fnet), [Funnel Transformer](../model_doc/funnel), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [I-BERT](../model_doc/ibert), [LayoutLM](../model_doc/layoutlm), [LayoutLMv2](../model_doc/layoutlmv2), [LayoutLMv3](../model_doc/layoutlmv3), [LiLT](../model_doc/lilt), [Longformer](../model_doc/longformer), [LUKE](../model_doc/luke), [MarkupLM](../model_doc/markuplm), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [MobileBERT](../model_doc/mobilebert), [MPNet](../model_doc/mpnet), [MPT](../model_doc/mpt), [MRA](../model_doc/mra), [Nezha](../model_doc/nezha), [Nyströmformer](../model_doc/nystromformer), [QDQBert](../model_doc/qdqbert), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [SqueezeBERT](../model_doc/squeezebert), [XLM](../model_doc/xlm), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod), [YOSO](../model_doc/yoso) +[ALBERT](../model_doc/albert), [BERT](../model_doc/bert), [BigBird](../model_doc/big_bird), [BioGpt](../model_doc/biogpt), [BLOOM](../model_doc/bloom), [BROS](../model_doc/bros), [CamemBERT](../model_doc/camembert), [CANINE](../model_doc/canine), [ConvBERT](../model_doc/convbert), [Data2VecText](../model_doc/data2vec-text), [DeBERTa](../model_doc/deberta), [DeBERTa-v2](../model_doc/deberta-v2), [DistilBERT](../model_doc/distilbert), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [ErnieM](../model_doc/ernie_m), [ESM](../model_doc/esm), [Falcon](../model_doc/falcon), [FlauBERT](../model_doc/flaubert), 
[FNet](../model_doc/fnet), [Funnel Transformer](../model_doc/funnel), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [I-BERT](../model_doc/ibert), [LayoutLM](../model_doc/layoutlm), [LayoutLMv2](../model_doc/layoutlmv2), [LayoutLMv3](../model_doc/layoutlmv3), [LiLT](../model_doc/lilt), [Longformer](../model_doc/longformer), [LUKE](../model_doc/luke), [MarkupLM](../model_doc/markuplm), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [MobileBERT](../model_doc/mobilebert), [MPNet](../model_doc/mpnet), [MPT](../model_doc/mpt), [MRA](../model_doc/mra), [Nezha](../model_doc/nezha), [Nyströmformer](../model_doc/nystromformer), [Phi](../model_doc/phi), [QDQBert](../model_doc/qdqbert), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [SqueezeBERT](../model_doc/squeezebert), [XLM](../model_doc/xlm), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod), [YOSO](../model_doc/yoso) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index eb27314cb62a..cf89602b6597 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -497,6 +497,7 @@ "models.pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], "models.perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverTokenizer"], "models.persimmon": ["PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP", "PersimmonConfig"], + "models.phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"], "models.phobert": ["PhobertTokenizer"], "models.pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", @@ -2513,6 +2514,16 @@ _import_structure["models.persimmon"].extend( ["PersimmonForCausalLM", "PersimmonForSequenceClassification", "PersimmonModel", "PersimmonPreTrainedModel"] ) + _import_structure["models.phi"].extend( + [ + "PHI_PRETRAINED_MODEL_ARCHIVE_LIST", + "PhiForCausalLM", + "PhiForSequenceClassification", + "PhiForTokenClassification", + "PhiModel", + "PhiPreTrainedModel", + ] + ) _import_structure["models.pix2struct"].extend( [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -4690,6 +4701,7 @@ from .models.pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig from .models.perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverTokenizer from .models.persimmon import PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP, PersimmonConfig + from .models.phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig from .models.phobert import PhobertTokenizer from .models.pix2struct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -6406,6 +6418,14 @@ PersimmonModel, PersimmonPreTrainedModel, ) + from .models.phi import ( + PHI_PRETRAINED_MODEL_ARCHIVE_LIST, + PhiForCausalLM, + PhiForSequenceClassification, + PhiForTokenClassification, + PhiModel, + PhiPreTrainedModel, + ) from .models.pix2struct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, Pix2StructForConditionalGeneration, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 46e275e3f320..6132512688e6 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -162,6 +162,7 @@ pegasus_x, perceiver, persimmon, + phi, phobert, pix2struct, 
plbart, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 2c27892260aa..c1c2387373b8 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -168,6 +168,7 @@ ("pegasus_x", "PegasusXConfig"), ("perceiver", "PerceiverConfig"), ("persimmon", "PersimmonConfig"), + ("phi", "PhiConfig"), ("pix2struct", "Pix2StructConfig"), ("plbart", "PLBartConfig"), ("poolformer", "PoolFormerConfig"), @@ -379,6 +380,7 @@ ("pegasus_x", "PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("perceiver", "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("persimmon", "PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("phi", "PHI_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("pix2struct", "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("plbart", "PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("poolformer", "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -609,6 +611,7 @@ ("pegasus_x", "PEGASUS-X"), ("perceiver", "Perceiver"), ("persimmon", "Persimmon"), + ("phi", "Phi"), ("phobert", "PhoBERT"), ("pix2struct", "Pix2Struct"), ("plbart", "PLBart"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index f4f8eab9967c..ffcae9a23494 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -161,6 +161,7 @@ ("pegasus_x", "PegasusXModel"), ("perceiver", "PerceiverModel"), ("persimmon", "PersimmonModel"), + ("phi", "PhiModel"), ("plbart", "PLBartModel"), ("poolformer", "PoolFormerModel"), ("prophetnet", "ProphetNetModel"), @@ -426,6 +427,7 @@ ("opt", "OPTForCausalLM"), ("pegasus", "PegasusForCausalLM"), ("persimmon", "PersimmonForCausalLM"), + ("phi", "PhiForCausalLM"), ("plbart", "PLBartForCausalLM"), ("prophetnet", "ProphetNetForCausalLM"), ("qdqbert", "QDQBertLMHeadModel"), @@ -758,6 +760,7 @@ ("opt", "OPTForSequenceClassification"), ("perceiver", "PerceiverForSequenceClassification"), ("persimmon", "PersimmonForSequenceClassification"), + ("phi", "PhiForSequenceClassification"), ("plbart", "PLBartForSequenceClassification"), ("qdqbert", "QDQBertForSequenceClassification"), ("reformer", "ReformerForSequenceClassification"), @@ -915,6 +918,7 @@ ("mra", "MraForTokenClassification"), ("nezha", "NezhaForTokenClassification"), ("nystromformer", "NystromformerForTokenClassification"), + ("phi", "PhiForTokenClassification"), ("qdqbert", "QDQBertForTokenClassification"), ("rembert", "RemBertForTokenClassification"), ("roberta", "RobertaForTokenClassification"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 6f983b97810b..f04a9500dffe 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -307,6 +307,7 @@ "LlamaTokenizerFast" if is_tokenizers_available() else None, ), ), + ("phi", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)), ("phobert", ("PhobertTokenizer", None)), ("pix2struct", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), ("plbart", ("PLBartTokenizer" if is_sentencepiece_available() else None, None)), diff --git a/src/transformers/models/phi/__init__.py b/src/transformers/models/phi/__init__.py new file mode 100644 index 000000000000..ba79ac81a6b9 --- /dev/null +++ b/src/transformers/models/phi/__init__.py @@ -0,0 +1,69 @@ +# Copyright 2023 Microsoft and The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_sentencepiece_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_phi"] = [ + "PHI_PRETRAINED_MODEL_ARCHIVE_LIST", + "PhiPreTrainedModel", + "PhiModel", + "PhiForCausalLM", + "PhiForSequenceClassification", + "PhiForTokenClassification", + ] + + +if TYPE_CHECKING: + from .configuration_phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_phi import ( + PHI_PRETRAINED_MODEL_ARCHIVE_LIST, + PhiForCausalLM, + PhiForSequenceClassification, + PhiForTokenClassification, + PhiModel, + PhiPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/phi/configuration_phi.py b/src/transformers/models/phi/configuration_phi.py new file mode 100644 index 000000000000..aec31054ce1d --- /dev/null +++ b/src/transformers/models/phi/configuration_phi.py @@ -0,0 +1,179 @@ +# coding=utf-8 +# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Phi model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +PHI_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "susnato/phi-1_dev": "https://huggingface.co/susnato/phi-1_dev/resolve/main/config.json", + "susnato/phi-1_5_dev": "https://huggingface.co/susnato/phi-1_5_dev/resolve/main/config.json", +} + + +class PhiConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate an Phi + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Phi + [susnato/phi-1_dev](https://huggingface.co/susnato/phi-1_dev). 
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 51200):
+            Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`PhiModel`].
+        hidden_size (`int`, *optional*, defaults to 2048):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 8192):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 24):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        resid_pdrop (`float`, *optional*, defaults to 0.0):
+            Dropout probability applied to the attention and MLP outputs (residual dropout).
+        embd_pdrop (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the embeddings.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio after computing the attention scores.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 support up to 2048
+            tokens.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the layer normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the input and output word embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
+            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+            these scaling strategies behave:
+            https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
+            is an experimental feature, subject to breaking API changes in future versions. A short configuration
+            example is shown right after this argument list.
+        partial_rotary_factor (`float`, *optional*, defaults to 0.5):
+            Fraction of the query and key head dimensions that rotary embedding is applied to.
+        qk_layernorm (`bool`, *optional*, defaults to `False`):
+            Whether or not to layer-normalize the queries and keys after projecting the hidden states.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the beginning-of-sequence token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the end-of-sequence token.
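+
+    As a short illustration of the `rope_scaling` format described above (the scaling type and factor here are only
+    example values, not recommended settings):
+
+    ```python
+    >>> from transformers import PhiConfig
+
+    >>> # enable linear RoPE scaling with an illustrative factor of 2.0 (any float > 1.0 is accepted)
+    >>> configuration = PhiConfig(rope_scaling={"type": "linear", "factor": 2.0})
+    ```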
+ + Example: + + ```python + >>> from transformers import PhiModel, PhiConfig + + >>> # Initializing a Phi-1 style configuration + >>> configuration = PhiConfig.from_pretrained("susnato/phi-1_dev") + + >>> # Initializing a model from the configuration + >>> model = PhiModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "phi" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=51200, + hidden_size=2048, + intermediate_size=8192, + num_hidden_layers=24, + num_attention_heads=32, + resid_pdrop=0.0, + embd_pdrop=0.0, + attention_dropout=0.0, + hidden_act="gelu_new", + max_position_embeddings=2048, + initializer_range=0.02, + layer_norm_eps=1e-5, + use_cache=True, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + partial_rotary_factor=0.5, + qk_layernorm=False, + bos_token_id=1, + eos_token_id=2, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attention_dropout = attention_dropout + self.hidden_act = hidden_act + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self.partial_rotary_factor = partial_rotary_factor + self.qk_layernorm = qk_layernorm + self._rope_scaling_validation() + + super().__init__( + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` configuration. + """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + raise ValueError( + "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, " + f"got {self.rope_scaling}" + ) + rope_scaling_type = self.rope_scaling.get("type", None) + rope_scaling_factor = self.rope_scaling.get("factor", None) + if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: + raise ValueError( + f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" + ) + if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: + raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") diff --git a/src/transformers/models/phi/convert_phi_weights_to_hf.py b/src/transformers/models/phi/convert_phi_weights_to_hf.py new file mode 100644 index 000000000000..36d6eeb3e635 --- /dev/null +++ b/src/transformers/models/phi/convert_phi_weights_to_hf.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Weights conversion script for Phi + +This script downloads both Phi-1 and Phi-1.5 checkpoints to "checkpoint_path" and then converts the weights to +HugfgingFace model's format and saves them in "pytorch_dump_folder_path". +""" + +import argparse +import gc +import os + +import torch +from huggingface_hub import hf_hub_download + +from transformers import PhiConfig, PhiForCausalLM + + +_MODELS = { + "microsoft/phi-1": "https://huggingface.co/microsoft/phi-1/blob/main/pytorch_model.bin", + "microsoft/phi-1_5": "https://huggingface.co/microsoft/phi-1_5/blob/main/pytorch_model.bin", +} + + +PHI_MAPPING = { + "layers.0.wte.weight": "model.embed_tokens.weight", + "layers.25.linear.bias": "lm_head.bias", + "layers.25.linear.weight": "lm_head.weight", + "layers.25.ln.bias": "model.final_layernorm.bias", + "layers.25.ln.weight": "model.final_layernorm.weight", + "layers": "model.layers", + "ln": "input_layernorm", + "mixer": "self_attn", + "Wqkv": "query_key_value", + "out_proj": "dense", +} + + +def convert_weights(original_weights, mapping, config): + converted_weights = {} + original_weights_keys = sorted(original_weights.keys()) + + # we change names (1-24) -> layers(0-23) for Phi model layers + range_change = { + f"layers.{k}.": f"layers.{v}." + for k, v in zip(range(1, config.num_hidden_layers + 1), range(0, config.num_hidden_layers)) + } + + mapping.update(**range_change) + + for original_weights_key in original_weights_keys: + new_key = original_weights_key + + if "rotary_emb" in new_key: + continue + + if "Wqkv" in new_key: + if "weight" in new_key: + weight = original_weights[new_key] + weights_shape = weight.shape + weight = ( + weight.view(3, config.num_attention_heads, -1, config.hidden_size) + .transpose(0, 1) + .reshape(*weights_shape) + ) + original_weights[new_key] = weight + elif "bias" in new_key: + bias = original_weights[new_key] + bias_shape = bias.shape + bias = bias.view(3, config.num_attention_heads, -1).transpose(0, 1).reshape(*bias_shape) + original_weights[new_key] = bias + + for k, v in mapping.items(): + if k in new_key: + new_key = new_key.replace(k, v) + + converted_weights[new_key] = original_weights.pop(original_weights_key) + + return converted_weights + + +def _download(url: str, root: str): + repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}" + filename = f"{url.split('/')[-1]}" + hf_hub_download( + repo_id=repo_id, + filename=filename, + force_filename=root, + local_dir_use_symlinks=False, + ) + + +def convert_phi_weights(checkpoint_path, pytorch_dump_folder_path, use_cuda, save_weights_directly): + device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" + for each_model_name, each_model_url in _MODELS.items(): + converted_checkpoint = {} + + model_path = os.path.join(checkpoint_path, each_model_name + "_" + each_model_url.split("/")[-1]) + if not os.path.exists(model_path): + print(f"\n{each_model_name} was not found! 
Downloading it to {model_path}") + _download(url=each_model_url, root=model_path) + model_checkpoint = torch.load(model_path, map_location=device) + model_type = each_model_name.split("/")[1] # phi-1 or phi-1_5 + config = PhiConfig.from_pretrained(f"susnato/{model_type}_dev") + + # Converting the weights + converted_checkpoint.update(**convert_weights(model_checkpoint, PHI_MAPPING, config)) + + # Save either the whole model or the converted weights + if save_weights_directly: + save_weights_path = os.path.join( + pytorch_dump_folder_path, each_model_name.split("/")[-1] + "_" + each_model_url.split("/")[-1] + ) + torch.save(converted_checkpoint, save_weights_path) + print(f"Model weights saved at {save_weights_path}!") + + else: + model = PhiForCausalLM(config).to(device) + model.load_state_dict(converted_checkpoint, strict=True) + save_model_path = os.path.join(pytorch_dump_folder_path, model_type) + model.save_pretrained(save_model_path) + print(f"Model saved at {save_model_path}!") + + # release GPU memory for the 2nd model if cuda was used. + del config, model + + # release GPU memory for the 2nd model if cuda was used. + del model_checkpoint, converted_checkpoint + if use_cuda: + torch.cuda.empty_cache() + gc.collect() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # # Required parameters + parser.add_argument( + "--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)" + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + help="Path to the output PyTorch model. (Please enter full path)", + ) + parser.add_argument( + "--use_cuda", + default=False, + type=bool, + help="Whether to load the weights on GPU during conversion or not, False by default", + ) + parser.add_argument( + "--save_weights_directly", + default=True, + type=bool, + help="Whether to save the weights directly after conversion or load the weight to the Phi model and then save " + "the Phi model along with weights. True by default", + ) + + args = parser.parse_args() + convert_phi_weights(args.checkpoint_path, args.pytorch_dump_folder_path, args.use_cuda, args.save_weights_directly) diff --git a/src/transformers/models/phi/modeling_phi.py b/src/transformers/models/phi/modeling_phi.py new file mode 100644 index 000000000000..7009b59c8318 --- /dev/null +++ b/src/transformers/models/phi/modeling_phi.py @@ -0,0 +1,1072 @@ +# coding=utf-8 +# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" PyTorch Phi model.""" + + +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_phi import PhiConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "susnato/phi-1_dev" +_CONFIG_FOR_DOC = "PhiConfig" + +PHI_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "susnato/phi-1_dev", + "susnato/phi-1_5_dev", + # See all Phi models at https://huggingface.co/models?filter=phi +] + + +# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Phi +class PhiRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->Phi +class PhiLinearScalingRotaryEmbedding(PhiRotaryEmbedding): + """PhiRotaryEmbedding extended with linear scaling. 
Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->Phi +class PhiDynamicNTKScalingRotaryEmbedding(PhiRotaryEmbedding): + """PhiRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. 
Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Phi +class PhiMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.persimmon.modeling_persimmon.PersimmonAttention with Persimmon->Phi,persimmon->phi +class PhiAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: PhiConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.partial_rotary_factor = config.partial_rotary_factor + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True) + self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True) + self.qk_layernorm = config.qk_layernorm + + if self.qk_layernorm: + self.q_layernorm = nn.LayerNorm( + config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True + ) + self.k_layernorm = nn.LayerNorm( + config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True + ) + self.attention_dropout = nn.Dropout(config.attention_dropout) + self._init_rope() + + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = PhiRotaryEmbedding( + int(self.partial_rotary_factor * self.head_dim), + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + if scaling_type == "linear": + self.rotary_emb = PhiLinearScalingRotaryEmbedding( + int(self.partial_rotary_factor * self.head_dim), + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "dynamic": + self.rotary_emb = PhiDynamicNTKScalingRotaryEmbedding( + int(self.partial_rotary_factor * self.head_dim), + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + + # Copied from transformers.models.bloom.modeling_bloom.BloomAttention._split_heads + def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory + storage as `fused_qkv` + + Args: + fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim] + + Returns: + query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] + value: [batch_size, seq_length, num_heads, head_dim] + """ + batch_size, seq_length, three_times_hidden_size = fused_qkv.shape + fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) + return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :] + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + # [batch_size, seq_length, 3 x hidden_size] + fused_qkv = self.query_key_value(hidden_states) + + # 3 x [batch_size, seq_length, num_heads, head_dim] + (query_states, key_states, value_states) = self._split_heads(fused_qkv) + + if self.qk_layernorm: + query_states = self.q_layernorm(query_states) + key_states = self.k_layernorm(key_states) + + # [batch_size, num_heads, seq_length, head_dim] -> [batch_size, seq_length, num_heads, head_dim] + query_states = query_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + # Partial rotary embedding + 
query_rot, query_pass = ( + query_states[..., : self.rotary_emb.dim], + query_states[..., self.rotary_emb.dim :], + ) + key_rot, key_pass = ( + key_states[..., : self.rotary_emb.dim], + key_states[..., self.rotary_emb.dim :], + ) + # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor] + query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) + + # [batch_size, seq_length, num_heads, head_dim] + query_states = torch.cat((query_rot, query_pass), dim=-1) + key_states = torch.cat((key_rot, key_pass), dim=-1) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype) + attn_weights = self.attention_dropout(attn_weights) + + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.dense(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class PhiDecoderLayer(nn.Module): + def __init__(self, config: PhiConfig): + super().__init__() + self.self_attn = PhiAttention(config=config) + self.mlp = PhiMLP(config) + self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): + input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range + `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + attn_outputs, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + attn_outputs = self.resid_dropout(attn_outputs) + + feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states)) + hidden_states = attn_outputs + feed_forward_hidden_states + residual + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +PHI_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`PhiConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Phi Model outputting raw hidden-states without any specific head on top.", + PHI_START_DOCSTRING, +) +class PhiPreTrainedModel(PreTrainedModel): + config_class = PhiConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _skip_keys_device_placement = "past_key_values" + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +PHI_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
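For illustration, a minimal sketch of how `input_ids` and a padding `attention_mask` of the kind described above are typically built with a tokenizer and passed to the model. It assumes the `susnato/phi-1_5_dev` dev checkpoint referenced in the doctests of this patch, and reusing EOS as the pad token is an assumption rather than something this patch prescribes:

```python
from transformers import AutoTokenizer, PhiModel

tokenizer = AutoTokenizer.from_pretrained("susnato/phi-1_5_dev")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # assumption: reuse EOS for padding

model = PhiModel.from_pretrained("susnato/phi-1_5_dev")

# Two prompts of different lengths; padding=True pads the shorter one.
batch = tokenizer(["def add(a, b):", "print('hi')"], padding=True, return_tensors="pt")

# attention_mask contains 1 for real tokens and 0 for padding, as described above.
outputs = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```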
+ + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Phi Model outputting raw hidden-states without any specific head on top.", + PHI_START_DOCSTRING, +) +class PhiModel(PhiPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`PhiDecoderLayer`] + + Args: + config: PhiConfig + """ + + def __init__(self, config: PhiConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.embed_dropout = nn.Dropout(config.embd_pdrop) + self.layers = nn.ModuleList([PhiDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + inputs_embeds = self.embed_dropout(inputs_embeds) + + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_value, + output_attentions, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.final_layernorm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class PhiForCausalLM(PhiPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi,bias=False->bias=True + def __init__(self, config): + super().__init__(config) + self.model = PhiModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings + def get_input_embeddings(self): + return self.model.embed_tokens + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder + def set_decoder(self, decoder): + self.model = decoder + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: 
Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, PhiForCausalLM + + >>> model = PhiForCausalLM.from_pretrained("susnato/phi-1_5_dev") + >>> tokenizer = AutoTokenizer.from_pretrained("susnato/phi-1_5_dev") + + >>> prompt = "This is an example script ." + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + 'This is an example script .py file that uses the `os` module to create a new directory and write some text to it.\n\n``' + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create 
position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The PhiModel with a sequence classification head on top (linear layer). + + [`PhiForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + PHI_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->PHI,Llama->Phi with self.transformer->self.model, transformer_outputs->model_outputs +class PhiForSequenceClassification(PhiPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = PhiModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
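As an aside, the last-non-padding-token pooling described in the class docstring above, and implemented just below via `torch.eq(input_ids, pad_token_id).long().argmax(-1) - 1`, can be illustrated with a small standalone sketch (toy tensors, `pad_token_id=0` assumed):

```python
import torch

pad_token_id = 0
input_ids = torch.tensor(
    [
        [11, 12, 13, 0, 0],  # three real tokens followed by padding
        [21, 22, 23, 24, 25],  # no padding at all
    ]
)
# The first occurrence of the pad token, minus one, is the last real token.
# When a row has no padding, argmax over an all-zero mask returns 0, and 0 - 1 == -1
# indexes the final position of that row, which is exactly what is wanted.
sequence_lengths = torch.eq(input_ids, pad_token_id).long().argmax(-1) - 1
print(sequence_lengths)  # tensor([ 2, -1])

logits = torch.randn(2, 5, 3)  # (batch_size, seq_len, num_labels)
pooled_logits = logits[torch.arange(2), sequence_lengths]
print(pooled_logits.shape)  # torch.Size([2, 3])
```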
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = model_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + logits.device + ) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + model_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=model_outputs.past_key_values, + hidden_states=model_outputs.hidden_states, + attentions=model_outputs.attentions, + ) + + +@add_start_docstrings( + """ + PhiModel with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
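The loss branch taken in `PhiForSequenceClassification.forward` above is selected by `config.problem_type`; a short sketch of the three cases with toy tensors (shapes chosen only for illustration):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

batch_size, num_labels = 4, 3
pooled_logits = torch.randn(batch_size, num_labels)

# "regression": num_labels == 1 and float targets
mse = MSELoss()(torch.randn(batch_size, 1).squeeze(), torch.randn(batch_size))

# "single_label_classification": one integer class index per example
ce = CrossEntropyLoss()(pooled_logits.view(-1, num_labels), torch.randint(0, num_labels, (batch_size,)))

# "multi_label_classification": an independent binary target per label
bce = BCEWithLogitsLoss()(pooled_logits, torch.randint(0, 2, (batch_size, num_labels)).float())

print(mse.item(), ce.item(), bce.item())
```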
+ """, + PHI_START_DOCSTRING, +) +# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with MPT->PHI,Mpt->Phi,self.transformer->self.model,transformer_outputs->model_outputs +class PhiForTokenClassification(PhiPreTrainedModel): + def __init__(self, config: PhiConfig): + super().__init__(config) + self.num_labels = config.num_labels + + self.model = PhiModel(config) + if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: + classifier_dropout = config.classifier_dropout + elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + attention_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **deprecated_arguments, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
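Note that for token classification each position receives its own label, so `labels` is expected to have shape `(batch_size, sequence_length)` (the body below unpacks it that way), and the loss flattens logits and labels so that every token becomes one classification example. A toy sketch with made-up shapes:

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_length, num_labels = 2, 4, 5
logits = torch.randn(batch_size, seq_length, num_labels)  # per-token scores
labels = torch.randint(0, num_labels, (batch_size, seq_length))  # one label per token

loss = CrossEntropyLoss()(
    logits.view(batch_size * seq_length, num_labels),
    labels.view(batch_size * seq_length),
)
print(loss.item())
```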
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_outputs = self.model( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = model_outputs[0] + hidden_states = self.dropout(hidden_states) + logits = self.classifier(hidden_states) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + batch_size, seq_length = labels.shape + loss_fct = CrossEntropyLoss() + loss = loss_fct( + logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) + ) + + if not return_dict: + output = (logits,) + model_outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=model_outputs.hidden_states, + attentions=model_outputs.attentions, + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index c39b2f86add5..c6b20c7e3674 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -6172,6 +6172,44 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +PHI_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PhiForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/phi/__init__.py b/tests/models/phi/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/phi/test_modeling_phi.py b/tests/models/phi/test_modeling_phi.py new file mode 100644 index 000000000000..200fac25907a --- /dev/null +++ b/tests/models/phi/test_modeling_phi.py @@ -0,0 +1,386 @@ +# coding=utf-8 +# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Testing suite for the PyTorch Phi model. 
""" + + +import unittest + +from transformers import PhiConfig, is_torch_available +from transformers.testing_utils import require_torch, slow, torch_device + +from ...generation.test_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask +from ...test_pipeline_mixin import PipelineTesterMixin + + +if is_torch_available(): + import torch + + from transformers import ( + PhiForCausalLM, + PhiForSequenceClassification, + PhiForTokenClassification, + PhiModel, + ) + + +# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester with Llama->Phi +class PhiModelTester: + def __init__( + self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=False, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + num_choices=4, + pad_token_id=0, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + self.num_choices = num_choices + self.pad_token_id = pad_token_id + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + choice_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + choice_labels = ids_tensor([self.batch_size], self.num_choices) + + config = self.get_config() + + return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + + def get_config(self): + return PhiConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + is_decoder=False, + initializer_range=self.initializer_range, + 
pad_token_id=self.pad_token_id, + ) + + def create_and_check_model( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = PhiModel(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask) + result = model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def create_and_check_model_as_decoder( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + config.add_cross_attention = True + model = PhiModel(config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + attention_mask=input_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + ) + result = model( + input_ids, + attention_mask=input_mask, + encoder_hidden_states=encoder_hidden_states, + ) + result = model(input_ids, attention_mask=input_mask) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def create_and_check_for_causal_lm( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + model = PhiForCausalLM(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) + + def create_and_check_decoder_model_past_large_inputs( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + config.is_decoder = True + config.add_cross_attention = True + model = PhiForCausalLM(config=config) + model.to(torch_device) + model.eval() + + # first forward pass + outputs = model( + input_ids, + attention_mask=input_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=True, + ) + past_key_values = outputs.past_key_values + + # create hypothetical multiple next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) + next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) + + # append to next input_ids and + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) + + output_from_no_past = model( + next_input_ids, + attention_mask=next_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_hidden_states=True, + )["hidden_states"][0] + output_from_past = model( + next_tokens, + attention_mask=next_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + output_hidden_states=True, + )["hidden_states"][0] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() + + self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) + + # test that outputs are equal for slice + 
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + ) = config_and_inputs + inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} + return config, inputs_dict + + +@require_torch +class PhiModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): + all_model_classes = ( + (PhiModel, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification) + if is_torch_available() + else () + ) + all_generative_model_classes = (PhiForCausalLM,) if is_torch_available() else () + pipeline_model_mapping = ( + { + "feature-extraction": PhiModel, + "text-classification": PhiForSequenceClassification, + "text-generation": PhiForCausalLM, + "token-classification": PhiForTokenClassification, + "zero-shot": PhiForSequenceClassification, + } + if is_torch_available() + else {} + ) + + test_headmasking = False + test_pruning = False + + # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.setUp with Llama->Phi + def setUp(self): + self.model_tester = PhiModelTester(self) + self.config_tester = ConfigTester(self, config_class=PhiConfig, hidden_size=37) + + # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config + def test_config(self): + self.config_tester.run_common_tests() + + # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Phi,llama->phi + def test_phi_sequence_classification_model(self): + config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.num_labels = 3 + input_ids = input_dict["input_ids"] + attention_mask = input_ids.ne(1).to(torch_device) + sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) + model = PhiForSequenceClassification(config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) + self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) + + # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_single_label with Llama->Phi,llama->phi + def test_phi_sequence_classification_model_for_single_label(self): + config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.num_labels = 3 + config.problem_type = "single_label_classification" + input_ids = input_dict["input_ids"] + attention_mask = input_ids.ne(1).to(torch_device) + sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) + model = PhiForSequenceClassification(config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) + self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) + + # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_multi_label with Llama->Phi,llama->phi + 
def test_phi_sequence_classification_model_for_multi_label(self): + config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.num_labels = 3 + config.problem_type = "multi_label_classification" + input_ids = input_dict["input_ids"] + attention_mask = input_ids.ne(1).to(torch_device) + sequence_labels = ids_tensor( + [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size + ).to(torch.float) + model = PhiForSequenceClassification(config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) + self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) + + +@slow +@require_torch +class PhiIntegrationTest(unittest.TestCase): + def test_model_phi_1_logits(self): + input_ids = { + "input_ids": torch.tensor( + [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device + ) + } + + model = PhiForCausalLM.from_pretrained("susnato/phi-1_dev").to(torch_device) + model.eval() + + output = model(**input_ids).logits + + # fmt: off + EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) + # fmt: on + + self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4)) + + def test_model_phi_1_5_logits(self): + input_ids = { + "input_ids": torch.tensor( + [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device + ) + } + + model = PhiForCausalLM.from_pretrained("susnato/phi-1_5_dev").to(torch_device) + model.eval() + + output = model(**input_ids).logits + + # fmt: off + EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) + # fmt: on + + self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4)) From ed115b347347a1292dfd88a85d5bd9b8250c66e7 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 10 Nov 2023 15:35:27 +0000 Subject: [PATCH 142/268] Normalize floating point cast (#27249) * Normalize image - cast input images to float32. This is done if the input image isn't of floating type. Issues can occur when do_rescale=False is set in an image processor. When this happens, the image passed to the call is of type uint8 becuase of the type casting that happens in resize because of the PIL image library. 
As the mean and std values are cast to match the image dtype, this can cause NaNs and infs to appear in the normalized image, as the floating values being used to divide the image are now set to 0. The reason the mean and std values are cast is because previously they were set as float32 by default. However, if the input image was of type float16, the normalization would result in the image being upcast to float32 too. * Add tests * Remove float32 cast --- src/transformers/image_transforms.py | 5 ++++ tests/test_image_transforms.py | 37 ++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/src/transformers/image_transforms.py b/src/transformers/image_transforms.py index e9cb93db651d..b3a25a8be891 100644 --- a/src/transformers/image_transforms.py +++ b/src/transformers/image_transforms.py @@ -376,6 +376,11 @@ def normalize( channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format) num_channels = image.shape[channel_axis] + # We cast to float32 to avoid errors that can occur when subtracting uint8 values. + # We preserve the original dtype if it is a float type to prevent upcasting float16. + if not np.issubdtype(image.dtype, np.floating): + image = image.astype(np.float32) + if isinstance(mean, Iterable): if len(mean) != num_channels: raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}") diff --git a/tests/test_image_transforms.py b/tests/test_image_transforms.py index 2941685e6952..ae86f84def51 100644 --- a/tests/test_image_transforms.py +++ b/tests/test_image_transforms.py @@ -302,7 +302,7 @@ def test_normalize(self): normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) - self.assertTrue(np.allclose(normalized_image, expected_image)) + self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test image with 4 channels is normalized correctly image = np.random.randint(0, 256, (224, 224, 4)) / 255 @@ -310,9 +310,42 @@ def test_normalize(self): std = (0.1, 0.2, 0.3, 0.4) expected_image = (image - mean) / std self.assertTrue( - np.allclose(normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image) + np.allclose( + normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6 + ) ) + # Test float32 image input keeps float32 dtype + image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255 + mean = (0.5, 0.6, 0.7) + std = (0.1, 0.2, 0.3) + expected_image = ((image - mean) / std).astype(np.float32) + normalized_image = normalize(image, mean=mean, std=std) + self.assertEqual(normalized_image.dtype, np.float32) + self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) + + # Test float16 image input keeps float16 dtype + image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255 + mean = (0.5, 0.6, 0.7) + std = (0.1, 0.2, 0.3) + + # The mean and std are cast to match the dtype of the input image + cast_mean = np.array(mean, dtype=np.float16) + cast_std = np.array(std, dtype=np.float16) + expected_image = (image - cast_mean) / cast_std + normalized_image = normalize(image, mean=mean, std=std) + self.assertEqual(normalized_image.dtype, np.float16) + self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) + + # Test int image input is converted to float32 + image = np.random.randint(0, 2, (224, 224, 3), 
dtype=np.uint8) + mean = (0.5, 0.6, 0.7) + std = (0.1, 0.2, 0.3) + expected_image = (image.astype(np.float32) - mean) / std + normalized_image = normalize(image, mean=mean, std=std) + self.assertEqual(normalized_image.dtype, np.float32) + self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) + def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) From 7ee995fd9c692761c4601ddbffa2ac2ec9f27b0b Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 10 Nov 2023 20:05:05 +0100 Subject: [PATCH 143/268] Make `examples_torch_job` faster (#27437) fix Co-authored-by: ydshieh --- .circleci/create_circleci_config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index ba766dc1b3d4..e326e324b65c 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -397,6 +397,7 @@ def job_name(self): examples_torch_job = CircleCIJob( "examples_torch", + additional_env={"OMP_NUM_THREADS": 8}, cache_name="torch_examples", install_steps=[ "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng", @@ -405,6 +406,7 @@ def job_name(self): "pip install -U --upgrade-strategy eager -r examples/pytorch/_tests_requirements.txt", "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate", ], + pytest_num_workers=1, ) From 9d87cd2ce286c1d8e88e129b04a4bd0db6754522 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 13 Nov 2023 12:35:51 +0100 Subject: [PATCH 144/268] Fix line ending in `utils/not_doctested.txt` (#27459) fix Co-authored-by: ydshieh --- utils/not_doctested.txt | 1984 +++++++++++++++++++-------------------- 1 file changed, 992 insertions(+), 992 deletions(-) diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index 79297cb17dc1..744caff11279 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -1,992 +1,992 @@ -docs/source/en/_config.py -docs/source/en/accelerate.md -docs/source/en/add_new_model.md -docs/source/en/add_new_pipeline.md -docs/source/en/add_tensorflow_model.md -docs/source/en/attention.md -docs/source/en/benchmarks.md -docs/source/en/bertology.md -docs/source/en/big_models.md -docs/source/en/community.md -docs/source/en/contributing.md -docs/source/en/create_a_model.md -docs/source/en/custom_models.md -docs/source/en/custom_tools.md -docs/source/en/debugging.md -docs/source/en/fast_tokenizers.md -docs/source/en/glossary.md -docs/source/en/hpo_train.md -docs/source/en/index.md -docs/source/en/installation.md -docs/source/en/internal/audio_utils.md -docs/source/en/internal/file_utils.md -docs/source/en/internal/image_processing_utils.md -docs/source/en/internal/modeling_utils.md -docs/source/en/internal/pipelines_utils.md -docs/source/en/internal/time_series_utils.md -docs/source/en/internal/tokenization_utils.md -docs/source/en/internal/trainer_utils.md -docs/source/en/llm_tutorial.md -docs/source/en/main_classes/agent.md -docs/source/en/main_classes/callback.md -docs/source/en/main_classes/configuration.md -docs/source/en/main_classes/data_collator.md -docs/source/en/main_classes/deepspeed.md -docs/source/en/main_classes/feature_extractor.md -docs/source/en/main_classes/image_processor.md -docs/source/en/main_classes/keras_callbacks.md -docs/source/en/main_classes/logging.md -docs/source/en/main_classes/model.md -docs/source/en/main_classes/onnx.md 
-docs/source/en/main_classes/optimizer_schedules.md -docs/source/en/main_classes/output.md -docs/source/en/main_classes/pipelines.md -docs/source/en/main_classes/processors.md -docs/source/en/main_classes/quantization.md -docs/source/en/main_classes/tokenizer.md -docs/source/en/main_classes/trainer.md -docs/source/en/model_doc/albert.md -docs/source/en/model_doc/align.md -docs/source/en/model_doc/altclip.md -docs/source/en/model_doc/audio-spectrogram-transformer.md -docs/source/en/model_doc/auto.md -docs/source/en/model_doc/autoformer.md -docs/source/en/model_doc/bark.md -docs/source/en/model_doc/bart.md -docs/source/en/model_doc/barthez.md -docs/source/en/model_doc/bartpho.md -docs/source/en/model_doc/beit.md -docs/source/en/model_doc/bert-generation.md -docs/source/en/model_doc/bert-japanese.md -docs/source/en/model_doc/bert.md -docs/source/en/model_doc/bertweet.md -docs/source/en/model_doc/big_bird.md -docs/source/en/model_doc/bigbird_pegasus.md -docs/source/en/model_doc/biogpt.md -docs/source/en/model_doc/bit.md -docs/source/en/model_doc/blenderbot-small.md -docs/source/en/model_doc/blenderbot.md -docs/source/en/model_doc/blip-2.md -docs/source/en/model_doc/blip.md -docs/source/en/model_doc/bloom.md -docs/source/en/model_doc/bort.md -docs/source/en/model_doc/bridgetower.md -docs/source/en/model_doc/camembert.md -docs/source/en/model_doc/canine.md -docs/source/en/model_doc/chinese_clip.md -docs/source/en/model_doc/clap.md -docs/source/en/model_doc/clip.md -docs/source/en/model_doc/clipseg.md -docs/source/en/model_doc/codegen.md -docs/source/en/model_doc/conditional_detr.md -docs/source/en/model_doc/convbert.md -docs/source/en/model_doc/convnext.md -docs/source/en/model_doc/convnextv2.md -docs/source/en/model_doc/cpm.md -docs/source/en/model_doc/cpmant.md -docs/source/en/model_doc/ctrl.md -docs/source/en/model_doc/cvt.md -docs/source/en/model_doc/data2vec.md -docs/source/en/model_doc/deberta-v2.md -docs/source/en/model_doc/deberta.md -docs/source/en/model_doc/decision_transformer.md -docs/source/en/model_doc/deformable_detr.md -docs/source/en/model_doc/deit.md -docs/source/en/model_doc/deplot.md -docs/source/en/model_doc/deta.md -docs/source/en/model_doc/detr.md -docs/source/en/model_doc/dialogpt.md -docs/source/en/model_doc/dinat.md -docs/source/en/model_doc/dinov2.md -docs/source/en/model_doc/distilbert.md -docs/source/en/model_doc/dit.md -docs/source/en/model_doc/dpr.md -docs/source/en/model_doc/dpt.md -docs/source/en/model_doc/efficientformer.md -docs/source/en/model_doc/efficientnet.md -docs/source/en/model_doc/electra.md -docs/source/en/model_doc/encodec.md -docs/source/en/model_doc/ernie.md -docs/source/en/model_doc/ernie_m.md -docs/source/en/model_doc/esm.md -docs/source/en/model_doc/flan-t5.md -docs/source/en/model_doc/flan-ul2.md -docs/source/en/model_doc/flaubert.md -docs/source/en/model_doc/flava.md -docs/source/en/model_doc/fnet.md -docs/source/en/model_doc/focalnet.md -docs/source/en/model_doc/fsmt.md -docs/source/en/model_doc/funnel.md -docs/source/en/model_doc/git.md -docs/source/en/model_doc/glpn.md -docs/source/en/model_doc/gpt-sw3.md -docs/source/en/model_doc/gpt2.md -docs/source/en/model_doc/gpt_bigcode.md -docs/source/en/model_doc/gpt_neo.md -docs/source/en/model_doc/gpt_neox.md -docs/source/en/model_doc/gpt_neox_japanese.md -docs/source/en/model_doc/gptj.md -docs/source/en/model_doc/gptsan-japanese.md -docs/source/en/model_doc/graphormer.md -docs/source/en/model_doc/groupvit.md -docs/source/en/model_doc/herbert.md -docs/source/en/model_doc/hubert.md 
-docs/source/en/model_doc/ibert.md -docs/source/en/model_doc/idefics.md -docs/source/en/model_doc/imagegpt.md -docs/source/en/model_doc/informer.md -docs/source/en/model_doc/instructblip.md -docs/source/en/model_doc/jukebox.md -docs/source/en/model_doc/layoutlm.md -docs/source/en/model_doc/layoutlmv2.md -docs/source/en/model_doc/layoutlmv3.md -docs/source/en/model_doc/layoutxlm.md -docs/source/en/model_doc/led.md -docs/source/en/model_doc/levit.md -docs/source/en/model_doc/lilt.md -docs/source/en/model_doc/llama.md -docs/source/en/model_doc/llama2.md -docs/source/en/model_doc/longformer.md -docs/source/en/model_doc/longt5.md -docs/source/en/model_doc/luke.md -docs/source/en/model_doc/lxmert.md -docs/source/en/model_doc/m2m_100.md -docs/source/en/model_doc/marian.md -docs/source/en/model_doc/mask2former.md -docs/source/en/model_doc/maskformer.md -docs/source/en/model_doc/matcha.md -docs/source/en/model_doc/mbart.md -docs/source/en/model_doc/mctct.md -docs/source/en/model_doc/mega.md -docs/source/en/model_doc/megatron-bert.md -docs/source/en/model_doc/megatron_gpt2.md -docs/source/en/model_doc/mgp-str.md -docs/source/en/model_doc/mistral.md -docs/source/en/model_doc/mluke.md -docs/source/en/model_doc/mms.md -docs/source/en/model_doc/mobilebert.md -docs/source/en/model_doc/mobilenet_v1.md -docs/source/en/model_doc/mobilenet_v2.md -docs/source/en/model_doc/mobilevit.md -docs/source/en/model_doc/mobilevitv2.md -docs/source/en/model_doc/mpnet.md -docs/source/en/model_doc/mpt.md -docs/source/en/model_doc/mra.md -docs/source/en/model_doc/mt5.md -docs/source/en/model_doc/musicgen.md -docs/source/en/model_doc/mvp.md -docs/source/en/model_doc/nat.md -docs/source/en/model_doc/nezha.md -docs/source/en/model_doc/nllb-moe.md -docs/source/en/model_doc/nllb.md -docs/source/en/model_doc/nystromformer.md -docs/source/en/model_doc/oneformer.md -docs/source/en/model_doc/open-llama.md -docs/source/en/model_doc/openai-gpt.md -docs/source/en/model_doc/opt.md -docs/source/en/model_doc/owlvit.md -docs/source/en/model_doc/pegasus.md -docs/source/en/model_doc/pegasus_x.md -docs/source/en/model_doc/perceiver.md -docs/source/en/model_doc/phobert.md -docs/source/en/model_doc/pix2struct.md -docs/source/en/model_doc/plbart.md -docs/source/en/model_doc/poolformer.md -docs/source/en/model_doc/pop2piano.md -docs/source/en/model_doc/prophetnet.md -docs/source/en/model_doc/pvt.md -docs/source/en/model_doc/qdqbert.md -docs/source/en/model_doc/rag.md -docs/source/en/model_doc/realm.md -docs/source/en/model_doc/reformer.md -docs/source/en/model_doc/regnet.md -docs/source/en/model_doc/rembert.md -docs/source/en/model_doc/resnet.md -docs/source/en/model_doc/retribert.md -docs/source/en/model_doc/roberta-prelayernorm.md -docs/source/en/model_doc/roberta.md -docs/source/en/model_doc/roc_bert.md -docs/source/en/model_doc/roformer.md -docs/source/en/model_doc/rwkv.md -docs/source/en/model_doc/sam.md -docs/source/en/model_doc/segformer.md -docs/source/en/model_doc/sew-d.md -docs/source/en/model_doc/sew.md -docs/source/en/model_doc/speech-encoder-decoder.md -docs/source/en/model_doc/speech_to_text_2.md -docs/source/en/model_doc/speecht5.md -docs/source/en/model_doc/splinter.md -docs/source/en/model_doc/squeezebert.md -docs/source/en/model_doc/swiftformer.md -docs/source/en/model_doc/swin.md -docs/source/en/model_doc/swin2sr.md -docs/source/en/model_doc/swinv2.md -docs/source/en/model_doc/table-transformer.md -docs/source/en/model_doc/tapas.md -docs/source/en/model_doc/time_series_transformer.md -docs/source/en/model_doc/timesformer.md 
-docs/source/en/model_doc/trajectory_transformer.md -docs/source/en/model_doc/transfo-xl.md -docs/source/en/model_doc/trocr.md -docs/source/en/model_doc/tvlt.md -docs/source/en/model_doc/ul2.md -docs/source/en/model_doc/umt5.md -docs/source/en/model_doc/unispeech-sat.md -docs/source/en/model_doc/unispeech.md -docs/source/en/model_doc/upernet.md -docs/source/en/model_doc/van.md -docs/source/en/model_doc/videomae.md -docs/source/en/model_doc/vilt.md -docs/source/en/model_doc/vision-encoder-decoder.md -docs/source/en/model_doc/vision-text-dual-encoder.md -docs/source/en/model_doc/visual_bert.md -docs/source/en/model_doc/vit.md -docs/source/en/model_doc/vit_hybrid.md -docs/source/en/model_doc/vit_mae.md -docs/source/en/model_doc/vit_msn.md -docs/source/en/model_doc/vivit.md -docs/source/en/model_doc/wav2vec2-conformer.md -docs/source/en/model_doc/wav2vec2.md -docs/source/en/model_doc/wav2vec2_phoneme.md -docs/source/en/model_doc/wavlm.md -docs/source/en/model_doc/whisper.md -docs/source/en/model_doc/xclip.md -docs/source/en/model_doc/xglm.md -docs/source/en/model_doc/xlm-prophetnet.md -docs/source/en/model_doc/xlm-roberta-xl.md -docs/source/en/model_doc/xlm-roberta.md -docs/source/en/model_doc/xlm-v.md -docs/source/en/model_doc/xlm.md -docs/source/en/model_doc/xlnet.md -docs/source/en/model_doc/xls_r.md -docs/source/en/model_doc/xlsr_wav2vec2.md -docs/source/en/model_doc/xmod.md -docs/source/en/model_doc/yolos.md -docs/source/en/model_doc/yoso.md -docs/source/en/model_memory_anatomy.md -docs/source/en/model_sharing.md -docs/source/en/model_summary.md -docs/source/en/multilingual.md -docs/source/en/notebooks.md -docs/source/en/pad_truncation.md -docs/source/en/peft.md -docs/source/en/perf_hardware.md -docs/source/en/perf_infer_cpu.md -docs/source/en/perf_infer_gpu_one.md -docs/source/en/perf_torch_compile.md -docs/source/en/perf_train_cpu.md -docs/source/en/perf_train_cpu_many.md -docs/source/en/perf_train_gpu_many.md -docs/source/en/perf_train_gpu_one.md -docs/source/en/perf_train_special.md -docs/source/en/perf_train_tpu.md -docs/source/en/perf_train_tpu_tf.md -docs/source/en/performance.md -docs/source/en/perplexity.md -docs/source/en/philosophy.md -docs/source/en/pipeline_webserver.md -docs/source/en/pr_checks.md -docs/source/en/preprocessing.md -docs/source/en/run_scripts.md -docs/source/en/sagemaker.md -docs/source/en/serialization.md -docs/source/en/tasks/asr.md -docs/source/en/tasks/audio_classification.md -docs/source/en/tasks/document_question_answering.md -docs/source/en/tasks/idefics.md # causes other tests to fail -docs/source/en/tasks/image_captioning.md -docs/source/en/tasks/image_classification.md -docs/source/en/tasks/language_modeling.md -docs/source/en/tasks/masked_language_modeling.md -docs/source/en/tasks/monocular_depth_estimation.md -docs/source/en/tasks/multiple_choice.md -docs/source/en/tasks/object_detection.md -docs/source/en/tasks/question_answering.md -docs/source/en/tasks/semantic_segmentation.md -docs/source/en/tasks/sequence_classification.md -docs/source/en/tasks/summarization.md -docs/source/en/tasks/text-to-speech.md -docs/source/en/tasks/token_classification.md -docs/source/en/tasks/translation.md -docs/source/en/tasks/video_classification.md -docs/source/en/tasks/visual_question_answering.md -docs/source/en/tasks/zero_shot_image_classification.md -docs/source/en/tasks/zero_shot_object_detection.md -docs/source/en/tasks_explained.md -docs/source/en/tf_xla.md -docs/source/en/tflite.md -docs/source/en/tokenizer_summary.md -docs/source/en/torchscript.md 
-docs/source/en/training.md -docs/source/en/transformers_agents.md -docs/source/en/troubleshooting.md -src/transformers/activations.py -src/transformers/activations_tf.py -src/transformers/audio_utils.py -src/transformers/benchmark/benchmark.py -src/transformers/benchmark/benchmark_args.py -src/transformers/benchmark/benchmark_args_tf.py -src/transformers/benchmark/benchmark_args_utils.py -src/transformers/benchmark/benchmark_tf.py -src/transformers/benchmark/benchmark_utils.py -src/transformers/commands/add_new_model.py -src/transformers/commands/add_new_model_like.py -src/transformers/commands/convert.py -src/transformers/commands/download.py -src/transformers/commands/env.py -src/transformers/commands/lfs.py -src/transformers/commands/pt_to_tf.py -src/transformers/commands/run.py -src/transformers/commands/serving.py -src/transformers/commands/train.py -src/transformers/commands/transformers_cli.py -src/transformers/commands/user.py -src/transformers/configuration_utils.py -src/transformers/convert_graph_to_onnx.py -src/transformers/convert_pytorch_checkpoint_to_tf2.py -src/transformers/convert_slow_tokenizer.py -src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py -src/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py -src/transformers/data/data_collator.py -src/transformers/data/datasets/glue.py -src/transformers/data/datasets/language_modeling.py -src/transformers/data/datasets/squad.py -src/transformers/data/metrics/squad_metrics.py -src/transformers/data/processors/glue.py -src/transformers/data/processors/squad.py -src/transformers/data/processors/utils.py -src/transformers/data/processors/xnli.py -src/transformers/debug_utils.py -src/transformers/deepspeed.py -src/transformers/dependency_versions_check.py -src/transformers/dependency_versions_table.py -src/transformers/dynamic_module_utils.py -src/transformers/feature_extraction_sequence_utils.py -src/transformers/feature_extraction_utils.py -src/transformers/file_utils.py -src/transformers/hf_argparser.py -src/transformers/hyperparameter_search.py -src/transformers/image_processing_utils.py -src/transformers/image_transforms.py -src/transformers/image_utils.py -src/transformers/integrations/bitsandbytes.py -src/transformers/integrations/deepspeed.py -src/transformers/integrations/integration_utils.py -src/transformers/integrations/peft.py -src/transformers/keras_callbacks.py -src/transformers/modelcard.py -src/transformers/modeling_flax_outputs.py -src/transformers/modeling_flax_pytorch_utils.py -src/transformers/modeling_flax_utils.py -src/transformers/modeling_outputs.py -src/transformers/modeling_tf_outputs.py -src/transformers/modeling_tf_pytorch_utils.py -src/transformers/modeling_tf_utils.py -src/transformers/modeling_utils.py -src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/albert/modeling_flax_albert.py -src/transformers/models/align/configuration_align.py -src/transformers/models/align/convert_align_tf_to_hf.py -src/transformers/models/align/modeling_align.py -src/transformers/models/altclip/configuration_altclip.py -src/transformers/models/altclip/modeling_altclip.py -src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py -src/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py -src/transformers/models/auto/auto_factory.py -src/transformers/models/auto/configuration_auto.py -src/transformers/models/auto/modeling_auto.py 
-src/transformers/models/auto/modeling_flax_auto.py -src/transformers/models/auto/modeling_tf_auto.py -src/transformers/models/autoformer/configuration_autoformer.py -src/transformers/models/autoformer/modeling_autoformer.py -src/transformers/models/bark/convert_suno_to_hf.py -src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/bart/modeling_flax_bart.py -src/transformers/models/bart/modeling_tf_bart.py -src/transformers/models/beit/convert_beit_unilm_to_pytorch.py -src/transformers/models/beit/modeling_flax_beit.py -src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py -src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py -src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py -src/transformers/models/bert/modeling_flax_bert.py -src/transformers/models/bert_generation/modeling_bert_generation.py -src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py -src/transformers/models/big_bird/modeling_flax_big_bird.py -src/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py -src/transformers/models/biogpt/configuration_biogpt.py -src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/biogpt/modeling_biogpt.py -src/transformers/models/bit/configuration_bit.py -src/transformers/models/bit/convert_bit_to_pytorch.py -src/transformers/models/bit/modeling_bit.py -src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/blenderbot/modeling_flax_blenderbot.py -src/transformers/models/blenderbot/modeling_tf_blenderbot.py -src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py -src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py -src/transformers/models/blip/configuration_blip.py -src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py -src/transformers/models/blip/modeling_blip_text.py -src/transformers/models/blip/modeling_tf_blip_text.py -src/transformers/models/blip_2/configuration_blip_2.py -src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py -src/transformers/models/blip_2/modeling_blip_2.py # causes other tests to fail -src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py -src/transformers/models/bloom/modeling_bloom.py -src/transformers/models/bloom/modeling_flax_bloom.py -src/transformers/models/bridgetower/configuration_bridgetower.py -src/transformers/models/bridgetower/modeling_bridgetower.py -src/transformers/models/bros/convert_bros_to_pytorch.py -src/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py -src/transformers/models/camembert/modeling_camembert.py -src/transformers/models/camembert/modeling_tf_camembert.py -src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py -src/transformers/models/chinese_clip/configuration_chinese_clip.py -src/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py -src/transformers/models/chinese_clip/modeling_chinese_clip.py -src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py -src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py -src/transformers/models/clip/modeling_clip.py -src/transformers/models/clip/modeling_flax_clip.py 
-src/transformers/models/clip/modeling_tf_clip.py -src/transformers/models/clipseg/configuration_clipseg.py -src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py -src/transformers/models/codegen/modeling_codegen.py -src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py -src/transformers/models/convbert/modeling_convbert.py -src/transformers/models/convbert/modeling_tf_convbert.py -src/transformers/models/convnext/convert_convnext_to_pytorch.py -src/transformers/models/convnext/modeling_tf_convnext.py -src/transformers/models/convnextv2/configuration_convnextv2.py -src/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py -src/transformers/models/convnextv2/modeling_convnextv2.py -src/transformers/models/cpmant/configuration_cpmant.py -src/transformers/models/cpmant/modeling_cpmant.py -src/transformers/models/cpmant/tokenization_cpmant.py -src/transformers/models/ctrl/modeling_tf_ctrl.py -src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/cvt/modeling_tf_cvt.py -src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/data2vec/modeling_data2vec_text.py -src/transformers/models/data2vec/modeling_tf_data2vec_vision.py -src/transformers/models/deberta/modeling_tf_deberta.py -src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py -src/transformers/models/decision_transformer/modeling_decision_transformer.py -src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py -src/transformers/models/deformable_detr/load_custom.py -src/transformers/models/deit/convert_deit_timm_to_pytorch.py -src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py -src/transformers/models/deprecated/mctct/configuration_mctct.py -src/transformers/models/deprecated/mctct/feature_extraction_mctct.py -src/transformers/models/deprecated/mctct/modeling_mctct.py -src/transformers/models/deprecated/mctct/processing_mctct.py -src/transformers/models/deprecated/mmbt/configuration_mmbt.py -src/transformers/models/deprecated/mmbt/modeling_mmbt.py -src/transformers/models/deprecated/open_llama/configuration_open_llama.py -src/transformers/models/deprecated/open_llama/modeling_open_llama.py -src/transformers/models/deprecated/retribert/configuration_retribert.py -src/transformers/models/deprecated/retribert/modeling_retribert.py -src/transformers/models/deprecated/retribert/tokenization_retribert.py -src/transformers/models/deprecated/retribert/tokenization_retribert_fast.py -src/transformers/models/deprecated/tapex/tokenization_tapex.py -src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py -src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py -src/transformers/models/deprecated/van/configuration_van.py -src/transformers/models/deprecated/van/convert_van_to_pytorch.py -src/transformers/models/deprecated/van/modeling_van.py 
-src/transformers/models/deta/convert_deta_resnet_to_pytorch.py -src/transformers/models/deta/convert_deta_swin_to_pytorch.py -src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/detr/convert_detr_to_pytorch.py -src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/dinov2/configuration_dinov2.py -src/transformers/models/dinov2/convert_dinov2_to_hf.py -src/transformers/models/dinov2/modeling_dinov2.py -src/transformers/models/distilbert/modeling_distilbert.py -src/transformers/models/distilbert/modeling_flax_distilbert.py -src/transformers/models/distilbert/modeling_tf_distilbert.py -src/transformers/models/dit/convert_dit_unilm_to_pytorch.py -src/transformers/models/donut/configuration_donut_swin.py -src/transformers/models/donut/convert_donut_to_pytorch.py -src/transformers/models/donut/modeling_donut_swin.py -src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py -src/transformers/models/dpr/modeling_dpr.py -src/transformers/models/dpr/modeling_tf_dpr.py -src/transformers/models/dpt/configuration_dpt.py -src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py -src/transformers/models/dpt/convert_dpt_to_pytorch.py -src/transformers/models/efficientformer/configuration_efficientformer.py -src/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/efficientformer/modeling_efficientformer.py -src/transformers/models/efficientnet/configuration_efficientnet.py -src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py -src/transformers/models/efficientnet/modeling_efficientnet.py -src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py -src/transformers/models/electra/modeling_flax_electra.py -src/transformers/models/encodec/configuration_encodec.py -src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py -src/transformers/models/encoder_decoder/modeling_encoder_decoder.py -src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py -src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py -src/transformers/models/ernie/modeling_ernie.py -src/transformers/models/esm/configuration_esm.py -src/transformers/models/esm/convert_esm.py -src/transformers/models/esm/modeling_esm.py -src/transformers/models/esm/modeling_esmfold.py -src/transformers/models/esm/modeling_tf_esm.py -src/transformers/models/esm/openfold_utils/chunk_utils.py -src/transformers/models/esm/openfold_utils/data_transforms.py -src/transformers/models/esm/openfold_utils/feats.py -src/transformers/models/esm/openfold_utils/loss.py -src/transformers/models/esm/openfold_utils/protein.py -src/transformers/models/esm/openfold_utils/residue_constants.py -src/transformers/models/esm/openfold_utils/rigid_utils.py -src/transformers/models/esm/openfold_utils/tensor_utils.py -src/transformers/models/falcon/configuration_falcon.py -src/transformers/models/falcon/modeling_falcon.py -src/transformers/models/flaubert/configuration_flaubert.py -src/transformers/models/flaubert/modeling_flaubert.py -src/transformers/models/flaubert/modeling_tf_flaubert.py -src/transformers/models/flava/convert_dalle_to_flava_codebook.py -src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py -src/transformers/models/flava/modeling_flava.py -src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py 
-src/transformers/models/fnet/modeling_fnet.py -src/transformers/models/focalnet/configuration_focalnet.py -src/transformers/models/focalnet/convert_focalnet_to_hf_format.py -src/transformers/models/focalnet/modeling_focalnet.py -src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/fsmt/modeling_fsmt.py -src/transformers/models/funnel/configuration_funnel.py -src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py -src/transformers/models/funnel/modeling_funnel.py -src/transformers/models/funnel/modeling_tf_funnel.py -src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py -src/transformers/models/git/configuration_git.py -src/transformers/models/git/convert_git_to_pytorch.py -src/transformers/models/glpn/configuration_glpn.py -src/transformers/models/glpn/convert_glpn_to_pytorch.py -src/transformers/models/gpt2/CONVERSION.md -src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py -src/transformers/models/gpt2/modeling_flax_gpt2.py -src/transformers/models/gpt2/modeling_tf_gpt2.py -src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py -src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py -src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py -src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py -src/transformers/models/gpt_neo/modeling_gpt_neo.py -src/transformers/models/gpt_neox/modeling_gpt_neox.py -src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py -src/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py -src/transformers/models/gptj/configuration_gptj.py -src/transformers/models/gptj/modeling_flax_gptj.py -src/transformers/models/gptj/modeling_tf_gptj.py -src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py -src/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py -src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py -src/transformers/models/graphormer/collating_graphormer.py -src/transformers/models/graphormer/configuration_graphormer.py -src/transformers/models/graphormer/modeling_graphormer.py -src/transformers/models/groupvit/configuration_groupvit.py -src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py -src/transformers/models/hubert/configuration_hubert.py -src/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/hubert/modeling_tf_hubert.py -src/transformers/models/ibert/configuration_ibert.py -src/transformers/models/ibert/modeling_ibert.py -src/transformers/models/ibert/quant_modules.py -src/transformers/models/idefics/configuration_idefics.py -src/transformers/models/idefics/image_processing_idefics.py -src/transformers/models/idefics/modeling_idefics.py -src/transformers/models/idefics/perceiver.py -src/transformers/models/idefics/processing_idefics.py -src/transformers/models/idefics/vision.py -src/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py -src/transformers/models/informer/configuration_informer.py -src/transformers/models/informer/modeling_informer.py -src/transformers/models/instructblip/configuration_instructblip.py -src/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py -src/transformers/models/instructblip/modeling_instructblip.py 
-src/transformers/models/instructblip/processing_instructblip.py -src/transformers/models/jukebox/configuration_jukebox.py -src/transformers/models/jukebox/convert_jukebox.py -src/transformers/models/jukebox/modeling_jukebox.py -src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/led/configuration_led.py -src/transformers/models/led/modeling_led.py -src/transformers/models/led/modeling_tf_led.py -src/transformers/models/levit/convert_levit_timm_to_pytorch.py -src/transformers/models/levit/modeling_levit.py -src/transformers/models/lilt/configuration_lilt.py -src/transformers/models/llama/configuration_llama.py -src/transformers/models/llama/convert_llama_weights_to_hf.py -src/transformers/models/llama/modeling_llama.py -src/transformers/models/longformer/configuration_longformer.py -src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py -src/transformers/models/longt5/configuration_longt5.py -src/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py -src/transformers/models/longt5/modeling_flax_longt5.py -src/transformers/models/luke/configuration_luke.py -src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/luke/modeling_luke.py -src/transformers/models/lxmert/configuration_lxmert.py -src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/lxmert/modeling_lxmert.py -src/transformers/models/lxmert/modeling_tf_lxmert.py -src/transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py -src/transformers/models/m2m_100/modeling_m2m_100.py -src/transformers/models/marian/configuration_marian.py -src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py -src/transformers/models/marian/convert_marian_to_pytorch.py -src/transformers/models/marian/modeling_flax_marian.py -src/transformers/models/marian/modeling_tf_marian.py -src/transformers/models/markuplm/configuration_markuplm.py -src/transformers/models/markuplm/feature_extraction_markuplm.py -src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/maskformer/configuration_maskformer_swin.py -src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/maskformer/convert_maskformer_resnet_to_pytorch.py -src/transformers/models/maskformer/convert_maskformer_swin_to_pytorch.py -src/transformers/models/maskformer/modeling_maskformer_swin.py -src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py -src/transformers/models/mbart/modeling_flax_mbart.py -src/transformers/models/mega/configuration_mega.py -src/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/mega/modeling_mega.py -src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py -src/transformers/models/megatron_bert/modeling_megatron_bert.py -src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py -src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py -src/transformers/models/mgp_str/configuration_mgp_str.py -src/transformers/models/mgp_str/modeling_mgp_str.py -src/transformers/models/mistral/configuration_mistral.py -src/transformers/models/mistral/modeling_mistral.py -src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py 
-src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py -src/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py -src/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py -src/transformers/models/mobilevit/configuration_mobilevit.py -src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py -src/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py -src/transformers/models/mpnet/configuration_mpnet.py -src/transformers/models/mpnet/modeling_mpnet.py -src/transformers/models/mpnet/modeling_tf_mpnet.py -src/transformers/models/mpt/configuration_mpt.py -src/transformers/models/mpt/modeling_mpt.py -src/transformers/models/mra/configuration_mra.py -src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py -src/transformers/models/mra/modeling_mra.py -src/transformers/models/mt5/configuration_mt5.py -src/transformers/models/mt5/modeling_flax_mt5.py -src/transformers/models/mt5/modeling_mt5.py -src/transformers/models/mt5/modeling_tf_mt5.py -src/transformers/models/musicgen/convert_musicgen_transformers.py -src/transformers/models/mvp/modeling_mvp.py -src/transformers/models/nezha/modeling_nezha.py -src/transformers/models/nllb_moe/configuration_nllb_moe.py -src/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py -src/transformers/models/nllb_moe/modeling_nllb_moe.py -src/transformers/models/nougat/convert_nougat_to_hf.py -src/transformers/models/nystromformer/configuration_nystromformer.py -src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/nystromformer/modeling_nystromformer.py -src/transformers/models/oneformer/convert_to_hf_oneformer.py -src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py -src/transformers/models/openai/modeling_openai.py -src/transformers/models/openai/modeling_tf_openai.py -src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/opt/modeling_flax_opt.py -src/transformers/models/owlvit/configuration_owlvit.py -src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py -src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py -src/transformers/models/pegasus/modeling_flax_pegasus.py -src/transformers/models/pegasus/modeling_tf_pegasus.py -src/transformers/models/pegasus_x/modeling_pegasus_x.py -src/transformers/models/perceiver/configuration_perceiver.py -src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py -src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py -src/transformers/models/persimmon/modeling_persimmon.py -src/transformers/models/pix2struct/configuration_pix2struct.py -src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py -src/transformers/models/pix2struct/image_processing_pix2struct.py -src/transformers/models/pix2struct/processing_pix2struct.py -src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py -src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py -src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py -src/transformers/models/pop2piano/feature_extraction_pop2piano.py -src/transformers/models/pop2piano/processing_pop2piano.py -src/transformers/models/pop2piano/tokenization_pop2piano.py 
-src/transformers/models/prophetnet/configuration_prophetnet.py -src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/prophetnet/modeling_prophetnet.py -src/transformers/models/pvt/configuration_pvt.py -src/transformers/models/pvt/convert_pvt_to_pytorch.py -src/transformers/models/pvt/image_processing_pvt.py -src/transformers/models/pvt/modeling_pvt.py -src/transformers/models/qdqbert/configuration_qdqbert.py -src/transformers/models/qdqbert/modeling_qdqbert.py -src/transformers/models/rag/configuration_rag.py -src/transformers/models/rag/modeling_rag.py -src/transformers/models/rag/modeling_tf_rag.py -src/transformers/models/rag/retrieval_rag.py -src/transformers/models/realm/modeling_realm.py -src/transformers/models/realm/retrieval_realm.py -src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py -src/transformers/models/regnet/configuration_regnet.py -src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py -src/transformers/models/regnet/convert_regnet_to_pytorch.py -src/transformers/models/regnet/modeling_flax_regnet.py -src/transformers/models/rembert/configuration_rembert.py -src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py -src/transformers/models/rembert/modeling_rembert.py -src/transformers/models/rembert/modeling_tf_rembert.py -src/transformers/models/resnet/convert_resnet_to_pytorch.py -src/transformers/models/resnet/modeling_flax_resnet.py -src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/roberta/modeling_flax_roberta.py -src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py -src/transformers/models/roc_bert/configuration_roc_bert.py -src/transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py -src/transformers/models/roformer/modeling_flax_roformer.py -src/transformers/models/roformer/modeling_roformer.py -src/transformers/models/roformer/modeling_tf_roformer.py -src/transformers/models/rwkv/configuration_rwkv.py -src/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py -src/transformers/models/rwkv/modeling_rwkv.py -src/transformers/models/sam/configuration_sam.py -src/transformers/models/sam/convert_sam_original_to_hf_format.py -src/transformers/models/sam/image_processing_sam.py -src/transformers/models/sam/modeling_sam.py -src/transformers/models/sam/modeling_tf_sam.py -src/transformers/models/sam/processing_sam.py -src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py -src/transformers/models/segformer/configuration_segformer.py -src/transformers/models/segformer/convert_segformer_original_to_pytorch.py -src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/sew_d/convert_sew_d_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py -src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py -src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py -src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py -src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py -src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py 
-src/transformers/models/speecht5/configuration_speecht5.py -src/transformers/models/speecht5/convert_hifigan.py -src/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/speecht5/number_normalizer.py -src/transformers/models/splinter/configuration_splinter.py -src/transformers/models/splinter/modeling_splinter.py -src/transformers/models/squeezebert/modeling_squeezebert.py -src/transformers/models/swiftformer/configuration_swiftformer.py -src/transformers/models/swiftformer/convert_swiftformer_original_to_hf.py -src/transformers/models/swiftformer/modeling_swiftformer.py -src/transformers/models/swin/convert_swin_simmim_to_pytorch.py -src/transformers/models/swin/convert_swin_timm_to_pytorch.py -src/transformers/models/swin/modeling_tf_swin.py -src/transformers/models/swin2sr/configuration_swin2sr.py -src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py -src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py -src/transformers/models/swinv2/modeling_swinv2.py -src/transformers/models/switch_transformers/configuration_switch_transformers.py -src/transformers/models/switch_transformers/convert_big_switch.py -src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py -src/transformers/models/switch_transformers/modeling_switch_transformers.py -src/transformers/models/t5/configuration_t5.py -src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py -src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py -src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py -src/transformers/models/t5/modeling_flax_t5.py -src/transformers/models/t5/modeling_t5.py -src/transformers/models/t5/modeling_tf_t5.py -src/transformers/models/table_transformer/configuration_table_transformer.py -src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/tapas/configuration_tapas.py -src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py -src/transformers/models/tapas/modeling_tapas.py -src/transformers/models/tapas/modeling_tf_tapas.py -src/transformers/models/timesformer/convert_timesformer_to_pytorch.py -src/transformers/models/timm_backbone/configuration_timm_backbone.py -src/transformers/models/timm_backbone/modeling_timm_backbone.py -src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py -src/transformers/models/transfo_xl/modeling_transfo_xl.py -src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py -src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py -src/transformers/models/tvlt/configuration_tvlt.py -src/transformers/models/tvlt/modeling_tvlt.py -src/transformers/models/umt5/configuration_umt5.py -src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py -src/transformers/models/umt5/modeling_umt5.py -src/transformers/models/unispeech/convert_unispeech_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/unispeech_sat/configuration_unispeech_sat.py -src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/unispeech_sat/convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/upernet/configuration_upernet.py 
-src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py -src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py -src/transformers/models/videomae/configuration_videomae.py -src/transformers/models/videomae/convert_videomae_to_pytorch.py -src/transformers/models/vilt/configuration_vilt.py -src/transformers/models/vilt/convert_vilt_original_to_pytorch.py -src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py -src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py -src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py -src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py -src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/visual_bert/modeling_visual_bert.py -src/transformers/models/vit/convert_dino_to_pytorch.py -src/transformers/models/vit/convert_vit_timm_to_pytorch.py -src/transformers/models/vit/modeling_flax_vit.py -src/transformers/models/vit_hybrid/configuration_vit_hybrid.py -src/transformers/models/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py -src/transformers/models/vit_hybrid/modeling_vit_hybrid.py -src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py -src/transformers/models/vit_mae/modeling_tf_vit_mae.py -src/transformers/models/vit_msn/configuration_vit_msn.py -src/transformers/models/vit_msn/convert_msn_to_pytorch.py -src/transformers/models/vivit/configuration_vivit.py -src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py -src/transformers/models/vivit/image_processing_vivit.py -src/transformers/models/vivit/modeling_vivit.py -src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py -src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py -src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/wavlm/convert_wavlm_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/wavlm/convert_wavlm_original_s3prl_checkpoint_to_pytorch.py -src/transformers/models/whisper/convert_openai_to_hf.py -src/transformers/models/whisper/english_normalizer.py -src/transformers/models/whisper/modeling_flax_whisper.py -src/transformers/models/x_clip/configuration_x_clip.py -src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py -src/transformers/models/xglm/configuration_xglm.py -src/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py -src/transformers/models/xglm/modeling_flax_xglm.py -src/transformers/models/xglm/modeling_tf_xglm.py -src/transformers/models/xglm/modeling_xglm.py -src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/xlm/modeling_tf_xlm.py -src/transformers/models/xlm/modeling_xlm.py -src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py -src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py -src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py -src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py -src/transformers/models/xlm_roberta/modeling_xlm_roberta.py -src/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py 
-src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py -src/transformers/models/xlnet/modeling_tf_xlnet.py -src/transformers/models/xlnet/modeling_xlnet.py -src/transformers/models/xmod/convert_xmod_original_pytorch_checkpoint_to_pytorch.py -src/transformers/models/yolos/convert_yolos_to_pytorch.py -src/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py -src/transformers/models/yoso/modeling_yoso.py -src/transformers/onnx/__main__.py -src/transformers/onnx/config.py -src/transformers/onnx/convert.py -src/transformers/onnx/features.py -src/transformers/onnx/utils.py -src/transformers/optimization.py -src/transformers/optimization_tf.py -src/transformers/pipelines/audio_classification.py -src/transformers/pipelines/audio_utils.py -src/transformers/pipelines/automatic_speech_recognition.py -src/transformers/pipelines/base.py -src/transformers/pipelines/conversational.py -src/transformers/pipelines/depth_estimation.py -src/transformers/pipelines/document_question_answering.py -src/transformers/pipelines/feature_extraction.py -src/transformers/pipelines/fill_mask.py -src/transformers/pipelines/image_classification.py -src/transformers/pipelines/image_segmentation.py -src/transformers/pipelines/image_to_text.py -src/transformers/pipelines/mask_generation.py -src/transformers/pipelines/object_detection.py -src/transformers/pipelines/pt_utils.py -src/transformers/pipelines/question_answering.py -src/transformers/pipelines/table_question_answering.py -src/transformers/pipelines/text_classification.py -src/transformers/pipelines/token_classification.py -src/transformers/pipelines/video_classification.py -src/transformers/pipelines/visual_question_answering.py -src/transformers/pipelines/zero_shot_audio_classification.py -src/transformers/pipelines/zero_shot_classification.py -src/transformers/pipelines/zero_shot_image_classification.py -src/transformers/pipelines/zero_shot_object_detection.py -src/transformers/processing_utils.py -src/transformers/pytorch_utils.py -src/transformers/sagemaker/trainer_sm.py -src/transformers/sagemaker/training_args_sm.py -src/transformers/testing_utils.py -src/transformers/tf_utils.py -src/transformers/time_series_utils.py -src/transformers/tokenization_utils.py -src/transformers/tokenization_utils_base.py -src/transformers/tokenization_utils_fast.py -src/transformers/tools/agent_types.py -src/transformers/tools/agents.py -src/transformers/tools/base.py -src/transformers/tools/document_question_answering.py -src/transformers/tools/evaluate_agent.py -src/transformers/tools/image_captioning.py -src/transformers/tools/image_question_answering.py -src/transformers/tools/image_segmentation.py -src/transformers/tools/prompts.py -src/transformers/tools/python_interpreter.py -src/transformers/tools/speech_to_text.py -src/transformers/tools/text_classification.py -src/transformers/tools/text_question_answering.py -src/transformers/tools/text_summarization.py -src/transformers/tools/text_to_speech.py -src/transformers/tools/translation.py -src/transformers/trainer.py -src/transformers/trainer_callback.py -src/transformers/trainer_pt_utils.py -src/transformers/trainer_seq2seq.py -src/transformers/trainer_tf.py -src/transformers/trainer_utils.py -src/transformers/training_args.py -src/transformers/training_args_seq2seq.py -src/transformers/training_args_tf.py -src/transformers/utils/backbone_utils.py -src/transformers/utils/bitsandbytes.py -src/transformers/utils/constants.py -src/transformers/utils/doc.py 
-src/transformers/utils/dummy_detectron2_objects.py -src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py -src/transformers/utils/dummy_flax_objects.py -src/transformers/utils/dummy_keras_nlp_objects.py -src/transformers/utils/dummy_music_objects.py -src/transformers/utils/dummy_pt_objects.py -src/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py -src/transformers/utils/dummy_sentencepiece_objects.py -src/transformers/utils/dummy_speech_objects.py -src/transformers/utils/dummy_tensorflow_text_objects.py -src/transformers/utils/dummy_tf_objects.py -src/transformers/utils/dummy_tokenizers_objects.py -src/transformers/utils/dummy_vision_objects.py -src/transformers/utils/fx.py -src/transformers/utils/generic.py -src/transformers/utils/hp_naming.py -src/transformers/utils/hub.py -src/transformers/utils/import_utils.py -src/transformers/utils/logging.py -src/transformers/utils/model_parallel_utils.py -src/transformers/utils/notebook.py -src/transformers/utils/peft_utils.py -src/transformers/utils/quantization_config.py -src/transformers/utils/sentencepiece_model_pb2.py -src/transformers/utils/sentencepiece_model_pb2_new.py -src/transformers/utils/versions.py +docs/source/en/_config.py +docs/source/en/accelerate.md +docs/source/en/add_new_model.md +docs/source/en/add_new_pipeline.md +docs/source/en/add_tensorflow_model.md +docs/source/en/attention.md +docs/source/en/benchmarks.md +docs/source/en/bertology.md +docs/source/en/big_models.md +docs/source/en/community.md +docs/source/en/contributing.md +docs/source/en/create_a_model.md +docs/source/en/custom_models.md +docs/source/en/custom_tools.md +docs/source/en/debugging.md +docs/source/en/fast_tokenizers.md +docs/source/en/glossary.md +docs/source/en/hpo_train.md +docs/source/en/index.md +docs/source/en/installation.md +docs/source/en/internal/audio_utils.md +docs/source/en/internal/file_utils.md +docs/source/en/internal/image_processing_utils.md +docs/source/en/internal/modeling_utils.md +docs/source/en/internal/pipelines_utils.md +docs/source/en/internal/time_series_utils.md +docs/source/en/internal/tokenization_utils.md +docs/source/en/internal/trainer_utils.md +docs/source/en/llm_tutorial.md +docs/source/en/main_classes/agent.md +docs/source/en/main_classes/callback.md +docs/source/en/main_classes/configuration.md +docs/source/en/main_classes/data_collator.md +docs/source/en/main_classes/deepspeed.md +docs/source/en/main_classes/feature_extractor.md +docs/source/en/main_classes/image_processor.md +docs/source/en/main_classes/keras_callbacks.md +docs/source/en/main_classes/logging.md +docs/source/en/main_classes/model.md +docs/source/en/main_classes/onnx.md +docs/source/en/main_classes/optimizer_schedules.md +docs/source/en/main_classes/output.md +docs/source/en/main_classes/pipelines.md +docs/source/en/main_classes/processors.md +docs/source/en/main_classes/quantization.md +docs/source/en/main_classes/tokenizer.md +docs/source/en/main_classes/trainer.md +docs/source/en/model_doc/albert.md +docs/source/en/model_doc/align.md +docs/source/en/model_doc/altclip.md +docs/source/en/model_doc/audio-spectrogram-transformer.md +docs/source/en/model_doc/auto.md +docs/source/en/model_doc/autoformer.md +docs/source/en/model_doc/bark.md +docs/source/en/model_doc/bart.md +docs/source/en/model_doc/barthez.md +docs/source/en/model_doc/bartpho.md +docs/source/en/model_doc/beit.md +docs/source/en/model_doc/bert-generation.md +docs/source/en/model_doc/bert-japanese.md +docs/source/en/model_doc/bert.md 
+docs/source/en/model_doc/bertweet.md +docs/source/en/model_doc/big_bird.md +docs/source/en/model_doc/bigbird_pegasus.md +docs/source/en/model_doc/biogpt.md +docs/source/en/model_doc/bit.md +docs/source/en/model_doc/blenderbot-small.md +docs/source/en/model_doc/blenderbot.md +docs/source/en/model_doc/blip-2.md +docs/source/en/model_doc/blip.md +docs/source/en/model_doc/bloom.md +docs/source/en/model_doc/bort.md +docs/source/en/model_doc/bridgetower.md +docs/source/en/model_doc/camembert.md +docs/source/en/model_doc/canine.md +docs/source/en/model_doc/chinese_clip.md +docs/source/en/model_doc/clap.md +docs/source/en/model_doc/clip.md +docs/source/en/model_doc/clipseg.md +docs/source/en/model_doc/codegen.md +docs/source/en/model_doc/conditional_detr.md +docs/source/en/model_doc/convbert.md +docs/source/en/model_doc/convnext.md +docs/source/en/model_doc/convnextv2.md +docs/source/en/model_doc/cpm.md +docs/source/en/model_doc/cpmant.md +docs/source/en/model_doc/ctrl.md +docs/source/en/model_doc/cvt.md +docs/source/en/model_doc/data2vec.md +docs/source/en/model_doc/deberta-v2.md +docs/source/en/model_doc/deberta.md +docs/source/en/model_doc/decision_transformer.md +docs/source/en/model_doc/deformable_detr.md +docs/source/en/model_doc/deit.md +docs/source/en/model_doc/deplot.md +docs/source/en/model_doc/deta.md +docs/source/en/model_doc/detr.md +docs/source/en/model_doc/dialogpt.md +docs/source/en/model_doc/dinat.md +docs/source/en/model_doc/dinov2.md +docs/source/en/model_doc/distilbert.md +docs/source/en/model_doc/dit.md +docs/source/en/model_doc/dpr.md +docs/source/en/model_doc/dpt.md +docs/source/en/model_doc/efficientformer.md +docs/source/en/model_doc/efficientnet.md +docs/source/en/model_doc/electra.md +docs/source/en/model_doc/encodec.md +docs/source/en/model_doc/ernie.md +docs/source/en/model_doc/ernie_m.md +docs/source/en/model_doc/esm.md +docs/source/en/model_doc/flan-t5.md +docs/source/en/model_doc/flan-ul2.md +docs/source/en/model_doc/flaubert.md +docs/source/en/model_doc/flava.md +docs/source/en/model_doc/fnet.md +docs/source/en/model_doc/focalnet.md +docs/source/en/model_doc/fsmt.md +docs/source/en/model_doc/funnel.md +docs/source/en/model_doc/git.md +docs/source/en/model_doc/glpn.md +docs/source/en/model_doc/gpt-sw3.md +docs/source/en/model_doc/gpt2.md +docs/source/en/model_doc/gpt_bigcode.md +docs/source/en/model_doc/gpt_neo.md +docs/source/en/model_doc/gpt_neox.md +docs/source/en/model_doc/gpt_neox_japanese.md +docs/source/en/model_doc/gptj.md +docs/source/en/model_doc/gptsan-japanese.md +docs/source/en/model_doc/graphormer.md +docs/source/en/model_doc/groupvit.md +docs/source/en/model_doc/herbert.md +docs/source/en/model_doc/hubert.md +docs/source/en/model_doc/ibert.md +docs/source/en/model_doc/idefics.md +docs/source/en/model_doc/imagegpt.md +docs/source/en/model_doc/informer.md +docs/source/en/model_doc/instructblip.md +docs/source/en/model_doc/jukebox.md +docs/source/en/model_doc/layoutlm.md +docs/source/en/model_doc/layoutlmv2.md +docs/source/en/model_doc/layoutlmv3.md +docs/source/en/model_doc/layoutxlm.md +docs/source/en/model_doc/led.md +docs/source/en/model_doc/levit.md +docs/source/en/model_doc/lilt.md +docs/source/en/model_doc/llama.md +docs/source/en/model_doc/llama2.md +docs/source/en/model_doc/longformer.md +docs/source/en/model_doc/longt5.md +docs/source/en/model_doc/luke.md +docs/source/en/model_doc/lxmert.md +docs/source/en/model_doc/m2m_100.md +docs/source/en/model_doc/marian.md +docs/source/en/model_doc/mask2former.md +docs/source/en/model_doc/maskformer.md 
+docs/source/en/model_doc/matcha.md +docs/source/en/model_doc/mbart.md +docs/source/en/model_doc/mctct.md +docs/source/en/model_doc/mega.md +docs/source/en/model_doc/megatron-bert.md +docs/source/en/model_doc/megatron_gpt2.md +docs/source/en/model_doc/mgp-str.md +docs/source/en/model_doc/mistral.md +docs/source/en/model_doc/mluke.md +docs/source/en/model_doc/mms.md +docs/source/en/model_doc/mobilebert.md +docs/source/en/model_doc/mobilenet_v1.md +docs/source/en/model_doc/mobilenet_v2.md +docs/source/en/model_doc/mobilevit.md +docs/source/en/model_doc/mobilevitv2.md +docs/source/en/model_doc/mpnet.md +docs/source/en/model_doc/mpt.md +docs/source/en/model_doc/mra.md +docs/source/en/model_doc/mt5.md +docs/source/en/model_doc/musicgen.md +docs/source/en/model_doc/mvp.md +docs/source/en/model_doc/nat.md +docs/source/en/model_doc/nezha.md +docs/source/en/model_doc/nllb-moe.md +docs/source/en/model_doc/nllb.md +docs/source/en/model_doc/nystromformer.md +docs/source/en/model_doc/oneformer.md +docs/source/en/model_doc/open-llama.md +docs/source/en/model_doc/openai-gpt.md +docs/source/en/model_doc/opt.md +docs/source/en/model_doc/owlvit.md +docs/source/en/model_doc/pegasus.md +docs/source/en/model_doc/pegasus_x.md +docs/source/en/model_doc/perceiver.md +docs/source/en/model_doc/phobert.md +docs/source/en/model_doc/pix2struct.md +docs/source/en/model_doc/plbart.md +docs/source/en/model_doc/poolformer.md +docs/source/en/model_doc/pop2piano.md +docs/source/en/model_doc/prophetnet.md +docs/source/en/model_doc/pvt.md +docs/source/en/model_doc/qdqbert.md +docs/source/en/model_doc/rag.md +docs/source/en/model_doc/realm.md +docs/source/en/model_doc/reformer.md +docs/source/en/model_doc/regnet.md +docs/source/en/model_doc/rembert.md +docs/source/en/model_doc/resnet.md +docs/source/en/model_doc/retribert.md +docs/source/en/model_doc/roberta-prelayernorm.md +docs/source/en/model_doc/roberta.md +docs/source/en/model_doc/roc_bert.md +docs/source/en/model_doc/roformer.md +docs/source/en/model_doc/rwkv.md +docs/source/en/model_doc/sam.md +docs/source/en/model_doc/segformer.md +docs/source/en/model_doc/sew-d.md +docs/source/en/model_doc/sew.md +docs/source/en/model_doc/speech-encoder-decoder.md +docs/source/en/model_doc/speech_to_text_2.md +docs/source/en/model_doc/speecht5.md +docs/source/en/model_doc/splinter.md +docs/source/en/model_doc/squeezebert.md +docs/source/en/model_doc/swiftformer.md +docs/source/en/model_doc/swin.md +docs/source/en/model_doc/swin2sr.md +docs/source/en/model_doc/swinv2.md +docs/source/en/model_doc/table-transformer.md +docs/source/en/model_doc/tapas.md +docs/source/en/model_doc/time_series_transformer.md +docs/source/en/model_doc/timesformer.md +docs/source/en/model_doc/trajectory_transformer.md +docs/source/en/model_doc/transfo-xl.md +docs/source/en/model_doc/trocr.md +docs/source/en/model_doc/tvlt.md +docs/source/en/model_doc/ul2.md +docs/source/en/model_doc/umt5.md +docs/source/en/model_doc/unispeech-sat.md +docs/source/en/model_doc/unispeech.md +docs/source/en/model_doc/upernet.md +docs/source/en/model_doc/van.md +docs/source/en/model_doc/videomae.md +docs/source/en/model_doc/vilt.md +docs/source/en/model_doc/vision-encoder-decoder.md +docs/source/en/model_doc/vision-text-dual-encoder.md +docs/source/en/model_doc/visual_bert.md +docs/source/en/model_doc/vit.md +docs/source/en/model_doc/vit_hybrid.md +docs/source/en/model_doc/vit_mae.md +docs/source/en/model_doc/vit_msn.md +docs/source/en/model_doc/vivit.md +docs/source/en/model_doc/wav2vec2-conformer.md 
+docs/source/en/model_doc/wav2vec2.md +docs/source/en/model_doc/wav2vec2_phoneme.md +docs/source/en/model_doc/wavlm.md +docs/source/en/model_doc/whisper.md +docs/source/en/model_doc/xclip.md +docs/source/en/model_doc/xglm.md +docs/source/en/model_doc/xlm-prophetnet.md +docs/source/en/model_doc/xlm-roberta-xl.md +docs/source/en/model_doc/xlm-roberta.md +docs/source/en/model_doc/xlm-v.md +docs/source/en/model_doc/xlm.md +docs/source/en/model_doc/xlnet.md +docs/source/en/model_doc/xls_r.md +docs/source/en/model_doc/xlsr_wav2vec2.md +docs/source/en/model_doc/xmod.md +docs/source/en/model_doc/yolos.md +docs/source/en/model_doc/yoso.md +docs/source/en/model_memory_anatomy.md +docs/source/en/model_sharing.md +docs/source/en/model_summary.md +docs/source/en/multilingual.md +docs/source/en/notebooks.md +docs/source/en/pad_truncation.md +docs/source/en/peft.md +docs/source/en/perf_hardware.md +docs/source/en/perf_infer_cpu.md +docs/source/en/perf_infer_gpu_one.md +docs/source/en/perf_torch_compile.md +docs/source/en/perf_train_cpu.md +docs/source/en/perf_train_cpu_many.md +docs/source/en/perf_train_gpu_many.md +docs/source/en/perf_train_gpu_one.md +docs/source/en/perf_train_special.md +docs/source/en/perf_train_tpu.md +docs/source/en/perf_train_tpu_tf.md +docs/source/en/performance.md +docs/source/en/perplexity.md +docs/source/en/philosophy.md +docs/source/en/pipeline_webserver.md +docs/source/en/pr_checks.md +docs/source/en/preprocessing.md +docs/source/en/run_scripts.md +docs/source/en/sagemaker.md +docs/source/en/serialization.md +docs/source/en/tasks/asr.md +docs/source/en/tasks/audio_classification.md +docs/source/en/tasks/document_question_answering.md +docs/source/en/tasks/idefics.md # causes other tests to fail +docs/source/en/tasks/image_captioning.md +docs/source/en/tasks/image_classification.md +docs/source/en/tasks/language_modeling.md +docs/source/en/tasks/masked_language_modeling.md +docs/source/en/tasks/monocular_depth_estimation.md +docs/source/en/tasks/multiple_choice.md +docs/source/en/tasks/object_detection.md +docs/source/en/tasks/question_answering.md +docs/source/en/tasks/semantic_segmentation.md +docs/source/en/tasks/sequence_classification.md +docs/source/en/tasks/summarization.md +docs/source/en/tasks/text-to-speech.md +docs/source/en/tasks/token_classification.md +docs/source/en/tasks/translation.md +docs/source/en/tasks/video_classification.md +docs/source/en/tasks/visual_question_answering.md +docs/source/en/tasks/zero_shot_image_classification.md +docs/source/en/tasks/zero_shot_object_detection.md +docs/source/en/tasks_explained.md +docs/source/en/tf_xla.md +docs/source/en/tflite.md +docs/source/en/tokenizer_summary.md +docs/source/en/torchscript.md +docs/source/en/training.md +docs/source/en/transformers_agents.md +docs/source/en/troubleshooting.md +src/transformers/activations.py +src/transformers/activations_tf.py +src/transformers/audio_utils.py +src/transformers/benchmark/benchmark.py +src/transformers/benchmark/benchmark_args.py +src/transformers/benchmark/benchmark_args_tf.py +src/transformers/benchmark/benchmark_args_utils.py +src/transformers/benchmark/benchmark_tf.py +src/transformers/benchmark/benchmark_utils.py +src/transformers/commands/add_new_model.py +src/transformers/commands/add_new_model_like.py +src/transformers/commands/convert.py +src/transformers/commands/download.py +src/transformers/commands/env.py +src/transformers/commands/lfs.py +src/transformers/commands/pt_to_tf.py +src/transformers/commands/run.py +src/transformers/commands/serving.py 
+src/transformers/commands/train.py +src/transformers/commands/transformers_cli.py +src/transformers/commands/user.py +src/transformers/configuration_utils.py +src/transformers/convert_graph_to_onnx.py +src/transformers/convert_pytorch_checkpoint_to_tf2.py +src/transformers/convert_slow_tokenizer.py +src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py +src/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py +src/transformers/data/data_collator.py +src/transformers/data/datasets/glue.py +src/transformers/data/datasets/language_modeling.py +src/transformers/data/datasets/squad.py +src/transformers/data/metrics/squad_metrics.py +src/transformers/data/processors/glue.py +src/transformers/data/processors/squad.py +src/transformers/data/processors/utils.py +src/transformers/data/processors/xnli.py +src/transformers/debug_utils.py +src/transformers/deepspeed.py +src/transformers/dependency_versions_check.py +src/transformers/dependency_versions_table.py +src/transformers/dynamic_module_utils.py +src/transformers/feature_extraction_sequence_utils.py +src/transformers/feature_extraction_utils.py +src/transformers/file_utils.py +src/transformers/hf_argparser.py +src/transformers/hyperparameter_search.py +src/transformers/image_processing_utils.py +src/transformers/image_transforms.py +src/transformers/image_utils.py +src/transformers/integrations/bitsandbytes.py +src/transformers/integrations/deepspeed.py +src/transformers/integrations/integration_utils.py +src/transformers/integrations/peft.py +src/transformers/keras_callbacks.py +src/transformers/modelcard.py +src/transformers/modeling_flax_outputs.py +src/transformers/modeling_flax_pytorch_utils.py +src/transformers/modeling_flax_utils.py +src/transformers/modeling_outputs.py +src/transformers/modeling_tf_outputs.py +src/transformers/modeling_tf_pytorch_utils.py +src/transformers/modeling_tf_utils.py +src/transformers/modeling_utils.py +src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/albert/modeling_flax_albert.py +src/transformers/models/align/configuration_align.py +src/transformers/models/align/convert_align_tf_to_hf.py +src/transformers/models/align/modeling_align.py +src/transformers/models/altclip/configuration_altclip.py +src/transformers/models/altclip/modeling_altclip.py +src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +src/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py +src/transformers/models/auto/auto_factory.py +src/transformers/models/auto/configuration_auto.py +src/transformers/models/auto/modeling_auto.py +src/transformers/models/auto/modeling_flax_auto.py +src/transformers/models/auto/modeling_tf_auto.py +src/transformers/models/autoformer/configuration_autoformer.py +src/transformers/models/autoformer/modeling_autoformer.py +src/transformers/models/bark/convert_suno_to_hf.py +src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/bart/modeling_flax_bart.py +src/transformers/models/bart/modeling_tf_bart.py +src/transformers/models/beit/convert_beit_unilm_to_pytorch.py +src/transformers/models/beit/modeling_flax_beit.py +src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py +src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py 
+src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py +src/transformers/models/bert/modeling_flax_bert.py +src/transformers/models/bert_generation/modeling_bert_generation.py +src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py +src/transformers/models/big_bird/modeling_flax_big_bird.py +src/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py +src/transformers/models/biogpt/configuration_biogpt.py +src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/biogpt/modeling_biogpt.py +src/transformers/models/bit/configuration_bit.py +src/transformers/models/bit/convert_bit_to_pytorch.py +src/transformers/models/bit/modeling_bit.py +src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/blenderbot/modeling_flax_blenderbot.py +src/transformers/models/blenderbot/modeling_tf_blenderbot.py +src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py +src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +src/transformers/models/blip/configuration_blip.py +src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py +src/transformers/models/blip/modeling_blip_text.py +src/transformers/models/blip/modeling_tf_blip_text.py +src/transformers/models/blip_2/configuration_blip_2.py +src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py +src/transformers/models/blip_2/modeling_blip_2.py # causes other tests to fail +src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py +src/transformers/models/bloom/modeling_bloom.py +src/transformers/models/bloom/modeling_flax_bloom.py +src/transformers/models/bridgetower/configuration_bridgetower.py +src/transformers/models/bridgetower/modeling_bridgetower.py +src/transformers/models/bros/convert_bros_to_pytorch.py +src/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py +src/transformers/models/camembert/modeling_camembert.py +src/transformers/models/camembert/modeling_tf_camembert.py +src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py +src/transformers/models/chinese_clip/configuration_chinese_clip.py +src/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py +src/transformers/models/chinese_clip/modeling_chinese_clip.py +src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py +src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py +src/transformers/models/clip/modeling_clip.py +src/transformers/models/clip/modeling_flax_clip.py +src/transformers/models/clip/modeling_tf_clip.py +src/transformers/models/clipseg/configuration_clipseg.py +src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py +src/transformers/models/codegen/modeling_codegen.py +src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py +src/transformers/models/convbert/modeling_convbert.py +src/transformers/models/convbert/modeling_tf_convbert.py +src/transformers/models/convnext/convert_convnext_to_pytorch.py +src/transformers/models/convnext/modeling_tf_convnext.py +src/transformers/models/convnextv2/configuration_convnextv2.py +src/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py 
+src/transformers/models/convnextv2/modeling_convnextv2.py +src/transformers/models/cpmant/configuration_cpmant.py +src/transformers/models/cpmant/modeling_cpmant.py +src/transformers/models/cpmant/tokenization_cpmant.py +src/transformers/models/ctrl/modeling_tf_ctrl.py +src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/cvt/modeling_tf_cvt.py +src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/data2vec/modeling_data2vec_text.py +src/transformers/models/data2vec/modeling_tf_data2vec_vision.py +src/transformers/models/deberta/modeling_tf_deberta.py +src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +src/transformers/models/decision_transformer/modeling_decision_transformer.py +src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py +src/transformers/models/deformable_detr/load_custom.py +src/transformers/models/deit/convert_deit_timm_to_pytorch.py +src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py +src/transformers/models/deprecated/mctct/configuration_mctct.py +src/transformers/models/deprecated/mctct/feature_extraction_mctct.py +src/transformers/models/deprecated/mctct/modeling_mctct.py +src/transformers/models/deprecated/mctct/processing_mctct.py +src/transformers/models/deprecated/mmbt/configuration_mmbt.py +src/transformers/models/deprecated/mmbt/modeling_mmbt.py +src/transformers/models/deprecated/open_llama/configuration_open_llama.py +src/transformers/models/deprecated/open_llama/modeling_open_llama.py +src/transformers/models/deprecated/retribert/configuration_retribert.py +src/transformers/models/deprecated/retribert/modeling_retribert.py +src/transformers/models/deprecated/retribert/tokenization_retribert.py +src/transformers/models/deprecated/retribert/tokenization_retribert_fast.py +src/transformers/models/deprecated/tapex/tokenization_tapex.py +src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py +src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py +src/transformers/models/deprecated/van/configuration_van.py +src/transformers/models/deprecated/van/convert_van_to_pytorch.py +src/transformers/models/deprecated/van/modeling_van.py +src/transformers/models/deta/convert_deta_resnet_to_pytorch.py +src/transformers/models/deta/convert_deta_swin_to_pytorch.py +src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/detr/convert_detr_to_pytorch.py +src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/dinov2/configuration_dinov2.py +src/transformers/models/dinov2/convert_dinov2_to_hf.py +src/transformers/models/dinov2/modeling_dinov2.py +src/transformers/models/distilbert/modeling_distilbert.py +src/transformers/models/distilbert/modeling_flax_distilbert.py +src/transformers/models/distilbert/modeling_tf_distilbert.py +src/transformers/models/dit/convert_dit_unilm_to_pytorch.py +src/transformers/models/donut/configuration_donut_swin.py 
+src/transformers/models/donut/convert_donut_to_pytorch.py +src/transformers/models/donut/modeling_donut_swin.py +src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py +src/transformers/models/dpr/modeling_dpr.py +src/transformers/models/dpr/modeling_tf_dpr.py +src/transformers/models/dpt/configuration_dpt.py +src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py +src/transformers/models/dpt/convert_dpt_to_pytorch.py +src/transformers/models/efficientformer/configuration_efficientformer.py +src/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/efficientformer/modeling_efficientformer.py +src/transformers/models/efficientnet/configuration_efficientnet.py +src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py +src/transformers/models/efficientnet/modeling_efficientnet.py +src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py +src/transformers/models/electra/modeling_flax_electra.py +src/transformers/models/encodec/configuration_encodec.py +src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py +src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py +src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +src/transformers/models/ernie/modeling_ernie.py +src/transformers/models/esm/configuration_esm.py +src/transformers/models/esm/convert_esm.py +src/transformers/models/esm/modeling_esm.py +src/transformers/models/esm/modeling_esmfold.py +src/transformers/models/esm/modeling_tf_esm.py +src/transformers/models/esm/openfold_utils/chunk_utils.py +src/transformers/models/esm/openfold_utils/data_transforms.py +src/transformers/models/esm/openfold_utils/feats.py +src/transformers/models/esm/openfold_utils/loss.py +src/transformers/models/esm/openfold_utils/protein.py +src/transformers/models/esm/openfold_utils/residue_constants.py +src/transformers/models/esm/openfold_utils/rigid_utils.py +src/transformers/models/esm/openfold_utils/tensor_utils.py +src/transformers/models/falcon/configuration_falcon.py +src/transformers/models/falcon/modeling_falcon.py +src/transformers/models/flaubert/configuration_flaubert.py +src/transformers/models/flaubert/modeling_flaubert.py +src/transformers/models/flaubert/modeling_tf_flaubert.py +src/transformers/models/flava/convert_dalle_to_flava_codebook.py +src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py +src/transformers/models/flava/modeling_flava.py +src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py +src/transformers/models/fnet/modeling_fnet.py +src/transformers/models/focalnet/configuration_focalnet.py +src/transformers/models/focalnet/convert_focalnet_to_hf_format.py +src/transformers/models/focalnet/modeling_focalnet.py +src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/fsmt/modeling_fsmt.py +src/transformers/models/funnel/configuration_funnel.py +src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py +src/transformers/models/funnel/modeling_funnel.py +src/transformers/models/funnel/modeling_tf_funnel.py +src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py +src/transformers/models/git/configuration_git.py +src/transformers/models/git/convert_git_to_pytorch.py +src/transformers/models/glpn/configuration_glpn.py 
+src/transformers/models/glpn/convert_glpn_to_pytorch.py +src/transformers/models/gpt2/CONVERSION.md +src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py +src/transformers/models/gpt2/modeling_flax_gpt2.py +src/transformers/models/gpt2/modeling_tf_gpt2.py +src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py +src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py +src/transformers/models/gpt_neo/modeling_gpt_neo.py +src/transformers/models/gpt_neox/modeling_gpt_neox.py +src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +src/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py +src/transformers/models/gptj/configuration_gptj.py +src/transformers/models/gptj/modeling_flax_gptj.py +src/transformers/models/gptj/modeling_tf_gptj.py +src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py +src/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py +src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +src/transformers/models/graphormer/collating_graphormer.py +src/transformers/models/graphormer/configuration_graphormer.py +src/transformers/models/graphormer/modeling_graphormer.py +src/transformers/models/groupvit/configuration_groupvit.py +src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py +src/transformers/models/hubert/configuration_hubert.py +src/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/hubert/modeling_tf_hubert.py +src/transformers/models/ibert/configuration_ibert.py +src/transformers/models/ibert/modeling_ibert.py +src/transformers/models/ibert/quant_modules.py +src/transformers/models/idefics/configuration_idefics.py +src/transformers/models/idefics/image_processing_idefics.py +src/transformers/models/idefics/modeling_idefics.py +src/transformers/models/idefics/perceiver.py +src/transformers/models/idefics/processing_idefics.py +src/transformers/models/idefics/vision.py +src/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py +src/transformers/models/informer/configuration_informer.py +src/transformers/models/informer/modeling_informer.py +src/transformers/models/instructblip/configuration_instructblip.py +src/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py +src/transformers/models/instructblip/modeling_instructblip.py +src/transformers/models/instructblip/processing_instructblip.py +src/transformers/models/jukebox/configuration_jukebox.py +src/transformers/models/jukebox/convert_jukebox.py +src/transformers/models/jukebox/modeling_jukebox.py +src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/led/configuration_led.py +src/transformers/models/led/modeling_led.py +src/transformers/models/led/modeling_tf_led.py +src/transformers/models/levit/convert_levit_timm_to_pytorch.py +src/transformers/models/levit/modeling_levit.py +src/transformers/models/lilt/configuration_lilt.py +src/transformers/models/llama/configuration_llama.py +src/transformers/models/llama/convert_llama_weights_to_hf.py +src/transformers/models/llama/modeling_llama.py 
+src/transformers/models/longformer/configuration_longformer.py +src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py +src/transformers/models/longt5/configuration_longt5.py +src/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py +src/transformers/models/longt5/modeling_flax_longt5.py +src/transformers/models/luke/configuration_luke.py +src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/luke/modeling_luke.py +src/transformers/models/lxmert/configuration_lxmert.py +src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/lxmert/modeling_lxmert.py +src/transformers/models/lxmert/modeling_tf_lxmert.py +src/transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py +src/transformers/models/m2m_100/modeling_m2m_100.py +src/transformers/models/marian/configuration_marian.py +src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py +src/transformers/models/marian/convert_marian_to_pytorch.py +src/transformers/models/marian/modeling_flax_marian.py +src/transformers/models/marian/modeling_tf_marian.py +src/transformers/models/markuplm/configuration_markuplm.py +src/transformers/models/markuplm/feature_extraction_markuplm.py +src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/maskformer/configuration_maskformer_swin.py +src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/maskformer/convert_maskformer_resnet_to_pytorch.py +src/transformers/models/maskformer/convert_maskformer_swin_to_pytorch.py +src/transformers/models/maskformer/modeling_maskformer_swin.py +src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py +src/transformers/models/mbart/modeling_flax_mbart.py +src/transformers/models/mega/configuration_mega.py +src/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/mega/modeling_mega.py +src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py +src/transformers/models/megatron_bert/modeling_megatron_bert.py +src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py +src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py +src/transformers/models/mgp_str/configuration_mgp_str.py +src/transformers/models/mgp_str/modeling_mgp_str.py +src/transformers/models/mistral/configuration_mistral.py +src/transformers/models/mistral/modeling_mistral.py +src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +src/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +src/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py +src/transformers/models/mobilevit/configuration_mobilevit.py +src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py +src/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py +src/transformers/models/mpnet/configuration_mpnet.py +src/transformers/models/mpnet/modeling_mpnet.py +src/transformers/models/mpnet/modeling_tf_mpnet.py +src/transformers/models/mpt/configuration_mpt.py 
+src/transformers/models/mpt/modeling_mpt.py +src/transformers/models/mra/configuration_mra.py +src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py +src/transformers/models/mra/modeling_mra.py +src/transformers/models/mt5/configuration_mt5.py +src/transformers/models/mt5/modeling_flax_mt5.py +src/transformers/models/mt5/modeling_mt5.py +src/transformers/models/mt5/modeling_tf_mt5.py +src/transformers/models/musicgen/convert_musicgen_transformers.py +src/transformers/models/mvp/modeling_mvp.py +src/transformers/models/nezha/modeling_nezha.py +src/transformers/models/nllb_moe/configuration_nllb_moe.py +src/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py +src/transformers/models/nllb_moe/modeling_nllb_moe.py +src/transformers/models/nougat/convert_nougat_to_hf.py +src/transformers/models/nystromformer/configuration_nystromformer.py +src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/nystromformer/modeling_nystromformer.py +src/transformers/models/oneformer/convert_to_hf_oneformer.py +src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py +src/transformers/models/openai/modeling_openai.py +src/transformers/models/openai/modeling_tf_openai.py +src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/opt/modeling_flax_opt.py +src/transformers/models/owlvit/configuration_owlvit.py +src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py +src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py +src/transformers/models/pegasus/modeling_flax_pegasus.py +src/transformers/models/pegasus/modeling_tf_pegasus.py +src/transformers/models/pegasus_x/modeling_pegasus_x.py +src/transformers/models/perceiver/configuration_perceiver.py +src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py +src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py +src/transformers/models/persimmon/modeling_persimmon.py +src/transformers/models/pix2struct/configuration_pix2struct.py +src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py +src/transformers/models/pix2struct/image_processing_pix2struct.py +src/transformers/models/pix2struct/processing_pix2struct.py +src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py +src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py +src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py +src/transformers/models/pop2piano/feature_extraction_pop2piano.py +src/transformers/models/pop2piano/processing_pop2piano.py +src/transformers/models/pop2piano/tokenization_pop2piano.py +src/transformers/models/prophetnet/configuration_prophetnet.py +src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/prophetnet/modeling_prophetnet.py +src/transformers/models/pvt/configuration_pvt.py +src/transformers/models/pvt/convert_pvt_to_pytorch.py +src/transformers/models/pvt/image_processing_pvt.py +src/transformers/models/pvt/modeling_pvt.py +src/transformers/models/qdqbert/configuration_qdqbert.py +src/transformers/models/qdqbert/modeling_qdqbert.py +src/transformers/models/rag/configuration_rag.py +src/transformers/models/rag/modeling_rag.py +src/transformers/models/rag/modeling_tf_rag.py +src/transformers/models/rag/retrieval_rag.py +src/transformers/models/realm/modeling_realm.py 
+src/transformers/models/realm/retrieval_realm.py +src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py +src/transformers/models/regnet/configuration_regnet.py +src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py +src/transformers/models/regnet/convert_regnet_to_pytorch.py +src/transformers/models/regnet/modeling_flax_regnet.py +src/transformers/models/rembert/configuration_rembert.py +src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py +src/transformers/models/rembert/modeling_rembert.py +src/transformers/models/rembert/modeling_tf_rembert.py +src/transformers/models/resnet/convert_resnet_to_pytorch.py +src/transformers/models/resnet/modeling_flax_resnet.py +src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/roberta/modeling_flax_roberta.py +src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py +src/transformers/models/roc_bert/configuration_roc_bert.py +src/transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py +src/transformers/models/roformer/modeling_flax_roformer.py +src/transformers/models/roformer/modeling_roformer.py +src/transformers/models/roformer/modeling_tf_roformer.py +src/transformers/models/rwkv/configuration_rwkv.py +src/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py +src/transformers/models/rwkv/modeling_rwkv.py +src/transformers/models/sam/configuration_sam.py +src/transformers/models/sam/convert_sam_original_to_hf_format.py +src/transformers/models/sam/image_processing_sam.py +src/transformers/models/sam/modeling_sam.py +src/transformers/models/sam/modeling_tf_sam.py +src/transformers/models/sam/processing_sam.py +src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py +src/transformers/models/segformer/configuration_segformer.py +src/transformers/models/segformer/convert_segformer_original_to_pytorch.py +src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/sew_d/convert_sew_d_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py +src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py +src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py +src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py +src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +src/transformers/models/speecht5/configuration_speecht5.py +src/transformers/models/speecht5/convert_hifigan.py +src/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/speecht5/number_normalizer.py +src/transformers/models/splinter/configuration_splinter.py +src/transformers/models/splinter/modeling_splinter.py +src/transformers/models/squeezebert/modeling_squeezebert.py +src/transformers/models/swiftformer/configuration_swiftformer.py +src/transformers/models/swiftformer/convert_swiftformer_original_to_hf.py +src/transformers/models/swiftformer/modeling_swiftformer.py +src/transformers/models/swin/convert_swin_simmim_to_pytorch.py +src/transformers/models/swin/convert_swin_timm_to_pytorch.py 
+src/transformers/models/swin/modeling_tf_swin.py +src/transformers/models/swin2sr/configuration_swin2sr.py +src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py +src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py +src/transformers/models/swinv2/modeling_swinv2.py +src/transformers/models/switch_transformers/configuration_switch_transformers.py +src/transformers/models/switch_transformers/convert_big_switch.py +src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py +src/transformers/models/switch_transformers/modeling_switch_transformers.py +src/transformers/models/t5/configuration_t5.py +src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py +src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py +src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py +src/transformers/models/t5/modeling_flax_t5.py +src/transformers/models/t5/modeling_t5.py +src/transformers/models/t5/modeling_tf_t5.py +src/transformers/models/table_transformer/configuration_table_transformer.py +src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/tapas/configuration_tapas.py +src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py +src/transformers/models/tapas/modeling_tapas.py +src/transformers/models/tapas/modeling_tf_tapas.py +src/transformers/models/timesformer/convert_timesformer_to_pytorch.py +src/transformers/models/timm_backbone/configuration_timm_backbone.py +src/transformers/models/timm_backbone/modeling_timm_backbone.py +src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py +src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py +src/transformers/models/transfo_xl/modeling_transfo_xl.py +src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py +src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py +src/transformers/models/tvlt/configuration_tvlt.py +src/transformers/models/tvlt/modeling_tvlt.py +src/transformers/models/umt5/configuration_umt5.py +src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py +src/transformers/models/umt5/modeling_umt5.py +src/transformers/models/unispeech/convert_unispeech_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/unispeech_sat/configuration_unispeech_sat.py +src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/unispeech_sat/convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/upernet/configuration_upernet.py +src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py +src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py +src/transformers/models/videomae/configuration_videomae.py +src/transformers/models/videomae/convert_videomae_to_pytorch.py +src/transformers/models/vilt/configuration_vilt.py +src/transformers/models/vilt/convert_vilt_original_to_pytorch.py +src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py +src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py +src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py 
+src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/visual_bert/modeling_visual_bert.py +src/transformers/models/vit/convert_dino_to_pytorch.py +src/transformers/models/vit/convert_vit_timm_to_pytorch.py +src/transformers/models/vit/modeling_flax_vit.py +src/transformers/models/vit_hybrid/configuration_vit_hybrid.py +src/transformers/models/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py +src/transformers/models/vit_hybrid/modeling_vit_hybrid.py +src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py +src/transformers/models/vit_mae/modeling_tf_vit_mae.py +src/transformers/models/vit_msn/configuration_vit_msn.py +src/transformers/models/vit_msn/convert_msn_to_pytorch.py +src/transformers/models/vivit/configuration_vivit.py +src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py +src/transformers/models/vivit/image_processing_vivit.py +src/transformers/models/vivit/modeling_vivit.py +src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py +src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/wavlm/convert_wavlm_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/wavlm/convert_wavlm_original_s3prl_checkpoint_to_pytorch.py +src/transformers/models/whisper/convert_openai_to_hf.py +src/transformers/models/whisper/english_normalizer.py +src/transformers/models/whisper/modeling_flax_whisper.py +src/transformers/models/x_clip/configuration_x_clip.py +src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py +src/transformers/models/xglm/configuration_xglm.py +src/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py +src/transformers/models/xglm/modeling_flax_xglm.py +src/transformers/models/xglm/modeling_tf_xglm.py +src/transformers/models/xglm/modeling_xglm.py +src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/xlm/modeling_tf_xlm.py +src/transformers/models/xlm/modeling_xlm.py +src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py +src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py +src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py +src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py +src/transformers/models/xlm_roberta/modeling_xlm_roberta.py +src/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py +src/transformers/models/xlnet/modeling_tf_xlnet.py +src/transformers/models/xlnet/modeling_xlnet.py +src/transformers/models/xmod/convert_xmod_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/yolos/convert_yolos_to_pytorch.py +src/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py +src/transformers/models/yoso/modeling_yoso.py +src/transformers/onnx/__main__.py +src/transformers/onnx/config.py +src/transformers/onnx/convert.py +src/transformers/onnx/features.py +src/transformers/onnx/utils.py +src/transformers/optimization.py +src/transformers/optimization_tf.py +src/transformers/pipelines/audio_classification.py 
+src/transformers/pipelines/audio_utils.py +src/transformers/pipelines/automatic_speech_recognition.py +src/transformers/pipelines/base.py +src/transformers/pipelines/conversational.py +src/transformers/pipelines/depth_estimation.py +src/transformers/pipelines/document_question_answering.py +src/transformers/pipelines/feature_extraction.py +src/transformers/pipelines/fill_mask.py +src/transformers/pipelines/image_classification.py +src/transformers/pipelines/image_segmentation.py +src/transformers/pipelines/image_to_text.py +src/transformers/pipelines/mask_generation.py +src/transformers/pipelines/object_detection.py +src/transformers/pipelines/pt_utils.py +src/transformers/pipelines/question_answering.py +src/transformers/pipelines/table_question_answering.py +src/transformers/pipelines/text_classification.py +src/transformers/pipelines/token_classification.py +src/transformers/pipelines/video_classification.py +src/transformers/pipelines/visual_question_answering.py +src/transformers/pipelines/zero_shot_audio_classification.py +src/transformers/pipelines/zero_shot_classification.py +src/transformers/pipelines/zero_shot_image_classification.py +src/transformers/pipelines/zero_shot_object_detection.py +src/transformers/processing_utils.py +src/transformers/pytorch_utils.py +src/transformers/sagemaker/trainer_sm.py +src/transformers/sagemaker/training_args_sm.py +src/transformers/testing_utils.py +src/transformers/tf_utils.py +src/transformers/time_series_utils.py +src/transformers/tokenization_utils.py +src/transformers/tokenization_utils_base.py +src/transformers/tokenization_utils_fast.py +src/transformers/tools/agent_types.py +src/transformers/tools/agents.py +src/transformers/tools/base.py +src/transformers/tools/document_question_answering.py +src/transformers/tools/evaluate_agent.py +src/transformers/tools/image_captioning.py +src/transformers/tools/image_question_answering.py +src/transformers/tools/image_segmentation.py +src/transformers/tools/prompts.py +src/transformers/tools/python_interpreter.py +src/transformers/tools/speech_to_text.py +src/transformers/tools/text_classification.py +src/transformers/tools/text_question_answering.py +src/transformers/tools/text_summarization.py +src/transformers/tools/text_to_speech.py +src/transformers/tools/translation.py +src/transformers/trainer.py +src/transformers/trainer_callback.py +src/transformers/trainer_pt_utils.py +src/transformers/trainer_seq2seq.py +src/transformers/trainer_tf.py +src/transformers/trainer_utils.py +src/transformers/training_args.py +src/transformers/training_args_seq2seq.py +src/transformers/training_args_tf.py +src/transformers/utils/backbone_utils.py +src/transformers/utils/bitsandbytes.py +src/transformers/utils/constants.py +src/transformers/utils/doc.py +src/transformers/utils/dummy_detectron2_objects.py +src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py +src/transformers/utils/dummy_flax_objects.py +src/transformers/utils/dummy_keras_nlp_objects.py +src/transformers/utils/dummy_music_objects.py +src/transformers/utils/dummy_pt_objects.py +src/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py +src/transformers/utils/dummy_sentencepiece_objects.py +src/transformers/utils/dummy_speech_objects.py +src/transformers/utils/dummy_tensorflow_text_objects.py +src/transformers/utils/dummy_tf_objects.py +src/transformers/utils/dummy_tokenizers_objects.py +src/transformers/utils/dummy_vision_objects.py +src/transformers/utils/fx.py 
+src/transformers/utils/generic.py +src/transformers/utils/hp_naming.py +src/transformers/utils/hub.py +src/transformers/utils/import_utils.py +src/transformers/utils/logging.py +src/transformers/utils/model_parallel_utils.py +src/transformers/utils/notebook.py +src/transformers/utils/peft_utils.py +src/transformers/utils/quantization_config.py +src/transformers/utils/sentencepiece_model_pb2.py +src/transformers/utils/sentencepiece_model_pb2_new.py +src/transformers/utils/versions.py From 7b998cabeeb5ec45c6fd0762461d5b4a85fc0481 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 13 Nov 2023 12:37:46 +0100 Subject: [PATCH 145/268] Fix some Wav2Vec2 related models' doctest (#27462) * fix * fix * fix * fix * fix --------- Co-authored-by: ydshieh --- src/transformers/models/wav2vec2/tokenization_wav2vec2.py | 8 ++++---- .../wav2vec2_with_lm/processing_wav2vec2_with_lm.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/wav2vec2/tokenization_wav2vec2.py b/src/transformers/models/wav2vec2/tokenization_wav2vec2.py index b826eb048ed9..37b0af0a0e12 100644 --- a/src/transformers/models/wav2vec2/tokenization_wav2vec2.py +++ b/src/transformers/models/wav2vec2/tokenization_wav2vec2.py @@ -603,7 +603,7 @@ def decode( >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") >>> # load first sample of English common_voice - >>> dataset = load_dataset("common_voice", "en", split="train", streaming=True) + >>> dataset = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="train", streaming=True) >>> dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000)) >>> dataset_iter = iter(dataset) >>> sample = next(dataset_iter) @@ -626,10 +626,10 @@ def decode( ... } ... for d in outputs.word_offsets ... 
] - >>> # compare word offsets with audio `common_voice_en_100038.mp3` online on the dataset viewer: - >>> # https://huggingface.co/datasets/common_voice/viewer/en/train + >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer: + >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en >>> word_offsets[:3] - [{'word': 'WHY', 'start_time': 1.42, 'end_time': 1.54}, {'word': 'DOES', 'start_time': 1.64, 'end_time': 1.9}, {'word': 'MILISANDRA', 'start_time': 2.26, 'end_time': 2.9}] + [{'word': 'THE', 'start_time': 0.7, 'end_time': 0.78}, {'word': 'TRICK', 'start_time': 0.88, 'end_time': 1.08}, {'word': 'APPEARS', 'start_time': 1.2, 'end_time': 1.64}] ```""" # Convert inputs to python lists token_ids = to_py_obj(token_ids) diff --git a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py index e331da14e810..fdc06a806880 100644 --- a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +++ b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py @@ -538,7 +538,7 @@ def decode( >>> processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") >>> # load first sample of English common_voice - >>> dataset = load_dataset("common_voice", "en", split="train", streaming=True) + >>> dataset = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="train", streaming=True) >>> dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000)) >>> dataset_iter = iter(dataset) >>> sample = next(dataset_iter) @@ -561,10 +561,10 @@ def decode( ... } ... for d in outputs.word_offsets ... ] - >>> # compare word offsets with audio `common_voice_en_100038.mp3` online on the dataset viewer: - >>> # https://huggingface.co/datasets/common_voice/viewer/en/train + >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer: + >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en >>> word_offsets[:4] - [{'word': 'WHY', 'start_time': 1.42, 'end_time': 1.54}, {'word': 'DOES', 'start_time': 1.66, 'end_time': 1.9}, {'word': 'MILISANDRA', 'start_time': 2.26, 'end_time': 2.9}, {'word': 'LOOK', 'start_time': 3.0, 'end_time': 3.16}] + [{'word': 'THE', 'start_time': 0.68, 'end_time': 0.78}, {'word': 'TRACK', 'start_time': 0.88, 'end_time': 1.1}, {'word': 'APPEARS', 'start_time': 1.18, 'end_time': 1.66}, {'word': 'ON', 'start_time': 1.86, 'end_time': 1.92}] ```""" from pyctcdecode.constants import ( From 8f577dca4f2e9153d152afffe209fee643a90124 Mon Sep 17 00:00:00 2001 From: Chris McMaster Date: Mon, 13 Nov 2023 22:43:01 +1100 Subject: [PATCH 146/268] Fixed typo in error message (#27461) "past key much have a shape" -> "past key must have a shape" --- src/transformers/models/mistral/modeling_mistral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index f1699a8f48ad..7d6be0e3de25 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -372,7 +372,7 @@ def forward( if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError( - f"past key much have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" + f"past key must have a shape of (`batch_size, num_heads, 
self.config.sliding_window-1, head_dim`), got" f" {past_key.shape}" ) From b97cab7e6d3934c566e6cc8daf3e2f76d92397d5 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Mon, 13 Nov 2023 14:20:54 +0100 Subject: [PATCH 147/268] Remove-auth-token (#27060) * don't use `use_auth_token`internally * let's use token everywhere * fixup --- docs/source/en/hpo_train.md | 2 +- docs/source/en/model_doc/nllb.md | 4 ++-- docs/source/ja/hpo_train.md | 2 +- docs/source/ko/hpo_train.md | 2 +- .../research_projects/jax-projects/README.md | 2 +- .../layoutlmv3/run_funsd_cord.py | 10 +++++----- .../research_projects/mlm_wwm/run_mlm_wwm.py | 2 +- .../quantization-qdqbert/run_quant_qa.py | 6 +++--- .../robust-speech-event/eval.py | 2 +- .../run_speech_recognition_ctc_bnb.py | 12 ++++++------ .../run_speech_recognition_ctc_streaming.py | 12 ++++++------ .../tapex/run_tabfact_with_tapex.py | 6 +++--- .../tapex/run_wikisql_with_tapex.py | 6 +++--- .../tapex/run_wikitablequestions_with_tapex.py | 6 +++--- .../research_projects/xtreme-s/run_xtreme_s.py | 18 +++++++++--------- .../generation/configuration_utils.py | 2 +- src/transformers/integrations/peft.py | 4 ++-- .../models/bark/processing_bark.py | 4 ++-- src/transformers/tools/base.py | 6 +++--- tests/models/auto/test_processor_auto.py | 6 ++---- .../pytorch/run_glue_model_parallelism.py | 6 +++--- tests/test_configuration_utils.py | 8 +++----- tests/test_feature_extraction_utils.py | 10 +++++----- tests/test_image_processing_utils.py | 10 +++++----- tests/test_modeling_flax_utils.py | 8 ++++---- tests/test_modeling_tf_utils.py | 10 ++++------ tests/test_modeling_utils.py | 12 +++++------- tests/test_tokenization_utils.py | 12 ++++++------ tests/utils/test_hub_utils.py | 4 ++-- 29 files changed, 93 insertions(+), 101 deletions(-) diff --git a/docs/source/en/hpo_train.md b/docs/source/en/hpo_train.md index 882193d9e837..c516c501f882 100644 --- a/docs/source/en/hpo_train.md +++ b/docs/source/en/hpo_train.md @@ -99,7 +99,7 @@ Define a `model_init` function and pass it to the [`Trainer`], as an example: ... config=config, ... cache_dir=model_args.cache_dir, ... revision=model_args.model_revision, -... use_auth_token=True if model_args.use_auth_token else None, +... token=True if model_args.use_auth_token else None, ... ) ``` diff --git a/docs/source/en/model_doc/nllb.md b/docs/source/en/model_doc/nllb.md index b0dffa185ec4..3f272129d2f8 100644 --- a/docs/source/en/model_doc/nllb.md +++ b/docs/source/en/model_doc/nllb.md @@ -118,9 +118,9 @@ See example below for a translation from romanian to german: >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained( -... "facebook/nllb-200-distilled-600M", use_auth_token=True, src_lang="ron_Latn" +... "facebook/nllb-200-distilled-600M", token=True, src_lang="ron_Latn" ... ) ->>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M", use_auth_token=True) +>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M", token=True) >>> article = "Şeful ONU spune că nu există o soluţie militară în Siria" >>> inputs = tokenizer(article, return_tensors="pt") diff --git a/docs/source/ja/hpo_train.md b/docs/source/ja/hpo_train.md index 46a22550bb46..85da3616f80e 100644 --- a/docs/source/ja/hpo_train.md +++ b/docs/source/ja/hpo_train.md @@ -105,7 +105,7 @@ Wandbについては、[object_parameter](https://docs.wandb.ai/guides/sweeps/co ... config=config, ... cache_dir=model_args.cache_dir, ... 
revision=model_args.model_revision, -... use_auth_token=True if model_args.use_auth_token else None, +... token=True if model_args.use_auth_token else None, ... ) ``` diff --git a/docs/source/ko/hpo_train.md b/docs/source/ko/hpo_train.md index c7b25306930a..58bacd55ff75 100644 --- a/docs/source/ko/hpo_train.md +++ b/docs/source/ko/hpo_train.md @@ -87,7 +87,7 @@ wandb의 경우, 해당 [object_parameter](https://docs.wandb.ai/guides/sweeps/c ... config=config, ... cache_dir=model_args.cache_dir, ... revision=model_args.model_revision, -... use_auth_token=True if model_args.use_auth_token else None, +... token=True if model_args.use_auth_token else None, ... ) ``` diff --git a/examples/research_projects/jax-projects/README.md b/examples/research_projects/jax-projects/README.md index 66bb6c61a376..fc5f09695522 100644 --- a/examples/research_projects/jax-projects/README.md +++ b/examples/research_projects/jax-projects/README.md @@ -1117,7 +1117,7 @@ params = model.init(key2, x) bytes_output = serialization.to_bytes(params) -repo = Repository("flax-model", clone_from="flax-community/flax-model-dummy", use_auth_token=True) +repo = Repository("flax-model", clone_from="flax-community/flax-model-dummy", token=True) with repo.commit("My cool Flax model :)"): with open("flax_model.msgpack", "wb") as f: f.write(bytes_output) diff --git a/examples/research_projects/layoutlmv3/run_funsd_cord.py b/examples/research_projects/layoutlmv3/run_funsd_cord.py index e826fd997424..ad83fbdef9de 100644 --- a/examples/research_projects/layoutlmv3/run_funsd_cord.py +++ b/examples/research_projects/layoutlmv3/run_funsd_cord.py @@ -250,7 +250,7 @@ def main(): "nielsr/funsd-layoutlmv3", data_args.dataset_config_name, cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) elif data_args.dataset_name == "cord": # Downloading and loading a dataset from the hub. 
@@ -258,7 +258,7 @@ def main(): "nielsr/cord-layoutlmv3", data_args.dataset_config_name, cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) else: raise ValueError("This script only supports either FUNSD or CORD out-of-the-box.") @@ -313,7 +313,7 @@ def get_label_list(labels): finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) processor = AutoProcessor.from_pretrained( @@ -321,7 +321,7 @@ def get_label_list(labels): cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, add_prefix_space=True, apply_ocr=False, ) @@ -332,7 +332,7 @@ def get_label_list(labels): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) # Set the correspondences label/ID inside the model config diff --git a/examples/research_projects/mlm_wwm/run_mlm_wwm.py b/examples/research_projects/mlm_wwm/run_mlm_wwm.py index 84272421b96f..d22b2db7dcad 100644 --- a/examples/research_projects/mlm_wwm/run_mlm_wwm.py +++ b/examples/research_projects/mlm_wwm/run_mlm_wwm.py @@ -325,7 +325,7 @@ def main(): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch") diff --git a/examples/research_projects/quantization-qdqbert/run_quant_qa.py b/examples/research_projects/quantization-qdqbert/run_quant_qa.py index bb887955eb43..fac834ef70f3 100755 --- a/examples/research_projects/quantization-qdqbert/run_quant_qa.py +++ b/examples/research_projects/quantization-qdqbert/run_quant_qa.py @@ -322,14 +322,14 @@ def main(): model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) model = QDQBertForQuestionAnswering.from_pretrained( model_args.model_name_or_path, @@ -337,7 +337,7 @@ def main(): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. 
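Note: the change repeated across this patch is a single keyword rename; every `use_auth_token=...` argument passed to `from_pretrained(...)` or `load_dataset(...)` in the example scripts becomes `token=...`. Below is a minimal sketch of the new-style call for reference; the checkpoint name is a placeholder and the snippet is illustrative rather than taken from any of the scripts in this patch.

```python
# Minimal sketch of the post-rename calling convention.
# The checkpoint name is a placeholder assumption, not referenced by this patch.
from transformers import AutoConfig, AutoTokenizer

model_name = "bert-base-uncased"  # placeholder checkpoint

# The examples previously passed `use_auth_token=True`; after this patch they pass
# `token=True`, which tells `from_pretrained` to use the token stored locally by
# `huggingface-cli login` (a string token can be passed instead of `True`).
config = AutoConfig.from_pretrained(model_name, token=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, token=True)
```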
diff --git a/examples/research_projects/robust-speech-event/eval.py b/examples/research_projects/robust-speech-event/eval.py index a8acca1825d7..b6c89a6d49fa 100755 --- a/examples/research_projects/robust-speech-event/eval.py +++ b/examples/research_projects/robust-speech-event/eval.py @@ -65,7 +65,7 @@ def normalize_text(text: str) -> str: def main(args): # load dataset - dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True) + dataset = load_dataset(args.dataset, args.config, split=args.split, token=True) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) diff --git a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py index 7a137eb5fe71..ebf33eb01df5 100755 --- a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py +++ b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py @@ -418,7 +418,7 @@ def main(): data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) if data_args.audio_column_name not in raw_datasets["train"].column_names: @@ -443,7 +443,7 @@ def main(): data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) if data_args.max_eval_samples is not None: @@ -481,7 +481,7 @@ def remove_special_characters(batch): # the tokenizer # load config config = AutoConfig.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token + model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) # 4. 
Next, if no tokenizer file is defined, @@ -532,11 +532,11 @@ def remove_special_characters(batch): # load feature_extractor and tokenizer tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, **tokenizer_kwargs, ) feature_extractor = AutoFeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token + model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) # adapt config @@ -564,7 +564,7 @@ def remove_special_characters(batch): model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) # freeze encoder diff --git a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py index e3800142e38d..8a8eda851bb4 100644 --- a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py +++ b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py @@ -395,7 +395,7 @@ def load_streaming_dataset(split, sampling_rate, **kwargs): # so we just need to set the correct target sampling rate and normalize the input # via the `feature_extractor` feature_extractor = AutoFeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token + model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) if training_args.do_train: @@ -403,7 +403,7 @@ def load_streaming_dataset(split, sampling_rate, **kwargs): path=data_args.dataset_name, name=data_args.dataset_config_name, split=data_args.train_split_name, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, streaming=True, sampling_rate=feature_extractor.sampling_rate, ) @@ -431,7 +431,7 @@ def load_streaming_dataset(split, sampling_rate, **kwargs): path=data_args.dataset_name, name=data_args.dataset_config_name, split=data_args.eval_split_name, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, streaming=True, sampling_rate=feature_extractor.sampling_rate, ) @@ -465,7 +465,7 @@ def remove_special_characters(batch): # 3. Next, let's load the config as we might need it to create # the tokenizer config = AutoConfig.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token + model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) # 4. 
Now we can instantiate the tokenizer and model @@ -481,7 +481,7 @@ def remove_special_characters(batch): tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, config=config, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) # adapt config @@ -509,7 +509,7 @@ def remove_special_characters(batch): model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) # freeze encoder diff --git a/examples/research_projects/tapex/run_tabfact_with_tapex.py b/examples/research_projects/tapex/run_tabfact_with_tapex.py index 17222b8c4534..2bef4a371ef6 100644 --- a/examples/research_projects/tapex/run_tabfact_with_tapex.py +++ b/examples/research_projects/tapex/run_tabfact_with_tapex.py @@ -292,7 +292,7 @@ def main(): num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) # load tapex tokenizer tokenizer = TapexTokenizer.from_pretrained( @@ -300,7 +300,7 @@ def main(): cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, add_prefix_space=True, ) model = BartForSequenceClassification.from_pretrained( @@ -309,7 +309,7 @@ def main(): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) # Padding strategy diff --git a/examples/research_projects/tapex/run_wikisql_with_tapex.py b/examples/research_projects/tapex/run_wikisql_with_tapex.py index 3e72a5ad53d6..821b283d9ff6 100644 --- a/examples/research_projects/tapex/run_wikisql_with_tapex.py +++ b/examples/research_projects/tapex/run_wikisql_with_tapex.py @@ -329,7 +329,7 @@ def main(): model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus @@ -344,7 +344,7 @@ def main(): cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, add_prefix_space=True, ) @@ -355,7 +355,7 @@ def main(): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) if model.config.decoder_start_token_id is None: diff --git a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py b/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py index bec81a98890e..f874eebb3418 100644 --- a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py +++ b/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py @@ -327,7 +327,7 @@ def main(): model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token 
else None, + token=True if model_args.use_auth_token else None, ) # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus @@ -342,7 +342,7 @@ def main(): cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, add_prefix_space=True, ) @@ -353,7 +353,7 @@ def main(): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) if model.config.decoder_start_token_id is None: diff --git a/examples/research_projects/xtreme-s/run_xtreme_s.py b/examples/research_projects/xtreme-s/run_xtreme_s.py index 5db30c7e97e5..e01ccbf4488d 100644 --- a/examples/research_projects/xtreme-s/run_xtreme_s.py +++ b/examples/research_projects/xtreme-s/run_xtreme_s.py @@ -502,7 +502,7 @@ def main(): data_args.dataset_name, config_name, split=data_args.train_split_name, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) @@ -528,7 +528,7 @@ def main(): data_args.dataset_name, config_name, split=data_args.eval_split_name, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) @@ -540,7 +540,7 @@ def main(): data_args.dataset_name, config_name, split=data_args.predict_split_name, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) @@ -595,7 +595,7 @@ def remove_special_characters(batch): # 3. Next, let's load the config as we might need it to create # the tokenizer config = AutoConfig.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token + model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) if is_text_target: @@ -651,11 +651,11 @@ def remove_special_characters(batch): if is_text_target: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, **tokenizer_kwargs, ) feature_extractor = AutoFeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token + model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) # adapt config @@ -694,14 +694,14 @@ def remove_special_characters(batch): model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) elif config.is_encoder_decoder: model = AutoModelForSpeechSeq2Seq.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") @@ -710,7 +710,7 @@ def remove_special_characters(batch): model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, - use_auth_token=data_args.use_auth_token, + token=data_args.use_auth_token, ) # freeze encoder diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py index ef3a6fba1b47..04aa48364f6a 100644 --- 
a/src/transformers/generation/configuration_utils.py +++ b/src/transformers/generation/configuration_utils.py @@ -716,7 +716,7 @@ def from_pretrained( proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, - use_auth_token=token, + token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, diff --git a/src/transformers/integrations/peft.py b/src/transformers/integrations/peft.py index de68e01c5fde..8be09e86319f 100644 --- a/src/transformers/integrations/peft.py +++ b/src/transformers/integrations/peft.py @@ -179,7 +179,7 @@ def load_adapter( peft_config = PeftConfig.from_pretrained( peft_model_id, - use_auth_token=token, + token=token, **adapter_kwargs, ) @@ -190,7 +190,7 @@ def load_adapter( self._hf_peft_config_loaded = True if peft_model_id is not None: - adapter_state_dict = load_peft_weights(peft_model_id, use_auth_token=token, **adapter_kwargs) + adapter_state_dict = load_peft_weights(peft_model_id, token=token, **adapter_kwargs) # We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility processed_adapter_state_dict = {} diff --git a/src/transformers/models/bark/processing_bark.py b/src/transformers/models/bark/processing_bark.py index 2b381327592e..ef8ed03224be 100644 --- a/src/transformers/models/bark/processing_bark.py +++ b/src/transformers/models/bark/processing_bark.py @@ -94,7 +94,7 @@ def from_pretrained( proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), - use_auth_token=kwargs.pop("use_auth_token", None), + token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), ) if speaker_embeddings_path is None: @@ -190,7 +190,7 @@ def _load_voice_preset(self, voice_preset: str = None, **kwargs): proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), - use_auth_token=kwargs.pop("use_auth_token", None), + token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), ) if path is None: diff --git a/src/transformers/tools/base.py b/src/transformers/tools/base.py index c781a0b270ba..42027948a6f5 100644 --- a/src/transformers/tools/base.py +++ b/src/transformers/tools/base.py @@ -226,7 +226,7 @@ def from_hub( resolved_config_file = cached_file( repo_id, TOOL_CONFIG_FILE, - use_auth_token=token, + token=token, **hub_kwargs, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, @@ -236,7 +236,7 @@ def from_hub( resolved_config_file = cached_file( repo_id, CONFIG_NAME, - use_auth_token=token, + token=token, **hub_kwargs, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, @@ -259,7 +259,7 @@ def from_hub( custom_tool = config tool_class = custom_tool["tool_class"] - tool_class = get_class_from_dynamic_module(tool_class, repo_id, use_auth_token=token, **hub_kwargs) + tool_class = get_class_from_dynamic_module(tool_class, repo_id, token=token, **hub_kwargs) if len(tool_class.name) == 0: tool_class.name = custom_tool["name"] diff --git a/tests/models/auto/test_processor_auto.py b/tests/models/auto/test_processor_auto.py index a4f371426877..bf4a92475dee 100644 --- a/tests/models/auto/test_processor_auto.py +++ b/tests/models/auto/test_processor_auto.py @@ -308,9 +308,7 @@ def tearDownClass(cls): def test_push_to_hub(self): processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR) with 
tempfile.TemporaryDirectory() as tmp_dir: - processor.save_pretrained( - os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token - ) + processor.save_pretrained(os.path.join(tmp_dir, "test-processor"), push_to_hub=True, token=self._token) new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor") for k, v in processor.feature_extractor.__dict__.items(): @@ -324,7 +322,7 @@ def test_push_to_hub_in_organization(self): processor.save_pretrained( os.path.join(tmp_dir, "test-processor-org"), push_to_hub=True, - use_auth_token=self._token, + token=self._token, organization="valid_org", ) diff --git a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py index 32e28e469717..c38ee542e6ce 100644 --- a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py +++ b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py @@ -314,14 +314,14 @@ def main(): finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, @@ -329,7 +329,7 @@ def main(): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets diff --git a/tests/test_configuration_utils.py b/tests/test_configuration_utils.py index 1b8136bfbb42..a6e9e6b0390a 100644 --- a/tests/test_configuration_utils.py +++ b/tests/test_configuration_utils.py @@ -142,7 +142,7 @@ def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) - config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token) + config.push_to_hub("valid_org/test-config-org", token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): @@ -154,9 +154,7 @@ def test_push_to_hub_in_organization(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: - config.save_pretrained( - tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token - ) + config.save_pretrained(tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): @@ -167,7 +165,7 @@ def test_push_to_hub_dynamic_config(self): CustomConfig.register_for_auto_class() config = CustomConfig(attribute=42) - config.push_to_hub("test-dynamic-config", use_auth_token=self._token) + config.push_to_hub("test-dynamic-config", token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"}) diff --git a/tests/test_feature_extraction_utils.py b/tests/test_feature_extraction_utils.py 
index b17c48ff120d..7467cac139c4 100644 --- a/tests/test_feature_extraction_utils.py +++ b/tests/test_feature_extraction_utils.py @@ -85,7 +85,7 @@ def tearDownClass(cls): def test_push_to_hub(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) - feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token) + feature_extractor.push_to_hub("test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): @@ -97,7 +97,7 @@ def test_push_to_hub(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( - tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token + tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") @@ -106,7 +106,7 @@ def test_push_to_hub(self): def test_push_to_hub_in_organization(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) - feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token) + feature_extractor.push_to_hub("valid_org/test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): @@ -118,7 +118,7 @@ def test_push_to_hub_in_organization(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( - tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token + tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org") @@ -129,7 +129,7 @@ def test_push_to_hub_dynamic_feature_extractor(self): CustomFeatureExtractor.register_for_auto_class() feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) - feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token) + feature_extractor.push_to_hub("test-dynamic-feature-extractor", token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( diff --git a/tests/test_image_processing_utils.py b/tests/test_image_processing_utils.py index 5211a541d3ec..3be8ee3c26db 100644 --- a/tests/test_image_processing_utils.py +++ b/tests/test_image_processing_utils.py @@ -95,7 +95,7 @@ def tearDownClass(cls): def test_push_to_hub(self): image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) - image_processor.push_to_hub("test-image-processor", use_auth_token=self._token) + image_processor.push_to_hub("test-image-processor", token=self._token) new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): @@ -107,7 +107,7 @@ def test_push_to_hub(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( - tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token + tmp_dir, repo_id="test-image-processor", push_to_hub=True, token=self._token ) new_image_processor = 
ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") @@ -116,7 +116,7 @@ def test_push_to_hub(self): def test_push_to_hub_in_organization(self): image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) - image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token) + image_processor.push_to_hub("valid_org/test-image-processor", token=self._token) new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor") for k, v in image_processor.__dict__.items(): @@ -128,7 +128,7 @@ def test_push_to_hub_in_organization(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( - tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token + tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, token=self._token ) new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org") @@ -139,7 +139,7 @@ def test_push_to_hub_dynamic_image_processor(self): CustomImageProcessor.register_for_auto_class() image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR) - image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token) + image_processor.push_to_hub("test-dynamic-image-processor", token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( diff --git a/tests/test_modeling_flax_utils.py b/tests/test_modeling_flax_utils.py index 06ed30f8afa1..e0e6c873c689 100644 --- a/tests/test_modeling_flax_utils.py +++ b/tests/test_modeling_flax_utils.py @@ -60,7 +60,7 @@ def test_push_to_hub(self): vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) - model.push_to_hub("test-model-flax", use_auth_token=self._token) + model.push_to_hub("test-model-flax", token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") @@ -76,7 +76,7 @@ def test_push_to_hub(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: - model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token) + model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") @@ -92,7 +92,7 @@ def test_push_to_hub_in_organization(self): vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) - model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token) + model.push_to_hub("valid_org/test-model-flax-org", token=self._token) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") @@ -109,7 +109,7 @@ def test_push_to_hub_in_organization(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( - tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token + tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, token=self._token ) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") diff --git a/tests/test_modeling_tf_utils.py b/tests/test_modeling_tf_utils.py index 6d0ed8640772..130f920f71e1 100644 --- a/tests/test_modeling_tf_utils.py +++ b/tests/test_modeling_tf_utils.py @@ -572,7 +572,7 @@ def test_push_to_hub(self): 
logging.set_verbosity_info() logger = logging.get_logger("transformers.utils.hub") with CaptureLogger(logger) as cl: - model.push_to_hub("test-model-tf", use_auth_token=self._token) + model.push_to_hub("test-model-tf", token=self._token) logging.set_verbosity_warning() # Check the model card was created and uploaded. self.assertIn("Uploading the following files to __DUMMY_TRANSFORMERS_USER__/test-model-tf", cl.out) @@ -590,7 +590,7 @@ def test_push_to_hub(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: - model.save_pretrained(tmp_dir, repo_id="test-model-tf", push_to_hub=True, use_auth_token=self._token) + model.save_pretrained(tmp_dir, repo_id="test-model-tf", push_to_hub=True, token=self._token) new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf") models_equal = True @@ -638,7 +638,7 @@ def test_push_to_hub_in_organization(self): # Make sure model is properly initialized model.build() - model.push_to_hub("valid_org/test-model-tf-org", use_auth_token=self._token) + model.push_to_hub("valid_org/test-model-tf-org", token=self._token) new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True @@ -653,9 +653,7 @@ def test_push_to_hub_in_organization(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: - model.save_pretrained( - tmp_dir, push_to_hub=True, use_auth_token=self._token, repo_id="valid_org/test-model-tf-org" - ) + model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-tf-org") new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index 1885fc671b02..e457dc07a9fb 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -1162,7 +1162,7 @@ def test_push_to_hub(self): vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) - model.push_to_hub("test-model", use_auth_token=self._token) + model.push_to_hub("test-model", token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): @@ -1173,7 +1173,7 @@ def test_push_to_hub(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: - model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, use_auth_token=self._token) + model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): @@ -1202,7 +1202,7 @@ def test_push_to_hub_in_organization(self): vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) - model.push_to_hub("valid_org/test-model-org", use_auth_token=self._token) + model.push_to_hub("valid_org/test-model-org", token=self._token) new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): @@ -1213,9 +1213,7 @@ def test_push_to_hub_in_organization(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: - model.save_pretrained( - tmp_dir, push_to_hub=True, use_auth_token=self._token, repo_id="valid_org/test-model-org" - ) + model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-org") new_model = 
BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): @@ -1228,7 +1226,7 @@ def test_push_to_hub_dynamic_model(self): config = CustomConfig(hidden_size=32) model = CustomModel(config) - model.push_to_hub("test-dynamic-model", use_auth_token=self._token) + model.push_to_hub("test-dynamic-model", token=self._token) # checks self.assertDictEqual( config.auto_map, diff --git a/tests/test_tokenization_utils.py b/tests/test_tokenization_utils.py index 2984de97fdcb..3f7f7249f97c 100644 --- a/tests/test_tokenization_utils.py +++ b/tests/test_tokenization_utils.py @@ -146,7 +146,7 @@ def test_push_to_hub(self): vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) - tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token) + tokenizer.push_to_hub("test-tokenizer", token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @@ -155,7 +155,7 @@ def test_push_to_hub(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: - tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token) + tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @@ -167,7 +167,7 @@ def test_push_to_hub_in_organization(self): vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) - tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token) + tokenizer.push_to_hub("valid_org/test-tokenizer-org", token=self._token) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @@ -177,7 +177,7 @@ def test_push_to_hub_in_organization(self): # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( - tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token + tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, token=self._token ) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") @@ -193,7 +193,7 @@ def test_push_to_hub_dynamic_tokenizer(self): tokenizer = CustomTokenizer(vocab_file) # No fast custom tokenizer - tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token) + tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module @@ -210,7 +210,7 @@ def test_push_to_hub_dynamic_tokenizer(self): bert_tokenizer.save_pretrained(tmp_dir) tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir) - tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token) + tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module diff --git a/tests/utils/test_hub_utils.py b/tests/utils/test_hub_utils.py index 5ff1ed679e6e..dffc018e284c 100644 --- 
a/tests/utils/test_hub_utils.py +++ b/tests/utils/test_hub_utils.py @@ -132,10 +132,10 @@ def test_get_file_gated_repo(self): """Test download file from a gated repo fails with correct message when not authenticated.""" with self.assertRaisesRegex(EnvironmentError, "You are trying to access a gated repo."): # All files except README.md are protected on a gated repo. - cached_file(GATED_REPO, "gated_file.txt", use_auth_token=False) + cached_file(GATED_REPO, "gated_file.txt", token=False) def test_has_file_gated_repo(self): """Test check file existence from a gated repo fails with correct message when not authenticated.""" with self.assertRaisesRegex(EnvironmentError, "is a gated repository"): # All files except README.md are protected on a gated repo. - has_file(GATED_REPO, "gated_file.txt", use_auth_token=False) + has_file(GATED_REPO, "gated_file.txt", token=False) From 210e38d83f85738f9f6c277579e6c05e634ad164 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Mon, 13 Nov 2023 14:51:48 +0100 Subject: [PATCH 148/268] [`Llama + Mistral`] Add attention dropout (#27315) * add droppouts * add the dropout * add doc in the config * nits * fix mistral config * nits --- src/transformers/models/llama/configuration_llama.py | 5 ++++- src/transformers/models/llama/modeling_llama.py | 8 ++++---- src/transformers/models/mistral/configuration_mistral.py | 5 ++++- src/transformers/models/mistral/modeling_mistral.py | 8 +++----- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/llama/configuration_llama.py b/src/transformers/models/llama/configuration_llama.py index 7cce21671e97..354bcb5b2e8f 100644 --- a/src/transformers/models/llama/configuration_llama.py +++ b/src/transformers/models/llama/configuration_llama.py @@ -95,7 +95,8 @@ class LlamaConfig(PretrainedConfig): experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. - + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. ```python >>> from transformers import LlamaModel, LlamaConfig @@ -133,6 +134,7 @@ def __init__( rope_theta=10000.0, rope_scaling=None, attention_bias=False, + attention_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size @@ -156,6 +158,7 @@ def __init__( self.rope_scaling = rope_scaling self._rope_scaling_validation() self.attention_bias = attention_bias + self.attention_dropout = attention_dropout super().__init__( pad_token_id=pad_token_id, diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 703ebf0d9560..fd4bd16fd2df 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -278,6 +278,7 @@ class LlamaAttention(nn.Module): def __init__(self, config: LlamaConfig): super().__init__() self.config = config + self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads @@ -292,6 +293,7 @@ def __init__(self, config: LlamaConfig): f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) @@ -404,6 +406,7 @@ def forward( # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): @@ -489,10 +492,7 @@ def forward( key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) - # TODO: llama does not have dropout in the config?? - # It is recommended to use dropout with FA according to the docs - # when training. - dropout_rate = 0.0 # if not self.training else self.attn_dropout + dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need diff --git a/src/transformers/models/mistral/configuration_mistral.py b/src/transformers/models/mistral/configuration_mistral.py index a1b054df49c2..a6c4634f611d 100644 --- a/src/transformers/models/mistral/configuration_mistral.py +++ b/src/transformers/models/mistral/configuration_mistral.py @@ -82,7 +82,8 @@ class MistralConfig(PretrainedConfig): The base period of the RoPE embeddings. sliding_window (`int`, *optional*, defaults to 4096): Sliding window attention window size. If not specified, will default to `4096`. - + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. 
```python >>> from transformers import MistralModel, MistralConfig @@ -119,6 +120,7 @@ def __init__( tie_word_embeddings=False, rope_theta=10000.0, sliding_window=4096, + attention_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size @@ -139,6 +141,7 @@ def __init__( self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta + self.attention_dropout = attention_dropout super().__init__( pad_token_id=pad_token_id, diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 7d6be0e3de25..445b7d95cb6b 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -205,6 +205,7 @@ def __init__(self, config: MistralConfig): self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True + self.attention_dropout = config.attention_dropout if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( @@ -284,6 +285,7 @@ def forward( # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): @@ -390,11 +392,7 @@ def forward( # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) - - # TODO: Mistral does not have dropout in the config?? - # It is recommended to use dropout with FA according to the docs - # when training. - dropout_rate = 0.0 # if not self.training else self.attn_dropout + dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need From 9dc8fe1b325f270320cdf205778bddae03c6ba1f Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Mon, 13 Nov 2023 15:17:01 +0100 Subject: [PATCH 149/268] Default to msgpack for safetensors (#27460) * Default to msgpack for safetensors * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../modeling_flax_pytorch_utils.py | 6 +- src/transformers/modeling_flax_utils.py | 41 +++---- tests/test_modeling_flax_utils.py | 112 +++++++++++++++++- tests/test_modeling_tf_utils.py | 65 ++++++++++ 4 files changed, 196 insertions(+), 28 deletions(-) diff --git a/src/transformers/modeling_flax_pytorch_utils.py b/src/transformers/modeling_flax_pytorch_utils.py index 5a0f52a995e8..f78c4e78c78b 100644 --- a/src/transformers/modeling_flax_pytorch_utils.py +++ b/src/transformers/modeling_flax_pytorch_utils.py @@ -50,7 +50,7 @@ def load_pytorch_checkpoint_in_flax_state_dict( """Load pytorch checkpoints in a flax model""" try: import torch # noqa: F401 - except ImportError: + except (ImportError, ModuleNotFoundError): logger.error( "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. 
Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" @@ -150,7 +150,7 @@ def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model): # numpy currently does not support bfloat16, need to go over float32 in this case to not lose precision try: import torch # noqa: F401 - except ImportError: + except (ImportError, ModuleNotFoundError): logger.error( "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" @@ -349,7 +349,7 @@ def load_flax_weights_in_pytorch_model(pt_model, flax_state): try: import torch # noqa: F401 - except ImportError: + except (ImportError, ModuleNotFoundError): logger.error( "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index 9e63cb0cb961..37567d3d8432 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -721,7 +721,14 @@ def from_pretrained( pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): - if is_safetensors_available() and os.path.isfile( + if os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): + # Load from a Flax checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)): + # Load from a sharded Flax checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME) + is_sharded = True + elif is_safetensors_available() and os.path.isfile( os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) ): # Load from a safetensors checkpoint @@ -735,13 +742,6 @@ def from_pretrained( # Load from a sharded pytorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME) is_sharded = True - elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): - # Load from a Flax checkpoint - archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) - elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)): - # Load from a sharded Flax checkpoint - archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME) - is_sharded = True # At this stage we don't have a weight file so we will raise an error. elif is_safetensors_available() and os.path.isfile( os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) @@ -770,8 +770,6 @@ def from_pretrained( else: if from_pt: filename = WEIGHTS_NAME - elif is_safetensors_available(): - filename = SAFE_WEIGHTS_NAME else: filename = FLAX_WEIGHTS_NAME @@ -792,22 +790,14 @@ def from_pretrained( } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) - # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None - # result when internet is up, the repo and revision exist, but the file does not. 
- if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: - # Did not find the safetensors file, let's fallback to Flax. - # No support for sharded safetensors yet, so we'll raise an error if that's all we find. - filename = FLAX_WEIGHTS_NAME - resolved_archive_file = cached_file( - pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **cached_file_kwargs - ) + # Maybe the checkpoint is sharded, we try to grab the index name in this case. if resolved_archive_file is None and filename == FLAX_WEIGHTS_NAME: - # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True + # Maybe the checkpoint is pytorch sharded, we try to grab the pytorch index name in this case. if resolved_archive_file is None and from_pt: resolved_archive_file = cached_file( @@ -815,6 +805,17 @@ def from_pretrained( ) if resolved_archive_file is not None: is_sharded = True + + # If we still haven't found anything, look for `safetensors`. + if resolved_archive_file is None: + # No support for sharded safetensors yet, so we'll raise an error if that's all we find. + filename = SAFE_WEIGHTS_NAME + resolved_archive_file = cached_file( + pretrained_model_name_or_path, SAFE_WEIGHTS_NAME, **cached_file_kwargs + ) + + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None + # result when internet is up, the repo and revision exist, but the file does not. if resolved_archive_file is None: # Otherwise, maybe there is a TF or Torch model file. We try those to give a helpful error # message. diff --git a/tests/test_modeling_flax_utils.py b/tests/test_modeling_flax_utils.py index e0e6c873c689..e668b4353258 100644 --- a/tests/test_modeling_flax_utils.py +++ b/tests/test_modeling_flax_utils.py @@ -19,8 +19,16 @@ from huggingface_hub import HfFolder, delete_repo, snapshot_download from requests.exceptions import HTTPError -from transformers import BertConfig, BertModel, is_flax_available -from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax, require_safetensors, require_torch +from transformers import BertConfig, BertModel, is_flax_available, is_torch_available +from transformers.testing_utils import ( + TOKEN, + USER, + is_pt_flax_cross_test, + is_staging_test, + require_flax, + require_safetensors, + require_torch, +) from transformers.utils import FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_NAME @@ -202,6 +210,7 @@ def test_safetensors_save_and_load(self): @require_flax @require_torch + @is_pt_flax_cross_test def test_safetensors_save_and_load_pt_to_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") @@ -218,21 +227,114 @@ def test_safetensors_save_and_load_pt_to_flax(self): @require_safetensors def test_safetensors_load_from_hub(self): + """ + This test checks that we can load safetensors from a checkpoint that only has those on the Hub + """ flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") # Can load from the Flax-formatted checkpoint safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-only") self.assertTrue(check_models_equal(flax_model, safetensors_model)) + @require_safetensors + def test_safetensors_load_from_local(self): + """ + This test checks that we can load 
safetensors from a checkpoint that only has those on the Hub + """ + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-flax-only", cache_dir=tmp) + flax_model = FlaxBertModel.from_pretrained(location) + + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-flax-safetensors-only", cache_dir=tmp) + safetensors_model = FlaxBertModel.from_pretrained(location) + + self.assertTrue(check_models_equal(flax_model, safetensors_model)) + @require_torch @require_safetensors - def test_safetensors_load_from_hub_flax_and_pt(self): - flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + @is_pt_flax_cross_test + def test_safetensors_load_from_hub_from_safetensors_pt(self): + """ + This test checks that we can load safetensors from a checkpoint that only has those on the Hub. + saved in the "pt" format. + """ + flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-msgpack") + + # Can load from the PyTorch-formatted checkpoint + safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") + self.assertTrue(check_models_equal(flax_model, safetensors_model)) + + @require_torch + @require_safetensors + @is_pt_flax_cross_test + def test_safetensors_load_from_local_from_safetensors_pt(self): + """ + This test checks that we can load safetensors from a checkpoint that only has those on the Hub. + saved in the "pt" format. + """ + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-msgpack", cache_dir=tmp) + flax_model = FlaxBertModel.from_pretrained(location) # Can load from the PyTorch-formatted checkpoint - safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only", from_pt=True) + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) + safetensors_model = FlaxBertModel.from_pretrained(location) + self.assertTrue(check_models_equal(flax_model, safetensors_model)) + @require_safetensors + def test_safetensors_load_from_hub_from_safetensors_pt_without_torch_installed(self): + """ + This test checks that we cannot load safetensors from a checkpoint that only has safetensors + saved in the "pt" format if torch isn't installed. + """ + if is_torch_available(): + # This test verifies that a correct error message is shown when loading from a pt safetensors + # PyTorch shouldn't be installed for this to work correctly. + return + + # Cannot load from the PyTorch-formatted checkpoint without PyTorch installed + with self.assertRaises(ModuleNotFoundError): + _ = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") + + @require_safetensors + def test_safetensors_load_from_local_from_safetensors_pt_without_torch_installed(self): + """ + This test checks that we cannot load safetensors from a checkpoint that only has safetensors + saved in the "pt" format if torch isn't installed. + """ + if is_torch_available(): + # This test verifies that a correct error message is shown when loading from a pt safetensors + # PyTorch shouldn't be installed for this to work correctly. 
+ return + + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) + + # Cannot load from the PyTorch-formatted checkpoint without PyTorch installed + with self.assertRaises(ModuleNotFoundError): + _ = FlaxBertModel.from_pretrained(location) + + @require_safetensors + def test_safetensors_load_from_hub_msgpack_before_safetensors(self): + """ + This test checks that we'll first download msgpack weights before safetensors + The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch + """ + FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack") + + @require_safetensors + def test_safetensors_load_from_local_msgpack_before_safetensors(self): + """ + This test checks that we'll first download msgpack weights before safetensors + The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch + """ + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp) + FlaxBertModel.from_pretrained(location) + @require_safetensors def test_safetensors_flax_from_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") diff --git a/tests/test_modeling_tf_utils.py b/tests/test_modeling_tf_utils.py index 130f920f71e1..ccc3f1f5cef2 100644 --- a/tests/test_modeling_tf_utils.py +++ b/tests/test_modeling_tf_utils.py @@ -535,6 +535,71 @@ def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_hub(self): # This should discard the safetensors weights in favor of the .h5 sharded weights TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded") + @require_safetensors + def test_safetensors_load_from_local(self): + """ + This test checks that we can load safetensors from a checkpoint that only has those on the Hub + """ + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-tf-only", cache_dir=tmp) + tf_model = TFBertModel.from_pretrained(location) + + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-only", cache_dir=tmp) + safetensors_model = TFBertModel.from_pretrained(location) + + for p1, p2 in zip(tf_model.weights, safetensors_model.weights): + self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + + @require_safetensors + def test_safetensors_load_from_hub_from_safetensors_pt(self): + """ + This test checks that we can load safetensors from a checkpoint that only has those on the Hub. + saved in the "pt" format. + """ + tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-h5") + + # Can load from the PyTorch-formatted checkpoint + safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") + for p1, p2 in zip(tf_model.weights, safetensors_model.weights): + self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + + @require_safetensors + def test_safetensors_load_from_local_from_safetensors_pt(self): + """ + This test checks that we can load safetensors from a local checkpoint that only has those + saved in the "pt" format. 
+ """ + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-h5", cache_dir=tmp) + tf_model = TFBertModel.from_pretrained(location) + + # Can load from the PyTorch-formatted checkpoint + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) + safetensors_model = TFBertModel.from_pretrained(location) + + for p1, p2 in zip(tf_model.weights, safetensors_model.weights): + self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + + @require_safetensors + def test_safetensors_load_from_hub_h5_before_safetensors(self): + """ + This test checks that we'll first download h5 weights before safetensors + The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch + """ + TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack") + + @require_safetensors + def test_safetensors_load_from_local_h5_before_safetensors(self): + """ + This test checks that we'll first download h5 weights before safetensors + The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch + """ + with tempfile.TemporaryDirectory() as tmp: + location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp) + TFBertModel.from_pretrained(location) + @require_tf @is_staging_test From 68ae3be7f514b0e0411664e2a2cdbc6264fd3a0b Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Mon, 13 Nov 2023 15:18:19 +0100 Subject: [PATCH 150/268] Fix `from_pt` flag when loading with `safetensors` (#27394) * Fix * Tests * Fix --- src/transformers/modeling_tf_pytorch_utils.py | 8 ++- tests/models/mpnet/test_modeling_mpnet.py | 4 ++ .../models/wav2vec2/test_modeling_wav2vec2.py | 6 +++ tests/test_modeling_common.py | 50 +++++++++++++++++++ 4 files changed, 67 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_tf_pytorch_utils.py b/src/transformers/modeling_tf_pytorch_utils.py index fbce340fea76..d45b95fa5bc7 100644 --- a/src/transformers/modeling_tf_pytorch_utils.py +++ b/src/transformers/modeling_tf_pytorch_utils.py @@ -166,6 +166,7 @@ def load_pytorch_checkpoint_in_tf2_model( try: import tensorflow as tf # noqa: F401 import torch # noqa: F401 + from safetensors.torch import load_file as safe_load_file # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. 
Please see " @@ -182,7 +183,12 @@ def load_pytorch_checkpoint_in_tf2_model( for path in pytorch_checkpoint_path: pt_path = os.path.abspath(path) logger.info(f"Loading PyTorch weights from {pt_path}") - pt_state_dict.update(torch.load(pt_path, map_location="cpu")) + if pt_path.endswith(".safetensors"): + state_dict = safe_load_file(pt_path) + else: + state_dict = torch.load(pt_path, map_location="cpu") + + pt_state_dict.update(state_dict) logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters") diff --git a/tests/models/mpnet/test_modeling_mpnet.py b/tests/models/mpnet/test_modeling_mpnet.py index fc167641742a..52d8d1f8b4c6 100644 --- a/tests/models/mpnet/test_modeling_mpnet.py +++ b/tests/models/mpnet/test_modeling_mpnet.py @@ -246,6 +246,10 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs) + @unittest.skip("This isn't passing but should, seems like a misconfiguration of tied weights.") + def test_tf_from_pt_safetensors(self): + return + @require_torch class MPNetModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/wav2vec2/test_modeling_wav2vec2.py b/tests/models/wav2vec2/test_modeling_wav2vec2.py index cb943520db68..353606252c0d 100644 --- a/tests/models/wav2vec2/test_modeling_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_wav2vec2.py @@ -824,6 +824,12 @@ def flatten_output(output): # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() + @unittest.skip( + "Need to investigate why config.do_stable_layer_norm is set to False here when it doesn't seem to be supported" + ) + def test_flax_from_pt_safetensors(self): + return + @require_torch class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index fdd48de2fd7f..31c3c7af030d 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -105,6 +105,7 @@ if is_flax_available(): import jax.numpy as jnp + from tests.test_modeling_flax_utils import check_models_equal from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, @@ -3219,6 +3220,55 @@ def test_flash_attn_2_fp32_ln(self): # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) + @is_pt_tf_cross_test + def test_tf_from_pt_safetensors(self): + for model_class in self.all_model_classes: + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + tf_model_class_name = "TF" + model_class.__name__ # Add the "TF" at the beginning + if not hasattr(transformers, tf_model_class_name): + # transformers does not have this model in TF version yet + return + + tf_model_class = getattr(transformers, tf_model_class_name) + + pt_model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + pt_model.save_pretrained(tmpdirname, safe_serialization=True) + tf_model_1 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) + + pt_model.save_pretrained(tmpdirname, safe_serialization=False) + tf_model_2 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) + + # Check models are equal + for p1, p2 in zip(tf_model_1.weights, tf_model_2.weights): + self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + + @is_pt_flax_cross_test + def test_flax_from_pt_safetensors(self): + for model_class in 
self.all_model_classes: + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + flax_model_class_name = "Flax" + model_class.__name__ # Add the "Flax at the beginning + if not hasattr(transformers, flax_model_class_name): + # transformers does not have this model in Flax version yet + return + + flax_model_class = getattr(transformers, flax_model_class_name) + + pt_model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + pt_model.save_pretrained(tmpdirname, safe_serialization=True) + flax_model_1 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) + + pt_model.save_pretrained(tmpdirname, safe_serialization=False) + flax_model_2 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) + + # Check models are equal + self.assertTrue(check_models_equal(flax_model_1, flax_model_2)) + global_rng = random.Random() From 20abdacbef1a74634f00453eb924574734374ec4 Mon Sep 17 00:00:00 2001 From: assafbot <125451756+assafbot@users.noreply.github.com> Date: Mon, 13 Nov 2023 17:31:44 +0200 Subject: [PATCH 151/268] OWLv2: bug fix in post_process_object_detection() when using cuda device (#27468) * OWLv2: bug fix in post_process_object_detection() when using cuda device * fix copies issue by fixing original function in owlvit --- src/transformers/models/owlv2/image_processing_owlv2.py | 2 +- src/transformers/models/owlvit/image_processing_owlvit.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/owlv2/image_processing_owlv2.py b/src/transformers/models/owlv2/image_processing_owlv2.py index 9a186beb8418..bb309b40d314 100644 --- a/src/transformers/models/owlv2/image_processing_owlv2.py +++ b/src/transformers/models/owlv2/image_processing_owlv2.py @@ -504,7 +504,7 @@ def post_process_object_detection( else: img_h, img_w = target_sizes.unbind(1) - scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] diff --git a/src/transformers/models/owlvit/image_processing_owlvit.py b/src/transformers/models/owlvit/image_processing_owlvit.py index 3efbc5122962..4242356cabd4 100644 --- a/src/transformers/models/owlvit/image_processing_owlvit.py +++ b/src/transformers/models/owlvit/image_processing_owlvit.py @@ -448,7 +448,7 @@ def post_process(self, outputs, target_sizes): # Convert from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) - scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] @@ -498,7 +498,7 @@ def post_process_object_detection( else: img_h, img_w = target_sizes.unbind(1) - scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] From 2dc29cfc984267b92ea5fcfd2c1885a8fddeaa99 Mon Sep 17 00:00:00 2001 From: Tomasz Cichy <107866759+tomaszcichy98@users.noreply.github.com> Date: Mon, 13 Nov 2023 15:32:03 +0000 Subject: [PATCH 152/268] Fix docstring for `gradient_checkpointing_kwargs` (#27470) Docstring entry for `gradient_checkpointing_kwargs` was `gradient_checkpointing_args`. This is incorrect. 
--- src/transformers/training_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 32446f0ab201..9efe42c1353d 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -574,7 +574,7 @@ class TrainingArguments: Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished. gradient_checkpointing (`bool`, *optional*, defaults to `False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. - gradient_checkpointing_args (`dict`, *optional*, defaults to `None`): + gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): Key word arguments to be passed to the `gradient_checkpointing_enable` method. include_inputs_for_metrics (`bool`, *optional*, defaults to `False`): Whether or not the inputs will be passed to the `compute_metrics` function. This is intended for metrics From 3b5962131093ceab09f0540cb99d84c18c45035f Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 13 Nov 2023 16:38:13 +0100 Subject: [PATCH 153/268] Install `python-Levenshtein` for `nougat` in CI image (#27465) fix Co-authored-by: ydshieh --- docker/transformers-all-latest-gpu/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 0557faffec9e..d108ba5ace58 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -67,6 +67,9 @@ RUN python3 -m pip install --no-cache-dir decord av==9.2.0 # For `dinat` model RUN python3 -m pip install --no-cache-dir natten -f https://shi-labs.com/natten/wheels/$CUDA/ +# For `nougat` tokenizer +RUN python3 -m pip install --no-cache-dir python-Levenshtein + # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. 
RUN cd transformers && python3 setup.py develop From 2422c38de6dc2341f9cf3685fc36d090a68d0f60 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 13 Nov 2023 17:20:42 +0100 Subject: [PATCH 154/268] Add DINOv2 depth estimation (#26092) * First draft * Fix style * More improvements * Fix tests * Fix tests * Convert checkpoint * Improve DPTImageProcessor * Remove scripts, improve conversion script * Remove print statements * Fix test * Improve docstring * More improvements * Fix style * Fix image processor * Add tests * Address comments * Address comments * Make bias backwards compatible * Address comment * Address comment * Address comment * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Address comments * Add flag * Add tests * Make tests smaller * Use regular BackboneOutput * Fix all tests * Update test * Convert more checkpoints * Convert giant checkpoints, add integration test * Rename size_divisibility to size_divisor --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../models/dinov2/modeling_dinov2.py | 7 +- .../models/dpt/configuration_dpt.py | 56 ++- .../models/dpt/convert_dinov2_depth_to_hf.py | 384 ++++++++++++++++++ .../models/dpt/convert_dpt_to_pytorch.py | 15 +- .../models/dpt/image_processing_dpt.py | 70 +++- src/transformers/models/dpt/modeling_dpt.py | 151 ++++--- tests/models/dpt/test_image_processing_dpt.py | 22 + .../dpt/test_modeling_dpt_auto_backbone.py | 294 ++++++++++++++ 8 files changed, 926 insertions(+), 73 deletions(-) create mode 100644 src/transformers/models/dpt/convert_dinov2_depth_to_hf.py create mode 100644 tests/models/dpt/test_modeling_dpt_auto_backbone.py diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py index 1215b23480b7..e6a17e570743 100644 --- a/src/transformers/models/dinov2/modeling_dinov2.py +++ b/src/transformers/models/dinov2/modeling_dinov2.py @@ -835,11 +835,12 @@ def forward( if self.config.apply_layernorm: hidden_state = self.layernorm(hidden_state) if self.config.reshape_hidden_states: + hidden_state = hidden_state[:, 1:] + # this was actually a bug in the original implementation that we copied here, + # cause normally the order is height, width batch_size, _, height, width = pixel_values.shape patch_size = self.config.patch_size - hidden_state = hidden_state[:, 1:, :].reshape( - batch_size, width // patch_size, height // patch_size, -1 - ) + hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) diff --git a/src/transformers/models/dpt/configuration_dpt.py b/src/transformers/models/dpt/configuration_dpt.py index 45acd5902f5e..99994c5cb2d8 100644 --- a/src/transformers/models/dpt/configuration_dpt.py +++ b/src/transformers/models/dpt/configuration_dpt.py @@ -18,6 +18,7 @@ from ...configuration_utils import PretrainedConfig from ...utils import logging +from ..auto.configuration_auto import CONFIG_MAPPING from ..bit import BitConfig @@ -91,6 +92,10 @@ class DPTConfig(PretrainedConfig): The index of the features to use in the heads. use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`): Whether to use batch normalization in the pre-activate residual units of the fusion blocks. 
+ use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`): + Whether to use bias in the pre-activate residual units of the fusion blocks. + add_projection (`bool`, *optional*, defaults to `False`): + Whether to add a projection layer before the depth estimation head. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): @@ -104,7 +109,8 @@ class DPTConfig(PretrainedConfig): neck_ignore_stages (`List[int]`, *optional*, defaults to `[0, 1]`): Used only for the `hybrid` embedding type. The stages of the readout layers to ignore. backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*): - Used only for the `hybrid` embedding type. The configuration of the backbone in a dictionary. + The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to + leverage the [`AutoBackbone`] API. Example: @@ -145,6 +151,8 @@ def __init__( fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, + use_bias_in_fusion_residual=None, + add_projection=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, @@ -159,6 +167,7 @@ def __init__( self.hidden_size = hidden_size self.is_hybrid = is_hybrid + use_autobackbone = False if self.is_hybrid: if backbone_config is None: logger.info("Initializing the config with a `BiT` backbone.") @@ -185,32 +194,49 @@ def __init__( if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.") + + elif backbone_config is not None: + use_autobackbone = True + + if isinstance(backbone_config, dict): + backbone_model_type = backbone_config.get("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + + self.backbone_config = backbone_config + self.backbone_featmap_shape = None + self.neck_ignore_stages = [] + else: - self.backbone_config = None + self.backbone_config = backbone_config self.backbone_featmap_shape = None self.neck_ignore_stages = [] - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.intermediate_size = intermediate_size - self.hidden_act = hidden_act - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.initializer_range = initializer_range - self.layer_norm_eps = layer_norm_eps - self.image_size = image_size - self.patch_size = patch_size - self.num_channels = num_channels - self.qkv_bias = qkv_bias - self.backbone_out_indices = backbone_out_indices + self.num_hidden_layers = None if use_autobackbone else num_hidden_layers + self.num_attention_heads = None if use_autobackbone else num_attention_heads + self.intermediate_size = None if use_autobackbone else intermediate_size + self.hidden_dropout_prob = None if use_autobackbone else hidden_dropout_prob + self.attention_probs_dropout_prob = None if use_autobackbone else attention_probs_dropout_prob + self.layer_norm_eps = None if use_autobackbone else layer_norm_eps + self.image_size = None if use_autobackbone else image_size + self.patch_size = None if use_autobackbone else patch_size + self.num_channels = None if use_autobackbone else num_channels + self.qkv_bias = None if use_autobackbone else qkv_bias + self.backbone_out_indices = None if use_autobackbone else backbone_out_indices + if readout_type not in ["ignore", "add", "project"]: 
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']") + self.hidden_act = hidden_act + self.initializer_range = initializer_range self.readout_type = readout_type self.reassemble_factors = reassemble_factors self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.head_in_index = head_in_index self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual + self.use_bias_in_fusion_residual = use_bias_in_fusion_residual + self.add_projection = add_projection + # auxiliary head attributes (semantic segmentation) self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight diff --git a/src/transformers/models/dpt/convert_dinov2_depth_to_hf.py b/src/transformers/models/dpt/convert_dinov2_depth_to_hf.py new file mode 100644 index 000000000000..2bd147096c86 --- /dev/null +++ b/src/transformers/models/dpt/convert_dinov2_depth_to_hf.py @@ -0,0 +1,384 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert DINOv2 + DPT checkpoints from the original repository. URL: +https://github.com/facebookresearch/dinov2/tree/main""" + + +import argparse +import itertools +import math +from pathlib import Path + +import requests +import torch +from PIL import Image +from torchvision import transforms + +from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation, DPTImageProcessor +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def get_dpt_config(model_name): + if "small" in model_name: + # equivalent to stage 3, stage 6, stage 9, stage 12 + backbone_config = Dinov2Config.from_pretrained( + "facebook/dinov2-small", out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False + ) + neck_hidden_sizes = [48, 96, 192, 384] + elif "base" in model_name: + backbone_config = Dinov2Config.from_pretrained( + "facebook/dinov2-base", out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False + ) + neck_hidden_sizes = [96, 192, 384, 768] + elif "large" in model_name: + backbone_config = Dinov2Config.from_pretrained( + "facebook/dinov2-large", out_indices=[5, 12, 18, 24], apply_layernorm=False, reshape_hidden_states=False + ) + neck_hidden_sizes = [128, 256, 512, 1024] + elif "giant" in model_name: + backbone_config = Dinov2Config.from_pretrained( + "facebook/dinov2-giant", out_indices=[10, 20, 30, 40], apply_layernorm=False, reshape_hidden_states=False + ) + neck_hidden_sizes = [192, 384, 768, 1536] + else: + raise NotImplementedError("To do") + + config = DPTConfig( + backbone_config=backbone_config, + neck_hidden_sizes=neck_hidden_sizes, + use_bias_in_fusion_residual=False, + add_projection=True, + ) + + return config + + +# here we list all DPT keys to be renamed (original name on the left, our name on the right) +def create_rename_keys_dpt(config): + rename_keys = [] + + # fmt: off + # activation postprocessing (projections, readout 
projections + resize blocks) + for i in range(4): + rename_keys.append((f"decode_head.reassemble_blocks.projects.{i}.conv.weight", f"neck.reassemble_stage.layers.{i}.projection.weight")) + rename_keys.append((f"decode_head.reassemble_blocks.projects.{i}.conv.bias", f"neck.reassemble_stage.layers.{i}.projection.bias")) + + rename_keys.append((f"decode_head.reassemble_blocks.readout_projects.{i}.0.weight", f"neck.reassemble_stage.readout_projects.{i}.0.weight")) + rename_keys.append((f"decode_head.reassemble_blocks.readout_projects.{i}.0.bias", f"neck.reassemble_stage.readout_projects.{i}.0.bias")) + + if i != 2: + rename_keys.append((f"decode_head.reassemble_blocks.resize_layers.{i}.weight", f"neck.reassemble_stage.layers.{i}.resize.weight")) + rename_keys.append((f"decode_head.reassemble_blocks.resize_layers.{i}.bias", f"neck.reassemble_stage.layers.{i}.resize.bias")) + + # fusion layers + for i in range(4): + rename_keys.append((f"decode_head.fusion_blocks.{i}.project.conv.weight", f"neck.fusion_stage.layers.{i}.projection.weight")) + rename_keys.append((f"decode_head.fusion_blocks.{i}.project.conv.bias", f"neck.fusion_stage.layers.{i}.projection.bias")) + if i != 0: + rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit1.conv1.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer1.convolution1.weight")) + rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit1.conv2.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer1.convolution2.weight")) + rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit2.conv1.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer2.convolution1.weight")) + rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit2.conv2.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer2.convolution2.weight")) + + # neck convolutions + for i in range(4): + rename_keys.append((f"decode_head.convs.{i}.conv.weight", f"neck.convs.{i}.weight")) + + # head + rename_keys.append(("decode_head.project.conv.weight", "head.projection.weight")) + rename_keys.append(("decode_head.project.conv.bias", "head.projection.bias")) + + for i in range(0, 5, 2): + rename_keys.append((f"decode_head.conv_depth.head.{i}.weight", f"head.head.{i}.weight")) + rename_keys.append((f"decode_head.conv_depth.head.{i}.bias", f"head.head.{i}.bias")) + # fmt: on + + return rename_keys + + +# here we list all backbone keys to be renamed (original name on the left, our name on the right) +def create_rename_keys_backbone(config): + rename_keys = [] + + # fmt: off + # patch embedding layer + rename_keys.append(("cls_token", "backbone.embeddings.cls_token")) + rename_keys.append(("mask_token", "backbone.embeddings.mask_token")) + rename_keys.append(("pos_embed", "backbone.embeddings.position_embeddings")) + rename_keys.append(("patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight")) + rename_keys.append(("patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias")) + + # Transfomer encoder + for i in range(config.backbone_config.num_hidden_layers): + # layernorms + rename_keys.append((f"blocks.{i}.norm1.weight", f"backbone.encoder.layer.{i}.norm1.weight")) + rename_keys.append((f"blocks.{i}.norm1.bias", f"backbone.encoder.layer.{i}.norm1.bias")) + rename_keys.append((f"blocks.{i}.norm2.weight", f"backbone.encoder.layer.{i}.norm2.weight")) + rename_keys.append((f"blocks.{i}.norm2.bias", f"backbone.encoder.layer.{i}.norm2.bias")) + # MLP + if config.backbone_config.use_swiglu_ffn: + 
rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"backbone.encoder.layer.{i}.mlp.w12.weight")) + rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"backbone.encoder.layer.{i}.mlp.w12.bias")) + rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"backbone.encoder.layer.{i}.mlp.w3.weight")) + rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"backbone.encoder.layer.{i}.mlp.w3.bias")) + else: + rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"backbone.encoder.layer.{i}.mlp.fc1.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"backbone.encoder.layer.{i}.mlp.fc1.bias")) + rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"backbone.encoder.layer.{i}.mlp.fc2.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"backbone.encoder.layer.{i}.mlp.fc2.bias")) + # layerscale + rename_keys.append((f"blocks.{i}.ls1.gamma", f"backbone.encoder.layer.{i}.layer_scale1.lambda1")) + rename_keys.append((f"blocks.{i}.ls2.gamma", f"backbone.encoder.layer.{i}.layer_scale2.lambda1")) + # attention projection layer + rename_keys.append((f"blocks.{i}.attn.proj.weight", f"backbone.encoder.layer.{i}.attention.output.dense.weight")) + rename_keys.append((f"blocks.{i}.attn.proj.bias", f"backbone.encoder.layer.{i}.attention.output.dense.bias")) + # fmt: on + + rename_keys.append(("norm.weight", "backbone.layernorm.weight")) + rename_keys.append(("norm.bias", "backbone.layernorm.bias")) + + return rename_keys + + +# we split up the matrix of each encoder layer into queries, keys and values +def read_in_q_k_v(state_dict, config): + for i in range(config.backbone_config.num_hidden_layers): + # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") + in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") + hidden_size = config.backbone_config.hidden_size + # next, add query, keys and values (in that order) to the state dict + state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :] + state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden_size] + state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ + hidden_size : hidden_size * 2, : + ] + state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ + hidden_size : hidden_size * 2 + ] + state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :] + state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden_size:] + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "https://dl.fbaipublicfiles.com/dinov2/images/example.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +name_to_url = { + "dpt-dinov2-small-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_nyu_dpt_head.pth", + "dpt-dinov2-small-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_kitti_dpt_head.pth", + "dpt-dinov2-base-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_nyu_dpt_head.pth", + "dpt-dinov2-base-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_kitti_dpt_head.pth", + "dpt-dinov2-large-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_nyu_dpt_head.pth", + "dpt-dinov2-large-kitti": 
"https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_kitti_dpt_head.pth", + "dpt-dinov2-giant-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_nyu_dpt_head.pth", + "dpt-dinov2-giant-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_kitti_dpt_head.pth", +} + + +def get_original_pixel_values(image): + class CenterPadding(object): + def __init__(self, multiple): + super().__init__() + self.multiple = multiple + + def _get_pad(self, size): + new_size = math.ceil(size / self.multiple) * self.multiple + pad_size = new_size - size + pad_size_left = pad_size // 2 + pad_size_right = pad_size - pad_size_left + return pad_size_left, pad_size_right + + def __call__(self, img): + pads = list(itertools.chain.from_iterable(self._get_pad(m) for m in img.shape[-2:][::-1])) + output = torch.nn.functional.pad(img, pads) + return output + + def __repr__(self): + return self.__class__.__name__ + "()" + + def make_depth_transform() -> transforms.Compose: + return transforms.Compose( + [ + transforms.ToTensor(), + lambda x: 255.0 * x[:3], # Discard alpha component and scale by 255 + transforms.Normalize( + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + ), + CenterPadding(multiple=14), + ] + ) + + transform = make_depth_transform() + original_pixel_values = transform(image).unsqueeze(0) + + return original_pixel_values + + +@torch.no_grad() +def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits): + """ + Copy/paste/tweak model's weights to our DPT structure. + """ + + # define DPT configuration based on URL + checkpoint_url = name_to_url[model_name] + config = get_dpt_config(model_name) + + # load original DPT state_dict from URL + print("URL:", checkpoint_url) + dpt_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"] + # rename keys + rename_keys = create_rename_keys_dpt(config) + for src, dest in rename_keys: + rename_key(dpt_state_dict, src, dest) + + # load original backbone state_dict from URL + if "small" in model_name: + original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14") + elif "base" in model_name: + original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitb14") + elif "large" in model_name: + original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitl14") + elif "giant" in model_name: + original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitg14") + else: + raise NotImplementedError("To do") + original_model.eval() + backbone_state_dict = original_model.state_dict() + + # rename keys + rename_keys = create_rename_keys_backbone(config) + for src, dest in rename_keys: + rename_key(backbone_state_dict, src, dest) + + # read in qkv matrices + read_in_q_k_v(backbone_state_dict, config) + + for key, val in backbone_state_dict.copy().items(): + val = backbone_state_dict.pop(key) + if "w12" in key: + key = key.replace("w12", "weights_in") + if "w3" in key: + key = key.replace("w3", "weights_out") + backbone_state_dict[key] = val + + # merge state_dicts + state_dict = {**backbone_state_dict, **dpt_state_dict} + + # load HuggingFace model + model = DPTForDepthEstimation(config) + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) + print("Missing keys:", missing_keys) + print("Unexpected keys:", unexpected_keys) + assert missing_keys == [ + "neck.fusion_stage.layers.0.residual_layer1.convolution1.weight", + 
"neck.fusion_stage.layers.0.residual_layer1.convolution2.weight", + ] + model.eval() + + # Verify image processor + processor = DPTImageProcessor( + do_resize=False, + do_rescale=False, + do_pad=True, + size_divisor=14, + do_normalize=True, + image_mean=(123.675, 116.28, 103.53), + image_std=(58.395, 57.12, 57.375), + ) + + image = prepare_img() + pixel_values = processor(image, return_tensors="pt").pixel_values.float() + original_pixel_values = get_original_pixel_values(image) + + assert torch.allclose(pixel_values, original_pixel_values) + + # Verify forward pass + with torch.no_grad(): + outputs = model(pixel_values) + + predicted_depth = outputs.predicted_depth + + print("Shape of predicted depth:", predicted_depth.shape) + print("First values of predicted depth:", predicted_depth[0, :3, :3]) + + # assert logits + if verify_logits: + if model_name == "dpt-dinov2-small-nyu": + expected_shape = torch.Size([1, 576, 736]) + expected_slice = torch.tensor( + [[3.3576, 3.4741, 3.4345], [3.4324, 3.5012, 3.2775], [3.2560, 3.3563, 3.2354]] + ) + + assert predicted_depth.shape == torch.Size(expected_shape) + assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-5) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + print(f"Saving model and processor to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print("Pushing model and processor to hub...") + model.push_to_hub(repo_id=f"facebook/{model_name}") + processor.push_to_hub(repo_id=f"facebook/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="dpt-dinov2-small-nyu", + type=str, + choices=name_to_url.keys(), + help="Name of the model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument( + "--push_to_hub", + action="store_true", + help="Whether to push the model to the hub after conversion.", + ) + parser.add_argument( + "--verify_logits", + action="store_true", + required=False, + help="Path to the output PyTorch model directory.", + ) + + args = parser.parse_args() + convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits) diff --git a/src/transformers/models/dpt/convert_dpt_to_pytorch.py b/src/transformers/models/dpt/convert_dpt_to_pytorch.py index cee5be88c3a2..42637cb21587 100644 --- a/src/transformers/models/dpt/convert_dpt_to_pytorch.py +++ b/src/transformers/models/dpt/convert_dpt_to_pytorch.py @@ -229,12 +229,14 @@ def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3], expected_slice) ) + print("Looks ok!") - Path(pytorch_dump_folder_path).mkdir(exist_ok=True) - print(f"Saving model to {pytorch_dump_folder_path}") - model.save_pretrained(pytorch_dump_folder_path) - print(f"Saving image processor to {pytorch_dump_folder_path}") - image_processor.save_pretrained(pytorch_dump_folder_path) + if pytorch_dump_folder_path is not None: + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + print(f"Saving model to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + print(f"Saving image processor to {pytorch_dump_folder_path}") + 
image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing model to hub...") @@ -265,7 +267,7 @@ def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub "--pytorch_dump_folder_path", default=None, type=str, - required=True, + required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument( @@ -276,6 +278,7 @@ def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub "--model_name", default="dpt-large", type=str, + required=False, help="Name of the model, in case you're pushing to the hub.", ) diff --git a/src/transformers/models/dpt/image_processing_dpt.py b/src/transformers/models/dpt/image_processing_dpt.py index 93374dbd9259..49403140b976 100644 --- a/src/transformers/models/dpt/image_processing_dpt.py +++ b/src/transformers/models/dpt/image_processing_dpt.py @@ -20,7 +20,7 @@ import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict -from ...image_transforms import resize, to_channel_dimension_format +from ...image_transforms import pad, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, @@ -122,6 +122,12 @@ class DPTImageProcessor(BaseImageProcessor): image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + do_pad (`bool`, *optional*, defaults to `False`): + Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in + combination with DPT. + size_divisor (`int`, *optional*): + If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the + DINOv2 paper, which uses the model in combination with DPT. """ model_input_names = ["pixel_values"] @@ -138,6 +144,8 @@ def __init__( do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, + do_pad: bool = False, + size_divisor: int = None, **kwargs, ) -> None: super().__init__(**kwargs) @@ -153,6 +161,8 @@ def __init__( self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD + self.do_pad = do_pad + self.size_divisor = size_divisor def resize( self, @@ -208,6 +218,51 @@ def resize( **kwargs, ) + def pad_image( + self, + image: np.array, + size_divisor: int, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + """ + Center pad an image to be a multiple of `multiple`. + + Args: + image (`np.ndarray`): + Image to pad. + size_divisor (`int`): + The width and height of the image will be padded to a multiple of this number. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. 
+ input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + + def _get_pad(size, size_divisor): + new_size = math.ceil(size / size_divisor) * size_divisor + pad_size = new_size - size + pad_size_left = pad_size // 2 + pad_size_right = pad_size - pad_size_left + return pad_size_left, pad_size_right + + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + + height, width = get_image_size(image, input_data_format) + + pad_size_left, pad_size_right = _get_pad(height, size_divisor) + pad_size_top, pad_size_bottom = _get_pad(width, size_divisor) + + return pad(image, ((pad_size_left, pad_size_right), (pad_size_top, pad_size_bottom)), data_format=data_format) + def preprocess( self, images: ImageInput, @@ -221,6 +276,8 @@ def preprocess( do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, + do_pad: bool = None, + size_divisor: int = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, @@ -286,6 +343,8 @@ def preprocess( do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std + do_pad = do_pad if do_pad is not None else self.do_pad + size_divisor = size_divisor if size_divisor is not None else self.size_divisor images = make_list_of_images(images) @@ -304,6 +363,9 @@ def preprocess( if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") + if do_pad and size_divisor is None: + raise ValueError("Size divisibility must be specified if do_pad is True.") + # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] @@ -335,6 +397,12 @@ def preprocess( for image in images ] + if do_pad: + images = [ + self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format) + for image in images + ] + images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py index 63796d0168fa..ca44b6a42aee 100755 --- a/src/transformers/models/dpt/modeling_dpt.py +++ b/src/transformers/models/dpt/modeling_dpt.py @@ -599,12 +599,13 @@ def _init_reassemble_dpt_hybrid(self, config): # When using DPT-Hybrid the readout type is set to "project". 
The sanity check is done on the config file self.readout_projects = nn.ModuleList() + hidden_size = _get_backbone_hidden_size(config) for i in range(len(config.neck_hidden_sizes)): if i <= 1: self.readout_projects.append(nn.Sequential(nn.Identity())) elif i > 1: self.readout_projects.append( - nn.Sequential(nn.Linear(2 * config.hidden_size, config.hidden_size), ACT2FN[config.hidden_act]) + nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]) ) def _init_reassemble_dpt(self, config): @@ -613,12 +614,13 @@ def _init_reassemble_dpt(self, config): if config.readout_type == "project": self.readout_projects = nn.ModuleList() + hidden_size = _get_backbone_hidden_size(config) for _ in range(len(config.neck_hidden_sizes)): self.readout_projects.append( - nn.Sequential(nn.Linear(2 * config.hidden_size, config.hidden_size), ACT2FN[config.hidden_act]) + nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]) ) - def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]: + def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: """ Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): @@ -628,21 +630,24 @@ def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]: for i, hidden_state in enumerate(hidden_states): if i not in self.neck_ignore_stages: - # reshape to (B, C, H, W) - hidden_state, cls_token = hidden_state[:, 1:], hidden_state[:, 0] + # reshape to (batch_size, num_channels, height, width) + cls_token, hidden_state = hidden_state[:, 0], hidden_state[:, 1:] batch_size, sequence_length, num_channels = hidden_state.shape - size = int(math.sqrt(sequence_length)) - hidden_state = hidden_state.reshape(batch_size, size, size, num_channels) + if patch_height is not None and patch_width is not None: + hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels) + else: + size = int(math.sqrt(sequence_length)) + hidden_state = hidden_state.reshape(batch_size, size, size, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_shape = hidden_state.shape if self.config.readout_type == "project": - # reshape to (B, H*W, C) + # reshape to (batch_size, height*width, num_channels) hidden_state = hidden_state.flatten(2).permute((0, 2, 1)) readout = cls_token.unsqueeze(1).expand_as(hidden_state) # concatenate the readout token to the hidden states and project hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1)) - # reshape back to (B, C, H, W) + # reshape back to (batch_size, num_channels, height, width) hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape) elif self.config.readout_type == "add": hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1) @@ -653,11 +658,19 @@ def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]: return out +def _get_backbone_hidden_size(config): + if config.backbone_config is not None and config.is_hybrid is False: + return config.backbone_config.hidden_size + else: + return config.hidden_size + + class DPTReassembleLayer(nn.Module): def __init__(self, config, channels, factor): super().__init__() # projection - self.projection = nn.Conv2d(in_channels=config.hidden_size, out_channels=channels, kernel_size=1) + hidden_size = _get_backbone_hidden_size(config) + self.projection = nn.Conv2d(in_channels=hidden_size, out_channels=channels, kernel_size=1) # 
up/down sampling depending on factor if factor > 1: @@ -710,24 +723,30 @@ def __init__(self, config): super().__init__() self.use_batch_norm = config.use_batch_norm_in_fusion_residual - self.activation1 = ACT2FN["relu"] + use_bias_in_fusion_residual = ( + config.use_bias_in_fusion_residual + if config.use_bias_in_fusion_residual is not None + else not self.use_batch_norm + ) + + self.activation1 = nn.ReLU() self.convolution1 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, - bias=not self.use_batch_norm, + bias=use_bias_in_fusion_residual, ) - self.activation2 = ACT2FN["relu"] + self.activation2 = nn.ReLU() self.convolution2 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, - bias=not self.use_batch_norm, + bias=use_bias_in_fusion_residual, ) if self.use_batch_norm: @@ -973,8 +992,12 @@ def __init__(self, config): super().__init__() self.config = config - # postprocessing - self.reassemble_stage = DPTReassembleStage(config) + # postprocessing: only required in case of a non-hierarchical backbone (e.g. ViT, BEiT) + if config.backbone_config is not None and config.backbone_config.model_type in ["swinv2"]: + self.reassemble_stage = None + else: + self.reassemble_stage = DPTReassembleStage(config) + self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) @@ -982,17 +1005,23 @@ def __init__(self, config): # fusion self.fusion_stage = DPTFeatureFusionStage(config) - def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]: - if not isinstance(hidden_states, list): - raise ValueError("hidden_states should be a list of tensors") + def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: + """ + Args: + hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): + List of hidden states from the backbone. 
+ """ + if not isinstance(hidden_states, (tuple, list)): + raise ValueError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") # postprocess hidden states - features = self.reassemble_stage(hidden_states) + if self.reassemble_stage is not None: + hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) - features = [self.convs[i](feature) for i, feature in enumerate(features)] + features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] # fusion blocks output = self.fusion_stage(features) @@ -1012,20 +1041,28 @@ def __init__(self, config): self.config = config + self.projection = None + if config.add_projection: + self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + features = config.fusion_hidden_size self.head = nn.Sequential( nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), - ACT2FN["relu"], + nn.ReLU(), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - ACT2FN["relu"], + nn.ReLU(), ) def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor: # use last features hidden_states = hidden_states[self.config.head_in_index] + if self.projection is not None: + hidden_states = self.projection(hidden_states) + hidden_states = nn.ReLU()(hidden_states) + predicted_depth = self.head(hidden_states) predicted_depth = predicted_depth.squeeze(dim=1) @@ -1043,7 +1080,11 @@ class DPTForDepthEstimation(DPTPreTrainedModel): def __init__(self, config): super().__init__(config) - self.dpt = DPTModel(config, add_pooling_layer=False) + self.backbone = None + if config.backbone_config is not None and config.is_hybrid is False: + self.backbone = AutoBackbone.from_config(config.backbone_config) + else: + self.dpt = DPTModel(config, add_pooling_layer=False) # Neck self.neck = DPTNeck(config) @@ -1109,32 +1150,46 @@ def forward( output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - outputs = self.dpt( - pixel_values, - head_mask=head_mask, - output_attentions=output_attentions, - output_hidden_states=True, # we need the intermediate hidden states - return_dict=return_dict, - ) - - hidden_states = outputs.hidden_states if return_dict else outputs[1] - - # only keep certain features based on config.backbone_out_indices - # note that the hidden_states also include the initial embeddings - if not self.config.is_hybrid: - hidden_states = [ - feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices - ] + if self.backbone is not None: + outputs = self.backbone.forward_with_filtered_kwargs( + pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions + ) + hidden_states = outputs.feature_maps else: - backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) - backbone_hidden_states.extend( - feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:] + outputs = self.dpt( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=True, # we 
need the intermediate hidden states + return_dict=return_dict, ) + hidden_states = outputs.hidden_states if return_dict else outputs[1] + # only keep certain features based on config.backbone_out_indices + # note that the hidden_states also include the initial embeddings + if not self.config.is_hybrid: + hidden_states = [ + feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices + ] + else: + backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) + backbone_hidden_states.extend( + feature + for idx, feature in enumerate(hidden_states[1:]) + if idx in self.config.backbone_out_indices[2:] + ) - hidden_states = backbone_hidden_states + hidden_states = backbone_hidden_states + + patch_height, patch_width = None, None + if self.config.backbone_config is not None and self.config.is_hybrid is False: + _, _, height, width = pixel_values.shape + patch_size = self.config.backbone_config.patch_size + patch_height = height // patch_size + patch_width = width // patch_size - hidden_states = self.neck(hidden_states) + hidden_states = self.neck(hidden_states, patch_height, patch_width) predicted_depth = self.head(hidden_states) @@ -1167,7 +1222,7 @@ def __init__(self, config): self.head = nn.Sequential( nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), - ACT2FN["relu"], + nn.ReLU(), nn.Dropout(config.semantic_classifier_dropout), nn.Conv2d(features, config.num_labels, kernel_size=1), nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), @@ -1190,7 +1245,7 @@ def __init__(self, config): self.head = nn.Sequential( nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), - ACT2FN["relu"], + nn.ReLU(), nn.Dropout(0.1, False), nn.Conv2d(features, config.num_labels, kernel_size=1), ) @@ -1287,7 +1342,7 @@ def forward( hidden_states = backbone_hidden_states - hidden_states = self.neck(hidden_states) + hidden_states = self.neck(hidden_states=hidden_states) logits = self.head(hidden_states) diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index 5670d50b913b..a70165048bd9 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -16,6 +16,8 @@ import unittest +import numpy as np + from transformers.file_utils import is_vision_available from transformers.testing_utils import require_torch, require_vision @@ -97,6 +99,10 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_rescale")) + self.assertTrue(hasattr(image_processing, "rescale_factor")) + self.assertTrue(hasattr(image_processing, "do_pad")) + self.assertTrue(hasattr(image_processing, "size_divisor")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) @@ -104,3 +110,19 @@ def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + + def test_padding(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + image = np.random.randn(3, 249, 491) + + # test individual method + image = 
image_processing.pad_image(image, size_divisor=4) + self.assertTrue(image.shape[1] % 4 == 0) + self.assertTrue(image.shape[2] % 4 == 0) + + # test by calling + pixel_values = image_processing.preprocess( + image, do_rescale=False, do_resize=False, do_pad=True, size_divisor=4, return_tensors="pt" + ).pixel_values + self.assertTrue(pixel_values.shape[2] % 4 == 0) + self.assertTrue(pixel_values.shape[3] % 4 == 0) diff --git a/tests/models/dpt/test_modeling_dpt_auto_backbone.py b/tests/models/dpt/test_modeling_dpt_auto_backbone.py new file mode 100644 index 000000000000..95e3128ff0ed --- /dev/null +++ b/tests/models/dpt/test_modeling_dpt_auto_backbone.py @@ -0,0 +1,294 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch DPT model. """ + + +import inspect +import unittest + +from transformers import Dinov2Config, DPTConfig +from transformers.file_utils import is_torch_available, is_vision_available +from transformers.models.auto import get_values +from transformers.testing_utils import require_torch, require_vision, slow, torch_device + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin + + +if is_torch_available(): + import torch + + from transformers import MODEL_MAPPING, DPTForDepthEstimation + from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from PIL import Image + + from transformers import DPTImageProcessor + + +class DPTModelTester: + def __init__( + self, + parent, + batch_size=2, + num_channels=3, + image_size=32, + patch_size=16, + use_labels=True, + num_labels=3, + is_training=True, + hidden_size=4, + num_hidden_layers=2, + num_attention_heads=2, + intermediate_size=8, + out_features=["stage1", "stage2"], + apply_layernorm=False, + reshape_hidden_states=False, + neck_hidden_sizes=[2, 2], + fusion_hidden_size=6, + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.image_size = image_size + self.patch_size = patch_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.out_features = out_features + self.apply_layernorm = apply_layernorm + self.reshape_hidden_states = reshape_hidden_states + self.use_labels = use_labels + self.num_labels = num_labels + self.is_training = is_training + self.neck_hidden_sizes = neck_hidden_sizes + self.fusion_hidden_size = fusion_hidden_size + # DPT's sequence length + self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + labels = None + if self.use_labels: + labels = 
ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) + + config = self.get_config() + + return config, pixel_values, labels + + def get_config(self): + return DPTConfig( + backbone_config=self.get_backbone_config(), + neck_hidden_sizes=self.neck_hidden_sizes, + fusion_hidden_size=self.fusion_hidden_size, + ) + + def get_backbone_config(self): + return Dinov2Config( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + is_training=self.is_training, + out_features=self.out_features, + reshape_hidden_states=self.reshape_hidden_states, + ) + + def create_and_check_for_depth_estimation(self, config, pixel_values, labels): + config.num_labels = self.num_labels + model = DPTForDepthEstimation(config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values, labels = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, + attention_mask and seq_length. + """ + + all_model_classes = (DPTForDepthEstimation,) if is_torch_available() else () + pipeline_model_mapping = {"depth-estimation": DPTForDepthEstimation} if is_torch_available() else {} + + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + + def setUp(self): + self.model_tester = DPTModelTester(self) + self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") + def test_inputs_embeds(self): + pass + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_for_depth_estimation(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) + + def test_training(self): + for model_class in self.all_model_classes: + if model_class.__name__ == "DPTForDepthEstimation": + continue + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + if model_class in get_values(MODEL_MAPPING): + continue + + model = model_class(config) + model.to(torch_device) + model.train() + inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + loss = model(**inputs).loss + loss.backward() + + def test_training_gradient_checkpointing(self): + for model_class in self.all_model_classes: + if model_class.__name__ 
== "DPTForDepthEstimation": + continue + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.use_cache = False + config.return_dict = True + + if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: + continue + model = model_class(config) + model.to(torch_device) + model.gradient_checkpointing_enable() + model.train() + inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + loss = model(**inputs).loss + loss.backward() + + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + # Skip the check for the backbone + backbone_params = [] + for name, module in model.named_modules(): + if module.__class__.__name__ == "DPTViTHybridEmbeddings": + backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] + break + + for name, param in model.named_parameters(): + if param.requires_grad: + if name in backbone_params: + continue + self.assertIn( + ((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") + def test_model_common_attributes(self): + pass + + @unittest.skip(reason="DPT with AutoBackbone does not have a base model") + def test_save_load_fast_init_from_base(self): + pass + + @unittest.skip(reason="DPT with AutoBackbone does not have a base model") + def test_save_load_fast_init_to_base(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + + @slow + def test_model_from_pretrained(self): + for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = DPTForDepthEstimation.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_torch +@require_vision +@slow +class DPTModelIntegrationTest(unittest.TestCase): + def test_inference_depth_estimation(self): + image_processor = DPTImageProcessor.from_pretrained("facebook/dpt-dinov2-small-kitti") + model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-kitti").to(torch_device) + + image = prepare_img() + inputs = image_processor(images=image, return_tensors="pt").to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + predicted_depth = outputs.predicted_depth + + # verify the predicted depth + expected_shape = torch.Size((1, 576, 736)) + self.assertEqual(predicted_depth.shape, expected_shape) + + expected_slice = torch.tensor( + [[6.0433, 7.1636, 7.4268], [6.9047, 7.2471, 7.2355], [7.9261, 8.0631, 8.0244]] + ).to(torch_device) + + self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) From 
1af766e1046ab0fb60e193c75041c15b718e75b6 Mon Sep 17 00:00:00 2001 From: Matt Date: Mon, 13 Nov 2023 17:01:30 +0000 Subject: [PATCH 155/268] Add version check for Jinja (#27403) * Add version check for Jinja * Update src/transformers/tokenization_utils_base.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * make fixup --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/tokenization_utils_base.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 7360ce02f282..ea730bc00d07 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -1762,11 +1762,17 @@ def apply_chat_template( @lru_cache def _compile_jinja_template(self, chat_template): try: + import jinja2 from jinja2.exceptions import TemplateError from jinja2.sandbox import ImmutableSandboxedEnvironment except ImportError: raise ImportError("apply_chat_template requires jinja2 to be installed.") + if version.parse(jinja2.__version__) <= version.parse("3.0.0"): + raise ImportError( + "apply_chat_template requires jinja2>=3.0.0 to be installed. Your version is " f"{jinja2.__version__}." + ) + def raise_exception(message): raise TemplateError(message) From 04af4b90d6a7951e2cbad00649af4b8cf2fc90c8 Mon Sep 17 00:00:00 2001 From: Matt Date: Mon, 13 Nov 2023 17:01:59 +0000 Subject: [PATCH 156/268] Fix Falcon tokenizer loading in pipeline (#27316) * Improve pipeline tokenizer loading and hope nothing breaks * Let's try a hacky solution * Revert the changes to init * Add a falcon hack to the automapping * Add a falcon hack to the automapping --- src/transformers/models/auto/tokenization_auto.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index f04a9500dffe..f30bc9ad6195 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -162,6 +162,7 @@ ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), ("ernie_m", ("ErnieMTokenizer" if is_sentencepiece_available() else None, None)), ("esm", ("EsmTokenizer", None)), + ("falcon", (None, "PreTrainedTokenizerFast" if is_tokenizers_available() else None)), ("flaubert", ("FlaubertTokenizer", None)), ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)), ("fsmt", ("FSMTTokenizer", None)), From 7b139023c358928d72d944e8b5053739c1696a68 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Mon, 13 Nov 2023 18:18:41 +0100 Subject: [PATCH 157/268] [`AWQ` ] Addresses TODO for awq tests (#27467) addresses todo for awq tests --- tests/quantization/autoawq/test_awq.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/quantization/autoawq/test_awq.py b/tests/quantization/autoawq/test_awq.py index 1f1b51b77831..f0854e42553e 100644 --- a/tests/quantization/autoawq/test_awq.py +++ b/tests/quantization/autoawq/test_awq.py @@ -85,8 +85,7 @@ def test_from_dict(self): @require_auto_awq @require_accelerate class AwqTest(unittest.TestCase): - # TODO: @younesbelkada change it to `TheBloke/Mistral-7B-v0.1-AWQ` in the future - model_name = "ybelkada/test-mistral-7b-v0.1-awq" + model_name = "TheBloke/Mistral-7B-v0.1-AWQ" dummy_transformers_model_name = "bigscience/bloom-560m" input_text = 
"Hello my name is" From eb79b55bf34359b8d815256576abdc3fc01aa51f Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Mon, 13 Nov 2023 11:46:40 -0600 Subject: [PATCH 158/268] Perf torch compile (#27422) * translate perrf_torch_compile.md * translate tf_xla.md * update --- docs/source/zh/_toctree.yml | 4 + docs/source/zh/perf_torch_compile.md | 362 +++++++++++++++++++++++++++ docs/source/zh/tf_xla.md | 179 +++++++++++++ 3 files changed, 545 insertions(+) create mode 100644 docs/source/zh/perf_torch_compile.md create mode 100644 docs/source/zh/tf_xla.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 5f2fa5a172af..fffa7569f497 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -49,6 +49,10 @@ title: 实例化大模型 - local: debugging title: 问题定位及解决 + - local: tf_xla + title: TensorFlow模型的XLA集成 + - local: perf_torch_compile + title: 使用 `torch.compile()` 优化推理 title: 性能和可扩展性 - sections: - local: task_summary diff --git a/docs/source/zh/perf_torch_compile.md b/docs/source/zh/perf_torch_compile.md new file mode 100644 index 000000000000..b28dc9567c91 --- /dev/null +++ b/docs/source/zh/perf_torch_compile.md @@ -0,0 +1,362 @@ + + +# 使用 torch.compile() 优化推理 + +本指南旨在为使用[`torch.compile()`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html)在[🤗 Transformers中的计算机视觉模型](https://huggingface.co/models?pipeline_tag=image-classification&library=transformers&sort=trending)中引入的推理速度提升提供一个基准。 + + +## torch.compile 的优势 + +根据模型和GPU的不同,`torch.compile()`在推理过程中可以提高多达30%的速度。要使用`torch.compile()`,只需安装2.0及以上版本的`torch`即可。 + +编译模型需要时间,因此如果您只需要编译一次模型而不是每次推理都编译,那么它非常有用。 +要编译您选择的任何计算机视觉模型,请按照以下方式调用`torch.compile()`: + + +```diff +from transformers import AutoModelForImageClassification + +model = AutoModelForImageClassification.from_pretrained(MODEL_ID).to("cuda") ++ model = torch.compile(model) +``` + +`compile()` 提供了多种编译模式,它们在编译时间和推理开销上有所不同。`max-autotune` 比 `reduce-overhead` 需要更长的时间,但会得到更快的推理速度。默认模式在编译时最快,但在推理时间上与 `reduce-overhead` 相比效率较低。在本指南中,我们使用了默认模式。您可以在[这里](https://pytorch.org/get-started/pytorch-2.0/#user-experience)了解更多信息。 + +我们在 PyTorch 2.0.1 版本上使用不同的计算机视觉模型、任务、硬件类型和数据批量大小对 `torch.compile` 进行了基准测试。 + +## 基准测试代码 + +以下是每个任务的基准测试代码。我们在推理之前”预热“GPU,并取300次推理的平均值,每次使用相同的图像。 + +### 使用 ViT 进行图像分类 + +```python +import torch +from PIL import Image +import requests +import numpy as np +from transformers import AutoImageProcessor, AutoModelForImageClassification + +url = 'http://images.cocodataset.org/val2017/000000039769.jpg' +image = Image.open(requests.get(url, stream=True).raw) + +processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") +model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224").to("cuda") +model = torch.compile(model) + +processed_input = processor(image, return_tensors='pt').to(device="cuda") + +with torch.no_grad(): + _ = model(**processed_input) + +``` + +#### 使用 DETR 进行目标检测 + +```python +from transformers import AutoImageProcessor, AutoModelForObjectDetection + +processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50") +model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to("cuda") +model = torch.compile(model) + +texts = ["a photo of a cat", "a photo of a dog"] +inputs = processor(text=texts, images=image, return_tensors="pt").to("cuda") + +with torch.no_grad(): + _ = model(**inputs) +``` + +#### 使用 Segformer 进行图像分割 + +```python +from transformers import SegformerImageProcessor, 
SegformerForSemanticSegmentation + +processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") +model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to("cuda") +model = torch.compile(model) +seg_inputs = processor(images=image, return_tensors="pt").to("cuda") + +with torch.no_grad(): + _ = model(**seg_inputs) +``` + +以下是我们进行基准测试的模型列表。 + +**图像分类** +- [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) +- [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) +- [facebook/convnext-large-224](https://huggingface.co/facebook/convnext-large-224) +- [microsoft/resnet-50](https://huggingface.co/) + +**图像分割** +- [nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512) +- [facebook/mask2former-swin-tiny-coco-panoptic](https://huggingface.co/facebook/mask2former-swin-tiny-coco-panoptic) +- [facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade) +- [google/deeplabv3_mobilenet_v2_1.0_513](https://huggingface.co/google/deeplabv3_mobilenet_v2_1.0_513) + +**目标检测** +- [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) +- [facebook/detr-resnet-101](https://huggingface.co/facebook/detr-resnet-101) +- [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) + + 下面是使用和不使用`torch.compile()`的推理持续时间可视化,以及每个模型在不同硬件和数据批量大小下的改进百分比。 + + +
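+在查看下方结果之前，这里先给出一个示意性的最小计时示例，展示上文所述"先预热 GPU、再取 300 次推理平均值"的测量思路。其中 `measure_latency` 这个辅助函数名、预热次数 `warmup=10` 以及基于 `torch.cuda.synchronize()` 的同步方式均为示例中的假设，实际基准脚本可能与此不同：
+
+```python
+import time
+
+import numpy as np
+import torch
+
+
+def measure_latency(model, inputs, warmup=10, runs=300):
+    # Warm up the GPU so compilation and caching do not affect the measurement.
+    with torch.no_grad():
+        for _ in range(warmup):
+            _ = model(**inputs)
+    torch.cuda.synchronize()
+
+    # Average the latency (in milliseconds) over `runs` identical forward passes.
+    latencies = []
+    with torch.no_grad():
+        for _ in range(runs):
+            start = time.perf_counter()
+            _ = model(**inputs)
+            torch.cuda.synchronize()
+            latencies.append((time.perf_counter() - start) * 1000)
+    return np.mean(latencies)
+```
+
+例如，可以把上文 ViT 示例中的 `model`（编译前后各测一次）与 `processed_input` 传入该函数，对比两者的平均耗时。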
+ + +![Duration Comparison on V100 with Batch Size of 1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/v100_1_duration.png) + +![Percentage Improvement on T4 with Batch Size of 4](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/T4_4_percentage.png) + +下面可以找到每个模型使用和不使用`compile()`的推理时间(毫秒)。请注意,OwlViT在大批量大小下会导致内存溢出。 + +### A100 (batch size: 1) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 9.325 | 7.584 | +| Image Segmentation/Segformer | 11.759 | 10.500 | +| Object Detection/OwlViT | 24.978 | 18.420 | +| Image Classification/BeiT | 11.282 | 8.448 | +| Object Detection/DETR | 34.619 | 19.040 | +| Image Classification/ConvNeXT | 10.410 | 10.208 | +| Image Classification/ResNet | 6.531 | 4.124 | +| Image Segmentation/Mask2former | 60.188 | 49.117 | +| Image Segmentation/Maskformer | 75.764 | 59.487 | +| Image Segmentation/MobileNet | 8.583 | 3.974 | +| Object Detection/Resnet-101 | 36.276 | 18.197 | +| Object Detection/Conditional-DETR | 31.219 | 17.993 | + + +### A100 (batch size: 4) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 14.832 | 14.499 | +| Image Segmentation/Segformer | 18.838 | 16.476 | +| Image Classification/BeiT | 13.205 | 13.048 | +| Object Detection/DETR | 48.657 | 32.418| +| Image Classification/ConvNeXT | 22.940 | 21.631 | +| Image Classification/ResNet | 6.657 | 4.268 | +| Image Segmentation/Mask2former | 74.277 | 61.781 | +| Image Segmentation/Maskformer | 180.700 | 159.116 | +| Image Segmentation/MobileNet | 14.174 | 8.515 | +| Object Detection/Resnet-101 | 68.101 | 44.998 | +| Object Detection/Conditional-DETR | 56.470 | 35.552 | + +### A100 (batch size: 16) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 40.944 | 40.010 | +| Image Segmentation/Segformer | 37.005 | 31.144 | +| Image Classification/BeiT | 41.854 | 41.048 | +| Object Detection/DETR | 164.382 | 161.902 | +| Image Classification/ConvNeXT | 82.258 | 75.561 | +| Image Classification/ResNet | 7.018 | 5.024 | +| Image Segmentation/Mask2former | 178.945 | 154.814 | +| Image Segmentation/Maskformer | 638.570 | 579.826 | +| Image Segmentation/MobileNet | 51.693 | 30.310 | +| Object Detection/Resnet-101 | 232.887 | 155.021 | +| Object Detection/Conditional-DETR | 180.491 | 124.032 | + +### V100 (batch size: 1) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 10.495 | 6.00 | +| Image Segmentation/Segformer | 13.321 | 5.862 | +| Object Detection/OwlViT | 25.769 | 22.395 | +| Image Classification/BeiT | 11.347 | 7.234 | +| Object Detection/DETR | 33.951 | 19.388 | +| Image Classification/ConvNeXT | 11.623 | 10.412 | +| Image Classification/ResNet | 6.484 | 3.820 | +| Image Segmentation/Mask2former | 64.640 | 49.873 | +| Image Segmentation/Maskformer | 95.532 | 72.207 | +| Image Segmentation/MobileNet | 9.217 | 4.753 | +| Object Detection/Resnet-101 | 52.818 | 28.367 | +| Object Detection/Conditional-DETR | 39.512 | 20.816 | + +### V100 (batch size: 4) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 15.181 | 14.501 | +| Image Segmentation/Segformer | 16.787 | 16.188 | +| Image Classification/BeiT | 15.171 | 14.753 | +| Object Detection/DETR | 88.529 | 64.195 | +| Image Classification/ConvNeXT | 29.574 | 27.085 | +| Image Classification/ResNet | 6.109 | 4.731 | +| Image Segmentation/Mask2former | 90.402 | 76.926 | +| Image Segmentation/Maskformer | 234.261 | 205.456 | +| Image Segmentation/MobileNet | 24.623 | 14.816 | +| Object Detection/Resnet-101 | 134.672 | 101.304 | +| Object Detection/Conditional-DETR | 97.464 | 69.739 | + +### V100 (batch size: 16) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 52.209 | 51.633 | +| Image Segmentation/Segformer | 61.013 | 55.499 | +| Image Classification/BeiT | 53.938 | 53.581 | +| Object Detection/DETR | OOM | OOM | +| Image Classification/ConvNeXT | 109.682 | 100.771 | +| Image Classification/ResNet | 14.857 | 12.089 | +| Image Segmentation/Mask2former | 249.605 | 222.801 | +| Image Segmentation/Maskformer | 831.142 | 743.645 | +| Image Segmentation/MobileNet | 93.129 | 55.365 | +| Object Detection/Resnet-101 | 482.425 | 361.843 | +| Object Detection/Conditional-DETR | 344.661 | 255.298 | + +### T4 (batch size: 1) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 16.520 | 15.786 | +| Image Segmentation/Segformer | 16.116 | 14.205 | +| Object Detection/OwlViT | 53.634 | 51.105 | +| Image Classification/BeiT | 16.464 | 15.710 | +| Object Detection/DETR | 73.100 | 53.99 | +| Image Classification/ConvNeXT | 32.932 | 30.845 | +| Image Classification/ResNet | 6.031 | 4.321 | +| Image Segmentation/Mask2former | 79.192 | 66.815 | +| Image Segmentation/Maskformer | 200.026 | 188.268 | +| Image Segmentation/MobileNet | 18.908 | 11.997 | +| Object Detection/Resnet-101 | 106.622 | 82.566 | +| Object Detection/Conditional-DETR | 77.594 | 56.984 | + +### T4 (batch size: 4) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 43.653 | 43.626 | +| Image Segmentation/Segformer | 45.327 | 42.445 | +| Image Classification/BeiT | 52.007 | 51.354 | +| Object Detection/DETR | 277.850 | 268.003 | +| Image Classification/ConvNeXT | 119.259 | 105.580 | +| Image Classification/ResNet | 13.039 | 11.388 | +| Image Segmentation/Mask2former | 201.540 | 184.670 | +| Image Segmentation/Maskformer | 764.052 | 711.280 | +| Image Segmentation/MobileNet | 74.289 | 48.677 | +| Object Detection/Resnet-101 | 421.859 | 357.614 | +| Object Detection/Conditional-DETR | 289.002 | 226.945 | + +### T4 (batch size: 16) + +| **Task/Model** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:| +| Image Classification/ViT | 163.914 | 160.907 | +| Image Segmentation/Segformer | 192.412 | 163.620 | +| Image Classification/BeiT | 188.978 | 187.976 | +| Object Detection/DETR | OOM | OOM | +| Image Classification/ConvNeXT | 422.886 | 388.078 | +| Image Classification/ResNet | 44.114 | 37.604 | +| Image Segmentation/Mask2former | 756.337 | 695.291 | +| Image Segmentation/Maskformer | 2842.940 | 2656.88 | +| Image Segmentation/MobileNet | 299.003 | 201.942 | +| Object Detection/Resnet-101 | 1619.505 | 1262.758 | +| Object Detection/Conditional-DETR | 1137.513 | 897.390| + +## PyTorch Nightly +我们还在 PyTorch Nightly 版本(2.1.0dev)上进行了基准测试,可以在[这里](https://download.pytorch.org/whl/nightly/cu118)找到 Nightly 版本的安装包,并观察到了未编译和编译模型的延迟性能改善。 + +### A100 + +| **Task/Model** | **Batch Size** | **torch 2.0 - no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:|:---:| +| Image Classification/BeiT | Unbatched | 12.462 | 6.954 | +| Image Classification/BeiT | 4 | 14.109 | 12.851 | +| Image Classification/BeiT | 16 | 42.179 | 42.147 | +| Object Detection/DETR | Unbatched | 30.484 | 15.221 | +| Object Detection/DETR | 4 | 46.816 | 30.942 | +| Object Detection/DETR | 16 | 163.749 | 163.706 | + +### T4 + +| **Task/Model** | **Batch Size** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:|:---:| +| Image Classification/BeiT | Unbatched | 14.408 | 14.052 | +| Image Classification/BeiT | 4 | 47.381 | 46.604 | +| Image Classification/BeiT | 16 | 42.179 | 42.147 | +| Object Detection/DETR | Unbatched | 68.382 | 53.481 | +| Object Detection/DETR | 4 | 269.615 | 204.785 | +| Object Detection/DETR | 16 | OOM | OOM | + +### V100 + +| **Task/Model** | **Batch Size** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:|:---:| +| Image Classification/BeiT | Unbatched | 13.477 | 7.926 | +| Image Classification/BeiT | 4 | 15.103 | 14.378 | +| Image Classification/BeiT | 16 | 52.517 | 51.691 | +| Object Detection/DETR | Unbatched | 28.706 | 19.077 | +| Object Detection/DETR | 4 | 88.402 | 62.949| +| Object Detection/DETR | 16 | OOM | OOM | + + +## 降低开销 +我们在 PyTorch Nightly 版本中为 A100 和 T4 进行了 `reduce-overhead` 编译模式的性能基准测试。 + +### A100 + +| **Task/Model** | **Batch Size** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:|:---:| +| Image Classification/ConvNeXT | Unbatched | 11.758 | 7.335 | +| Image Classification/ConvNeXT | 4 | 23.171 | 21.490 | +| Image Classification/ResNet | Unbatched | 7.435 | 3.801 | +| Image Classification/ResNet | 4 | 7.261 | 2.187 | +| Object Detection/Conditional-DETR | Unbatched | 32.823 | 11.627 | +| Object Detection/Conditional-DETR | 4 | 50.622 | 33.831 | +| Image Segmentation/MobileNet | Unbatched | 9.869 | 4.244 | +| Image Segmentation/MobileNet | 4 | 14.385 | 7.946 | + + +### T4 + +| **Task/Model** | **Batch Size** | **torch 2.0 -
no compile** | **torch 2.0 -
compile** | +|:---:|:---:|:---:|:---:| +| Image Classification/ConvNeXT | Unbatched | 32.137 | 31.84 | +| Image Classification/ConvNeXT | 4 | 120.944 | 110.209 | +| Image Classification/ResNet | Unbatched | 9.761 | 7.698 | +| Image Classification/ResNet | 4 | 15.215 | 13.871 | +| Object Detection/Conditional-DETR | Unbatched | 72.150 | 57.660 | +| Object Detection/Conditional-DETR | 4 | 301.494 | 247.543 | +| Image Segmentation/MobileNet | Unbatched | 22.266 | 19.339 | +| Image Segmentation/MobileNet | 4 | 78.311 | 50.983 | + + diff --git a/docs/source/zh/tf_xla.md b/docs/source/zh/tf_xla.md new file mode 100644 index 000000000000..da8d13d8d04b --- /dev/null +++ b/docs/source/zh/tf_xla.md @@ -0,0 +1,179 @@ + + +# 用于 TensorFlow 模型的 XLA 集成 + +[[open-in-colab]] + +加速线性代数,也称为XLA,是一个用于加速TensorFlow模型运行时间的编译器。从[官方文档](https://www.tensorflow.org/xla)中可以看到: + +XLA(加速线性代数)是一种针对线性代数的特定领域编译器,可以在可能不需要更改源代码的情况下加速TensorFlow模型。 + +在TensorFlow中使用XLA非常简单——它包含在`tensorflow`库中,并且可以使用任何图创建函数中的`jit_compile`参数来触发,例如[`tf.function`](https://www.tensorflow.org/guide/intro_to_graphs)。在使用Keras方法如`fit()`和`predict()`时,只需将`jit_compile`参数传递给`model.compile()`即可启用XLA。然而,XLA不仅限于这些方法 - 它还可以用于加速任何任意的`tf.function`。 + +在🤗 Transformers中,几个TensorFlow方法已经被重写为与XLA兼容,包括[GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)、[T5](https://huggingface.co/docs/transformers/model_doc/t5)和[OPT](https://huggingface.co/docs/transformers/model_doc/opt)等文本生成模型,以及[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)等语音处理模型。 + +虽然确切的加速倍数很大程度上取决于模型,但对于🤗 Transformers中的TensorFlow文本生成模型,我们注意到速度提高了约100倍。本文档将解释如何在这些模型上使用XLA获得最大的性能。如果您有兴趣了解更多关于基准测试和我们在XLA集成背后的设计哲学的信息,我们还将提供额外的资源链接。 + + +## 使用 XLA 运行 TensorFlow 函数 + +让我们考虑以下TensorFlow 中的模型: + +```py +import tensorflow as tf + +model = tf.keras.Sequential( + [tf.keras.layers.Dense(10, input_shape=(10,), activation="relu"), tf.keras.layers.Dense(5, activation="softmax")] +) +``` + +上述模型接受维度为 `(10,)` 的输入。我们可以像下面这样使用模型进行前向传播: + +```py +# Generate random inputs for the model. +batch_size = 16 +input_vector_dim = 10 +random_inputs = tf.random.normal((batch_size, input_vector_dim)) + +# Run a forward pass. +_ = model(random_inputs) +``` + +为了使用 XLA 编译的函数运行前向传播,我们需要执行以下操作: + +```py +xla_fn = tf.function(model, jit_compile=True) +_ = xla_fn(random_inputs) +``` + +`model`的默认`call()`函数用于编译XLA图。但如果你想将其他模型函数编译成XLA,也是可以的,如下所示: + +```py +my_xla_fn = tf.function(model.my_xla_fn, jit_compile=True) +``` + +## 在🤗 Transformers库中使用XLA运行TensorFlow文本生成模型 + +要在🤗 Transformers中启用XLA加速生成,您需要安装最新版本的`transformers`。您可以通过运行以下命令来安装它: + +```bash +pip install transformers --upgrade +``` + +然后您可以运行以下代码: + +```py +import tensorflow as tf +from transformers import AutoTokenizer, TFAutoModelForCausalLM + +# Will error if the minimal version of Transformers is not installed. 
+from transformers.utils import check_min_version + +check_min_version("4.21.0") + + +tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left", pad_token="") +model = TFAutoModelForCausalLM.from_pretrained("gpt2") +input_string = ["TensorFlow is"] + +# One line to create an XLA generation function +xla_generate = tf.function(model.generate, jit_compile=True) + +tokenized_input = tokenizer(input_string, return_tensors="tf") +generated_tokens = xla_generate(**tokenized_input, num_beams=2) + +decoded_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True) +print(f"Generated -- {decoded_text}") +# Generated -- TensorFlow is an open-source, open-source, distributed-source application # framework for the +``` + +正如您所注意到的,在`generate()`上启用XLA只需要一行代码。其余部分代码保持不变。然而,上面的代码片段中有一些与XLA相关的注意事项。您需要了解这些注意事项,以充分利用XLA可能带来的性能提升。我们将在下面的部分讨论这些内容。 + +## 需要关注的注意事项 + +当您首次执行启用XLA的函数(如上面的`xla_generate()`)时,它将在内部尝试推断计算图,这是一个耗时的过程。这个过程被称为[“tracing”](https://www.tensorflow.org/guide/intro_to_graphs#when_is_a_function_tracing)。 + +您可能会注意到生成时间并不快。连续调用`xla_generate()`(或任何其他启用了XLA的函数)不需要再次推断计算图,只要函数的输入与最初构建计算图时的形状相匹配。对于具有固定输入形状的模态(例如图像),这不是问题,但如果您正在处理具有可变输入形状的模态(例如文本),则必须注意。 + +为了确保`xla_generate()`始终使用相同的输入形状,您可以在调用`tokenizer`时指定`padding`参数。 + +```py +import tensorflow as tf +from transformers import AutoTokenizer, TFAutoModelForCausalLM + +tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left", pad_token="") +model = TFAutoModelForCausalLM.from_pretrained("gpt2") +input_string = ["TensorFlow is"] + +xla_generate = tf.function(model.generate, jit_compile=True) + +# Here, we call the tokenizer with padding options. +tokenized_input = tokenizer(input_string, pad_to_multiple_of=8, padding=True, return_tensors="tf") + +generated_tokens = xla_generate(**tokenized_input, num_beams=2) +decoded_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True) +print(f"Generated -- {decoded_text}") +``` + +通过这种方式,您可以确保`xla_generate()`的输入始终具有它跟踪的形状,从而加速生成时间。您可以使用以下代码来验证这一点: + +```py +import time +import tensorflow as tf +from transformers import AutoTokenizer, TFAutoModelForCausalLM + +tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left", pad_token="") +model = TFAutoModelForCausalLM.from_pretrained("gpt2") + +xla_generate = tf.function(model.generate, jit_compile=True) + +for input_string in ["TensorFlow is", "TensorFlow is a", "TFLite is a"]: + tokenized_input = tokenizer(input_string, pad_to_multiple_of=8, padding=True, return_tensors="tf") + start = time.time_ns() + generated_tokens = xla_generate(**tokenized_input, num_beams=2) + end = time.time_ns() + print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n") +``` + +在Tesla T4 GPU上,您可以期望如下的输出: + +```bash +Execution time -- 30819.6 ms + +Execution time -- 79.0 ms + +Execution time -- 78.9 ms +``` + +第一次调用`xla_generate()`会因为`tracing`而耗时,但后续的调用会快得多。请注意,任何时候对生成选项的更改都会触发重新`tracing`,从而导致生成时间减慢。 + +在本文档中,我们没有涵盖🤗 Transformers提供的所有文本生成选项。我们鼓励您阅读文档以了解高级用例。 + +## 附加资源 + +以下是一些附加资源,如果您想深入了解在🤗 Transformers和其他库下使用XLA: + +* [这个Colab Notebook](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/91_tf_xla_generate.ipynb) 提供了一个互动演示,让您可以尝试使用XLA兼容的编码器-解码器(例如[T5](https://huggingface.co/docs/transformers/model_doc/t5))和仅解码器(例如[GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2))文本生成模型。 + +* [这篇博客文章](https://huggingface.co/blog/tf-xla-generate) 提供了XLA兼容模型的比较基准概述,以及关于在TensorFlow中使用XLA的友好介绍。 + +* 
[这篇博客文章](https://blog.tensorflow.org/2022/11/how-hugging-face-improved-text-generation-performance-with-xla.html) 讨论了我们在🤗 Transformers中为TensorFlow模型添加XLA支持的设计理念。 + +* 推荐用于更多学习XLA和TensorFlow图的资源: + * [XLA:面向机器学习的优化编译器](https://www.tensorflow.org/xla) + * [图和tf.function简介](https://www.tensorflow.org/guide/intro_to_graphs) + * [使用tf.function获得更好的性能](https://www.tensorflow.org/guide/function) \ No newline at end of file From 8017a59091f8949332fc398971dfa4e55604a64c Mon Sep 17 00:00:00 2001 From: adismort14 <104080429+adismort14@users.noreply.github.com> Date: Mon, 13 Nov 2023 23:20:40 +0530 Subject: [PATCH 159/268] Fixed typo in pipelines.md documentation (#27455) Update pipelines.md --- docs/source/en/main_classes/pipelines.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/main_classes/pipelines.md b/docs/source/en/main_classes/pipelines.md index b105cb544ffc..3bb3f0d45242 100644 --- a/docs/source/en/main_classes/pipelines.md +++ b/docs/source/en/main_classes/pipelines.md @@ -225,7 +225,7 @@ For users, a rule of thumb is: - **Measure performance on your load, with your hardware. Measure, measure, and keep measuring. Real numbers are the only way to go.** -- If you are latency constrained (live product doing inference), don't batch +- If you are latency constrained (live product doing inference), don't batch. - If you are using CPU, don't batch. - If you are using throughput (you want to run your model on a bunch of static data), on GPU, then: From 2ac5b9325ed3b54950c6c61fd5838ac6e55a9fe1 Mon Sep 17 00:00:00 2001 From: Gift Sinthong Date: Mon, 13 Nov 2023 10:06:32 -0800 Subject: [PATCH 160/268] [time series] Add PatchTST (#25927) * Initial commit of PatchTST model classes Co-authored-by: Phanwadee Sinthong Co-authored-by: Nam Nguyen Co-authored-by: Vijay Ekambaram Co-authored-by: Ngoc Diep Do <55230119+diepi@users.noreply.github.com> Co-authored-by: Wesley Gifford <79663411+wgifford@users.noreply.github.com> * Add PatchTSTForPretraining * update to include classification Co-authored-by: Phanwadee Sinthong Co-authored-by: Nam Nguyen Co-authored-by: Vijay Ekambaram Co-authored-by: Ngoc Diep Do <55230119+diepi@users.noreply.github.com> Co-authored-by: Wesley Gifford <79663411+wgifford@users.noreply.github.com> * clean up auto files * Add PatchTSTForPrediction * Fix relative import * Replace original PatchTSTEncoder with ChannelAttentionPatchTSTEncoder * temporary adding absolute path + add PatchTSTForForecasting class * Update base PatchTSTModel + Unittest * Update ForecastHead to use the config class * edit cv_random_masking, add mask to model output * Update configuration_patchtst.py * add masked_loss to the pretraining * add PatchEmbeddings * Update configuration_patchtst.py * edit loss which considers mask in the pretraining * remove patch_last option * Add commits from internal repo * Update ForecastHead * Add model weight initilization + unittest * Update PatchTST unittest to use local import * PatchTST integration tests for pretraining and prediction * Added PatchTSTForRegression + update unittest to include label generation * Revert unrelated model test file * Combine similar output classes * update PredictionHead * Update configuration_patchtst.py * Add Revin * small edit to PatchTSTModelOutputWithNoAttention * Update modeling_patchtst.py * Updating integration test for forecasting * Fix unittest after class structure changed * docstring updates * change input_size to num_input_channels * more formatting * Remove some unused params * Add a comment for 
pretrained models * add channel_attention option add channel_attention option and remove unused positional encoders. * Update PatchTST models to use HF's MultiHeadAttention module * Update paper + github urls * Fix hidden_state return value * Update integration test to use PatchTSTForForecasting * Adding dataclass decorator for model output classes * Run fixup script * Rename model repos for integration test * edit argument explanation * change individual option to shared_projection * style * Rename integration test + import cleanup * Fix outpu_hidden_states return value * removed unused mode * added std, mean and nops scaler * add initial distributional loss for predition * fix typo in docs * add generate function * formatting * add num_parallel_samples * Fix a typo * copy weighted_average function, edit PredictionHead * edit PredictionHead * add distribution head to forecasting * formatting * Add generate function for forecasting * Add generate function to prediction task * formatting * use argsort * add past_observed_mask ordering * fix arguments * docs * add back test_model_outputs_equivalence test * formatting * cleanup * formatting * use ACT2CLS * formatting * fix add_start_docstrings decorator * add distribution head and generate function to regression task add distribution head and generate function to regression task. Also made add PatchTSTForForecastingOutput, PatchTSTForRegressionOutput. * add distribution head and generate function to regression task add distribution head and generate function to regression task. Also made add PatchTSTForForecastingOutput, PatchTSTForRegressionOutput. * fix typos * add forecast_masking * fixed tests * use set_seed * fix doc test * formatting * Update docs/source/en/model_doc/patchtst.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * better var names * rename PatchTSTTranspose * fix argument names and docs string * remove compute_num_patches and unused class * remove assert * renamed to PatchTSTMasking * use num_labels for classification * use num_labels * use default num_labels from super class * move model_type after docstring * renamed PatchTSTForMaskPretraining * bs -> batch_size * more review fixes * use hidden_state * rename encoder layer and block class * remove commented seed_number * edit docstring * Add docstring * formatting * use past_observed_mask * doc suggestion * make fix-copies * use Args: * add docstring * add docstring * change some variable names and add PatchTST before some class names * formatting * fix argument types * fix tests * change x variable to patch_input * format * formatting * fix-copies * Update tests/models/patchtst/test_modeling_patchtst.py Co-authored-by: Patrick von Platen * move loss to forward * Update src/transformers/models/patchtst/modeling_patchtst.py Co-authored-by: Patrick von Platen * Update src/transformers/models/patchtst/modeling_patchtst.py Co-authored-by: Patrick von Platen * Update src/transformers/models/patchtst/modeling_patchtst.py Co-authored-by: Patrick von Platen * Update src/transformers/models/patchtst/modeling_patchtst.py Co-authored-by: Patrick von Platen * Update src/transformers/models/patchtst/modeling_patchtst.py Co-authored-by: Patrick von Platen * formatting * fix a bug when pre_norm is set to True * output_hidden_states is set to False as default * set pre_norm=True as default * format docstring * format * output_hidden_states is None by default * add missing docs * better var names * docstring: remove default to False in output_hidden_states * 
change labels name to target_values in regression task * format * fix tests * change to forecast_mask_ratios and random_mask_ratio * change mask names * change future_values to target_values param in the prediction class * remove nn.Sequential and make PatchTSTBatchNorm class * black * fix argument name for prediction * add output_attentions option * add output_attentions to PatchTSTEncoder * formatting * Add attention output option to all classes * Remove PatchTSTEncoderBlock * create PatchTSTEmbedding class * use config in PatchTSTPatchify * Use config in PatchTSTMasking class * add channel_attn_weights * Add PatchTSTScaler class * add output_attentions arg to test function * format * Update doc with image patchtst.md * fix-copies * rename Forecast <-> Prediction * change name of a few parameters to match with PatchTSMixer. * Remove *ForForecasting class to match with other time series models. * make style * Remove PatchTSTForForecasting in the test * remove PatchTSTForForecastingOutput class * change test_forecast_head to test_prediction_head * style * fix docs * fix tests * change num_labels to num_targets * Remove PatchTSTTranspose * remove arguments in PatchTSTMeanScaler * remove arguments in PatchTSTStdScaler * add config as an argument to all the scaler classes * reformat * Add norm_eps for batchnorm and layernorm * reformat. * reformat * edit docstring * update docstring * change variable name pooling to pooling_type * fix output_hidden_states as tuple * fix bug when calling PatchTSTBatchNorm * change stride to patch_stride * create PatchTSTPositionalEncoding class and restructure the PatchTSTEncoder * formatting * initialize scalers with configs * edit output_hidden_states * style * fix forecast_mask_patches doc string --------- Co-authored-by: Gift Sinthong Co-authored-by: Nam Nguyen Co-authored-by: Vijay Ekambaram Co-authored-by: Ngoc Diep Do <55230119+diepi@users.noreply.github.com> Co-authored-by: Wesley Gifford <79663411+wgifford@users.noreply.github.com> Co-authored-by: Wesley M. 
Gifford Co-authored-by: nnguyen Co-authored-by: Ngoc Diep Do Co-authored-by: Kashif Rasul Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Patrick von Platen --- README.md | 1 + README_es.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + docs/source/en/model_doc/patchtst.md | 73 + src/transformers/__init__.py | 26 + src/transformers/models/__init__.py | 1 + src/transformers/models/auto/__init__.py | 4 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 21 + .../models/autoformer/modeling_autoformer.py | 118 +- .../models/informer/modeling_informer.py | 118 +- src/transformers/models/patchtst/__init__.py | 66 + .../models/patchtst/configuration_patchtst.py | 274 +++ .../models/patchtst/modeling_patchtst.py | 1913 +++++++++++++++++ .../modeling_time_series_transformer.py | 112 +- src/transformers/utils/dummy_pt_objects.py | 51 + tests/models/patchtst/__init__.py | 0 .../models/patchtst/test_modeling_patchtst.py | 353 +++ utils/check_repo.py | 2 + 25 files changed, 2974 insertions(+), 171 deletions(-) create mode 100644 docs/source/en/model_doc/patchtst.md create mode 100644 src/transformers/models/patchtst/__init__.py create mode 100644 src/transformers/models/patchtst/configuration_patchtst.py create mode 100755 src/transformers/models/patchtst/modeling_patchtst.py create mode 100644 tests/models/patchtst/__init__.py create mode 100644 tests/models/patchtst/test_modeling_patchtst.py diff --git a/README.md b/README.md index 12724e60a188..5096adcaef14 100644 --- a/README.md +++ b/README.md @@ -439,6 +439,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. +1. **[PatchTST](https://huggingface.co/docs/transformers/main/model_doc/patchtst)** (from IBM) released with the paper [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/abs/2211.14730) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. 
**[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. diff --git a/README_es.md b/README_es.md index 5cdbc27ec791..0a3db02aedd7 100644 --- a/README_es.md +++ b/README_es.md @@ -414,6 +414,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. +1. **[PatchTST](https://huggingface.co/docs/transformers/main/model_doc/patchtst)** (from IBM) released with the paper [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. diff --git a/README_hd.md b/README_hd.md index 01937532f967..36bd25cda192 100644 --- a/README_hd.md +++ b/README_hd.md @@ -388,6 +388,7 @@ conda install -c huggingface transformers 1. 
**[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI से) साथ में कागज [विज़न ट्रांसफॉर्मर्स के साथ सिंपल ओपन-वोकैबुलरी ऑब्जेक्ट डिटेक्शन](https:/ /arxiv.org/abs/2205.06230) मैथियास मिंडरर, एलेक्सी ग्रिट्सेंको, ऑस्टिन स्टोन, मैक्सिम न्यूमैन, डिर्क वीसेनबोर्न, एलेक्सी डोसोवित्स्की, अरविंद महेंद्रन, अनुराग अर्नब, मुस्तफा देहघानी, ज़ुओरन शेन, जिओ वांग, ज़ियाओहुआ झाई, थॉमस किफ़, और नील हॉल्सबी द्वारा पोस्ट किया गया। 1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI से) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. द्वाराअनुसंधान पत्र [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) के साथ जारी किया गया +1. **[PatchTST](https://huggingface.co/docs/transformers/main/model_doc/patchtst)** (IBM से) Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. द्वाराअनुसंधान पत्र [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) के साथ जारी किया गया 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google की ओर से) साथ में दिया गया पेपर [लंबे इनपुट सारांश के लिए ट्रांसफ़ॉर्मरों को बेहतर तरीके से एक्सटेंड करना](https://arxiv .org/abs/2208.04347) जेसन फांग, याओ झाओ, पीटर जे लियू द्वारा। 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (दीपमाइंड से) साथ में पेपर [पर्सीवर आईओ: संरचित इनपुट और आउटपुट के लिए एक सामान्य वास्तुकला] (https://arxiv.org/abs/2107.14795) एंड्रयू जेगल, सेबेस्टियन बोरग्यूड, जीन-बैप्टिस्ट अलायराक, कार्ल डोर्श, कैटलिन इओनेस्कु, डेविड द्वारा डिंग, स्कंद कोप्पुला, डैनियल ज़ोरान, एंड्रयू ब्रॉक, इवान शेलहैमर, ओलिवियर हेनाफ, मैथ्यू एम। बोट्विनिक, एंड्रयू ज़िसरमैन, ओरिओल विनियल्स, जोआओ कैरेरा द्वारा पोस्ट किया गया। diff --git a/README_ja.md b/README_ja.md index 5935da396bf1..06a6fdd5e7dc 100644 --- a/README_ja.md +++ b/README_ja.md @@ -448,6 +448,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI から) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al から公開された研究論文: [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby から公開された研究論文: [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. から公開された研究論文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) +1. 
**[PatchTST](https://huggingface.co/docs/transformers/main/model_doc/patchtst)** (IBM から) Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. から公開された研究論文 [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (Google から) Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu から公開された研究論文: [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google から) Jason Phang, Yao Zhao, and Peter J. Liu から公開された研究論文: [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind から) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira から公開された研究論文: [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) diff --git a/README_ko.md b/README_ko.md index e0c38472cc46..db06296a7296 100644 --- a/README_ko.md +++ b/README_ko.md @@ -363,6 +363,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI 에서) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 의 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 논문과 함께 발표했습니다. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI 에서) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 의 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 논문과 함께 발표했습니다. 1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI 에서 제공)은 Matthias Minderer, Alexey Gritsenko, Neil Houlsby.의 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683)논문과 함께 발표했습니다. +1. **[PatchTST](https://huggingface.co/docs/transformers/main/model_doc/patchtst)** (IBM 에서 제공)은 Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.의 [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf)논문과 함께 발표했습니다. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (Google 에서) Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 의 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 논문과 함께 발표했습니다. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google 에서) Jason Phang, Yao Zhao, Peter J. Liu 의 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 논문과 함께 발표했습니다. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind 에서) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. 
Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 의 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 논문과 함께 발표했습니다. diff --git a/README_zh-hans.md b/README_zh-hans.md index 3d84374d5561..5dd9f9b35a14 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -387,6 +387,7 @@ conda install -c huggingface transformers 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。 1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (来自 Google AI) 伴随论文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) 由 Matthias Minderer, Alexey Gritsenko, Neil Houlsby 发布。 +1. **[PatchTST](https://huggingface.co/docs/transformers/main/model_doc/patchtst)** (来自 IBM) 伴随论文 [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) 由 Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam 发布。 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index c095423cce15..f155fafe91f1 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -399,6 +399,7 @@ conda install -c huggingface transformers 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. 
**[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby. +1. **[PatchTST](https://huggingface.co/docs/transformers/main/model_doc/patchtst)** (from IBM) released with the paper [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. 1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 4e0ce88c10af..612f21ab38d3 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -747,6 +747,8 @@ title: Autoformer - local: model_doc/informer title: Informer + - local: model_doc/patchtst + title: PatchTST - local: model_doc/time_series_transformer title: Time Series Transformer title: Time series models diff --git a/docs/source/en/index.md b/docs/source/en/index.md index ae01569e970c..d962338becf8 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -213,6 +213,7 @@ Flax), PyTorch, and/or TensorFlow. | [OPT](model_doc/opt) | ✅ | ✅ | ✅ | | [OWL-ViT](model_doc/owlvit) | ✅ | ❌ | ❌ | | [OWLv2](model_doc/owlv2) | ✅ | ❌ | ❌ | +| [PatchTST](model_doc/patchtst) | ✅ | ❌ | ❌ | | [Pegasus](model_doc/pegasus) | ✅ | ✅ | ✅ | | [PEGASUS-X](model_doc/pegasus_x) | ✅ | ❌ | ❌ | | [Perceiver](model_doc/perceiver) | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/patchtst.md b/docs/source/en/model_doc/patchtst.md new file mode 100644 index 000000000000..c18abeb20e64 --- /dev/null +++ b/docs/source/en/model_doc/patchtst.md @@ -0,0 +1,73 @@ + + +# PatchTST + +## Overview + +The PatchTST model was proposed in [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/abs/2211.14730) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. + +The abstract from the paper is the following: + +*We propose an efficient design of Transformer-based models for multivariate time series forecasting and self-supervised representation learning. It is based on two key components: (i) segmentation of time series into subseries-level patches which are served as input tokens to Transformer; (ii) channel-independence where each channel contains a single univariate time series that shares the same embedding and Transformer weights across all the series. 
Patching design naturally has three-fold benefit: local semantic information is retained in the embedding; computation and memory usage of the attention maps are quadratically reduced given the same look-back window; and the model can attend longer history. Our channel-independent patch time series Transformer (PatchTST) can improve the long-term forecasting accuracy significantly when compared with that of SOTA Transformer-based models. We also apply our model to self-supervised pre-training tasks and attain excellent fine-tuning performance, which outperforms supervised training on large datasets. Transferring of masked pre-trained representation on one dataset to others also produces SOTA forecasting accuracy.* + +Tips: + +The model can also be used for time series classification and time series regression. See the respective [`PatchTSTForClassification`] and [`PatchTSTForRegression`] classes. + +At a high level the model vectorizes time series into patches of a given size and encodes them via a Transformer which then outputs the prediction length forecasts: + +![model](https://github.com/namctin/transformers/assets/8100/150af169-29de-419a-8d98-eb78251c21fa) + + +This model was contributed by [namctin](https://huggingface.co/namctin), [gsinthong](https://huggingface.co/gsinthong), [diepi](https://huggingface.co/diepi), [vijaye12](https://huggingface.co/vijaye12), [wmgifford](https://huggingface.co/wmgifford), and [kashif](https://huggingface.co/kashif). + +The original code can be found [here](https://github.com/yuqinie98/PatchTST). + + +## PatchTSTConfig + +[[autodoc]] PatchTSTConfig + + +## PatchTSTModel + +[[autodoc]] PatchTSTModel + - forward + + +## PatchTSTForPrediction + +[[autodoc]] PatchTSTForPrediction + - forward + + +## PatchTSTForClassification + +[[autodoc]] PatchTSTForClassification + - forward + + +## PatchTSTForPretraining + +[[autodoc]] PatchTSTForPretraining + - forward + + +## PatchTSTForRegression + +[[autodoc]] PatchTSTForRegression + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index cf89602b6597..9cbb988c5347 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -493,6 +493,7 @@ "OwlViTTextConfig", "OwlViTVisionConfig", ], + "models.patchtst": ["PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", "PatchTSTConfig"], "models.pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig", "PegasusTokenizer"], "models.pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], "models.perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverTokenizer"], @@ -1167,6 +1168,8 @@ "MODEL_FOR_TEXT_ENCODING_MAPPING", "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING", "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING", + "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING", + "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING", @@ -2485,6 +2488,17 @@ "OwlViTVisionModel", ] ) + _import_structure["models.patchtst"].extend( + [ + "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", + "PatchTSTForClassification", + "PatchTSTForPrediction", + "PatchTSTForPretraining", + "PatchTSTForRegression", + "PatchTSTModel", + "PatchTSTPreTrainedModel", + ] + ) _import_structure["models.pegasus"].extend( ["PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel", "PegasusPreTrainedModel"] ) @@ -4697,6 +4711,7 @@ OwlViTTextConfig, OwlViTVisionConfig, ) + from .models.patchtst import 
PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig from .models.pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig, PegasusTokenizer from .models.pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig from .models.perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverTokenizer @@ -5303,6 +5318,8 @@ MODEL_FOR_TEXT_ENCODING_MAPPING, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, + MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, + MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, @@ -6387,6 +6404,15 @@ OwlViTTextModel, OwlViTVisionModel, ) + from .models.patchtst import ( + PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, + PatchTSTForClassification, + PatchTSTForPrediction, + PatchTSTForPretraining, + PatchTSTForRegression, + PatchTSTModel, + PatchTSTPreTrainedModel, + ) from .models.pegasus import ( PegasusForCausalLM, PegasusForConditionalGeneration, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 6132512688e6..968704c0bf86 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -158,6 +158,7 @@ opt, owlv2, owlvit, + patchtst, pegasus, pegasus_x, perceiver, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index dc01c93406b7..153f7f10def6 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -77,6 +77,8 @@ "MODEL_WITH_LM_HEAD_MAPPING", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING", + "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING", + "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING", "AutoModel", "AutoBackbone", "AutoModelForAudioClassification", @@ -250,6 +252,8 @@ MODEL_FOR_TEXT_ENCODING_MAPPING, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, + MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, + MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index c1c2387373b8..900f1da799d9 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -164,6 +164,7 @@ ("opt", "OPTConfig"), ("owlv2", "Owlv2Config"), ("owlvit", "OwlViTConfig"), + ("patchtst", "PatchTSTConfig"), ("pegasus", "PegasusConfig"), ("pegasus_x", "PegasusXConfig"), ("perceiver", "PerceiverConfig"), @@ -376,6 +377,7 @@ ("opt", "OPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("owlv2", "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("owlvit", "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("patchtst", "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("pegasus", "PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("pegasus_x", "PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("perceiver", "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -607,6 +609,7 @@ ("opt", "OPT"), ("owlv2", "OWLv2"), ("owlvit", "OWL-ViT"), + ("patchtst", "PatchTST"), ("pegasus", "Pegasus"), ("pegasus_x", "PEGASUS-X"), ("perceiver", "Perceiver"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index ffcae9a23494..437aed60143c 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ 
b/src/transformers/models/auto/modeling_auto.py @@ -157,6 +157,7 @@ ("opt", "OPTModel"), ("owlv2", "Owlv2Model"), ("owlvit", "OwlViTModel"), + ("patchtst", "PatchTSTModel"), ("pegasus", "PegasusModel"), ("pegasus_x", "PegasusXModel"), ("perceiver", "PerceiverModel"), @@ -1130,6 +1131,18 @@ ] ) +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("patchtst", "PatchTSTForClassification"), + ] +) + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict( + [ + ("patchtst", "PatchTSTForRegression"), + ] +) + MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict( [ ("swin2sr", "Swin2SRForImageSuperResolution"), @@ -1221,6 +1234,14 @@ MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES +) + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES +) + MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES) diff --git a/src/transformers/models/autoformer/modeling_autoformer.py b/src/transformers/models/autoformer/modeling_autoformer.py index 92e9df2c7e5b..8f26274b44bc 100644 --- a/src/transformers/models/autoformer/modeling_autoformer.py +++ b/src/transformers/models/autoformer/modeling_autoformer.py @@ -208,71 +208,70 @@ def forward(self, features: torch.Tensor) -> torch.Tensor: ) -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeries->Autoformer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer class AutoformerStdScaler(nn.Module): """ - Standardize features by calculating the mean and scaling along some given dimension `dim`, and then normalizes it - by subtracting from the mean and dividing by the standard deviation. - - Args: - dim (`int`): - Dimension along which to calculate the mean and standard deviation. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. - minimum_scale (`float`, *optional*, defaults to 1e-5): - Default scale that is used for elements that are constantly zero along dimension `dim`. + Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by + subtracting from the mean and dividing by the standard deviation. 
""" - def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5): + def __init__(self, config: AutoformerConfig): super().__init__() - if not dim > 0: - raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0") - self.dim = dim - self.keepdim = keepdim - self.minimum_scale = minimum_scale + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - @torch.no_grad() - def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - denominator = weights.sum(self.dim, keepdim=self.keepdim) + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) - loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator + loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator - variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeries->Autoformer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer class AutoformerMeanScaler(nn.Module): """ - Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data + Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. - - Args: - dim (`int`): - Dimension along which to compute the scale. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. - default_scale (`float`, *optional*, defaults to `None`): - Default scale that is used for elements that are constantly zero. If `None`, we use the scale of the batch. - minimum_scale (`float`, *optional*, defaults to 1e-10): - Default minimum possible scale that is used for any item. 
""" - def __init__( - self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10 - ): + def __init__(self, config: AutoformerConfig): super().__init__() - self.dim = dim - self.keepdim = keepdim - self.minimum_scale = minimum_scale - self.default_scale = default_scale + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + self.default_scale = config.default_scale if hasattr(config, "default_scale") else None - @torch.no_grad() def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - # shape: (N, [C], T=1) + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) @@ -300,26 +299,29 @@ def forward( return scaled_data, torch.zeros_like(scale), scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeries->Autoformer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer class AutoformerNOPScaler(nn.Module): """ - Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data. - - Args: - dim (`int`): - Dimension along which to compute the scale. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
""" - def __init__(self, dim: int, keepdim: bool = False): + def __init__(self, config: AutoformerConfig): super().__init__() - self.dim = dim - self.keepdim = keepdim + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor + self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @@ -1433,11 +1435,11 @@ def __init__(self, config: AutoformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: - self.scaler = AutoformerMeanScaler(dim=1, keepdim=True) + self.scaler = AutoformerMeanScaler(config) elif config.scaling == "std": - self.scaler = AutoformerStdScaler(dim=1, keepdim=True) + self.scaler = AutoformerStdScaler(config) else: - self.scaler = AutoformerNOPScaler(dim=1, keepdim=True) + self.scaler = AutoformerNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = AutoformerFeatureEmbedder( diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py index c0a5a2059502..205c8ba22f74 100644 --- a/src/transformers/models/informer/modeling_informer.py +++ b/src/transformers/models/informer/modeling_informer.py @@ -81,71 +81,70 @@ def forward(self, features: torch.Tensor) -> torch.Tensor: ) -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeries->Informer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer class InformerStdScaler(nn.Module): """ - Standardize features by calculating the mean and scaling along some given dimension `dim`, and then normalizes it - by subtracting from the mean and dividing by the standard deviation. - - Args: - dim (`int`): - Dimension along which to calculate the mean and standard deviation. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. - minimum_scale (`float`, *optional*, defaults to 1e-5): - Default scale that is used for elements that are constantly zero along dimension `dim`. + Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by + subtracting from the mean and dividing by the standard deviation. 
""" - def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5): + def __init__(self, config: InformerConfig): super().__init__() - if not dim > 0: - raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0") - self.dim = dim - self.keepdim = keepdim - self.minimum_scale = minimum_scale + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - @torch.no_grad() - def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - denominator = weights.sum(self.dim, keepdim=self.keepdim) + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) - loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator + loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator - variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeries->Informer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer class InformerMeanScaler(nn.Module): """ - Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data + Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. - - Args: - dim (`int`): - Dimension along which to compute the scale. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. - default_scale (`float`, *optional*, defaults to `None`): - Default scale that is used for elements that are constantly zero. If `None`, we use the scale of the batch. - minimum_scale (`float`, *optional*, defaults to 1e-10): - Default minimum possible scale that is used for any item. 
""" - def __init__( - self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10 - ): + def __init__(self, config: InformerConfig): super().__init__() - self.dim = dim - self.keepdim = keepdim - self.minimum_scale = minimum_scale - self.default_scale = default_scale + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + self.default_scale = config.default_scale if hasattr(config, "default_scale") else None - @torch.no_grad() def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - # shape: (N, [C], T=1) + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) @@ -173,26 +172,29 @@ def forward( return scaled_data, torch.zeros_like(scale), scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeries->Informer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer class InformerNOPScaler(nn.Module): """ - Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data. - - Args: - dim (`int`): - Dimension along which to compute the scale. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
""" - def __init__(self, dim: int, keepdim: bool = False): + def __init__(self, config: InformerConfig): super().__init__() - self.dim = dim - self.keepdim = keepdim + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor + self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @@ -1446,11 +1448,11 @@ def __init__(self, config: InformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: - self.scaler = InformerMeanScaler(dim=1, keepdim=True) + self.scaler = InformerMeanScaler(config) elif config.scaling == "std": - self.scaler = InformerStdScaler(dim=1, keepdim=True) + self.scaler = InformerStdScaler(config) else: - self.scaler = InformerNOPScaler(dim=1, keepdim=True) + self.scaler = InformerNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = InformerFeatureEmbedder( diff --git a/src/transformers/models/patchtst/__init__.py b/src/transformers/models/patchtst/__init__.py new file mode 100644 index 000000000000..8c7db64c1984 --- /dev/null +++ b/src/transformers/models/patchtst/__init__.py @@ -0,0 +1,66 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +# rely on isort to merge the imports +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_patchtst": [ + "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", + "PatchTSTConfig", + ], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_patchtst"] = [ + "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", + "PatchTSTModel", + "PatchTSTPreTrainedModel", + "PatchTSTForPrediction", + "PatchTSTForPretraining", + "PatchTSTForRegression", + "PatchTSTForClassification", + ] + + +if TYPE_CHECKING: + from .configuration_patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_patchtst import ( + PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, + PatchTSTForClassification, + PatchTSTForPrediction, + PatchTSTForPretraining, + PatchTSTForRegression, + PatchTSTModel, + PatchTSTPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/patchtst/configuration_patchtst.py b/src/transformers/models/patchtst/configuration_patchtst.py new file mode 100644 index 000000000000..4ced00c36046 --- /dev/null +++ b/src/transformers/models/patchtst/configuration_patchtst.py @@ -0,0 +1,274 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PatchTST model configuration""" + +from typing import List, Optional, Union + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + +PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "ibm/patchtst-base": "https://huggingface.co/ibm/patchtst-base/resolve/main/config.json", + # See all PatchTST models at https://huggingface.co/ibm/models?filter=patchtst +} + + +class PatchTSTConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of an [`PatchTSTModel`]. It is used to instantiate an + PatchTST model according to the specified arguments, defining the model architecture. + [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture. + + Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_input_channels (`int`, *optional*, defaults to 1): + The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of + multivariate targets. + context_length (`int`, *optional*, defaults to 32): + The context length for the encoder. 
+        distribution_output (`str`, *optional*, defaults to `"student_t"`):
+            The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
+            "negative_binomial".
+        loss (`str`, *optional*, defaults to `"mse"`):
+            The loss function for the model corresponding to the `distribution_output` head. For parametric
+            distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
+            error "mse".
+        patch_length (`int`, *optional*, defaults to 1):
+            Define the patch length of the patchification process.
+        patch_stride (`int`, *optional*, defaults to 1):
+            Define the stride of the patchification process.
+        encoder_layers (`int`, *optional*, defaults to 3):
+            Number of encoder layers.
+        d_model (`int`, *optional*, defaults to 64):
+            Dimensionality of the transformer layers.
+        encoder_attention_heads (`int`, *optional*, defaults to 4):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        shared_embedding (`bool`, *optional*, defaults to `True`):
+            Whether to share the input embedding across all channels.
+        channel_attention (`bool`, *optional*, defaults to `False`):
+            Whether to activate the channel attention block in the Transformer to allow channels to attend to each
+            other.
+        encoder_ffn_dim (`int`, *optional*, defaults to 256):
+            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
+        norm (`str`, *optional*, defaults to `"BatchNorm"`):
+            Normalization at each Transformer layer. Can be `"BatchNorm"` or `"LayerNorm"`.
+        norm_eps (`float`, *optional*, defaults to 1e-05):
+            A value added to the denominator for numerical stability of normalization.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for the attention probabilities.
+        dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for all fully connected layers in the encoder and decoder.
+        positional_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability in the positional embedding layer.
+        dropout_path (`float`, *optional*, defaults to 0.0):
+            The drop path probability in the residual block.
+        ff_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability used between the two layers of the feed-forward networks.
+        bias (`bool`, *optional*, defaults to `True`):
+            Whether to use bias in the feed-forward networks.
+        activation_function (`str`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (string) in the encoder. `"gelu"` and `"relu"` are supported.
+        pre_norm (`bool`, *optional*, defaults to `True`):
+            Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is
+            applied after the residual block.
+        positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
+            Positional encodings. `"zeros"`, `"normal"`, `"uniform"` and `"sincos"` are supported.
+        learn_pe (`bool`, *optional*, defaults to `False`):
+            Whether the positional encoding is updated during training.
+        use_cls_token (`bool`, *optional*, defaults to `False`):
+            Whether a cls token is used.
+        init_std (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated normal weight initialization distribution.
+        shared_projection (`bool`, *optional*, defaults to `True`):
+            Whether to share the projection layer across different channels in the forecast head.
+        seed_number (`int`, *optional*):
+            Seed number used for random masking. If unset, no seed is set.
+        scaling (`Union`, *optional*, defaults to `"mean"`):
+            Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
+            scaler is set to "mean".
+        mask_input (`bool`, *optional*, defaults to `False`):
+            Whether to apply masking during pretraining.
+        mask_type (`str`, *optional*, defaults to `"random"`):
+            Masking type. Only `"random"` and `"forecast"` are currently supported.
+        random_mask_ratio (`float`, *optional*, defaults to 0.5):
+            Masking ratio applied to mask the input data during random pretraining.
+        forecast_mask_patches (`List`, *optional*, defaults to `[2, 3]`):
+            List of patch lengths to mask at the end of the data.
+        forecast_mask_ratios (`List`, *optional*, defaults to `[1, 1]`):
+            List of weights to use for each patch length. For example, if `forecast_mask_patches` is [5, 4] and
+            `forecast_mask_ratios` is [1, 1], then both patch lengths receive equal weight.
+        channel_consistent_masking (`bool`, *optional*, defaults to `False`):
+            If channel consistent masking is `True`, all the channels will have the same masking.
+        unmasked_channel_indices (`list`, *optional*):
+            Channels that are not masked during pretraining.
+        mask_value (`int`, *optional*, defaults to 0):
+            The value used for the masked entries during pretraining.
+        pooling_type (`str`, *optional*, defaults to `"mean"`):
+            Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
+        head_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for the head.
+        prediction_length (`int`, *optional*, defaults to 24):
+            The prediction length for the encoder. In other words, the prediction horizon of the model.
+        num_targets (`int`, *optional*, defaults to 1):
+            Number of targets for regression and classification tasks. For classification, it is the number of
+            classes.
+        output_range (`list`, *optional*):
+            Output range for the regression task. The range of output values can be set to enforce the model to
+            produce values within a range.
+        num_parallel_samples (`int`, *optional*, defaults to 100):
+            The number of samples generated in parallel for probabilistic prediction.
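As a quick worked example of how the patching parameters documented above determine what the encoder sees (a sketch only; the corresponding pieces of this patch are `PatchTSTConfig._num_patches` and `PatchTSTPatchify` further down):

```python
import torch

def num_patches(context_length: int, patch_length: int, patch_stride: int) -> int:
    # same formula as PatchTSTConfig._num_patches in this patch
    return (max(context_length, patch_length) - patch_length) // patch_stride + 1

print(num_patches(32, 8, 8))  # 4 non-overlapping patches
print(num_patches(32, 8, 4))  # 7 overlapping patches

# patchification itself is a strided unfold over the time dimension
past_values = torch.randn(2, 32, 3)  # (batch_size, sequence_length, num_input_channels)
patches = past_values.unfold(dimension=-2, size=8, step=8)
print(patches.shape)  # torch.Size([2, 4, 3, 8]) -> (batch, num_patches, channels, patch_length)
```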
+ + + ```python + >>> from transformers import PatchTSTConfig, PatchTSTModel + + >>> # Initializing an PatchTST configuration with 12 time steps for prediction + >>> configuration = PatchTSTConfig(prediction_length=12) + + >>> # Randomly initializing a model (with random weights) from the configuration + >>> model = PatchTSTModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "patchtst" + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "encoder_attention_heads", + "num_hidden_layers": "encoder_layers", + } + + def __init__( + self, + # time series specific configuration + num_input_channels: int = 1, + context_length: int = 32, + distribution_output: str = "student_t", + loss: str = "mse", + # PatchTST arguments + patch_length: int = 1, + patch_stride: int = 1, + # Transformer architecture configuration + encoder_layers: int = 3, + d_model: int = 64, + encoder_attention_heads: int = 4, + shared_embedding: bool = True, + channel_attention: bool = False, + encoder_ffn_dim: int = 256, + norm: str = "BatchNorm", + norm_eps: float = 1e-5, + attention_dropout: float = 0.0, + dropout: float = 0.0, + positional_dropout: float = 0.0, + dropout_path: float = 0.0, + ff_dropout: float = 0.0, + bias: bool = True, + activation_function: str = "gelu", + pre_norm: bool = True, + positional_encoding_type: str = "sincos", + learn_pe: bool = False, + use_cls_token: bool = False, + init_std: float = 0.02, + shared_projection: bool = True, + seed_number: Optional[int] = None, + scaling: Optional[Union[str, bool]] = "mean", + # mask pretraining + mask_input: Optional[bool] = None, + mask_type: str = "random", + random_mask_ratio: float = 0.5, + forecast_mask_patches: List[int] = [2, 3], + forecast_mask_ratios: List[int] = [1, 1], + channel_consistent_masking: bool = False, + unmasked_channel_indices: Optional[List[int]] = None, + mask_value=0, + # head + pooling_type: str = "mean", + head_dropout: float = 0.0, + prediction_length: int = 24, + num_targets: int = 1, + output_range: List = None, + # distribution head + num_parallel_samples: int = 100, + **kwargs, + ): + # time series specific configuration + self.context_length = context_length + self.num_input_channels = num_input_channels # n_vars + self.loss = loss + self.distribution_output = distribution_output + self.num_parallel_samples = num_parallel_samples + + # Transformer architecture configuration + self.d_model = d_model + self.encoder_attention_heads = encoder_attention_heads + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.dropout = dropout + self.attention_dropout = attention_dropout + self.shared_embedding = shared_embedding + self.channel_attention = channel_attention + self.norm = norm + self.norm_eps = norm_eps + self.positional_dropout = positional_dropout + self.dropout_path = dropout_path + self.ff_dropout = ff_dropout + self.bias = bias + self.activation_function = activation_function + self.pre_norm = pre_norm + self.positional_encoding_type = positional_encoding_type + self.learn_pe = learn_pe + self.use_cls_token = use_cls_token + self.init_std = init_std + self.scaling = scaling + + # PatchTST parameters + self.patch_length = patch_length + self.patch_stride = patch_stride + self.num_patches = self._num_patches() + + # Mask pretraining + self.seed_number = seed_number + self.mask_input = mask_input + self.mask_type = mask_type + self.random_mask_ratio = random_mask_ratio # for random masking + 
self.forecast_mask_patches = forecast_mask_patches # for forecast masking + self.forecast_mask_ratios = forecast_mask_ratios + self.channel_consistent_masking = channel_consistent_masking + self.unmasked_channel_indices = unmasked_channel_indices + self.mask_value = mask_value + + # general head params + self.pooling_type = pooling_type + self.head_dropout = head_dropout + + # For prediction head + self.shared_projection = shared_projection + self.prediction_length = prediction_length + + # For prediction and regression head + self.num_parallel_samples = num_parallel_samples + + # Regression + self.num_targets = num_targets + self.output_range = output_range + + super().__init__(**kwargs) + + def _num_patches(self): + return (max(self.context_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 diff --git a/src/transformers/models/patchtst/modeling_patchtst.py b/src/transformers/models/patchtst/modeling_patchtst.py new file mode 100755 index 000000000000..30522a048f02 --- /dev/null +++ b/src/transformers/models/patchtst/modeling_patchtst.py @@ -0,0 +1,1913 @@ +# coding=utf-8 +# Copyright 2023 IBM & Hugging Face. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch PatchTST model.""" + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +from torch import nn + +from ...activations import ACT2CLS +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput +from ...trainer_utils import set_seed +from ...utils import ModelOutput, add_start_docstrings, logging +from .configuration_patchtst import PatchTSTConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "PatchTSTConfig" + +PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "ibm/patchtst-etth1-pretrain", + # See all PatchTST models at https://huggingface.co/models?filter=patchtst +] + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PatchTST +class PatchTSTAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + is_causal: bool = False, + config: Optional[PatchTSTConfig] = None, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + self.config = config + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + self.is_causal = is_causal + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class PatchTSTBatchNorm(nn.Module): + """ + Parameters: + Compute batch normalization + d_model (`int`): model dimension + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps) + + def forward(self, inputs: torch.Tensor): + """ + Parameters: + inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`): + input for Batch norm calculation + Returns: + `torch.Tensor` of shape `(batch_size, sequence_length, d_model)` + """ + output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length) + output = self.batchnorm(output) + return output.transpose(1, 2) + + +def positional_encoding(positional_encoding_type, learned, q_len, d_model): + # Positional encoding + if positional_encoding_type is None: + # positional_encoding_type = None and learned = False can be used to measure impact of positional encoding + position_enc = torch.empty((q_len, d_model)) + nn.init.uniform_(position_enc, -0.02, 0.02) + learned = False + elif positional_encoding_type == "zeros": + position_enc = torch.empty((q_len, d_model)) + nn.init.uniform_(position_enc, -0.02, 0.02) + elif positional_encoding_type == "normal": + position_enc = torch.zeros((q_len, 1)) + nn.init.normal_(position_enc, mean=0.0, std=0.1) + elif positional_encoding_type == "uniform": + position_enc = torch.zeros((q_len, 1)) + nn.init.uniform_(position_enc, a=0.0, b=0.1) + elif positional_encoding_type == "sincos": + position_enc = torch.zeros(q_len, d_model) + position = torch.arange(0, q_len).unsqueeze(1) + div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)) + position_enc[:, 0::2] = torch.sin(position * div_term) + position_enc[:, 1::2] = torch.cos(position * div_term) + position_enc = position_enc - position_enc.mean() + position_enc = position_enc / (position_enc.std() * 10) + else: + raise ValueError( + f"{positional_encoding_type} is not a valid positional encoder. Available types are 'normal', 'zeros', 'zero', uniform', 'sincos', None." + ) + return nn.Parameter(position_enc, requires_grad=learned) + + +def random_masking( + inputs: torch.Tensor, + mask_ratio: float, + unmasked_channel_indices: list = None, + channel_consistent_masking: bool = False, + mask_value: int = 0, + seed_number: Optional[int] = None, +): + """random_masking: Mask the input considering the control variables. + + Args: + inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`): + The input tensor to mask. + mask_ratio (`float`): + Mask ratio. + unmasked_channel_indices (list, *optional*): + indices of unmasked channels. These channels will not be masked. + channel_consistent_masking (bool, *optional* defaults to False): + When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary + across channels. + mask_value (int, *optional*, defaults to 0): + Value to use for masking. + seed_number (int, *optional*): + Value to set for the random seed. 
+ + Returns: + `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x + n] + """ + if seed_number: + set_seed(seed_number) + + batch_size, num_channels, sequence_length, num_features = inputs.shape + device = inputs.device + + len_keep = int(sequence_length * (1 - mask_ratio)) + + if channel_consistent_masking: + noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L + noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time + else: + # noise in [0, 1], bs x num_channels x L + noise = torch.rand(batch_size, num_channels, sequence_length, device=device) + + # mask: [bs x num_channels x num_patch] + mask = torch.ones(batch_size, num_channels, sequence_length, device=device) + mask[:, :, :len_keep] = 0 + + # sort noise for each sample + ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove + ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L] + + mask = torch.gather(mask, dim=-1, index=ids_restore) + mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length] + if unmasked_channel_indices is not None: + mask[:, unmasked_channel_indices, :, :] = 0 + + inputs_mask = inputs.masked_fill(mask.bool(), mask_value) + return inputs_mask, mask[..., 0] + + +def forecast_masking( + inputs: torch.Tensor, + forecast_mask_patches: list, + forecast_mask_ratios: list = None, + unmasked_channel_indices: list = None, + mask_value: int = 0, + seed_number: Optional[int] = None, +): + """Forecast masking that masks the last K patches where K is from the forecast_mask_patches list. + For every batch, distribute the patch lengths based on forecast_mask_ratios and ignore masks for column indices + mentioned in unmasked_channel_indices. + + Parameters: + inputs (`torch.Tensor`): + Input of shape `(bs, num_channels, num_patch, patch_len)` or `(bs, tsg1, tag2, num_channels, num_patch, + patch_len)` + forecast_mask_patches (`list`): + List of patch lengths to mask at the end of the data e.g. [2, 4]. + forecast_mask_ratios (`list`, *optional*): + List of weights to use for each patch length. For example if forecast_mask_patches is [5,4] and + forecast_mask_ratios is [1,1], then equal weights to both patch lengths. + unmasked_channel_indices (`list`, *optional*): + Control Variable channel indices. These channels will not be masked. + mask_value (`int`, *optional*, defaults to 0): + Value to use for masking. + seed_number (`int`, *optional*): + Value to set for the random seed. 
+ + Returns: + `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs, + num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)` + """ + if seed_number: + set_seed(seed_number) + + if forecast_mask_ratios is None: + forecast_mask_ratios = [1 for _ in forecast_mask_patches] + + batch_size, num_channels, sequence_length, num_features = inputs.shape + mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device) + + t_list = [] + total_length = 0 + total_ratio = sum(forecast_mask_ratios) + + for patch_length, ratio in zip(forecast_mask_patches, forecast_mask_ratios): + if patch_length <= 0 or patch_length >= sequence_length: + raise Exception("masked_patch_len should be greater than 0 and less than total patches.") + temp_len = int(batch_size * ratio / total_ratio) + t_list.append([patch_length, ratio, temp_len]) + total_length += temp_len + + t_list = sorted(t_list, key=lambda x: x[2]) + + if total_length < batch_size: + t_list[0][2] = t_list[0][2] + (batch_size - total_length) + elif total_length > batch_size: + t_list[-1][2] = t_list[-1][2] + (total_length - batch_size) + + batch1 = 0 + for patch_len, _, temp_len in t_list: + batch2 = batch1 + temp_len + mask[batch1:batch2, :, -patch_len:] = 1 + batch1 = batch2 + + perm = torch.randperm(mask.shape[0]) + mask = mask[perm] + + mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len] + if unmasked_channel_indices is not None: + mask[:, unmasked_channel_indices, :, :] = 0 + + inputs_mask = inputs.masked_fill(mask.bool(), mask_value) + return inputs_mask, mask[..., 0] + + +class PatchTSTPatchify(nn.Module): + """ + A class to patchify the time series sequence into different patches + + Returns: + `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + + self.sequence_length = config.context_length + self.patch_length = config.patch_length + self.patch_stride = config.patch_stride + + if self.sequence_length <= self.patch_length: + raise ValueError( + f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" + ) + + # get the number of patches + num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 + new_sequence_length = self.patch_length + self.patch_stride * (num_patches - 1) + self.sequence_start = self.sequence_length - new_sequence_length + + def forward(self, past_values: torch.Tensor): + """ + Parameters: + past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): + Input to be patchified + + Returns: + `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` + """ + sequence_length = past_values.shape[-2] + if sequence_length != self.sequence_length: + raise ValueError( + f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." 
+ ) + # output: [bs x new_sequence_length x num_channels] + output = past_values[:, self.sequence_start :, :] + # output: [bs x num_patches x num_input_channels x patch_length] + output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) + # output: [bs x num_input_channels x num_patches x patch_length] + output = output.transpose(-2, -3).contiguous() + return output + + +class PatchTSTMasking(nn.Module): + """ + Class to perform random or forecast masking. + + Parameters: + config (`PatchTSTConfig`): model config + + Returns: + x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) + Masked patched input + mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) + Bool tensor indicating True on masked points + + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.random_mask_ratio = config.random_mask_ratio + self.channel_consistent_masking = config.channel_consistent_masking + self.mask_type = config.mask_type + self.forecast_mask_patches = config.forecast_mask_patches + self.forecast_mask_ratios = config.forecast_mask_ratios + self.unmasked_channel_indices = config.unmasked_channel_indices + self.mask_value = config.mask_value + if self.unmasked_channel_indices is not None: + self.unmasked_channel_indices.sort() + self.seed_number = config.seed_number + + def forward(self, patch_input: torch.Tensor): + """ + Parameters: + patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): + Patch input + + Return: + masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) + Masked patched input + mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) + Bool tensor indicating True on masked points + + """ + + if self.mask_type == "random": + masked_input, mask = random_masking( + inputs=patch_input, + mask_ratio=self.random_mask_ratio, + unmasked_channel_indices=self.unmasked_channel_indices, + channel_consistent_masking=self.channel_consistent_masking, + mask_value=self.mask_value, + seed_number=self.seed_number, + ) + elif self.mask_type == "forecast": + masked_input, mask = forecast_masking( + inputs=patch_input, + forecast_mask_patches=self.forecast_mask_patches, + forecast_mask_ratios=self.forecast_mask_ratios, + unmasked_channel_indices=self.unmasked_channel_indices, + mask_value=self.mask_value, + seed_number=self.seed_number, + ) + else: + raise Exception("Invalid mask type") + + mask = mask.bool() # mask: [bs x num_input_channels x num_patch] + + return masked_input, mask + + +class PatchTSTEncoderLayer(nn.Module): + """ + PatchTST encoder layer + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + + self.channel_attention = config.channel_attention + + # Multi-Head attention + self.self_attn = PatchTSTAttention( + embed_dim=config.d_model, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + + # Add & Norm of the sublayer 1 + self.dropout_path1 = nn.Dropout(config.dropout_path) if config.dropout_path > 0 else nn.Identity() + if "batch" in config.norm.lower(): + self.norm_sublayer1 = PatchTSTBatchNorm(config) + else: + self.norm_sublayer1 = nn.LayerNorm(config.d_model, eps=config.norm_eps) + + # Add & Norm of the sublayer 2 + if self.channel_attention: + self.dropout_path2 = nn.Dropout(config.dropout_path) if config.dropout_path > 0 else nn.Identity() + if "batch" in config.norm.lower(): + self.norm_sublayer2 = 
PatchTSTBatchNorm(config) + else: + self.norm_sublayer2 = nn.LayerNorm(config.d_model, eps=config.norm_eps) + + # Position-wise Feed-Forward + self.ff = nn.Sequential( + nn.Linear(config.d_model, config.encoder_ffn_dim, bias=config.bias), + ACT2CLS[config.activation_function](), + nn.Dropout(config.ff_dropout) if config.ff_dropout > 0 else nn.Identity(), + nn.Linear(config.encoder_ffn_dim, config.d_model, bias=config.bias), + ) + + # Add & Norm of sublayer 3 + self.dropout_path3 = nn.Dropout(config.dropout_path) if config.dropout_path > 0 else nn.Identity() + if "batch" in config.norm.lower(): + self.norm_sublayer3 = PatchTSTBatchNorm(config) + else: + self.norm_sublayer3 = nn.LayerNorm(config.d_model, eps=config.norm_eps) + + self.pre_norm = config.pre_norm + + def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool] = None): + """ + Parameters: + hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*): + Past values of the time series + Return: + `torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)` + + """ + batch_size, num_input_channels, sequence_length, d_model = hidden_state.shape + + # First sublayer: attention across time + # hidden_states: [(bs*num_channels) x sequence_length x d_model] + hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) + + if self.pre_norm: + ## Norm and Multi-Head attention and Add residual connection + attn_output, attn_weights, _ = self.self_attn( + hidden_states=self.norm_sublayer1(hidden_state), output_attentions=output_attentions + ) + # Add: residual connection with residual dropout + hidden_state = hidden_state + self.dropout_path1(attn_output) + else: + ## Multi-Head attention and Add residual connection and Norm - Standard Transformer from BERT + attn_output, attn_weights, _ = self.self_attn( + hidden_states=hidden_state, output_attentions=output_attentions + ) + # hidden_states: [(bs*num_channels) x sequence_length x d_model] + hidden_state = self.norm_sublayer1(hidden_state + self.dropout_path1(attn_output)) + + # hidden_state: [bs x num_channels x sequence_length x d_model] + hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) + + # second sublayer: attention across variable at any given time + if self.channel_attention: + # hidden_state: [bs x sequence_length x num_channels x d_model] + hidden_state = hidden_state.transpose(2, 1).contiguous() + # hidden_state: [(bs*sequence_length) x num_channels x d_model] + hidden_state = hidden_state.view(batch_size * sequence_length, num_input_channels, d_model) + if self.pre_norm: + ## Norm and Multi-Head attention and Add residual connection + attn_output, channel_attn_weights, _ = self.self_attn( + hidden_states=self.norm_sublayer2(hidden_state), output_attentions=output_attentions + ) + # Add: residual connection with residual dropout + hidden_state = hidden_state + self.dropout_path2(attn_output) + else: + ## Multi-Head attention and Add residual connection and Norm + attn_output, channel_attn_weights, _ = self.self_attn( + hidden_states=hidden_state, output_attentions=output_attentions + ) + # hidden_states: [(bs*sequence_length) x num_channels x d_model] + hidden_state = self.norm_sublayer2(hidden_state + self.dropout_path2(attn_output)) + + # Reshape hidden state + # hidden_state: [bs x sequence_length x num_channels x d_model] + hidden_state = hidden_state.reshape(batch_size, sequence_length, num_input_channels, d_model) + 
# hidden_state: [bs x num_channels x sequence_length x d_model] + hidden_state = hidden_state.transpose(1, 2).contiguous() + + # Third sublayer: mixing across hidden + # hidden_state: [(batch_size*num_channels) x sequence_length x d_model] + hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) + if self.pre_norm: + ## Norm and Position-wise Feed-Forward and Add residual connection + # Add: residual connection with residual dropout + hidden_state = hidden_state + self.dropout_path3(self.ff(self.norm_sublayer3(hidden_state))) + else: + ## Position-wise Feed-Forward and Add residual connection and Norm + # Add: residual connection with residual dropout + hidden_state = self.norm_sublayer3(hidden_state + self.dropout_path3(self.ff(hidden_state))) + + # [bs x num_channels x sequence_length x d_model] + hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) + + outputs = (hidden_state,) + if output_attentions: + outputs += (attn_weights, channel_attn_weights) if self.channel_attention else (attn_weights,) + + return outputs + + +class PatchTSTPreTrainedModel(PreTrainedModel): + config_class = PatchTSTConfig + base_model_prefix = "model" + main_input_name = "past_values" + supports_gradient_checkpointing = False + + def _init_weights(self, module): + """Initialize weights""" + if self.config.use_cls_token: + nn.init.normal_(self.config.cls_token, std=0.02) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, (nn.Linear, nn.Conv1d)): + module.weight.data.normal_(mean=0.0, std=self.config.init_std) + if module.bias is not None: + module.bias.data.zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (PatchTSTEncoder)): + module.gradient_checkpointing = value + + +class PatchTSTEmbedding(nn.Module): + def __init__(self, config: PatchTSTConfig): + super().__init__() + # Input encoding: projection of feature vectors onto a d-dim vector space + if not config.shared_embedding: + self.input_embedding = nn.ModuleList() + for _ in range(config.num_input_channels): + self.input_embedding.append(nn.Linear(config.patch_length, config.d_model)) + else: + self.input_embedding = nn.Linear(config.patch_length, config.d_model) + + def forward(self, patch_input: torch.Tensor): + """ + Parameters: + patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): + Patch input for embedding + return: + `torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)` + """ + # Input encoding + num_input_channels = patch_input.shape[1] + if isinstance(self.input_embedding, nn.ModuleList): + embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)] + embeddings = torch.stack(embeddings, dim=1) + else: + embeddings = self.input_embedding(patch_input) # x: [bs x num_channels x num_patches x d_model] + return embeddings + + +class PatchTSTPositionalEncoding(nn.Module): + """ + Class for positional encoding + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.use_cls_token = config.use_cls_token + if config.use_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, 1, config.d_model)) + num_patches = config.num_patches + 1 + else: + num_patches = config.num_patches + # postional encoding + self.position_enc = positional_encoding( + config.positional_encoding_type, config.learn_pe, num_patches, config.d_model 
+ ) + # Positional dropout + self.positional_dropout = ( + nn.Dropout(config.positional_dropout) if config.positional_dropout > 0 else nn.Identity() + ) + + def forward(self, patch_input: torch.Tensor): + if self.use_cls_token: + # patch_input: [bs x num_channels x num_patches x d_model] + patch_input = self.positional_dropout(patch_input + self.position_enc[1:, :]) + # append cls token where cls_token: [1 x 1 x 1 x d_model] + cls_token = self.cls_token + self.position_enc[:1, :] + # get the same copy of cls_token for all the samples in batch + cls_tokens = cls_token.expand(patch_input.shape[0], -1, -1) + # hidden_state: [bs x num_channels x (num_patches+1) x d_model] + hidden_state = torch.cat((cls_tokens, patch_input), dim=1) + else: + # hidden_state: [bs x num_channels x num_patches x d_model] + hidden_state = self.positional_dropout(patch_input + self.position_enc) + return hidden_state + + +class PatchTSTEncoder(PatchTSTPreTrainedModel): + """ + PatchTST Encoder + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + self.num_input_channels = config.num_input_channels + self.num_patches = config.num_patches + self.patch_length = config.patch_length + self.d_model = config.d_model + self.shared_embedding = config.shared_embedding + self.use_cls_token = config.use_cls_token + self.gradient_checkpointing = False + + # Input embedding: projection of feature vectors onto a d-dim vector space + self.embedder = PatchTSTEmbedding(config) + # Positional encoding + self.positional_encoder = PatchTSTPositionalEncoding(config) + # Encoder + self.layers = nn.ModuleList([PatchTSTEncoderLayer(config) for i in range(config.encoder_layers)]) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + patch_input: torch.Tensor, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + ) -> BaseModelOutput: + """ + Parameters: + patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): + Past values of the time series + output_hidden_states (bool, optional): Indicates if hidden states should be outputted. + output_attentions (bool, optional): Indicates if attentions should be outputted. + + return: + `BaseModelOutput` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + # Input embedding + patch_input = self.embedder(patch_input) + # Positional encoding + hidden_state = self.positional_encoder(patch_input) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + for encoder_layer in self.layers: + if output_hidden_states: + encoder_states = encoder_states + (hidden_state,) + + layer_outputs = encoder_layer(hidden_state=hidden_state, output_attentions=output_attentions) + # get hidden state. hidden_state shape is [bs x num_channels x num_patches x d_model] + # or [bs x num_channels x (num_patches+1) x d_model] if use cls_token + hidden_state = layer_outputs[0] + # append attention matrix at each layer + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + # return past_values, hidden_states + return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=encoder_states, attentions=all_attentions) + + +PATCHTST_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`PatchTSTConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +PATCHTST_INPUTS_DOCSTRING = r""" + Parameters: + past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, num_input_channels)`): + Past values of the time series, that serve as context in order to predict the future. The sequence size of + this tensor must be larger than the `context_length` of the model, since the model will use the larger size + to construct lag features, i.e. additional values from the past which are added in order to serve as "extra + context". + + The `sequence_length` here is equal to `config.context_length` + + The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as + `static_categorical_features`, `static_real_features`). + + For multivariate time series, the `num_input_channels` > 1 dimension is required and corresponds to the + number of variates in the time series per time step. + + future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, num_input_channels)`, *optional*): + Future values of the time series, that serve as labels for the model. The `future_values` is what the + Transformer needs during training to learn to output, given the `past_values`. + + The sequence length here is equal to `prediction_length`. + + See the demo notebook and code snippets for details. + + For multivariate time series, the `num_input_channels` > 1 dimension is required and corresponds to the + number of variates in the time series per time step. + + output_hidden_states (`bool`, *optional*, default to False): + Whether or not to return the hidden states of all layers. +""" + + +@dataclass +class PatchTSTModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states. + + Parameters: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of + the model at the output of each layer plus the optional initial embedding outputs. 
+ patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): + patched input to the Transformer + mask: (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`,*optional*) + Bool masked tensor indicating which patches are masked + loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*) + mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length + scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*) + std of the input data (batch_size, sequence_length, num_channels) over the sequence_length + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + patch_input: torch.FloatTensor = None + mask: torch.FloatTensor = None + loc: torch.FloatTensor = None + scale: torch.FloatTensor = None + + +@dataclass +class PatchTSTForPretrainingOutput(ModelOutput): + """ + Output type of [`PatchTSTForPretraining`]. + + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + MSE loss. + prediction_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction outputs of the time series modeling heads. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class PatchTSTForRegressionOutput(ModelOutput): + """ + Output type of [`PatchTSTForRegression`]. + + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + MSE loss. + forecast_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction outputs of the time series modeling heads. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ """ + + loss: Optional[torch.FloatTensor] = None + forecast_outputs: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class PatchTSTForPredictionOutput(ModelOutput): + """ + Output type of [`PatchTSTForPrediction`]. + + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + MSE loss. + prediction_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, -1)`): + Prediction outputs of the time series modeling heads. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_outputs: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + loc: torch.FloatTensor = None + scale: torch.FloatTensor = None + + +@dataclass +class PatchTSTForClassificationOutput(ModelOutput): + """ + Output type of [`PatchTSTForClassification`]. + + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class SamplePatchTSTPredictionOutput(ModelOutput): + """ + Base class for time series model's predictions outputs that contains the sampled values from the chosen + distribution. 
+ + Parameters: + sequences `(batch_size, num_samples, prediction_length, num_targets)`): + Sampled values from the chosen distribution. + """ + + sequences: torch.FloatTensor = None + + +@dataclass +class SamplePatchTSTRegressionOutput(ModelOutput): + """ + Base class for time series model's predictions outputs that contains the sampled values from the chosen + distribution. + + Parameters: + sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, num_targets)` + Sampled values from the chosen distribution. + """ + + sequences: torch.FloatTensor = None + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll +def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: + """ + Computes the negative log likelihood loss from input distribution with respect to target. + """ + return -input.log_prob(target) + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average +def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: + """ + Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, + meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. + + Args: + input_tensor (`torch.FloatTensor`): + Input tensor, of which the average must be computed. + weights (`torch.FloatTensor`, *optional*): + Weights tensor, of the same shape as `input_tensor`. + dim (`int`, *optional*): + The dim along which to average `input_tensor`. + + Returns: + `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. + """ + if weights is not None: + weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) + sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) + return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights + else: + return input_tensor.mean(dim=dim) + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST +class PatchTSTStdScaler(nn.Module): + """ + Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by + subtracting from the mean and dividing by the standard deviation. + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. 
+ Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) + denominator = denominator.clamp_min(1.0) + loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator + + variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + scale = torch.sqrt(variance + self.minimum_scale) + return (data - loc) / scale, loc, scale + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST +class PatchTSTMeanScaler(nn.Module): + """ + Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data + accordingly. + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + self.default_scale = config.default_scale if hasattr(config, "default_scale") else None + + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) + num_observed = observed_indicator.sum(self.dim, keepdim=True) + + scale = ts_sum / torch.clamp(num_observed, min=1) + + # If `default_scale` is provided, we use it, otherwise we use the scale + # of the batch. + if self.default_scale is None: + batch_sum = ts_sum.sum(dim=0) + batch_observations = torch.clamp(num_observed.sum(0), min=1) + default_scale = torch.squeeze(batch_sum / batch_observations) + else: + default_scale = self.default_scale * torch.ones_like(scale) + + # apply default scale where there are no observations + scale = torch.where(num_observed > 0, scale, default_scale) + + # ensure the scale is at least `self.minimum_scale` + scale = torch.clamp(scale, min=self.minimum_scale) + scaled_data = data / scale + + if not self.keepdim: + scale = scale.squeeze(dim=self.dim) + + return scaled_data, torch.zeros_like(scale), scale + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST +class PatchTSTNOPScaler(nn.Module): + """ + Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
+    """
+
+    def __init__(self, config: PatchTSTConfig):
+        super().__init__()
+        self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+        self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+
+    def forward(
+        self, data: torch.Tensor, observed_indicator: torch.Tensor = None
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Parameters:
+            data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                input for Batch norm calculation
+        Returns:
+            tuple of `torch.Tensor` of shapes
+                (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+                `(batch_size, 1, num_input_channels)`)
+        """
+        scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+        loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+        return data, loc, scale
+
+
+class PatchTSTScaler(nn.Module):
+    def __init__(self, config: PatchTSTConfig):
+        super().__init__()
+        if config.scaling == "mean" or config.scaling is True:
+            self.scaler = PatchTSTMeanScaler(config)
+        elif config.scaling == "std":
+            self.scaler = PatchTSTStdScaler(config)
+        else:
+            self.scaler = PatchTSTNOPScaler(config)
+
+    def forward(
+        self, data: torch.Tensor, observed_indicator: torch.Tensor
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Parameters:
+            data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                input for Batch norm calculation
+            observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                Calculating the scale on the observed indicator.
+        Returns:
+            tuple of `torch.Tensor` of shapes
+                (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+                `(batch_size, 1, num_input_channels)`)
+        """
+        data, loc, scale = self.scaler(data, observed_indicator)
+        return data, loc, scale
+
+
+@add_start_docstrings(
+    "The bare PatchTST Model outputting raw hidden-states without any specific head.",
+    PATCHTST_START_DOCSTRING,
+)
+class PatchTSTModel(PatchTSTPreTrainedModel):
+    def __init__(self, config: PatchTSTConfig):
+        super().__init__(config)
+
+        self.scaler = PatchTSTScaler(config)
+        self.patchifier = PatchTSTPatchify(config)
+        self.mask_input = config.mask_input
+
+        if self.mask_input:
+            self.masking = PatchTSTMasking(config)
+        else:
+            self.masking = nn.Identity()
+        self.encoder = PatchTSTEncoder(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def forward(
+        self,
+        past_values: torch.Tensor,
+        past_observed_mask: Optional[torch.Tensor] = None,
+        future_values: Optional[torch.Tensor] = None,
+        output_hidden_states: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, PatchTSTModelOutput]:
+        """
+        Parameters:
+            past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
+                Input sequence to the model
+            past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
+                Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
+                in `[0, 1]`:
+
+                - 1 for values that are **observed**,
+                - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+ output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + output_attentions (`bool`, *optional*): + Whether or not to return the output attention of all layers + return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTModelOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False) + + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + + if past_observed_mask is None: + past_observed_mask = torch.ones_like(past_values) + + # x: tensor [bs x sequence_length x num_input_channels] + scaled_past_values, loc, scale = self.scaler(past_values, past_observed_mask) + + # patched_values: [bs x num_input_channels x num_patches x patch_length] for pretrain + patched_values = self.patchifier(scaled_past_values) + if self.mask_input: + masked_values, mask = self.masking(patched_values) + else: + masked_values, mask = self.masking(patched_values), None + + encoder_output = self.encoder( + patch_input=masked_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions + ) + + if not return_dict: + outputs = (encoder_output.last_hidden_state, encoder_output.hidden_states, encoder_output.attentions) + outputs = outputs + (patched_values, mask, loc, scale) + return tuple(v for v in outputs if v is not None) + + return PatchTSTModelOutput( + last_hidden_state=encoder_output.last_hidden_state, + hidden_states=encoder_output.hidden_states, + attentions=encoder_output.attentions, + patch_input=patched_values, + mask=mask, + loc=loc, + scale=scale, + ) + + +class PatchTSTMaskPretrainHead(nn.Module): + """ + Pretraining head for mask modelling + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.dropout = nn.Dropout(config.dropout) + self.linear = nn.Linear(config.d_model, config.patch_length) + self.use_cls_token = config.use_cls_token + + def forward(self, embedding: torch.Tensor) -> torch.Tensor: + """ + Parameters: + embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` + or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): + Embedding from the model + Returns: + `torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or + `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True + + """ + embedding = self.linear(self.dropout(embedding)) # [bs x num_channels x num_patches x patch_length] + if self.use_cls_token: + embedding = embedding[:, :, 1:, :] # remove the first cls token + return embedding + + +class PatchTSTForPretraining(PatchTSTPreTrainedModel): + """ + Mask pretrain model: PatchTST model + pretrain head + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + + config.mask_input = True + self.model = PatchTSTModel(config=config) + self.head = PatchTSTMaskPretrainHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, PatchTSTForPretrainingOutput]: + """ + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, 
*required*): + Input sequence to the model + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # past_values: [bs x num_channels x num_patches x d_model] or + # [bs x num_channels x (num_patches+1) x d_model] if use cls_token + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + ) + + # model_output[0]: [bs x num_channels x num_patches x patch_length] or + # [bs x num_channels x (num_patches+1) x patch_length] if use cls_token + x_hat = self.head(model_output[0]) + + # calculate masked_loss + loss = nn.MSELoss(reduction="none") + loss_val = loss(x_hat, model_output.patch_input) + masked_loss = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) + + encoder_states = model_output.hidden_states + if not return_dict: + outputs = (masked_loss, x_hat, model_output.hidden_states, model_output.attentions) + return tuple(v for v in outputs if v is not None) + return PatchTSTForPretrainingOutput( + loss=masked_loss, prediction_output=x_hat, hidden_states=encoder_states, attentions=model_output.attentions + ) + + +class PatchTSTForClassification(PatchTSTPreTrainedModel): + """ + PatchTST model for classification. The model contains PatchTST model + classification head + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + + self.model = PatchTSTModel(config) + self.head = PatchTSTClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + target_values: torch.Tensor = None, + past_observed_mask: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, PatchTSTForClassificationOutput]: + """ + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + target_values (`torch.Tensor`, *optional*): labels associates with the `past_values` + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. 
+ + Returns: + `PatchTSTForClassificationOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + ) + y_hat = self.head(model_output[0]) + + loss_val = None + if target_values is not None: + loss = nn.CrossEntropyLoss() + loss_val = loss(y_hat, target_values) + + if not return_dict: + outputs = (loss_val, y_hat, model_output.hidden_states, model_output.attentions) + return tuple(v for v in outputs if v is not None) + return PatchTSTForClassificationOutput( + loss=loss_val, + prediction_logits=y_hat, + hidden_states=model_output.hidden_states, + attentions=model_output.attentions, + ) + + +class PatchTSTClassificationHead(nn.Module): + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.use_cls_token = config.use_cls_token + self.pooling_type = config.pooling_type + self.flatten = nn.Flatten(start_dim=1) + self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() + self.linear = nn.Linear(config.num_input_channels * config.d_model, config.num_targets) + + def forward(self, embedding: torch.Tensor): + """ + Parameters: + embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` + or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): + Embedding from the model + Returns: + `torch.Tensor` of shape `(bs, num_targets)` + + """ + if self.use_cls_token: + # use the first output token, pooled_embedding: bs x num_channels x d_model + pooled_embedding = embedding[:, :, 0, :] + elif self.pooling_type == "mean": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.mean(dim=2) + elif self.pooling_type == "max": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.max(dim=2) + else: + raise Exception(f"pooling operator {self.pooling_type} is not implemented yet") + # pooled_embedding: bs x num_channels * d_model + pooled_embedding = self.flatten(pooled_embedding) + # output: bs x n_classes + output = self.linear(self.dropout(pooled_embedding)) + return output + + +class PatchTSTPredictionHead(nn.Module): + def __init__(self, config: PatchTSTConfig, distribution_output=None): + super().__init__() + + self.shared_projection = config.shared_projection + self.num_input_channels = config.num_input_channels + self.use_cls_token = config.use_cls_token + self.pooling_type = config.pooling_type + head_dim = config.d_model if self.pooling_type else config.d_model * config.num_patches + + if not self.shared_projection: + # if each channel has its own head + self.projections = nn.ModuleList() + self.dropouts = nn.ModuleList() + self.flattens = nn.ModuleList() + for i in range(self.num_input_channels): + self.flattens.append(nn.Flatten(start_dim=2)) + if distribution_output is None: + # use linear head + self.projections.append(nn.Linear(head_dim, config.prediction_length)) + else: + # use distribution head + self.projections.append(distribution_output.get_parameter_projection(head_dim)) + self.dropouts.append(nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()) + else: + # all the channels share the same head + self.flatten = nn.Flatten(start_dim=2) + if distribution_output is None: + # use linear head + 
self.projection = nn.Linear(head_dim, config.prediction_length) + else: + # use distribution head + self.projection = distribution_output.get_parameter_projection(head_dim) + self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() + + def forward(self, embedding: torch.Tensor): + """ + Parameters: + embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` + or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): + Embedding from the model + Returns: + `torch.Tensor` of shape `(bs, forecast_len, num_channels)` + + """ + if self.use_cls_token: + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding[:, :, 0, :] + else: + if self.pooling_type == "mean": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.mean(dim=2) + elif self.pooling_type == "max": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.max(dim=2) + else: + # pooled_embedding: [bs x num_channels x num_patches x d_model] + pooled_embedding = embedding + + if not self.shared_projection: + output = [] + for i in range(self.num_input_channels): + # pooled_embedding: [bs x (d_model * num_patches)] or [bs x d_model)] + pooled_embedding = self.flattens[i](pooled_embedding[:, i, :]) + pooled_embedding = self.dropouts[i](pooled_embedding) + # pooled_embedding: [bs x forecast_len] + # or tuple ([bs x forecast_len], [bs x forecast_len]) if using distribution head + pooled_embedding = self.projections[i](pooled_embedding) + output.append(pooled_embedding) + # output: [bs x num_channels x forecast_len] + output = torch.stack(output, dim=1) + else: + # pooled_embedding: [bs x num_channels x (d_model * num_patches)] or [bs x num_channels x d_model)] + pooled_embedding = self.flatten(pooled_embedding) + pooled_embedding = self.dropout(pooled_embedding) + # output: [bs x num_channels x forecast_len] or + # tuple ([bs x num_channels x forecast_len], [bs x num_channels x forecast_len]) if using distribution head + output = self.projection(pooled_embedding) + + if isinstance(output, tuple): + # output: ([bs x forecast_len x num_channels], [bs x forecast_len x num_channels]) + output = tuple(z.transpose(2, 1) for z in output) + else: + output = output.transpose(2, 1) # [bs x forecast_len x num_channels] + return output + + +class PatchTSTForPrediction(PatchTSTPreTrainedModel): + """ + PatchTST for forecasting. 
The model contains PatchTST model + Forecasting head + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + self.model = PatchTSTModel(config) + + if config.loss == "mse": + self.distribution_output = None + else: + if config.distribution_output == "student_t": + self.distribution_output = StudentTOutput(dim=config.prediction_length) + elif config.distribution_output == "normal": + self.distribution_output = NormalOutput(dim=config.prediction_length) + elif config.distribution_output == "negative_binomial": + self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length) + else: + raise ValueError(f"Unknown distribution output {config.distribution_output}") + + self.head = PatchTSTPredictionHead(config, self.distribution_output) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + future_values: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, PatchTSTForPredictionOutput]: + """ + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*): + future target values associated with the `past_values` + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. 
+ + Returns: + `PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # get model output + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + ) + # get output head + y_hat = self.head(model_output.last_hidden_state) + + loss_val = None + + if future_values is not None: + if self.distribution_output: + distribution = self.distribution_output.distribution( + y_hat, loc=model_output.loc, scale=model_output.scale + ) + loss_val = nll(distribution, future_values) + # take average of the loss + loss_val = weighted_average(loss_val) + # for testing + # loss_val = nn.MSELoss(reduction='none')(distribution.mean, future_values) + # loss_val = weighted_average(loss_val) + else: + y_hat = y_hat * model_output.scale + model_output.loc + loss = nn.MSELoss(reduction="mean") + loss_val = loss(y_hat, future_values) + + loc = model_output.loc + scale = model_output.scale + + if not return_dict: + outputs = (loss_val, y_hat, model_output.hidden_states, model_output.attentions, loc, scale) + return tuple(v for v in outputs if v is not None) + return PatchTSTForPredictionOutput( + loss=loss_val, + prediction_outputs=y_hat, + hidden_states=model_output.hidden_states, + attentions=model_output.attentions, + loc=loc, + scale=scale, + ) + + def generate( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + ) -> SamplePatchTSTPredictionOutput: + """ + Generate sequences of sample predictions from a model with a probability distribution head. + + Parameters: + past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Past values of the time series that serves as context in order to predict the future. + + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + + Return: + [`SamplePatchTSTPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, + number of samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, + num_input_channels)` for multivariate predictions. 
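+
+        A minimal illustrative sketch (not a doctest against a released checkpoint): it builds a randomly
+        initialized model from a config and assumes a probabilistic head, i.e. `loss` set to something other
+        than `"mse"` together with a supported `distribution_output`, since `generate` samples from the
+        distribution head. The config values below are placeholders, not recommended settings.
+
+        ```python
+        >>> import torch
+        >>> from transformers import PatchTSTConfig, PatchTSTForPrediction
+
+        >>> config = PatchTSTConfig(
+        ...     num_input_channels=2,
+        ...     context_length=32,
+        ...     patch_length=8,
+        ...     patch_stride=8,
+        ...     prediction_length=4,
+        ...     loss="nll",
+        ...     distribution_output="student_t",
+        ... )
+        >>> model = PatchTSTForPrediction(config)
+
+        >>> # context window of shape [batch_size x sequence_length x num_input_channels]
+        >>> past_values = torch.randn(1, config.context_length, config.num_input_channels)
+        >>> samples = model.generate(past_values=past_values)
+        >>> # samples.sequences: [batch_size x config.num_parallel_samples x prediction_length x num_input_channels]
+        ```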
+        """
+        # get number of samples
+        num_parallel_samples = self.config.num_parallel_samples
+
+        # get model output
+        outputs = self(
+            past_values=past_values,
+            future_values=None,
+            past_observed_mask=past_observed_mask,
+            output_hidden_states=False,
+        )
+
+        # get distribution
+        distribution = self.distribution_output.distribution(
+            outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale
+        )
+        # get samples: list of [bs x forecast_len x num_channels]
+        samples = [distribution.sample() for _ in range(num_parallel_samples)]
+        # stack tensors
+        samples = torch.stack(samples, dim=1)  # [bs x num_samples x forecast_len x num_channels]
+        return SamplePatchTSTPredictionOutput(sequences=samples)
+
+
+class PatchTSTRegressionHead(nn.Module):
+    """
+    Regression head
+    """
+
+    def __init__(self, config: PatchTSTConfig, distribution_output=None):
+        super().__init__()
+        self.y_range = config.output_range
+        self.use_cls_token = config.use_cls_token
+        self.pooling_type = config.pooling_type
+        self.distribution_output = distribution_output
+
+        head_dim = config.num_input_channels * config.d_model
+
+        self.flatten = nn.Flatten(start_dim=1)
+        self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
+
+        if distribution_output is None:
+            self.projection = nn.Linear(head_dim, config.num_targets)
+        else:
+            self.projection = distribution_output.get_parameter_projection(head_dim)
+
+    def forward(self, embedding: torch.Tensor):
+        """
+        Parameters:
+            embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)`
+                or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
+                Embedding from the model
+        Returns:
+            `torch.Tensor` of shape `(bs, output_dim)`
+
+        """
+        if self.use_cls_token:
+            # use the first output token, pooled_embedding: [bs x num_channels x d_model]
+            pooled_embedding = embedding[:, :, 0, :]
+        elif self.pooling_type == "mean":
+            # pooled_embedding: [bs x num_channels x d_model]
+            pooled_embedding = embedding.mean(dim=2)
+        elif self.pooling_type == "max":
+            # pooled_embedding: [bs x num_channels x d_model]
+            pooled_embedding = embedding.max(dim=2)
+        else:
+            raise Exception(f"pooling operator {self.pooling_type} is not implemented yet")
+        # flatten the input
+        # pooled_embedding: bs x (num_channels * d_model)
+        pooled_embedding = self.dropout(self.flatten(pooled_embedding))
+        # projection
+        # output: bs x output_dim or a tuple of this shape for distribution head
+        output = self.projection(pooled_embedding)
+        if (self.distribution_output is None) and (self.y_range is not None):  # linear head
+            output = torch.sigmoid(output) * (self.y_range[1] - self.y_range[0]) + self.y_range[0]
+        return output
+
+
+class PatchTSTForRegression(PatchTSTPreTrainedModel):
+    # PatchTST model + Regression head
+    def __init__(self, config: PatchTSTConfig):
+        super().__init__(config)
+        self.model = PatchTSTModel(config)
+
+        if config.loss == "mse":
+            self.distribution_output = None
+        else:
+            if config.distribution_output == "student_t":
+                self.distribution_output = StudentTOutput(dim=config.prediction_length * config.num_targets)
+            elif config.distribution_output == "normal":
+                self.distribution_output = NormalOutput(dim=config.prediction_length * config.num_targets)
+            elif config.distribution_output == "negative_binomial":
+                self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length * config.num_targets)
+            else:
+                raise ValueError(f"Unknown distribution output
{config.distribution_output}") + + self.head = PatchTSTRegressionHead(config, self.distribution_output) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + target_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, PatchTSTForRegressionOutput]: + """ + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + target_values (`torch.Tensor` of shape `(bs, num_input_channels)`): + target values associates with the `past_values` + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTForRegressionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + ) + # get output head. y_hat is of shape [bs x num_targets] or tuple of this shape + y_hat = self.head(model_output.last_hidden_state) + + loss_val = None + if target_values is not None: + if self.distribution_output: + distribution = self.distribution_output.distribution(y_hat) + loss_val = nll(distribution, target_values) + # take average of the loss + loss_val = weighted_average(loss_val) + else: + loss = nn.MSELoss(reduction="mean") + loss_val = loss(y_hat, target_values) + + if not return_dict: + outputs = (loss_val, y_hat, model_output.hidden_states, model_output.attentions) + return tuple(v for v in outputs if v is not None) + return PatchTSTForRegressionOutput( + loss=loss_val, + forecast_outputs=y_hat, + hidden_states=model_output.hidden_states, + attentions=model_output.attentions, + ) + + def generate( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + ) -> SamplePatchTSTRegressionOutput: + """ + Generate sequences of sample predictions from a model with a probability distribution head. + + Parameters: + past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Past values of the time series that serves as context in order to predict the future. + + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + + Return: + [`SamplePatchTSTRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, + number of samples, num_targets)`. 
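+
+        A minimal illustrative sketch under the same assumptions as the prediction example above (a randomly
+        initialized model, a non-`"mse"` `loss` and a supported `distribution_output`); the config values below
+        are placeholders, not from a released checkpoint:
+
+        ```python
+        >>> import torch
+        >>> from transformers import PatchTSTConfig, PatchTSTForRegression
+
+        >>> config = PatchTSTConfig(
+        ...     num_input_channels=2,
+        ...     context_length=32,
+        ...     patch_length=8,
+        ...     patch_stride=8,
+        ...     num_targets=2,
+        ...     loss="nll",
+        ...     distribution_output="student_t",
+        ... )
+        >>> model = PatchTSTForRegression(config)
+
+        >>> past_values = torch.randn(1, config.context_length, config.num_input_channels)
+        >>> samples = model.generate(past_values=past_values)
+        >>> # samples.sequences stacks config.num_parallel_samples draws along dimension 1
+        ```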
+ """ + # get number of samples + num_parallel_samples = self.config.num_parallel_samples + + # get model output + outputs = self( + past_values=past_values, + target_values=None, + past_observed_mask=past_observed_mask, + output_hidden_states=False, + ) + + # get distribution + distribution = self.distribution_output.distribution(outputs.forecast_outputs) + # get samples: list of [bs x num_targets] + samples = [distribution.sample() for _ in range(num_parallel_samples)] + # stack tensors + samples = torch.stack(samples, dim=1) # [bs x num_samples x num_targets] + return SamplePatchTSTRegressionOutput(sequences=samples) diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py index 904c02b4f043..2c875dd56e1b 100644 --- a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -83,67 +83,66 @@ def forward(self, features: torch.Tensor) -> torch.Tensor: class TimeSeriesStdScaler(nn.Module): """ - Standardize features by calculating the mean and scaling along some given dimension `dim`, and then normalizes it - by subtracting from the mean and dividing by the standard deviation. - - Args: - dim (`int`): - Dimension along which to calculate the mean and standard deviation. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. - minimum_scale (`float`, *optional*, defaults to 1e-5): - Default scale that is used for elements that are constantly zero along dimension `dim`. + Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by + subtracting from the mean and dividing by the standard deviation. """ - def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5): + def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() - if not dim > 0: - raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0") - self.dim = dim - self.keepdim = keepdim - self.minimum_scale = minimum_scale + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - @torch.no_grad() - def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - denominator = weights.sum(self.dim, keepdim=self.keepdim) + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. 
+ Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) - loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator + loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator - variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale class TimeSeriesMeanScaler(nn.Module): """ - Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data + Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. - - Args: - dim (`int`): - Dimension along which to compute the scale. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. - default_scale (`float`, *optional*, defaults to `None`): - Default scale that is used for elements that are constantly zero. If `None`, we use the scale of the batch. - minimum_scale (`float`, *optional*, defaults to 1e-10): - Default minimum possible scale that is used for any item. """ - def __init__( - self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10 - ): + def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() - self.dim = dim - self.keepdim = keepdim - self.minimum_scale = minimum_scale - self.default_scale = default_scale + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + self.default_scale = config.default_scale if hasattr(config, "default_scale") else None - @torch.no_grad() def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - # shape: (N, [C], T=1) + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) @@ -173,23 +172,26 @@ def forward( class TimeSeriesNOPScaler(nn.Module): """ - Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data. - - Args: - dim (`int`): - Dimension along which to compute the scale. - keepdim (`bool`, *optional*, defaults to `False`): - Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
""" - def __init__(self, dim: int, keepdim: bool = False): + def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() - self.dim = dim - self.keepdim = keepdim + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor + self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @@ -1180,11 +1182,11 @@ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: - self.scaler = TimeSeriesMeanScaler(dim=1, keepdim=True) + self.scaler = TimeSeriesMeanScaler(config) elif config.scaling == "std": - self.scaler = TimeSeriesStdScaler(dim=1, keepdim=True) + self.scaler = TimeSeriesStdScaler(config) else: - self.scaler = TimeSeriesNOPScaler(dim=1, keepdim=True) + self.scaler = TimeSeriesNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = TimeSeriesFeatureEmbedder( diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index c6b20c7e3674..07bcf3867fb1 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -627,6 +627,12 @@ def __init__(self, *args, **kwargs): MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = None +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = None + + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None @@ -6019,6 +6025,51 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PatchTSTForClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTForPretraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTForRegression(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class PegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] diff --git a/tests/models/patchtst/__init__.py b/tests/models/patchtst/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/patchtst/test_modeling_patchtst.py 
b/tests/models/patchtst/test_modeling_patchtst.py new file mode 100644 index 000000000000..8d6f2202ee81 --- /dev/null +++ b/tests/models/patchtst/test_modeling_patchtst.py @@ -0,0 +1,353 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch PatchTST model. """ + +import inspect +import random +import tempfile +import unittest + +from huggingface_hub import hf_hub_download + +from transformers import is_torch_available +from transformers.models.auto import get_values +from transformers.testing_utils import is_flaky, require_torch, slow, torch_device + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin + + +TOLERANCE = 1e-4 + +if is_torch_available(): + import torch + + from transformers import ( + MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, + MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, + PatchTSTConfig, + PatchTSTForClassification, + PatchTSTForPrediction, + PatchTSTForPretraining, + PatchTSTForRegression, + PatchTSTModel, + ) + + +@require_torch +class PatchTSTModelTester: + def __init__( + self, + parent, + batch_size=13, + prediction_length=7, + context_length=14, + patch_length=5, + patch_stride=5, + num_input_channels=1, + num_time_features=1, + is_training=True, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + lags_sequence=[1, 2, 3, 4, 5], + distil=False, + seed_number=42, + num_targets=2, + num_output_channels=2, + ): + self.parent = parent + self.batch_size = batch_size + self.prediction_length = prediction_length + self.context_length = context_length + self.patch_length = patch_length + self.patch_stride = patch_stride + self.num_input_channels = num_input_channels + self.num_time_features = num_time_features + self.lags_sequence = lags_sequence + self.is_training = is_training + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + + self.seed_number = seed_number + self.num_targets = num_targets + self.num_output_channels = num_output_channels + self.distil = distil + self.num_patches = (max(self.context_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 + + def get_config(self): + return PatchTSTConfig( + prediction_length=self.prediction_length, + patch_length=self.patch_length, + patch_stride=self.patch_stride, + num_input_channels=self.num_input_channels, + d_model=self.hidden_size, + encoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + encoder_ffn_dim=self.intermediate_size, + 
dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + context_length=self.context_length, + activation_function=self.hidden_act, + seed_number=self.seed_number, + num_targets=self.num_targets, + num_output_channels=self.num_output_channels, + ) + + def prepare_patchtst_inputs_dict(self, config): + _past_length = config.context_length + # bs, num_input_channels, num_patch, patch_len + + # [bs x seq_len x num_input_channels] + past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels]) + + future_values = floats_tensor([self.batch_size, config.prediction_length, self.num_input_channels]) + + inputs_dict = { + "past_values": past_values, + "future_values": future_values, + } + return inputs_dict + + def prepare_config_and_inputs(self): + config = self.get_config() + inputs_dict = self.prepare_patchtst_inputs_dict(config) + return config, inputs_dict + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + return config, inputs_dict + + +@require_torch +class PatchTSTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): + all_model_classes = ( + ( + PatchTSTModel, + PatchTSTForPrediction, + PatchTSTForPretraining, + PatchTSTForClassification, + PatchTSTForRegression, + ) + if is_torch_available() + else () + ) + all_generative_model_classes = ( + (PatchTSTForPrediction, PatchTSTForRegression, PatchTSTForPretraining) if is_torch_available() else () + ) + pipeline_model_mapping = {"feature-extraction": PatchTSTModel} if is_torch_available() else {} + test_pruning = False + test_head_masking = False + test_missing_keys = False + test_torchscript = False + test_inputs_embeds = False + test_model_common_attributes = False + + test_resize_embeddings = True + test_resize_position_embeddings = False + test_mismatched_shapes = True + test_model_parallel = False + has_attentions = False + + def setUp(self): + self.model_tester = PatchTSTModelTester(self) + self.config_tester = ConfigTester( + self, + config_class=PatchTSTConfig, + has_text_modality=False, + prediction_length=self.model_tester.prediction_length, + ) + + def test_config(self): + self.config_tester.run_common_tests() + + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) + + # if PatchTSTForPretraining + if model_class == PatchTSTForPretraining: + inputs_dict.pop("future_values") + # else if classification model: + elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING): + rng = random.Random(self.model_tester.seed_number) + labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_targets, rng=rng) + inputs_dict["target_values"] = labels + inputs_dict.pop("future_values") + elif model_class in get_values(MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING): + rng = random.Random(self.model_tester.seed_number) + target_values = floats_tensor( + [self.model_tester.batch_size, self.model_tester.num_output_channels], rng=rng + ) + inputs_dict["target_values"] = target_values + inputs_dict.pop("future_values") + return inputs_dict + + def test_save_load_strict(self): + config, _ = self.model_tester.prepare_config_and_inputs() + for model_class in self.all_model_classes: + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model2, info = model_class.from_pretrained(tmpdirname, 
output_loading_info=True) + self.assertEqual(info["missing_keys"], []) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + num_patch = self.model_tester.num_patches + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [num_patch, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + print("model_class: ", model_class) + + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + @unittest.skip(reason="we have no tokens embeddings") + def test_resize_tokens_embeddings(self): + pass + + def test_model_main_input_name(self): + model_signature = inspect.signature(getattr(PatchTSTModel, "forward")) + # The main input is the name of the argument after `self` + observed_main_input_name = list(model_signature.parameters.keys())[1] + self.assertEqual(PatchTSTModel.main_input_name, observed_main_input_name) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = [ + "past_values", + "past_observed_mask", + "future_values", + ] + if model_class == PatchTSTForPretraining: + expected_arg_names.remove("future_values") + elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING) or model_class in get_values( + MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING + ): + expected_arg_names.remove("future_values") + expected_arg_names.remove("past_observed_mask") + expected_arg_names.append("target_values") if model_class in get_values( + MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING + ) else expected_arg_names.append("target_values") + expected_arg_names.append("past_observed_mask") + expected_arg_names.extend( + [ + "output_hidden_states", + "output_attentions", + "return_dict", + ] + ) + + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + + @is_flaky() + def test_retain_grad_hidden_states_attentions(self): + super().test_retain_grad_hidden_states_attentions() + + +# Note: Publishing of this dataset is under internal review. The dataset is not yet downloadable. +def prepare_batch(repo_id="ibm/etth1-forecast-test", file="train-batch.pt"): + file = hf_hub_download(repo_id=repo_id, filename=file, repo_type="dataset") + batch = torch.load(file, map_location=torch_device) + return batch + + +# Note: Publishing of pretrained weights is under internal review. Pretrained model is not yet downloadable. 
+@require_torch
+@slow
+class PatchTSTModelIntegrationTests(unittest.TestCase):
+    # Publishing of pretrained weights is under internal review. Pretrained model is not yet downloadable.
+    def test_pretrain_head(self):
+        model = PatchTSTForPretraining.from_pretrained("ibm/patchtst-etth1-pretrain").to(torch_device)
+        batch = prepare_batch()
+
+        torch.manual_seed(0)
+        with torch.no_grad():
+            output = model(past_values=batch["past_values"].to(torch_device)).prediction_output
+        num_patch = (
+            max(model.config.context_length, model.config.patch_length) - model.config.patch_length
+        ) // model.config.patch_stride + 1
+        expected_shape = torch.Size([64, model.config.num_input_channels, num_patch, model.config.patch_length])
+        self.assertEqual(output.shape, expected_shape)
+
+        expected_slice = torch.tensor(
+            [[[-0.5409]], [[0.3093]], [[-0.3759]], [[0.5068]], [[-0.8387]], [[0.0937]], [[0.2809]]],
+            device=torch_device,
+        )
+        self.assertTrue(torch.allclose(output[0, :7, :1, :1], expected_slice, atol=TOLERANCE))
+
+    # Publishing of pretrained weights is under internal review. Pretrained model is not yet downloadable.
+    def test_prediction_head(self):
+        model = PatchTSTForPrediction.from_pretrained("ibm/patchtst-etth1-forecast").to(torch_device)
+
+        batch = prepare_batch(file="test-batch.pt")
+
+        torch.manual_seed(0)
+        with torch.no_grad():
+            output = model(
+                past_values=batch["past_values"].to(torch_device),
+                future_values=batch["future_values"].to(torch_device),
+            ).prediction_outputs
+        expected_shape = torch.Size([64, model.config.prediction_length, model.config.num_input_channels])
+        self.assertEqual(output.shape, expected_shape)
+
+        expected_slice = torch.tensor(
+            [[0.3228, 0.4320, 0.4591, 0.4066, -0.3461, 0.3094, -0.8426]],
+            device=torch_device,
+        )
+        self.assertTrue(torch.allclose(output[0, :1, :7], expected_slice, atol=TOLERANCE))
diff --git a/utils/check_repo.py b/utils/check_repo.py
index d740eefed019..390f4ca5cab5 100644
--- a/utils/check_repo.py
+++ b/utils/check_repo.py
@@ -185,6 +185,8 @@
     "TimeSeriesTransformerForPrediction",
     "InformerForPrediction",
     "AutoformerForPrediction",
+    "PatchTSTForPretraining",
+    "PatchTSTForPrediction",
     "JukeboxVQVAE",
     "JukeboxPrior",
     "SamModel",

From 721d1c8ca6cd892a88d9847809deddfd92abfd49 Mon Sep 17 00:00:00 2001
From: Marc Sun <57196510+SunMarc@users.noreply.github.com>
Date: Tue, 14 Nov 2023 10:20:29 +0100
Subject: [PATCH 161/268] Fix FA2 import + deprecation cycle (#27330)

* put back import

* switch to logger.warnings instead
---
 src/transformers/utils/__init__.py     | 1 +
 src/transformers/utils/import_utils.py | 8 ++++++++
 2 files changed, 9 insertions(+)

diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py
index 1a0d68f700ec..e7911e5d552a 100644
--- a/src/transformers/utils/__init__.py
+++ b/src/transformers/utils/__init__.py
@@ -117,6 +117,7 @@
     is_essentia_available,
     is_faiss_available,
     is_flash_attn_2_available,
+    is_flash_attn_available,
     is_flax_available,
     is_fsdp_available,
     is_ftfy_available,
diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py
index 3c05cac7dbe2..c4862b197c97 100644
--- a/src/transformers/utils/import_utils.py
+++ b/src/transformers/utils/import_utils.py
@@ -614,6 +614,14 @@ def is_flash_attn_2_available():
     return _flash_attn_2_available and torch.cuda.is_available()


+def is_flash_attn_available():
+    logger.warning(
+        "Using `is_flash_attn_available` is deprecated and will be removed in v4.38. "
+        "Please use `is_flash_attn_2_available` instead."
+ ) + return is_flash_attn_2_available() + + def is_torchdistx_available(): return _torchdistx_available From d71fa9f618263130281501e6cf978d80e5850ef8 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 14 Nov 2023 10:32:57 +0100 Subject: [PATCH 162/268] [`Peft`] `modules_to_save` support for peft integration (#27466) * `modules_to_save` support for peft integration * Update docs/source/en/peft.md Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * slightly elaborate test --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docs/source/en/peft.md | 22 ++++++++++- src/transformers/integrations/peft.py | 6 ++- .../peft_integration/test_peft_integration.py | 38 +++++++++++++++++++ 3 files changed, 63 insertions(+), 3 deletions(-) diff --git a/docs/source/en/peft.md b/docs/source/en/peft.md index 302b614e5f7b..d86a36e62487 100644 --- a/docs/source/en/peft.md +++ b/docs/source/en/peft.md @@ -98,7 +98,7 @@ You can use [`~peft.PeftModel.add_adapter`] to add a new adapter to a model with ```py from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer -from peft import PeftConfig +from peft import LoraConfig model_id = "facebook/opt-350m" model = AutoModelForCausalLM.from_pretrained(model_id) @@ -208,6 +208,26 @@ model.save_pretrained(save_dir) model = AutoModelForCausalLM.from_pretrained(save_dir) ``` +## Add additional trainable layers to a PEFT adapter + +You can also fine-tune additional trainable adapters on top of a model that has adapters attached by passing `modules_to_save` in your PEFT config. For example, if you want to also fine-tune the lm_head on top of a model with a LoRA adapter: + +```py +from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer +from peft import LoraConfig + +model_id = "facebook/opt-350m" +model = AutoModelForCausalLM.from_pretrained(model_id) + +lora_config = LoraConfig( + target_modules=["q_proj", "k_proj"], + modules_to_save=["lm_head"], +) + +model.add_adapter(lora_config) +``` + + - -# PatchTST - -## Overview - -The PatchTST model was proposed in [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/abs/2211.14730) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. - -The abstract from the paper is the following: - -*We propose an efficient design of Transformer-based models for multivariate time series forecasting and self-supervised representation learning. It is based on two key components: (i) segmentation of time series into subseries-level patches which are served as input tokens to Transformer; (ii) channel-independence where each channel contains a single univariate time series that shares the same embedding and Transformer weights across all the series. Patching design naturally has three-fold benefit: local semantic information is retained in the embedding; computation and memory usage of the attention maps are quadratically reduced given the same look-back window; and the model can attend longer history. Our channel-independent patch time series Transformer (PatchTST) can improve the long-term forecasting accuracy significantly when compared with that of SOTA Transformer-based models. We also apply our model to self-supervised pre-training tasks and attain excellent fine-tuning performance, which outperforms supervised training on large datasets. 
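One way to sanity-check the `modules_to_save` example above is to look at the parameter names after the adapter has been attached. The snippet below is an illustrative sketch only: it reuses the `model` and `lora_config` objects from the example above and assumes a recent `peft` release, in which the injected LoRA weights and the extra trainable copy of the `lm_head` appear under `lora_*` and `modules_to_save` parameter names.

```py
# Illustrative sketch, reusing `model` and `lora_config` from the example above.
model.add_adapter(lora_config)

# The exact naming depends on the installed peft version; with recent releases the LoRA
# matrices show up as `lora_A`/`lora_B` and the trainable lm_head copy under `modules_to_save`.
adapter_parameters = [
    name
    for name, parameter in model.named_parameters()
    if "lora_" in name or "modules_to_save" in name
]
print(f"{len(adapter_parameters)} adapter parameters, e.g. {adapter_parameters[:3]}")
```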
Transferring of masked pre-trained representation on one dataset to others also produces SOTA forecasting accuracy.* - -Tips: - -The model can also be used for time series classification and time series regression. See the respective [`PatchTSTForClassification`] and [`PatchTSTForRegression`] classes. - -At a high level the model vectorizes time series into patches of a given size and encodes them via a Transformer which then outputs the prediction length forecasts: - -![model](https://github.com/namctin/transformers/assets/8100/150af169-29de-419a-8d98-eb78251c21fa) - - -This model was contributed by [namctin](https://huggingface.co/namctin), [gsinthong](https://huggingface.co/gsinthong), [diepi](https://huggingface.co/diepi), [vijaye12](https://huggingface.co/vijaye12), [wmgifford](https://huggingface.co/wmgifford), and [kashif](https://huggingface.co/kashif). - -The original code can be found [here](https://github.com/yuqinie98/PatchTST). - - -## PatchTSTConfig - -[[autodoc]] PatchTSTConfig - - -## PatchTSTModel - -[[autodoc]] PatchTSTModel - - forward - - -## PatchTSTForPrediction - -[[autodoc]] PatchTSTForPrediction - - forward - - -## PatchTSTForClassification - -[[autodoc]] PatchTSTForClassification - - forward - - -## PatchTSTForPretraining - -[[autodoc]] PatchTSTForPretraining - - forward - - -## PatchTSTForRegression - -[[autodoc]] PatchTSTForRegression - - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 9cbb988c5347..cf89602b6597 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -493,7 +493,6 @@ "OwlViTTextConfig", "OwlViTVisionConfig", ], - "models.patchtst": ["PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", "PatchTSTConfig"], "models.pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig", "PegasusTokenizer"], "models.pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], "models.perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverTokenizer"], @@ -1168,8 +1167,6 @@ "MODEL_FOR_TEXT_ENCODING_MAPPING", "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING", "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING", - "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING", - "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING", @@ -2488,17 +2485,6 @@ "OwlViTVisionModel", ] ) - _import_structure["models.patchtst"].extend( - [ - "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", - "PatchTSTForClassification", - "PatchTSTForPrediction", - "PatchTSTForPretraining", - "PatchTSTForRegression", - "PatchTSTModel", - "PatchTSTPreTrainedModel", - ] - ) _import_structure["models.pegasus"].extend( ["PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel", "PegasusPreTrainedModel"] ) @@ -4711,7 +4697,6 @@ OwlViTTextConfig, OwlViTVisionConfig, ) - from .models.patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig from .models.pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig, PegasusTokenizer from .models.pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig from .models.perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverTokenizer @@ -5318,8 +5303,6 @@ MODEL_FOR_TEXT_ENCODING_MAPPING, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, - MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, - MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, 
MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, @@ -6404,15 +6387,6 @@ OwlViTTextModel, OwlViTVisionModel, ) - from .models.patchtst import ( - PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, - PatchTSTForClassification, - PatchTSTForPrediction, - PatchTSTForPretraining, - PatchTSTForRegression, - PatchTSTModel, - PatchTSTPreTrainedModel, - ) from .models.pegasus import ( PegasusForCausalLM, PegasusForConditionalGeneration, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 968704c0bf86..6132512688e6 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -158,7 +158,6 @@ opt, owlv2, owlvit, - patchtst, pegasus, pegasus_x, perceiver, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 153f7f10def6..dc01c93406b7 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -77,8 +77,6 @@ "MODEL_WITH_LM_HEAD_MAPPING", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING", - "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING", - "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING", "AutoModel", "AutoBackbone", "AutoModelForAudioClassification", @@ -252,8 +250,6 @@ MODEL_FOR_TEXT_ENCODING_MAPPING, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, - MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, - MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 900f1da799d9..c1c2387373b8 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -164,7 +164,6 @@ ("opt", "OPTConfig"), ("owlv2", "Owlv2Config"), ("owlvit", "OwlViTConfig"), - ("patchtst", "PatchTSTConfig"), ("pegasus", "PegasusConfig"), ("pegasus_x", "PegasusXConfig"), ("perceiver", "PerceiverConfig"), @@ -377,7 +376,6 @@ ("opt", "OPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("owlv2", "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("owlvit", "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("patchtst", "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("pegasus", "PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("pegasus_x", "PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("perceiver", "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -609,7 +607,6 @@ ("opt", "OPT"), ("owlv2", "OWLv2"), ("owlvit", "OWL-ViT"), - ("patchtst", "PatchTST"), ("pegasus", "Pegasus"), ("pegasus_x", "PEGASUS-X"), ("perceiver", "Perceiver"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 437aed60143c..ffcae9a23494 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -157,7 +157,6 @@ ("opt", "OPTModel"), ("owlv2", "Owlv2Model"), ("owlvit", "OwlViTModel"), - ("patchtst", "PatchTSTModel"), ("pegasus", "PegasusModel"), ("pegasus_x", "PegasusXModel"), ("perceiver", "PerceiverModel"), @@ -1131,18 +1130,6 @@ ] ) -MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - ("patchtst", "PatchTSTForClassification"), - ] -) - -MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict( - [ - ("patchtst", "PatchTSTForRegression"), - ] -) - MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict( [ ("swin2sr", "Swin2SRForImageSuperResolution"), 
@@ -1234,14 +1221,6 @@ MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) -MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES -) - -MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES -) - MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES) diff --git a/src/transformers/models/autoformer/modeling_autoformer.py b/src/transformers/models/autoformer/modeling_autoformer.py index 8f26274b44bc..92e9df2c7e5b 100644 --- a/src/transformers/models/autoformer/modeling_autoformer.py +++ b/src/transformers/models/autoformer/modeling_autoformer.py @@ -208,70 +208,71 @@ def forward(self, features: torch.Tensor) -> torch.Tensor: ) -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeries->Autoformer class AutoformerStdScaler(nn.Module): """ - Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by - subtracting from the mean and dividing by the standard deviation. + Standardize features by calculating the mean and scaling along some given dimension `dim`, and then normalizes it + by subtracting from the mean and dividing by the standard deviation. + + Args: + dim (`int`): + Dimension along which to calculate the mean and standard deviation. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + minimum_scale (`float`, *optional*, defaults to 1e-5): + Default scale that is used for elements that are constantly zero along dimension `dim`. """ - def __init__(self, config: AutoformerConfig): + def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + if not dim > 0: + raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0") + self.dim = dim + self.keepdim = keepdim + self.minimum_scale = minimum_scale - def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. 
- Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ - denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) + @torch.no_grad() + def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + denominator = weights.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) - loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator + loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator - variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeries->Autoformer class AutoformerMeanScaler(nn.Module): """ - Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data + Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data accordingly. + + Args: + dim (`int`): + Dimension along which to compute the scale. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + default_scale (`float`, *optional*, defaults to `None`): + Default scale that is used for elements that are constantly zero. If `None`, we use the scale of the batch. + minimum_scale (`float`, *optional*, defaults to 1e-10): + Default minimum possible scale that is used for any item. """ - def __init__(self, config: AutoformerConfig): + def __init__( + self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10 + ): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - self.default_scale = config.default_scale if hasattr(config, "default_scale") else None + self.dim = dim + self.keepdim = keepdim + self.minimum_scale = minimum_scale + self.default_scale = default_scale + @torch.no_grad() def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. 
- Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ + # shape: (N, [C], T=1) ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) @@ -299,29 +300,26 @@ def forward( return scaled_data, torch.zeros_like(scale), scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeries->Autoformer class AutoformerNOPScaler(nn.Module): """ - Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. + Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data. + + Args: + dim (`int`): + Dimension along which to compute the scale. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. """ - def __init__(self, config: AutoformerConfig): + def __init__(self, dim: int, keepdim: bool = False): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.dim = dim + self.keepdim = keepdim def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor = None + self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @@ -1435,11 +1433,11 @@ def __init__(self, config: AutoformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: - self.scaler = AutoformerMeanScaler(config) + self.scaler = AutoformerMeanScaler(dim=1, keepdim=True) elif config.scaling == "std": - self.scaler = AutoformerStdScaler(config) + self.scaler = AutoformerStdScaler(dim=1, keepdim=True) else: - self.scaler = AutoformerNOPScaler(config) + self.scaler = AutoformerNOPScaler(dim=1, keepdim=True) if config.num_static_categorical_features > 0: self.embedder = AutoformerFeatureEmbedder( diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py index 205c8ba22f74..c0a5a2059502 100644 --- a/src/transformers/models/informer/modeling_informer.py +++ b/src/transformers/models/informer/modeling_informer.py @@ -81,70 +81,71 @@ def forward(self, features: torch.Tensor) -> torch.Tensor: ) -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeries->Informer class 
InformerStdScaler(nn.Module): """ - Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by - subtracting from the mean and dividing by the standard deviation. + Standardize features by calculating the mean and scaling along some given dimension `dim`, and then normalizes it + by subtracting from the mean and dividing by the standard deviation. + + Args: + dim (`int`): + Dimension along which to calculate the mean and standard deviation. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + minimum_scale (`float`, *optional*, defaults to 1e-5): + Default scale that is used for elements that are constantly zero along dimension `dim`. """ - def __init__(self, config: InformerConfig): + def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + if not dim > 0: + raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0") + self.dim = dim + self.keepdim = keepdim + self.minimum_scale = minimum_scale - def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ - denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) + @torch.no_grad() + def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + denominator = weights.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) - loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator + loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator - variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeries->Informer class InformerMeanScaler(nn.Module): """ - Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data + Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data accordingly. + + Args: + dim (`int`): + Dimension along which to compute the scale. 
+ keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + default_scale (`float`, *optional*, defaults to `None`): + Default scale that is used for elements that are constantly zero. If `None`, we use the scale of the batch. + minimum_scale (`float`, *optional*, defaults to 1e-10): + Default minimum possible scale that is used for any item. """ - def __init__(self, config: InformerConfig): + def __init__( + self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10 + ): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - self.default_scale = config.default_scale if hasattr(config, "default_scale") else None + self.dim = dim + self.keepdim = keepdim + self.minimum_scale = minimum_scale + self.default_scale = default_scale + @torch.no_grad() def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ + # shape: (N, [C], T=1) ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) @@ -172,29 +173,26 @@ def forward( return scaled_data, torch.zeros_like(scale), scale -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Informer,TimeSeries->Informer +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeries->Informer class InformerNOPScaler(nn.Module): """ - Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. + Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data. + + Args: + dim (`int`): + Dimension along which to compute the scale. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. 
""" - def __init__(self, config: InformerConfig): + def __init__(self, dim: int, keepdim: bool = False): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.dim = dim + self.keepdim = keepdim def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor = None + self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @@ -1448,11 +1446,11 @@ def __init__(self, config: InformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: - self.scaler = InformerMeanScaler(config) + self.scaler = InformerMeanScaler(dim=1, keepdim=True) elif config.scaling == "std": - self.scaler = InformerStdScaler(config) + self.scaler = InformerStdScaler(dim=1, keepdim=True) else: - self.scaler = InformerNOPScaler(config) + self.scaler = InformerNOPScaler(dim=1, keepdim=True) if config.num_static_categorical_features > 0: self.embedder = InformerFeatureEmbedder( diff --git a/src/transformers/models/patchtst/__init__.py b/src/transformers/models/patchtst/__init__.py deleted file mode 100644 index 8c7db64c1984..000000000000 --- a/src/transformers/models/patchtst/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import TYPE_CHECKING - -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_patchtst": [ - "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", - "PatchTSTConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_patchtst"] = [ - "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", - "PatchTSTModel", - "PatchTSTPreTrainedModel", - "PatchTSTForPrediction", - "PatchTSTForPretraining", - "PatchTSTForRegression", - "PatchTSTForClassification", - ] - - -if TYPE_CHECKING: - from .configuration_patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_patchtst import ( - PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, - PatchTSTForClassification, - PatchTSTForPrediction, - PatchTSTForPretraining, - PatchTSTForRegression, - PatchTSTModel, - PatchTSTPreTrainedModel, - ) - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/patchtst/configuration_patchtst.py b/src/transformers/models/patchtst/configuration_patchtst.py deleted file mode 100644 index 4ced00c36046..000000000000 --- a/src/transformers/models/patchtst/configuration_patchtst.py +++ /dev/null @@ -1,274 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""PatchTST model configuration""" - -from typing import List, Optional, Union - -from transformers.configuration_utils import PretrainedConfig -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - -PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "ibm/patchtst-base": "https://huggingface.co/ibm/patchtst-base/resolve/main/config.json", - # See all PatchTST models at https://huggingface.co/ibm/models?filter=patchtst -} - - -class PatchTSTConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of an [`PatchTSTModel`]. It is used to instantiate an - PatchTST model according to the specified arguments, defining the model architecture. - [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture. - - Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - Args: - num_input_channels (`int`, *optional*, defaults to 1): - The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of - multivariate targets. - context_length (`int`, *optional*, defaults to 32): - The context length for the encoder. 
- distribution_output (`str`, *optional*, defaults to `"student_t"`): - The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or - "negative_binomial". - loss (`str`, *optional*, defaults to `"mse"`): - The loss function for the model corresponding to the `distribution_output` head. For parametric - distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared - error "mse". - patch_length (`int`, *optional*, defaults to 1): - Define the patch length of the patchification process. - patch_stride (`int`, *optional*, defaults to 1): - define the stride of the patchification process. - encoder_layers (`int`, *optional*, defaults to 3): - Number of encoder layers. - d_model (`int`, *optional*, defaults to 64): - Dimensionality of the transformer layers. - encoder_attention_heads (`int`, *optional*, defaults to 4): - Number of attention heads for each attention layer in the Transformer encoder. - shared_embedding (`bool`, *optional*, defaults to `True`): - Sharing the input embedding across all channels. - channel_attention (`bool`, *optional*, defaults to `False`): - Activate channel attention block in the Transformer to allow channels to attend each other. - encoder_ffn_dim (`int`, *optional*, defaults to 256): - Dimension of the "intermediate" (often named feed-forward) layer in encoder. - norm (`str` , *optional*, defaults to `"BatchNorm"`): - Normalization at each Transformer layer. Can be `"BatchNorm"` or `"LayerNorm"`. - norm_eps (`float`, *optional*, defaults to 1e-05): - A value added to the denominator for numerical stability of normalization. - attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout probability for the attention probabilities. - dropout (`float`, *optional*, defaults to 0.0): - The dropout probability for all fully connected layers in the encoder, and decoder. - positional_dropout (`float`, *optional*, defaults to 0.0): - The dropout probability in the positional embedding layer. - dropout_path (`float`, *optional*, defaults to 0.0): - The dropout path in the residual block. - ff_dropout (`float`, *optional*, defaults to 0.0): - The dropout probability used between the two layers of the feed-forward networks. - bias (`bool`, *optional*, defaults to `True`): - Consider bias in the feed-forward networks. - activation_function (`str`, *optional*, defaults to `"gelu"`): - The non-linear activation function (string) in the encoder.`"gelu"` and `"relu"` are supported. - pre_norm (`bool`, *optional*, defaults to `True`): - Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is - applied after residual block. - positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): - Positional encodings. `"zeros"`, `"normal"`, `"uniform"' and `"sincos"` are supported. - learn_pe (`bool`, *optional*, defaults to `False`): - Whether the positional encoding is updated during training. - use_cls_token (`bool`, *optional*, defaults to `False`): - Whether cls token is used. - init_std (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated normal weight initialization distribution. - shared_projection (`bool`, *optional*, defaults to `True`): - Sharing the projection layer across different channels in the forecast head. - seed_number (`Optional`, *optional*): - Seed number used for random masking. If unset, no seed is set. 
-        scaling (`Union`, *optional*, defaults to `"mean"`):
-            Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
-            scaler is set to "mean".
-        mask_input (`bool`, *optional*, defaults to `False`):
-            Apply masking during the pretraining.
-        mask_type (`str`, *optional*, defaults to `"random"`):
-            Masking type. Only `"random"` and `"forecast"` are currently supported.
-        random_mask_ratio (`float`, *optional*, defaults to 0.5):
-            Masking ratio is applied to mask the input data during random pretraining.
-        forecast_mask_patches (`List`, *optional*, defaults to `[2, 3]`):
-            List of patch lengths to mask in the end of the data.
-        forecast_mask_ratios (`List`, *optional*, defaults to `[1, 1]`):
-            List of weights to use for each patch length. For example, if patch_lengths is [5,4] and mix_ratio is [1,1],
-            then equal weights to both patch lengths. Defaults to None.
-        channel_consistent_masking (`bool`, *optional*, defaults to `False`):
-            If channel consistent masking is True, all the channels will have the same masking.
-        unmasked_channel_indices (`list`, *optional*):
-            Channels that are not masked during pretraining.
-        mask_value (`int`, *optional*, defaults to 0):
-            Define the value of entries to be masked when pretraining.
-        pooling_type (`str`, *optional*, defaults to `"mean"`):
-            Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
-        head_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout probability for head.
-        prediction_length (`int`, *optional*, defaults to 24):
-            The prediction length for the encoder. In other words, the prediction horizon of the model.
-        num_targets (`int`, *optional*, defaults to 1):
-            Number of targets for regression and classification tasks. For classification, it is the number of
-            classes.
-        output_range (`list`, *optional*):
-            Output range for regression task. The range of output values can be set to enforce the model to produce
-            values within a range.
-        num_parallel_samples (`int`, *optional*, defaults to 100):
-            The number of samples generated in parallel for probabilistic prediction.
- - - ```python - >>> from transformers import PatchTSTConfig, PatchTSTModel - - >>> # Initializing an PatchTST configuration with 12 time steps for prediction - >>> configuration = PatchTSTConfig(prediction_length=12) - - >>> # Randomly initializing a model (with random weights) from the configuration - >>> model = PatchTSTModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "patchtst" - attribute_map = { - "hidden_size": "d_model", - "num_attention_heads": "encoder_attention_heads", - "num_hidden_layers": "encoder_layers", - } - - def __init__( - self, - # time series specific configuration - num_input_channels: int = 1, - context_length: int = 32, - distribution_output: str = "student_t", - loss: str = "mse", - # PatchTST arguments - patch_length: int = 1, - patch_stride: int = 1, - # Transformer architecture configuration - encoder_layers: int = 3, - d_model: int = 64, - encoder_attention_heads: int = 4, - shared_embedding: bool = True, - channel_attention: bool = False, - encoder_ffn_dim: int = 256, - norm: str = "BatchNorm", - norm_eps: float = 1e-5, - attention_dropout: float = 0.0, - dropout: float = 0.0, - positional_dropout: float = 0.0, - dropout_path: float = 0.0, - ff_dropout: float = 0.0, - bias: bool = True, - activation_function: str = "gelu", - pre_norm: bool = True, - positional_encoding_type: str = "sincos", - learn_pe: bool = False, - use_cls_token: bool = False, - init_std: float = 0.02, - shared_projection: bool = True, - seed_number: Optional[int] = None, - scaling: Optional[Union[str, bool]] = "mean", - # mask pretraining - mask_input: Optional[bool] = None, - mask_type: str = "random", - random_mask_ratio: float = 0.5, - forecast_mask_patches: List[int] = [2, 3], - forecast_mask_ratios: List[int] = [1, 1], - channel_consistent_masking: bool = False, - unmasked_channel_indices: Optional[List[int]] = None, - mask_value=0, - # head - pooling_type: str = "mean", - head_dropout: float = 0.0, - prediction_length: int = 24, - num_targets: int = 1, - output_range: List = None, - # distribution head - num_parallel_samples: int = 100, - **kwargs, - ): - # time series specific configuration - self.context_length = context_length - self.num_input_channels = num_input_channels # n_vars - self.loss = loss - self.distribution_output = distribution_output - self.num_parallel_samples = num_parallel_samples - - # Transformer architecture configuration - self.d_model = d_model - self.encoder_attention_heads = encoder_attention_heads - self.encoder_ffn_dim = encoder_ffn_dim - self.encoder_layers = encoder_layers - self.dropout = dropout - self.attention_dropout = attention_dropout - self.shared_embedding = shared_embedding - self.channel_attention = channel_attention - self.norm = norm - self.norm_eps = norm_eps - self.positional_dropout = positional_dropout - self.dropout_path = dropout_path - self.ff_dropout = ff_dropout - self.bias = bias - self.activation_function = activation_function - self.pre_norm = pre_norm - self.positional_encoding_type = positional_encoding_type - self.learn_pe = learn_pe - self.use_cls_token = use_cls_token - self.init_std = init_std - self.scaling = scaling - - # PatchTST parameters - self.patch_length = patch_length - self.patch_stride = patch_stride - self.num_patches = self._num_patches() - - # Mask pretraining - self.seed_number = seed_number - self.mask_input = mask_input - self.mask_type = mask_type - self.random_mask_ratio = random_mask_ratio # for random masking - 
self.forecast_mask_patches = forecast_mask_patches # for forecast masking - self.forecast_mask_ratios = forecast_mask_ratios - self.channel_consistent_masking = channel_consistent_masking - self.unmasked_channel_indices = unmasked_channel_indices - self.mask_value = mask_value - - # general head params - self.pooling_type = pooling_type - self.head_dropout = head_dropout - - # For prediction head - self.shared_projection = shared_projection - self.prediction_length = prediction_length - - # For prediction and regression head - self.num_parallel_samples = num_parallel_samples - - # Regression - self.num_targets = num_targets - self.output_range = output_range - - super().__init__(**kwargs) - - def _num_patches(self): - return (max(self.context_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 diff --git a/src/transformers/models/patchtst/modeling_patchtst.py b/src/transformers/models/patchtst/modeling_patchtst.py deleted file mode 100755 index 30522a048f02..000000000000 --- a/src/transformers/models/patchtst/modeling_patchtst.py +++ /dev/null @@ -1,1913 +0,0 @@ -# coding=utf-8 -# Copyright 2023 IBM & Hugging Face. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch PatchTST model.""" - -import math -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import torch -from torch import nn - -from ...activations import ACT2CLS -from ...modeling_outputs import BaseModelOutput -from ...modeling_utils import PreTrainedModel -from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput -from ...trainer_utils import set_seed -from ...utils import ModelOutput, add_start_docstrings, logging -from .configuration_patchtst import PatchTSTConfig - - -logger = logging.get_logger(__name__) - -_CONFIG_FOR_DOC = "PatchTSTConfig" - -PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "ibm/patchtst-etth1-pretrain", - # See all PatchTST models at https://huggingface.co/models?filter=patchtst -] - - -# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PatchTST -class PatchTSTAttention(nn.Module): - """Multi-headed attention from 'Attention Is All You Need' paper""" - - def __init__( - self, - embed_dim: int, - num_heads: int, - dropout: float = 0.0, - is_decoder: bool = False, - bias: bool = True, - is_causal: bool = False, - config: Optional[PatchTSTConfig] = None, - ): - super().__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = dropout - self.head_dim = embed_dim // num_heads - self.config = config - - if (self.head_dim * num_heads) != self.embed_dim: - raise ValueError( - f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" - f" and `num_heads`: {num_heads})." 
- ) - self.scaling = self.head_dim**-0.5 - self.is_decoder = is_decoder - self.is_causal = is_causal - - self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def forward( - self, - hidden_states: torch.Tensor, - key_value_states: Optional[torch.Tensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - attention_mask: Optional[torch.Tensor] = None, - layer_head_mask: Optional[torch.Tensor] = None, - output_attentions: bool = False, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - """Input shape: Batch x Time x Channel""" - - # if key_value_states are provided this layer is used as a cross-attention layer - # for the decoder - is_cross_attention = key_value_states is not None - - bsz, tgt_len, _ = hidden_states.size() - - # get query proj - query_states = self.q_proj(hidden_states) * self.scaling - # get key, value proj - # `past_key_value[0].shape[2] == key_value_states.shape[1]` - # is checking that the `sequence_length` of the `past_key_value` is the same as - # the provided `key_value_states` to support prefix tuning - if ( - is_cross_attention - and past_key_value is not None - and past_key_value[0].shape[2] == key_value_states.shape[1] - ): - # reuse k,v, cross_attentions - key_states = past_key_value[0] - value_states = past_key_value[1] - elif is_cross_attention: - # cross_attentions - key_states = self._shape(self.k_proj(key_value_states), -1, bsz) - value_states = self._shape(self.v_proj(key_value_states), -1, bsz) - elif past_key_value is not None: - # reuse k, v, self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - key_states = torch.cat([past_key_value[0], key_states], dim=2) - value_states = torch.cat([past_key_value[1], value_states], dim=2) - else: - # self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - - if self.is_decoder: - # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. - # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of - # all previous decoder key/value_states. 
Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - past_key_value = (key_states, value_states) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.reshape(*proj_shape) - value_states = value_states.reshape(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {attn_weights.size()}" - ) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, tgt_len, src_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" - ) - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - attn_weights = nn.functional.softmax(attn_weights, dim=-1) - - if layer_head_mask is not None: - if layer_head_mask.size() != (self.num_heads,): - raise ValueError( - f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" - f" {layer_head_mask.size()}" - ) - attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - if output_attentions: - # this operation is a bit awkward, but it's required to - # make sure that attn_weights keeps its gradient. - # In order to do so, attn_weights have to be reshaped - # twice and have to be reused in the following - attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) - else: - attn_weights_reshaped = None - - attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) - - attn_output = torch.bmm(attn_probs, value_states) - - if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output = attn_output.transpose(1, 2) - - # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be - # partitioned across GPUs when using tensor-parallelism. 
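The attention module above flattens batch and heads into one leading dimension of shape `(batch_size * num_heads, seq_len, head_dim)` and uses `torch.bmm` for both the score and context products. A standalone shape sketch of that flow (plain PyTorch, not the module itself, with made-up sizes):

```python
import torch

bsz, num_heads, seq_len, head_dim = 2, 4, 16, 64
scaling = head_dim ** -0.5

q = torch.randn(bsz, num_heads, seq_len, head_dim) * scaling
k = torch.randn(bsz, num_heads, seq_len, head_dim)
v = torch.randn(bsz, num_heads, seq_len, head_dim)

# Collapse batch and heads so bmm treats each head as an independent matrix product.
q, k, v = (t.reshape(bsz * num_heads, seq_len, head_dim) for t in (q, k, v))

attn_weights = torch.softmax(torch.bmm(q, k.transpose(1, 2)), dim=-1)  # [bs*heads x tgt x src]
attn_output = torch.bmm(attn_weights, v)                               # [bs*heads x tgt x head_dim]

# Undo the flattening and merge heads back into the embedding dimension.
attn_output = attn_output.view(bsz, num_heads, seq_len, head_dim).transpose(1, 2)
attn_output = attn_output.reshape(bsz, seq_len, num_heads * head_dim)
print(attn_output.shape)  # torch.Size([2, 16, 256])
```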
- attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) - - attn_output = self.out_proj(attn_output) - - return attn_output, attn_weights_reshaped, past_key_value - - -class PatchTSTBatchNorm(nn.Module): - """ - Parameters: - Compute batch normalization - d_model (`int`): model dimension - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps) - - def forward(self, inputs: torch.Tensor): - """ - Parameters: - inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`): - input for Batch norm calculation - Returns: - `torch.Tensor` of shape `(batch_size, sequence_length, d_model)` - """ - output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length) - output = self.batchnorm(output) - return output.transpose(1, 2) - - -def positional_encoding(positional_encoding_type, learned, q_len, d_model): - # Positional encoding - if positional_encoding_type is None: - # positional_encoding_type = None and learned = False can be used to measure impact of positional encoding - position_enc = torch.empty((q_len, d_model)) - nn.init.uniform_(position_enc, -0.02, 0.02) - learned = False - elif positional_encoding_type == "zeros": - position_enc = torch.empty((q_len, d_model)) - nn.init.uniform_(position_enc, -0.02, 0.02) - elif positional_encoding_type == "normal": - position_enc = torch.zeros((q_len, 1)) - nn.init.normal_(position_enc, mean=0.0, std=0.1) - elif positional_encoding_type == "uniform": - position_enc = torch.zeros((q_len, 1)) - nn.init.uniform_(position_enc, a=0.0, b=0.1) - elif positional_encoding_type == "sincos": - position_enc = torch.zeros(q_len, d_model) - position = torch.arange(0, q_len).unsqueeze(1) - div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)) - position_enc[:, 0::2] = torch.sin(position * div_term) - position_enc[:, 1::2] = torch.cos(position * div_term) - position_enc = position_enc - position_enc.mean() - position_enc = position_enc / (position_enc.std() * 10) - else: - raise ValueError( - f"{positional_encoding_type} is not a valid positional encoder. Available types are 'normal', 'zeros', 'zero', uniform', 'sincos', None." - ) - return nn.Parameter(position_enc, requires_grad=learned) - - -def random_masking( - inputs: torch.Tensor, - mask_ratio: float, - unmasked_channel_indices: list = None, - channel_consistent_masking: bool = False, - mask_value: int = 0, - seed_number: Optional[int] = None, -): - """random_masking: Mask the input considering the control variables. - - Args: - inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`): - The input tensor to mask. - mask_ratio (`float`): - Mask ratio. - unmasked_channel_indices (list, *optional*): - indices of unmasked channels. These channels will not be masked. - channel_consistent_masking (bool, *optional* defaults to False): - When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary - across channels. - mask_value (int, *optional*, defaults to 0): - Value to use for masking. - seed_number (int, *optional*): - Value to set for the random seed. 
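The `"sincos"` branch of the positional-encoding factory above builds the classic sinusoidal table and then centers and rescales it. A minimal sketch of just that branch, keeping the `q_len`/`d_model` naming and assuming an even `d_model`:

```python
import math
import torch

def sincos_position_encoding(q_len: int, d_model: int) -> torch.Tensor:
    # Classic transformer sin/cos table, as in the "sincos" branch above.
    position_enc = torch.zeros(q_len, d_model)
    position = torch.arange(0, q_len).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
    position_enc[:, 0::2] = torch.sin(position * div_term)
    position_enc[:, 1::2] = torch.cos(position * div_term)
    # PatchTST additionally centers the table and shrinks its scale.
    position_enc = position_enc - position_enc.mean()
    return position_enc / (position_enc.std() * 10)

print(sincos_position_encoding(q_len=32, d_model=64).shape)  # torch.Size([32, 64])
```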
- - Returns: - `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x - n] - """ - if seed_number: - set_seed(seed_number) - - batch_size, num_channels, sequence_length, num_features = inputs.shape - device = inputs.device - - len_keep = int(sequence_length * (1 - mask_ratio)) - - if channel_consistent_masking: - noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L - noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time - else: - # noise in [0, 1], bs x num_channels x L - noise = torch.rand(batch_size, num_channels, sequence_length, device=device) - - # mask: [bs x num_channels x num_patch] - mask = torch.ones(batch_size, num_channels, sequence_length, device=device) - mask[:, :, :len_keep] = 0 - - # sort noise for each sample - ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove - ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L] - - mask = torch.gather(mask, dim=-1, index=ids_restore) - mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length] - if unmasked_channel_indices is not None: - mask[:, unmasked_channel_indices, :, :] = 0 - - inputs_mask = inputs.masked_fill(mask.bool(), mask_value) - return inputs_mask, mask[..., 0] - - -def forecast_masking( - inputs: torch.Tensor, - forecast_mask_patches: list, - forecast_mask_ratios: list = None, - unmasked_channel_indices: list = None, - mask_value: int = 0, - seed_number: Optional[int] = None, -): - """Forecast masking that masks the last K patches where K is from the forecast_mask_patches list. - For every batch, distribute the patch lengths based on forecast_mask_ratios and ignore masks for column indices - mentioned in unmasked_channel_indices. - - Parameters: - inputs (`torch.Tensor`): - Input of shape `(bs, num_channels, num_patch, patch_len)` or `(bs, tsg1, tag2, num_channels, num_patch, - patch_len)` - forecast_mask_patches (`list`): - List of patch lengths to mask at the end of the data e.g. [2, 4]. - forecast_mask_ratios (`list`, *optional*): - List of weights to use for each patch length. For example if forecast_mask_patches is [5,4] and - forecast_mask_ratios is [1,1], then equal weights to both patch lengths. - unmasked_channel_indices (`list`, *optional*): - Control Variable channel indices. These channels will not be masked. - mask_value (`int`, *optional*, defaults to 0): - Value to use for masking. - seed_number (`int`, *optional*): - Value to set for the random seed. 
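The argsort trick in `random_masking` above keeps the first `len_keep` entries of a fixed keep/mask pattern and scatters them with a per-sample random permutation. A compact sketch of the same idea on a `(batch_size, num_channels, num_patches)` grid, assuming patch-level masking only (the feature dimension and the unmasked-channel handling are left out):

```python
import torch

def random_patch_mask(batch_size, num_channels, num_patches, mask_ratio):
    # 1 = masked, 0 = kept, following the convention of random_masking above.
    len_keep = int(num_patches * (1 - mask_ratio))
    noise = torch.rand(batch_size, num_channels, num_patches)
    mask = torch.ones(batch_size, num_channels, num_patches)
    mask[:, :, :len_keep] = 0
    # Shuffle the fixed keep/mask pattern with the permutation that ranks the noise.
    ids_shuffle = torch.argsort(noise, dim=-1)
    ids_restore = torch.argsort(ids_shuffle, dim=-1)
    return torch.gather(mask, dim=-1, index=ids_restore)

mask = random_patch_mask(batch_size=2, num_channels=3, num_patches=10, mask_ratio=0.4)
print(mask[0, 0])          # e.g. tensor([0., 1., 0., 0., 1., 0., 1., 0., 0., 1.])
print(mask.mean().item())  # exactly 0.4 of the patches are masked in every row
```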
- - Returns: - `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs, - num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)` - """ - if seed_number: - set_seed(seed_number) - - if forecast_mask_ratios is None: - forecast_mask_ratios = [1 for _ in forecast_mask_patches] - - batch_size, num_channels, sequence_length, num_features = inputs.shape - mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device) - - t_list = [] - total_length = 0 - total_ratio = sum(forecast_mask_ratios) - - for patch_length, ratio in zip(forecast_mask_patches, forecast_mask_ratios): - if patch_length <= 0 or patch_length >= sequence_length: - raise Exception("masked_patch_len should be greater than 0 and less than total patches.") - temp_len = int(batch_size * ratio / total_ratio) - t_list.append([patch_length, ratio, temp_len]) - total_length += temp_len - - t_list = sorted(t_list, key=lambda x: x[2]) - - if total_length < batch_size: - t_list[0][2] = t_list[0][2] + (batch_size - total_length) - elif total_length > batch_size: - t_list[-1][2] = t_list[-1][2] + (total_length - batch_size) - - batch1 = 0 - for patch_len, _, temp_len in t_list: - batch2 = batch1 + temp_len - mask[batch1:batch2, :, -patch_len:] = 1 - batch1 = batch2 - - perm = torch.randperm(mask.shape[0]) - mask = mask[perm] - - mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len] - if unmasked_channel_indices is not None: - mask[:, unmasked_channel_indices, :, :] = 0 - - inputs_mask = inputs.masked_fill(mask.bool(), mask_value) - return inputs_mask, mask[..., 0] - - -class PatchTSTPatchify(nn.Module): - """ - A class to patchify the time series sequence into different patches - - Returns: - `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - - self.sequence_length = config.context_length - self.patch_length = config.patch_length - self.patch_stride = config.patch_stride - - if self.sequence_length <= self.patch_length: - raise ValueError( - f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" - ) - - # get the number of patches - num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 - new_sequence_length = self.patch_length + self.patch_stride * (num_patches - 1) - self.sequence_start = self.sequence_length - new_sequence_length - - def forward(self, past_values: torch.Tensor): - """ - Parameters: - past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): - Input to be patchified - - Returns: - `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` - """ - sequence_length = past_values.shape[-2] - if sequence_length != self.sequence_length: - raise ValueError( - f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." 
- ) - # output: [bs x new_sequence_length x num_channels] - output = past_values[:, self.sequence_start :, :] - # output: [bs x num_patches x num_input_channels x patch_length] - output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) - # output: [bs x num_input_channels x num_patches x patch_length] - output = output.transpose(-2, -3).contiguous() - return output - - -class PatchTSTMasking(nn.Module): - """ - Class to perform random or forecast masking. - - Parameters: - config (`PatchTSTConfig`): model config - - Returns: - x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) - Masked patched input - mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) - Bool tensor indicating True on masked points - - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.random_mask_ratio = config.random_mask_ratio - self.channel_consistent_masking = config.channel_consistent_masking - self.mask_type = config.mask_type - self.forecast_mask_patches = config.forecast_mask_patches - self.forecast_mask_ratios = config.forecast_mask_ratios - self.unmasked_channel_indices = config.unmasked_channel_indices - self.mask_value = config.mask_value - if self.unmasked_channel_indices is not None: - self.unmasked_channel_indices.sort() - self.seed_number = config.seed_number - - def forward(self, patch_input: torch.Tensor): - """ - Parameters: - patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): - Patch input - - Return: - masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) - Masked patched input - mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) - Bool tensor indicating True on masked points - - """ - - if self.mask_type == "random": - masked_input, mask = random_masking( - inputs=patch_input, - mask_ratio=self.random_mask_ratio, - unmasked_channel_indices=self.unmasked_channel_indices, - channel_consistent_masking=self.channel_consistent_masking, - mask_value=self.mask_value, - seed_number=self.seed_number, - ) - elif self.mask_type == "forecast": - masked_input, mask = forecast_masking( - inputs=patch_input, - forecast_mask_patches=self.forecast_mask_patches, - forecast_mask_ratios=self.forecast_mask_ratios, - unmasked_channel_indices=self.unmasked_channel_indices, - mask_value=self.mask_value, - seed_number=self.seed_number, - ) - else: - raise Exception("Invalid mask type") - - mask = mask.bool() # mask: [bs x num_input_channels x num_patch] - - return masked_input, mask - - -class PatchTSTEncoderLayer(nn.Module): - """ - PatchTST encoder layer - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - - self.channel_attention = config.channel_attention - - # Multi-Head attention - self.self_attn = PatchTSTAttention( - embed_dim=config.d_model, - num_heads=config.encoder_attention_heads, - dropout=config.attention_dropout, - ) - - # Add & Norm of the sublayer 1 - self.dropout_path1 = nn.Dropout(config.dropout_path) if config.dropout_path > 0 else nn.Identity() - if "batch" in config.norm.lower(): - self.norm_sublayer1 = PatchTSTBatchNorm(config) - else: - self.norm_sublayer1 = nn.LayerNorm(config.d_model, eps=config.norm_eps) - - # Add & Norm of the sublayer 2 - if self.channel_attention: - self.dropout_path2 = nn.Dropout(config.dropout_path) if config.dropout_path > 0 else nn.Identity() - if "batch" in config.norm.lower(): - self.norm_sublayer2 = 
PatchTSTBatchNorm(config) - else: - self.norm_sublayer2 = nn.LayerNorm(config.d_model, eps=config.norm_eps) - - # Position-wise Feed-Forward - self.ff = nn.Sequential( - nn.Linear(config.d_model, config.encoder_ffn_dim, bias=config.bias), - ACT2CLS[config.activation_function](), - nn.Dropout(config.ff_dropout) if config.ff_dropout > 0 else nn.Identity(), - nn.Linear(config.encoder_ffn_dim, config.d_model, bias=config.bias), - ) - - # Add & Norm of sublayer 3 - self.dropout_path3 = nn.Dropout(config.dropout_path) if config.dropout_path > 0 else nn.Identity() - if "batch" in config.norm.lower(): - self.norm_sublayer3 = PatchTSTBatchNorm(config) - else: - self.norm_sublayer3 = nn.LayerNorm(config.d_model, eps=config.norm_eps) - - self.pre_norm = config.pre_norm - - def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool] = None): - """ - Parameters: - hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*): - Past values of the time series - Return: - `torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)` - - """ - batch_size, num_input_channels, sequence_length, d_model = hidden_state.shape - - # First sublayer: attention across time - # hidden_states: [(bs*num_channels) x sequence_length x d_model] - hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) - - if self.pre_norm: - ## Norm and Multi-Head attention and Add residual connection - attn_output, attn_weights, _ = self.self_attn( - hidden_states=self.norm_sublayer1(hidden_state), output_attentions=output_attentions - ) - # Add: residual connection with residual dropout - hidden_state = hidden_state + self.dropout_path1(attn_output) - else: - ## Multi-Head attention and Add residual connection and Norm - Standard Transformer from BERT - attn_output, attn_weights, _ = self.self_attn( - hidden_states=hidden_state, output_attentions=output_attentions - ) - # hidden_states: [(bs*num_channels) x sequence_length x d_model] - hidden_state = self.norm_sublayer1(hidden_state + self.dropout_path1(attn_output)) - - # hidden_state: [bs x num_channels x sequence_length x d_model] - hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) - - # second sublayer: attention across variable at any given time - if self.channel_attention: - # hidden_state: [bs x sequence_length x num_channels x d_model] - hidden_state = hidden_state.transpose(2, 1).contiguous() - # hidden_state: [(bs*sequence_length) x num_channels x d_model] - hidden_state = hidden_state.view(batch_size * sequence_length, num_input_channels, d_model) - if self.pre_norm: - ## Norm and Multi-Head attention and Add residual connection - attn_output, channel_attn_weights, _ = self.self_attn( - hidden_states=self.norm_sublayer2(hidden_state), output_attentions=output_attentions - ) - # Add: residual connection with residual dropout - hidden_state = hidden_state + self.dropout_path2(attn_output) - else: - ## Multi-Head attention and Add residual connection and Norm - attn_output, channel_attn_weights, _ = self.self_attn( - hidden_states=hidden_state, output_attentions=output_attentions - ) - # hidden_states: [(bs*sequence_length) x num_channels x d_model] - hidden_state = self.norm_sublayer2(hidden_state + self.dropout_path2(attn_output)) - - # Reshape hidden state - # hidden_state: [bs x sequence_length x num_channels x d_model] - hidden_state = hidden_state.reshape(batch_size, sequence_length, num_input_channels, d_model) - 
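The encoder layer above runs the same attention module twice: first across patches within each channel, then (when `channel_attention` is enabled) across channels at each patch position. The only difference is which pair of dimensions gets folded into the batch. A shape-only sketch of that folding, with made-up sizes:

```python
import torch

bs, num_channels, num_patches, d_model = 2, 3, 16, 64
hidden = torch.randn(bs, num_channels, num_patches, d_model)

# Attention over time: every (sample, channel) pair becomes one sequence of patches.
over_time = hidden.view(bs * num_channels, num_patches, d_model)
print(over_time.shape)      # torch.Size([6, 16, 64])

# Attention over channels: every (sample, patch position) pair becomes one sequence of channels.
over_channels = hidden.transpose(2, 1).contiguous().view(bs * num_patches, num_channels, d_model)
print(over_channels.shape)  # torch.Size([32, 3, 64])
```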
# hidden_state: [bs x num_channels x sequence_length x d_model] - hidden_state = hidden_state.transpose(1, 2).contiguous() - - # Third sublayer: mixing across hidden - # hidden_state: [(batch_size*num_channels) x sequence_length x d_model] - hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) - if self.pre_norm: - ## Norm and Position-wise Feed-Forward and Add residual connection - # Add: residual connection with residual dropout - hidden_state = hidden_state + self.dropout_path3(self.ff(self.norm_sublayer3(hidden_state))) - else: - ## Position-wise Feed-Forward and Add residual connection and Norm - # Add: residual connection with residual dropout - hidden_state = self.norm_sublayer3(hidden_state + self.dropout_path3(self.ff(hidden_state))) - - # [bs x num_channels x sequence_length x d_model] - hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) - - outputs = (hidden_state,) - if output_attentions: - outputs += (attn_weights, channel_attn_weights) if self.channel_attention else (attn_weights,) - - return outputs - - -class PatchTSTPreTrainedModel(PreTrainedModel): - config_class = PatchTSTConfig - base_model_prefix = "model" - main_input_name = "past_values" - supports_gradient_checkpointing = False - - def _init_weights(self, module): - """Initialize weights""" - if self.config.use_cls_token: - nn.init.normal_(self.config.cls_token, std=0.02) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - elif isinstance(module, (nn.Linear, nn.Conv1d)): - module.weight.data.normal_(mean=0.0, std=self.config.init_std) - if module.bias is not None: - module.bias.data.zero_() - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (PatchTSTEncoder)): - module.gradient_checkpointing = value - - -class PatchTSTEmbedding(nn.Module): - def __init__(self, config: PatchTSTConfig): - super().__init__() - # Input encoding: projection of feature vectors onto a d-dim vector space - if not config.shared_embedding: - self.input_embedding = nn.ModuleList() - for _ in range(config.num_input_channels): - self.input_embedding.append(nn.Linear(config.patch_length, config.d_model)) - else: - self.input_embedding = nn.Linear(config.patch_length, config.d_model) - - def forward(self, patch_input: torch.Tensor): - """ - Parameters: - patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): - Patch input for embedding - return: - `torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)` - """ - # Input encoding - num_input_channels = patch_input.shape[1] - if isinstance(self.input_embedding, nn.ModuleList): - embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)] - embeddings = torch.stack(embeddings, dim=1) - else: - embeddings = self.input_embedding(patch_input) # x: [bs x num_channels x num_patches x d_model] - return embeddings - - -class PatchTSTPositionalEncoding(nn.Module): - """ - Class for positional encoding - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.use_cls_token = config.use_cls_token - if config.use_cls_token: - self.cls_token = nn.Parameter(torch.zeros(1, 1, 1, config.d_model)) - num_patches = config.num_patches + 1 - else: - num_patches = config.num_patches - # postional encoding - self.position_enc = positional_encoding( - config.positional_encoding_type, config.learn_pe, num_patches, config.d_model 
- ) - # Positional dropout - self.positional_dropout = ( - nn.Dropout(config.positional_dropout) if config.positional_dropout > 0 else nn.Identity() - ) - - def forward(self, patch_input: torch.Tensor): - if self.use_cls_token: - # patch_input: [bs x num_channels x num_patches x d_model] - patch_input = self.positional_dropout(patch_input + self.position_enc[1:, :]) - # append cls token where cls_token: [1 x 1 x 1 x d_model] - cls_token = self.cls_token + self.position_enc[:1, :] - # get the same copy of cls_token for all the samples in batch - cls_tokens = cls_token.expand(patch_input.shape[0], -1, -1) - # hidden_state: [bs x num_channels x (num_patches+1) x d_model] - hidden_state = torch.cat((cls_tokens, patch_input), dim=1) - else: - # hidden_state: [bs x num_channels x num_patches x d_model] - hidden_state = self.positional_dropout(patch_input + self.position_enc) - return hidden_state - - -class PatchTSTEncoder(PatchTSTPreTrainedModel): - """ - PatchTST Encoder - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__(config) - self.num_input_channels = config.num_input_channels - self.num_patches = config.num_patches - self.patch_length = config.patch_length - self.d_model = config.d_model - self.shared_embedding = config.shared_embedding - self.use_cls_token = config.use_cls_token - self.gradient_checkpointing = False - - # Input embedding: projection of feature vectors onto a d-dim vector space - self.embedder = PatchTSTEmbedding(config) - # Positional encoding - self.positional_encoder = PatchTSTPositionalEncoding(config) - # Encoder - self.layers = nn.ModuleList([PatchTSTEncoderLayer(config) for i in range(config.encoder_layers)]) - - # Initialize weights and apply final processing - self.post_init() - - def forward( - self, - patch_input: torch.Tensor, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - ) -> BaseModelOutput: - """ - Parameters: - patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): - Past values of the time series - output_hidden_states (bool, optional): Indicates if hidden states should be outputted. - output_attentions (bool, optional): Indicates if attentions should be outputted. - - return: - `BaseModelOutput` - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - # Input embedding - patch_input = self.embedder(patch_input) - # Positional encoding - hidden_state = self.positional_encoder(patch_input) - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - for encoder_layer in self.layers: - if output_hidden_states: - encoder_states = encoder_states + (hidden_state,) - - layer_outputs = encoder_layer(hidden_state=hidden_state, output_attentions=output_attentions) - # get hidden state. hidden_state shape is [bs x num_channels x num_patches x d_model] - # or [bs x num_channels x (num_patches+1) x d_model] if use cls_token - hidden_state = layer_outputs[0] - # append attention matrix at each layer - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - # return past_values, hidden_states - return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=encoder_states, attentions=all_attentions) - - -PATCHTST_START_DOCSTRING = r""" - This model inherits from [`PreTrainedModel`]. 
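When `use_cls_token` is set, the positional table above has one extra row and a learnable token is prepended to every channel's patch sequence, so downstream shapes become `(bs, num_channels, num_patches + 1, d_model)`. A rough sketch of that prepend step (plain tensors standing in for the module's `nn.Parameter`s; this illustrates the idea rather than copying the module verbatim):

```python
import torch

bs, num_channels, num_patches, d_model = 2, 3, 16, 64
patch_embeddings = torch.randn(bs, num_channels, num_patches, d_model)
position_enc = torch.randn(num_patches + 1, d_model)  # one extra row for the cls token
cls_token = torch.zeros(1, 1, 1, d_model)             # learnable parameter in the model

# Add positions to the patches (rows 1:) and to the cls token (row 0), then prepend it.
hidden = patch_embeddings + position_enc[1:, :]
cls_tokens = (cls_token + position_enc[:1, :]).expand(bs, num_channels, -1, -1)
hidden = torch.cat((cls_tokens, hidden), dim=2)
print(hidden.shape)  # torch.Size([2, 3, 17, 64])
```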
Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. - - Parameters: - config ([`PatchTSTConfig`]): - Model configuration class with all the parameters of the model. Initializing with a config file does not - load the weights associated with the model, only the configuration. Check out the - [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -PATCHTST_INPUTS_DOCSTRING = r""" - Parameters: - past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, num_input_channels)`): - Past values of the time series, that serve as context in order to predict the future. The sequence size of - this tensor must be larger than the `context_length` of the model, since the model will use the larger size - to construct lag features, i.e. additional values from the past which are added in order to serve as "extra - context". - - The `sequence_length` here is equal to `config.context_length` - - The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as - `static_categorical_features`, `static_real_features`). - - For multivariate time series, the `num_input_channels` > 1 dimension is required and corresponds to the - number of variates in the time series per time step. - - future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, num_input_channels)`, *optional*): - Future values of the time series, that serve as labels for the model. The `future_values` is what the - Transformer needs during training to learn to output, given the `past_values`. - - The sequence length here is equal to `prediction_length`. - - See the demo notebook and code snippets for details. - - For multivariate time series, the `num_input_channels` > 1 dimension is required and corresponds to the - number of variates in the time series per time step. - - output_hidden_states (`bool`, *optional*, default to False): - Whether or not to return the hidden states of all layers. -""" - - -@dataclass -class PatchTSTModelOutput(ModelOutput): - """ - Base class for model's outputs, with potential hidden states. - - Parameters: - last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): - Sequence of hidden-states at the output of the last layer of the model. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + - one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of - the model at the output of each layer plus the optional initial embedding outputs. 
- patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): - patched input to the Transformer - mask: (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`,*optional*) - Bool masked tensor indicating which patches are masked - loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*) - mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length - scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*) - std of the input data (batch_size, sequence_length, num_channels) over the sequence_length - """ - - last_hidden_state: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - patch_input: torch.FloatTensor = None - mask: torch.FloatTensor = None - loc: torch.FloatTensor = None - scale: torch.FloatTensor = None - - -@dataclass -class PatchTSTForPretrainingOutput(ModelOutput): - """ - Output type of [`PatchTSTForPretraining`]. - - Parameters: - loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): - MSE loss. - prediction_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction outputs of the time series modeling heads. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - loss: Optional[torch.FloatTensor] = None - prediction_output: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class PatchTSTForRegressionOutput(ModelOutput): - """ - Output type of [`PatchTSTForRegression`]. - - Parameters: - loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): - MSE loss. - forecast_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction outputs of the time series modeling heads. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. 
- """ - - loss: Optional[torch.FloatTensor] = None - forecast_outputs: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class PatchTSTForPredictionOutput(ModelOutput): - """ - Output type of [`PatchTSTForPrediction`]. - - Parameters: - loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): - MSE loss. - prediction_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, -1)`): - Prediction outputs of the time series modeling heads. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - loss: Optional[torch.FloatTensor] = None - prediction_outputs: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - loc: torch.FloatTensor = None - scale: torch.FloatTensor = None - - -@dataclass -class PatchTSTForClassificationOutput(ModelOutput): - """ - Output type of [`PatchTSTForClassification`]. - - Parameters: - loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): - Total loss as the sum of the masked language modeling loss and the next sequence prediction - (classification) loss. - prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - loss: Optional[torch.FloatTensor] = None - prediction_logits: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class SamplePatchTSTPredictionOutput(ModelOutput): - """ - Base class for time series model's predictions outputs that contains the sampled values from the chosen - distribution. 
- - Parameters: - sequences `(batch_size, num_samples, prediction_length, num_targets)`): - Sampled values from the chosen distribution. - """ - - sequences: torch.FloatTensor = None - - -@dataclass -class SamplePatchTSTRegressionOutput(ModelOutput): - """ - Base class for time series model's predictions outputs that contains the sampled values from the chosen - distribution. - - Parameters: - sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, num_targets)` - Sampled values from the chosen distribution. - """ - - sequences: torch.FloatTensor = None - - -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll -def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: - """ - Computes the negative log likelihood loss from input distribution with respect to target. - """ - return -input.log_prob(target) - - -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average -def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: - """ - Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, - meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. - - Args: - input_tensor (`torch.FloatTensor`): - Input tensor, of which the average must be computed. - weights (`torch.FloatTensor`, *optional*): - Weights tensor, of the same shape as `input_tensor`. - dim (`int`, *optional*): - The dim along which to average `input_tensor`. - - Returns: - `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. - """ - if weights is not None: - weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) - sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) - return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights - else: - return input_tensor.mean(dim=dim) - - -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST -class PatchTSTStdScaler(nn.Module): - """ - Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by - subtracting from the mean and dividing by the standard deviation. - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - - def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. 
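`nll` and `weighted_average` above are the two pieces that turn a predicted distribution into a scalar loss while ignoring zero-weight positions. A runnable sketch of that combination, using a plain `torch.distributions.Normal` in place of the library's distribution-output helpers:

```python
import torch
from torch.distributions import Normal

# Per-element NLL from a predicted distribution, as in `nll` above.
target = torch.randn(2, 24, 3)
dist = Normal(loc=torch.zeros_like(target), scale=torch.ones_like(target))
loss_per_element = -dist.log_prob(target)  # [bs x prediction_length x num_channels]

# Masked mean, as in `weighted_average`: zero-weight entries contribute 0, not NaN.
weights = torch.ones_like(loss_per_element)
weights[:, -4:, :] = 0  # pretend the last 4 steps are unobserved
weighted = torch.where(weights != 0, loss_per_element * weights, torch.zeros_like(loss_per_element))
loss = weighted.sum(dim=1) / weights.sum(dim=1).clamp(min=1.0)
print(loss.shape)  # torch.Size([2, 3])
```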
- Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ - denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) - denominator = denominator.clamp_min(1.0) - loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator - - variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator - scale = torch.sqrt(variance + self.minimum_scale) - return (data - loc) / scale, loc, scale - - -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST -class PatchTSTMeanScaler(nn.Module): - """ - Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data - accordingly. - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - self.default_scale = config.default_scale if hasattr(config, "default_scale") else None - - def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ - ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) - num_observed = observed_indicator.sum(self.dim, keepdim=True) - - scale = ts_sum / torch.clamp(num_observed, min=1) - - # If `default_scale` is provided, we use it, otherwise we use the scale - # of the batch. - if self.default_scale is None: - batch_sum = ts_sum.sum(dim=0) - batch_observations = torch.clamp(num_observed.sum(0), min=1) - default_scale = torch.squeeze(batch_sum / batch_observations) - else: - default_scale = self.default_scale * torch.ones_like(scale) - - # apply default scale where there are no observations - scale = torch.where(num_observed > 0, scale, default_scale) - - # ensure the scale is at least `self.minimum_scale` - scale = torch.clamp(scale, min=self.minimum_scale) - scaled_data = data / scale - - if not self.keepdim: - scale = scale.squeeze(dim=self.dim) - - return scaled_data, torch.zeros_like(scale), scale - - -# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST -class PatchTSTNOPScaler(nn.Module): - """ - Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
- """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - - def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor = None - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ - scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) - loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) - return data, loc, scale - - -class PatchTSTScaler(nn.Module): - def __init__(self, config: PatchTSTConfig): - super().__init__() - if config.scaling == "mean" or config.scaling is True: - self.scaler = PatchTSTMeanScaler(config) - elif config.scaling == "std": - self.scaler = PatchTSTStdScaler(config) - else: - self.scaler = PatchTSTNOPScaler(config) - - def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, um_input_channels)`) - """ - data, loc, scale = self.scaler(data, observed_indicator) - return data, loc, scale - - -@add_start_docstrings( - "The bare PatchTST Model outputting raw hidden-states without any specific head.", - PATCHTST_START_DOCSTRING, -) -class PatchTSTModel(PatchTSTPreTrainedModel): - def __init__(self, config: PatchTSTConfig): - super().__init__(config) - - self.scaler = PatchTSTScaler(config) - self.patchifier = PatchTSTPatchify(config) - self.mask_input = config.mask_input - - if self.mask_input: - self.masking = PatchTSTMasking(config) - else: - self.masking = nn.Identity() - self.encoder = PatchTSTEncoder(config) - - # Initialize weights and apply final processing - self.post_init() - - def forward( - self, - past_values: torch.Tensor, - past_observed_mask: Optional[torch.Tensor] = None, - future_values: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, PatchTSTModelOutput]: - """ - Parameters: - past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): - Input sequence to the model - past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): - Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected - in `[0, 1]`: - - - 1 for values that are **observed**, - - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). 
- output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers - output_attentions (`bool`, *optional*): - Whether or not to return the output attention of all layers - return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. - - Returns: - `PatchTSTModelOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False) - - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - - if past_observed_mask is None: - past_observed_mask = torch.ones_like(past_values) - - # x: tensor [bs x sequence_length x num_input_channels] - scaled_past_values, loc, scale = self.scaler(past_values, past_observed_mask) - - # patched_values: [bs x num_input_channels x num_patches x patch_length] for pretrain - patched_values = self.patchifier(scaled_past_values) - if self.mask_input: - masked_values, mask = self.masking(patched_values) - else: - masked_values, mask = self.masking(patched_values), None - - encoder_output = self.encoder( - patch_input=masked_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions - ) - - if not return_dict: - outputs = (encoder_output.last_hidden_state, encoder_output.hidden_states, encoder_output.attentions) - outputs = outputs + (patched_values, mask, loc, scale) - return tuple(v for v in outputs if v is not None) - - return PatchTSTModelOutput( - last_hidden_state=encoder_output.last_hidden_state, - hidden_states=encoder_output.hidden_states, - attentions=encoder_output.attentions, - patch_input=patched_values, - mask=mask, - loc=loc, - scale=scale, - ) - - -class PatchTSTMaskPretrainHead(nn.Module): - """ - Pretraining head for mask modelling - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.dropout = nn.Dropout(config.dropout) - self.linear = nn.Linear(config.d_model, config.patch_length) - self.use_cls_token = config.use_cls_token - - def forward(self, embedding: torch.Tensor) -> torch.Tensor: - """ - Parameters: - embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` - or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): - Embedding from the model - Returns: - `torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or - `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True - - """ - embedding = self.linear(self.dropout(embedding)) # [bs x num_channels x num_patches x patch_length] - if self.use_cls_token: - embedding = embedding[:, :, 1:, :] # remove the first cls token - return embedding - - -class PatchTSTForPretraining(PatchTSTPreTrainedModel): - """ - Mask pretrain model: PatchTST model + pretrain head - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__(config) - - config.mask_input = True - self.model = PatchTSTModel(config=config) - self.head = PatchTSTMaskPretrainHead(config) - - # Initialize weights and apply final processing - self.post_init() - - def forward( - self, - past_values: torch.Tensor, - past_observed_mask: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, PatchTSTForPretrainingOutput]: - """ - Parameters: - past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, 
*required*): - Input sequence to the model - past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): - Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected - in `[0, 1]`: - - - 1 for values that are **observed**, - - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers - return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. - - Returns: - `PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or - `config.return_dict`=False) - - """ - - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # past_values: [bs x num_channels x num_patches x d_model] or - # [bs x num_channels x (num_patches+1) x d_model] if use cls_token - model_output = self.model( - past_values=past_values, - past_observed_mask=past_observed_mask, - output_hidden_states=output_hidden_states, - output_attentions=output_attentions, - ) - - # model_output[0]: [bs x num_channels x num_patches x patch_length] or - # [bs x num_channels x (num_patches+1) x patch_length] if use cls_token - x_hat = self.head(model_output[0]) - - # calculate masked_loss - loss = nn.MSELoss(reduction="none") - loss_val = loss(x_hat, model_output.patch_input) - masked_loss = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) - - encoder_states = model_output.hidden_states - if not return_dict: - outputs = (masked_loss, x_hat, model_output.hidden_states, model_output.attentions) - return tuple(v for v in outputs if v is not None) - return PatchTSTForPretrainingOutput( - loss=masked_loss, prediction_output=x_hat, hidden_states=encoder_states, attentions=model_output.attentions - ) - - -class PatchTSTForClassification(PatchTSTPreTrainedModel): - """ - PatchTST model for classification. The model contains PatchTST model + classification head - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__(config) - - self.model = PatchTSTModel(config) - self.head = PatchTSTClassificationHead(config) - - # Initialize weights and apply final processing - self.post_init() - - def forward( - self, - past_values: torch.Tensor, - target_values: torch.Tensor = None, - past_observed_mask: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[tuple, PatchTSTForClassificationOutput]: - """ - Parameters: - past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): - Input sequence to the model - target_values (`torch.Tensor`, *optional*): labels associates with the `past_values` - past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): - Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected - in `[0, 1]`: - - - 1 for values that are **observed**, - - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers - return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. 
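The pretraining objective above is a patch-level MSE averaged only over masked patches. A sketch of exactly that reduction, with dummy tensors standing in for the head output and the patchified input:

```python
import torch
import torch.nn as nn

bs, num_channels, num_patches, patch_length = 2, 3, 16, 8
x_hat = torch.randn(bs, num_channels, num_patches, patch_length)        # reconstruction from the head
patch_input = torch.randn(bs, num_channels, num_patches, patch_length)  # patchified ground truth
mask = (torch.rand(bs, num_channels, num_patches) > 0.5).float()        # 1 = masked patch

loss = nn.MSELoss(reduction="none")
loss_val = loss(x_hat, patch_input)  # per-element squared error
# Mean over the patch-length dimension, then average over masked patches only.
masked_loss = (loss_val.mean(dim=-1) * mask).sum() / (mask.sum() + 1e-10)
print(masked_loss)
```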
- - Returns: - `PatchTSTForClassificationOutput` or tuple of `torch.Tensor` (if `return_dict`=False or - `config.return_dict`=False) - - """ - - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - model_output = self.model( - past_values=past_values, - past_observed_mask=past_observed_mask, - output_hidden_states=output_hidden_states, - output_attentions=output_attentions, - ) - y_hat = self.head(model_output[0]) - - loss_val = None - if target_values is not None: - loss = nn.CrossEntropyLoss() - loss_val = loss(y_hat, target_values) - - if not return_dict: - outputs = (loss_val, y_hat, model_output.hidden_states, model_output.attentions) - return tuple(v for v in outputs if v is not None) - return PatchTSTForClassificationOutput( - loss=loss_val, - prediction_logits=y_hat, - hidden_states=model_output.hidden_states, - attentions=model_output.attentions, - ) - - -class PatchTSTClassificationHead(nn.Module): - def __init__(self, config: PatchTSTConfig): - super().__init__() - self.use_cls_token = config.use_cls_token - self.pooling_type = config.pooling_type - self.flatten = nn.Flatten(start_dim=1) - self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() - self.linear = nn.Linear(config.num_input_channels * config.d_model, config.num_targets) - - def forward(self, embedding: torch.Tensor): - """ - Parameters: - embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` - or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): - Embedding from the model - Returns: - `torch.Tensor` of shape `(bs, num_targets)` - - """ - if self.use_cls_token: - # use the first output token, pooled_embedding: bs x num_channels x d_model - pooled_embedding = embedding[:, :, 0, :] - elif self.pooling_type == "mean": - # pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding.mean(dim=2) - elif self.pooling_type == "max": - # pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding.max(dim=2) - else: - raise Exception(f"pooling operator {self.pooling_type} is not implemented yet") - # pooled_embedding: bs x num_channels * d_model - pooled_embedding = self.flatten(pooled_embedding) - # output: bs x n_classes - output = self.linear(self.dropout(pooled_embedding)) - return output - - -class PatchTSTPredictionHead(nn.Module): - def __init__(self, config: PatchTSTConfig, distribution_output=None): - super().__init__() - - self.shared_projection = config.shared_projection - self.num_input_channels = config.num_input_channels - self.use_cls_token = config.use_cls_token - self.pooling_type = config.pooling_type - head_dim = config.d_model if self.pooling_type else config.d_model * config.num_patches - - if not self.shared_projection: - # if each channel has its own head - self.projections = nn.ModuleList() - self.dropouts = nn.ModuleList() - self.flattens = nn.ModuleList() - for i in range(self.num_input_channels): - self.flattens.append(nn.Flatten(start_dim=2)) - if distribution_output is None: - # use linear head - self.projections.append(nn.Linear(head_dim, config.prediction_length)) - else: - # use distribution head - self.projections.append(distribution_output.get_parameter_projection(head_dim)) - self.dropouts.append(nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()) - else: - # all the channels share the same head - self.flatten = nn.Flatten(start_dim=2) - if distribution_output is None: - # use linear head - 
self.projection = nn.Linear(head_dim, config.prediction_length) - else: - # use distribution head - self.projection = distribution_output.get_parameter_projection(head_dim) - self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() - - def forward(self, embedding: torch.Tensor): - """ - Parameters: - embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` - or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): - Embedding from the model - Returns: - `torch.Tensor` of shape `(bs, forecast_len, num_channels)` - - """ - if self.use_cls_token: - # pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding[:, :, 0, :] - else: - if self.pooling_type == "mean": - # pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding.mean(dim=2) - elif self.pooling_type == "max": - # pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding.max(dim=2) - else: - # pooled_embedding: [bs x num_channels x num_patches x d_model] - pooled_embedding = embedding - - if not self.shared_projection: - output = [] - for i in range(self.num_input_channels): - # pooled_embedding: [bs x (d_model * num_patches)] or [bs x d_model)] - pooled_embedding = self.flattens[i](pooled_embedding[:, i, :]) - pooled_embedding = self.dropouts[i](pooled_embedding) - # pooled_embedding: [bs x forecast_len] - # or tuple ([bs x forecast_len], [bs x forecast_len]) if using distribution head - pooled_embedding = self.projections[i](pooled_embedding) - output.append(pooled_embedding) - # output: [bs x num_channels x forecast_len] - output = torch.stack(output, dim=1) - else: - # pooled_embedding: [bs x num_channels x (d_model * num_patches)] or [bs x num_channels x d_model)] - pooled_embedding = self.flatten(pooled_embedding) - pooled_embedding = self.dropout(pooled_embedding) - # output: [bs x num_channels x forecast_len] or - # tuple ([bs x num_channels x forecast_len], [bs x num_channels x forecast_len]) if using distribution head - output = self.projection(pooled_embedding) - - if isinstance(output, tuple): - # output: ([bs x forecast_len x num_channels], [bs x forecast_len x num_channels]) - output = tuple(z.transpose(2, 1) for z in output) - else: - output = output.transpose(2, 1) # [bs x forecast_len x num_channels] - return output - - -class PatchTSTForPrediction(PatchTSTPreTrainedModel): - """ - PatchTST for forecasting. 
The model contains PatchTST model + Forecasting head - """ - - def __init__(self, config: PatchTSTConfig): - super().__init__(config) - self.model = PatchTSTModel(config) - - if config.loss == "mse": - self.distribution_output = None - else: - if config.distribution_output == "student_t": - self.distribution_output = StudentTOutput(dim=config.prediction_length) - elif config.distribution_output == "normal": - self.distribution_output = NormalOutput(dim=config.prediction_length) - elif config.distribution_output == "negative_binomial": - self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length) - else: - raise ValueError(f"Unknown distribution output {config.distribution_output}") - - self.head = PatchTSTPredictionHead(config, self.distribution_output) - - # Initialize weights and apply final processing - self.post_init() - - def forward( - self, - past_values: torch.Tensor, - past_observed_mask: Optional[torch.Tensor] = None, - future_values: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, PatchTSTForPredictionOutput]: - """ - Parameters: - past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): - Input sequence to the model - past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): - Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected - in `[0, 1]`: - - - 1 for values that are **observed**, - - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). - future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*): - future target values associated with the `past_values` - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers - return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. 
- - Returns: - `PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or - `config.return_dict`=False) - - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # get model output - model_output = self.model( - past_values=past_values, - past_observed_mask=past_observed_mask, - output_hidden_states=output_hidden_states, - output_attentions=output_attentions, - ) - # get output head - y_hat = self.head(model_output.last_hidden_state) - - loss_val = None - - if future_values is not None: - if self.distribution_output: - distribution = self.distribution_output.distribution( - y_hat, loc=model_output.loc, scale=model_output.scale - ) - loss_val = nll(distribution, future_values) - # take average of the loss - loss_val = weighted_average(loss_val) - # for testing - # loss_val = nn.MSELoss(reduction='none')(distribution.mean, future_values) - # loss_val = weighted_average(loss_val) - else: - y_hat = y_hat * model_output.scale + model_output.loc - loss = nn.MSELoss(reduction="mean") - loss_val = loss(y_hat, future_values) - - loc = model_output.loc - scale = model_output.scale - - if not return_dict: - outputs = (loss_val, y_hat, model_output.hidden_states, model_output.attentions, loc, scale) - return tuple(v for v in outputs if v is not None) - return PatchTSTForPredictionOutput( - loss=loss_val, - prediction_outputs=y_hat, - hidden_states=model_output.hidden_states, - attentions=model_output.attentions, - loc=loc, - scale=scale, - ) - - def generate( - self, - past_values: torch.Tensor, - past_observed_mask: Optional[torch.Tensor] = None, - ) -> SamplePatchTSTPredictionOutput: - """ - Generate sequences of sample predictions from a model with a probability distribution head. - - Parameters: - past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Past values of the time series that serves as context in order to predict the future. - - past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): - Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected - in `[0, 1]`: - - - 1 for values that are **observed**, - - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). - - Return: - [`SamplePatchTSTPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, - number of samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, - num_input_channels)` for multivariate predictions. 
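# Illustrative usage sketch, not part of the original patch: drawing sample forecasts with the
# `generate` method defined below and reducing them to a point forecast plus an uncertainty band.
# The config values are assumptions chosen only so that a distribution head is active (any
# non-"mse" loss together with a `distribution_output`); they are not prescribed by this patch,
# and a randomly initialized model is used because no downloadable checkpoint is referenced here.
import torch
from transformers import PatchTSTConfig, PatchTSTForPrediction

config = PatchTSTConfig(
    context_length=32,
    prediction_length=8,
    patch_length=4,
    patch_stride=4,
    num_input_channels=2,
    loss="nll",                       # assumption: a non-"mse" loss enables the distribution head
    distribution_output="student_t",
)
model = PatchTSTForPrediction(config)

past_values = torch.randn(3, config.context_length, config.num_input_channels)
samples = model.generate(past_values=past_values).sequences  # [batch, num_samples, prediction_length, channels]
point_forecast = samples.median(dim=1).values                # [batch, prediction_length, channels]
lower = samples.quantile(0.1, dim=1)                         # 10%/90% band around the point forecast
upper = samples.quantile(0.9, dim=1)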
- """ - # get number of samples - num_parallel_samples = self.config.num_parallel_samples - - # get model output - outputs = self( - past_values=past_values, - future_values=None, - past_observed_mask=past_observed_mask, - output_hidden_states=False, - ) - - # get distribution - distribution = self.distribution_output.distribution( - outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale - ) - # get samples: list of [bs x forecast_len x num_channels] - samples = [distribution.sample() for _ in range(num_parallel_samples)] - # stack tensors - samples = torch.stack(samples, dim=1) # [bs x num_samples x forecast_len x num_channels] - return SamplePatchTSTPredictionOutput(sequences=samples) - - -class PatchTSTRegressionHead(nn.Module): - """ - Regression head - """ - - def __init__(self, config: PatchTSTConfig, distribution_output=None): - super().__init__() - self.y_range = config.output_range - self.use_cls_token = config.use_cls_token - self.pooling_type = config.pooling_type - self.distribution_output = distribution_output - - head_dim = config.num_input_channels * config.d_model - - self.flatten = nn.Flatten(start_dim=1) - self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() - - if distribution_output is None: - self.projection = nn.Linear(head_dim, config.num_targets) - else: - self.projection = distribution_output.get_parameter_projection(head_dim) - - def forward(self, embedding: torch.Tensor): - """ - Parameters: - embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` - or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): - Embedding from the model - Returns: - `torch.Tensor` of shape `(bs, output_dim)` - - """ - if self.use_cls_token: - # use the first output token, pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding[:, :, 0, :] - elif self.pooling_type == "mean": - # pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding.mean(dim=2) - elif self.pooling_type == "max": - # pooled_embedding: [bs x num_channels x d_model] - pooled_embedding = embedding.max(dim=2) - else: - raise Exception(f"pooling operator {self.pooling_type} is not implemented yet") - # flatten the input - # pooled_embedding: bs x (num_channels * d_model) - pooled_embedding = self.dropout(self.flatten(pooled_embedding)) - # projection - # output: bs x output_dim or a tuple of this shape for distribution head - output = self.projection(pooled_embedding) - # - if (self.distribution_output is None) & (self.y_range is not None): # linear head - output = torch.sigmoid(output) * (self.y_range[1] - self.y_range[0]) + self.y_range[0] - return output - - -class PatchTSTForRegression(PatchTSTPreTrainedModel): - # PatchTST model + Regression head - def __init__(self, config: PatchTSTConfig): - super().__init__(config) - self.model = PatchTSTModel(config) - - self.model = PatchTSTModel(config) - if config.loss == "mse": - self.distribution_output = None - else: - if config.distribution_output == "student_t": - self.distribution_output = StudentTOutput(dim=config.prediction_length * config.num_targets) - elif config.distribution_output == "normal": - self.distribution_output = NormalOutput(dim=config.prediction_length * config.num_targets) - elif config.distribution_output == "negative_binomial": - self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length * config.num_targets) - else: - raise ValueError(f"Unknown distribution output 
{config.distribution_output}") - - self.head = PatchTSTRegressionHead(config, self.distribution_output) - - # Initialize weights and apply final processing - self.post_init() - - def forward( - self, - past_values: torch.Tensor, - target_values: torch.Tensor, - past_observed_mask: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[tuple, PatchTSTForRegressionOutput]: - """ - Parameters: - past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): - Input sequence to the model - past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): - Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected - in `[0, 1]`: - - - 1 for values that are **observed**, - - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). - target_values (`torch.Tensor` of shape `(bs, num_input_channels)`): - target values associates with the `past_values` - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers - return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. - - Returns: - `PatchTSTForRegressionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or - `config.return_dict`=False) - - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - model_output = self.model( - past_values=past_values, - past_observed_mask=past_observed_mask, - output_hidden_states=output_hidden_states, - output_attentions=output_attentions, - ) - # get output head. y_hat is of shape [bs x num_targets] or tuple of this shape - y_hat = self.head(model_output.last_hidden_state) - - loss_val = None - if target_values is not None: - if self.distribution_output: - distribution = self.distribution_output.distribution(y_hat) - loss_val = nll(distribution, target_values) - # take average of the loss - loss_val = weighted_average(loss_val) - else: - loss = nn.MSELoss(reduction="mean") - loss_val = loss(y_hat, target_values) - - if not return_dict: - outputs = (loss_val, y_hat, model_output.hidden_states, model_output.attentions) - return tuple(v for v in outputs if v is not None) - return PatchTSTForRegressionOutput( - loss=loss_val, - forecast_outputs=y_hat, - hidden_states=model_output.hidden_states, - attentions=model_output.attentions, - ) - - def generate( - self, - past_values: torch.Tensor, - past_observed_mask: Optional[torch.Tensor] = None, - ) -> SamplePatchTSTRegressionOutput: - """ - Generate sequences of sample predictions from a model with a probability distribution head. - - Parameters: - past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Past values of the time series that serves as context in order to predict the future. - - past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): - Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected - in `[0, 1]`: - - - 1 for values that are **observed**, - - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). - - Return: - [`SamplePatchTSTRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, - number of samples, num_targets)`. 
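# Illustrative sketch, not part of the original patch: for the regression variant below, the
# sampled `sequences` have shape [batch, num_samples, num_targets] (no time dimension), so a point
# estimate and a predictive spread are obtained by reducing over the sample dimension. The random
# tensor merely stands in for `SamplePatchTSTRegressionOutput.sequences`.
import torch

sequences = torch.randn(4, 100, 2)      # [batch, num_samples, num_targets], placeholder values
point_estimate = sequences.mean(dim=1)  # [batch, num_targets]
spread = sequences.std(dim=1)           # per-target uncertainty of the sampled predictions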
- """ - # get number of samples - num_parallel_samples = self.config.num_parallel_samples - - # get model output - outputs = self( - past_values=past_values, - target_values=None, - past_observed_mask=past_observed_mask, - output_hidden_states=False, - ) - - # get distribution - distribution = self.distribution_output.distribution(outputs.forecast_outputs) - # get samples: list of [bs x num_targets] - samples = [distribution.sample() for _ in range(num_parallel_samples)] - # stack tensors - samples = torch.stack(samples, dim=1) # [bs x num_samples x num_targets] - return SamplePatchTSTRegressionOutput(sequences=samples) diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py index 2c875dd56e1b..904c02b4f043 100644 --- a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -83,66 +83,67 @@ def forward(self, features: torch.Tensor) -> torch.Tensor: class TimeSeriesStdScaler(nn.Module): """ - Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by - subtracting from the mean and dividing by the standard deviation. + Standardize features by calculating the mean and scaling along some given dimension `dim`, and then normalizes it + by subtracting from the mean and dividing by the standard deviation. + + Args: + dim (`int`): + Dimension along which to calculate the mean and standard deviation. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + minimum_scale (`float`, *optional*, defaults to 1e-5): + Default scale that is used for elements that are constantly zero along dimension `dim`. """ - def __init__(self, config: TimeSeriesTransformerConfig): + def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + if not dim > 0: + raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0") + self.dim = dim + self.keepdim = keepdim + self.minimum_scale = minimum_scale - def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. 
- Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ - denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) + @torch.no_grad() + def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + denominator = weights.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) - loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator + loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator - variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale class TimeSeriesMeanScaler(nn.Module): """ - Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data + Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data accordingly. + + Args: + dim (`int`): + Dimension along which to compute the scale. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + default_scale (`float`, *optional*, defaults to `None`): + Default scale that is used for elements that are constantly zero. If `None`, we use the scale of the batch. + minimum_scale (`float`, *optional*, defaults to 1e-10): + Default minimum possible scale that is used for any item. """ - def __init__(self, config: TimeSeriesTransformerConfig): + def __init__( + self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10 + ): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True - self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 - self.default_scale = config.default_scale if hasattr(config, "default_scale") else None + self.dim = dim + self.keepdim = keepdim + self.minimum_scale = minimum_scale + self.default_scale = default_scale + @torch.no_grad() def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): - Calculating the scale on the observed indicator. - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ + # shape: (N, [C], T=1) ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) @@ -172,26 +173,23 @@ def forward( class TimeSeriesNOPScaler(nn.Module): """ - Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. + Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data. 
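# Illustrative worked example, not part of the original patch: what the weighted standardization in
# `TimeSeriesStdScaler.forward` above computes on toy data. The weights play the role of the
# observed indicator, so the unobserved last step is excluded from the statistics; the numbers are
# invented for the illustration.
import torch

data = torch.tensor([[[1.0], [2.0], [3.0], [0.0]]])     # [batch=1, time=4, channels=1]
weights = torch.tensor([[[1.0], [1.0], [1.0], [0.0]]])  # last step was missing (NaN replaced by 0)

denominator = weights.sum(dim=1, keepdim=True).clamp_min(1.0)                    # 3 observed steps
loc = (data * weights).sum(dim=1, keepdim=True) / denominator                    # mean of 1, 2, 3 -> 2.0
variance = (((data - loc) * weights) ** 2).sum(dim=1, keepdim=True) / denominator
scale = torch.sqrt(variance + 1e-5)                                              # sqrt(2/3) ~= 0.8165
standardized = (data - loc) / scale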
+ + Args: + dim (`int`): + Dimension along which to compute the scale. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. """ - def __init__(self, config: TimeSeriesTransformerConfig): + def __init__(self, dim: int, keepdim: bool = False): super().__init__() - self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 - self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.dim = dim + self.keepdim = keepdim def forward( - self, data: torch.Tensor, observed_indicator: torch.Tensor = None + self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Parameters: - data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): - input for Batch norm calculation - Returns: - tuple of `torch.Tensor` of shapes - (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, - `(batch_size, 1, num_input_channels)`) - """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @@ -1182,11 +1180,11 @@ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: - self.scaler = TimeSeriesMeanScaler(config) + self.scaler = TimeSeriesMeanScaler(dim=1, keepdim=True) elif config.scaling == "std": - self.scaler = TimeSeriesStdScaler(config) + self.scaler = TimeSeriesStdScaler(dim=1, keepdim=True) else: - self.scaler = TimeSeriesNOPScaler(config) + self.scaler = TimeSeriesNOPScaler(dim=1, keepdim=True) if config.num_static_categorical_features > 0: self.embedder = TimeSeriesFeatureEmbedder( diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 07bcf3867fb1..c6b20c7e3674 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -627,12 +627,6 @@ def __init__(self, *args, **kwargs): MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = None -MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = None - - -MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = None - - MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None @@ -6025,51 +6019,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -class PatchTSTForClassification(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PatchTSTForPrediction(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PatchTSTForPretraining(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PatchTSTForRegression(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PatchTSTModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PatchTSTPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class PegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] diff --git 
a/tests/models/patchtst/__init__.py b/tests/models/patchtst/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/models/patchtst/test_modeling_patchtst.py b/tests/models/patchtst/test_modeling_patchtst.py deleted file mode 100644 index 8d6f2202ee81..000000000000 --- a/tests/models/patchtst/test_modeling_patchtst.py +++ /dev/null @@ -1,353 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Testing suite for the PyTorch PatchTST model. """ - -import inspect -import random -import tempfile -import unittest - -from huggingface_hub import hf_hub_download - -from transformers import is_torch_available -from transformers.models.auto import get_values -from transformers.testing_utils import is_flaky, require_torch, slow, torch_device - -from ...test_configuration_common import ConfigTester -from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor -from ...test_pipeline_mixin import PipelineTesterMixin - - -TOLERANCE = 1e-4 - -if is_torch_available(): - import torch - - from transformers import ( - MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, - MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, - PatchTSTConfig, - PatchTSTForClassification, - PatchTSTForPrediction, - PatchTSTForPretraining, - PatchTSTForRegression, - PatchTSTModel, - ) - - -@require_torch -class PatchTSTModelTester: - def __init__( - self, - parent, - batch_size=13, - prediction_length=7, - context_length=14, - patch_length=5, - patch_stride=5, - num_input_channels=1, - num_time_features=1, - is_training=True, - hidden_size=16, - num_hidden_layers=2, - num_attention_heads=4, - intermediate_size=4, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - lags_sequence=[1, 2, 3, 4, 5], - distil=False, - seed_number=42, - num_targets=2, - num_output_channels=2, - ): - self.parent = parent - self.batch_size = batch_size - self.prediction_length = prediction_length - self.context_length = context_length - self.patch_length = patch_length - self.patch_stride = patch_stride - self.num_input_channels = num_input_channels - self.num_time_features = num_time_features - self.lags_sequence = lags_sequence - self.is_training = is_training - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.intermediate_size = intermediate_size - self.hidden_act = hidden_act - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - - self.seed_number = seed_number - self.num_targets = num_targets - self.num_output_channels = num_output_channels - self.distil = distil - self.num_patches = (max(self.context_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 - - def get_config(self): - return PatchTSTConfig( - prediction_length=self.prediction_length, - patch_length=self.patch_length, - patch_stride=self.patch_stride, - 
num_input_channels=self.num_input_channels, - d_model=self.hidden_size, - encoder_layers=self.num_hidden_layers, - encoder_attention_heads=self.num_attention_heads, - encoder_ffn_dim=self.intermediate_size, - dropout=self.hidden_dropout_prob, - attention_dropout=self.attention_probs_dropout_prob, - context_length=self.context_length, - activation_function=self.hidden_act, - seed_number=self.seed_number, - num_targets=self.num_targets, - num_output_channels=self.num_output_channels, - ) - - def prepare_patchtst_inputs_dict(self, config): - _past_length = config.context_length - # bs, num_input_channels, num_patch, patch_len - - # [bs x seq_len x num_input_channels] - past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels]) - - future_values = floats_tensor([self.batch_size, config.prediction_length, self.num_input_channels]) - - inputs_dict = { - "past_values": past_values, - "future_values": future_values, - } - return inputs_dict - - def prepare_config_and_inputs(self): - config = self.get_config() - inputs_dict = self.prepare_patchtst_inputs_dict(config) - return config, inputs_dict - - def prepare_config_and_inputs_for_common(self): - config, inputs_dict = self.prepare_config_and_inputs() - return config, inputs_dict - - -@require_torch -class PatchTSTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): - all_model_classes = ( - ( - PatchTSTModel, - PatchTSTForPrediction, - PatchTSTForPretraining, - PatchTSTForClassification, - PatchTSTForRegression, - ) - if is_torch_available() - else () - ) - all_generative_model_classes = ( - (PatchTSTForPrediction, PatchTSTForRegression, PatchTSTForPretraining) if is_torch_available() else () - ) - pipeline_model_mapping = {"feature-extraction": PatchTSTModel} if is_torch_available() else {} - test_pruning = False - test_head_masking = False - test_missing_keys = False - test_torchscript = False - test_inputs_embeds = False - test_model_common_attributes = False - - test_resize_embeddings = True - test_resize_position_embeddings = False - test_mismatched_shapes = True - test_model_parallel = False - has_attentions = False - - def setUp(self): - self.model_tester = PatchTSTModelTester(self) - self.config_tester = ConfigTester( - self, - config_class=PatchTSTConfig, - has_text_modality=False, - prediction_length=self.model_tester.prediction_length, - ) - - def test_config(self): - self.config_tester.run_common_tests() - - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): - inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) - - # if PatchTSTForPretraining - if model_class == PatchTSTForPretraining: - inputs_dict.pop("future_values") - # else if classification model: - elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING): - rng = random.Random(self.model_tester.seed_number) - labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_targets, rng=rng) - inputs_dict["target_values"] = labels - inputs_dict.pop("future_values") - elif model_class in get_values(MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING): - rng = random.Random(self.model_tester.seed_number) - target_values = floats_tensor( - [self.model_tester.batch_size, self.model_tester.num_output_channels], rng=rng - ) - inputs_dict["target_values"] = target_values - inputs_dict.pop("future_values") - return inputs_dict - - def test_save_load_strict(self): - config, _ = self.model_tester.prepare_config_and_inputs() - for model_class in 
self.all_model_classes: - model = model_class(config) - - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_pretrained(tmpdirname) - model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) - self.assertEqual(info["missing_keys"], []) - - def test_hidden_states_output(self): - def check_hidden_states_output(inputs_dict, config, model_class): - model = model_class(config) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - hidden_states = outputs.hidden_states - - expected_num_layers = getattr( - self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers - ) - self.assertEqual(len(hidden_states), expected_num_layers) - - num_patch = self.model_tester.num_patches - self.assertListEqual( - list(hidden_states[0].shape[-2:]), - [num_patch, self.model_tester.hidden_size], - ) - - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - inputs_dict["output_hidden_states"] = True - print("model_class: ", model_class) - - check_hidden_states_output(inputs_dict, config, model_class) - - # check that output_hidden_states also work using config - del inputs_dict["output_hidden_states"] - config.output_hidden_states = True - - check_hidden_states_output(inputs_dict, config, model_class) - - @unittest.skip(reason="we have no tokens embeddings") - def test_resize_tokens_embeddings(self): - pass - - def test_model_main_input_name(self): - model_signature = inspect.signature(getattr(PatchTSTModel, "forward")) - # The main input is the name of the argument after `self` - observed_main_input_name = list(model_signature.parameters.keys())[1] - self.assertEqual(PatchTSTModel.main_input_name, observed_main_input_name) - - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = [ - "past_values", - "past_observed_mask", - "future_values", - ] - if model_class == PatchTSTForPretraining: - expected_arg_names.remove("future_values") - elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING) or model_class in get_values( - MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING - ): - expected_arg_names.remove("future_values") - expected_arg_names.remove("past_observed_mask") - expected_arg_names.append("target_values") if model_class in get_values( - MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING - ) else expected_arg_names.append("target_values") - expected_arg_names.append("past_observed_mask") - expected_arg_names.extend( - [ - "output_hidden_states", - "output_attentions", - "return_dict", - ] - ) - - self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) - - @is_flaky() - def test_retain_grad_hidden_states_attentions(self): - super().test_retain_grad_hidden_states_attentions() - - -# Note: Publishing of this dataset is under internal review. The dataset is not yet downloadable. 
-def prepare_batch(repo_id="ibm/etth1-forecast-test", file="train-batch.pt"): - file = hf_hub_download(repo_id=repo_id, filename=file, repo_type="dataset") - batch = torch.load(file, map_location=torch_device) - return batch - - -# Note: Publishing of pretrained weights is under internal review. Pretrained model is not yet downloadable. -@require_torch -@slow -class PatchTSTModelIntegrationTests(unittest.TestCase): - # Publishing of pretrained weights are under internal review. Pretrained model is not yet downloadable. - def test_pretrain_head(self): - model = PatchTSTForPretraining.from_pretrained("ibm/patchtst-etth1-pretrain").to(torch_device) - batch = prepare_batch() - - torch.manual_seed(0) - with torch.no_grad(): - output = model(past_values=batch["past_values"].to(torch_device)).prediction_output - num_patch = ( - max(model.config.context_length, model.config.patch_length) - model.config.patch_length - ) // model.config.patch_stride + 1 - expected_shape = torch.Size([64, model.config.num_input_channels, num_patch, model.config.patch_length]) - self.assertEqual(output.shape, expected_shape) - - expected_slice = torch.tensor( - [[[-0.5409]], [[0.3093]], [[-0.3759]], [[0.5068]], [[-0.8387]], [[0.0937]], [[0.2809]]], - device=torch_device, - ) - self.assertTrue(torch.allclose(output[0, :7, :1, :1], expected_slice, atol=TOLERANCE)) - - # Publishing of pretrained weights are under internal review. Pretrained model is not yet downloadable. - def test_prediction_head(self): - model = PatchTSTForPrediction.from_pretrained("ibm/patchtst-etth1-forecast").to(torch_device) - - batch = prepare_batch(file="test-batch.pt") - - torch.manual_seed(0) - with torch.no_grad(): - output = model( - past_values=batch["past_values"].to(torch_device), - future_values=batch["future_values"].to(torch_device), - ).prediction_outputs - expected_shape = torch.Size([64, model.config.prediction_length, model.config.num_input_channels]) - self.assertEqual(output.shape, expected_shape) - - expected_slice = torch.tensor( - [[0.3228, 0.4320, 0.4591, 0.4066, -0.3461, 0.3094, -0.8426]], - device=torch_device, - ) - self.assertTrue(torch.allclose(output[0, :1, :7], expected_slice, atol=TOLERANCE)) diff --git a/utils/check_repo.py b/utils/check_repo.py index 390f4ca5cab5..d740eefed019 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -185,8 +185,6 @@ "TimeSeriesTransformerForPrediction", "InformerForPrediction", "AutoformerForPrediction", - "PatchTSTForPretraining", - "PatchTSTForPrediction", "JukeboxVQVAE", "JukeboxPrior", "SamModel", From 73bc0c9e88e6e7acfa3c519e5f73d505abf35607 Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Tue, 14 Nov 2023 11:57:17 -0600 Subject: [PATCH 169/268] translate hpo_train.md and perf_hardware.md to chinese (#27431) * translate * translate * update --- docs/source/zh/_toctree.yml | 6 ++ docs/source/zh/hpo_train.md | 139 ++++++++++++++++++++++++++++ docs/source/zh/perf_hardware.md | 156 ++++++++++++++++++++++++++++++++ 3 files changed, 301 insertions(+) create mode 100644 docs/source/zh/hpo_train.md create mode 100644 docs/source/zh/perf_hardware.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index fffa7569f497..c3c914186733 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -45,6 +45,12 @@ - sections: - local: performance title: 综述 + - sections: + - local: perf_hardware + title: 用于训练的定制硬件 + - local: hpo_train + title: 使用Trainer API 进行超参数搜索 + title: 高效训练技术 - local: big_models title: 
实例化大模型 - local: debugging diff --git a/docs/source/zh/hpo_train.md b/docs/source/zh/hpo_train.md new file mode 100644 index 000000000000..182940c359bb --- /dev/null +++ b/docs/source/zh/hpo_train.md @@ -0,0 +1,139 @@ + + +# 使用Trainer API进行超参数搜索 + +🤗 Transformers库提供了一个优化过的[`Trainer`]类,用于训练🤗 Transformers模型,相比于手动编写自己的训练循环,这更容易开始训练。[`Trainer`]提供了超参数搜索的API。本文档展示了如何在示例中启用它。 + + +## 超参数搜索后端 + +[`Trainer`] 目前支持四种超参数搜索后端:[optuna](https://optuna.org/),[sigopt](https://sigopt.com/),[raytune](https://docs.ray.io/en/latest/tune/index.html),[wandb](https://wandb.ai/site/sweeps) + +在使用它们之前,您应该先安装它们作为超参数搜索后端。 + +```bash +pip install optuna/sigopt/wandb/ray[tune] +``` + +## 如何在示例中启用超参数搜索 + +定义超参数搜索空间,不同的后端需要不同的格式。 + +对于sigopt,请参阅sigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter),它类似于以下内容: + +```py +>>> def sigopt_hp_space(trial): +... return [ +... {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"}, +... { +... "categorical_values": ["16", "32", "64", "128"], +... "name": "per_device_train_batch_size", +... "type": "categorical", +... }, +... ] +``` + +对于optuna,请参阅optuna [object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py),它类似于以下内容: + +```py +>>> def optuna_hp_space(trial): +... return { +... "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), +... "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]), +... } +``` + +Optuna提供了多目标HPO。您可以在`hyperparameter_search`中传递`direction`参数,并定义自己的`compute_objective`以返回多个目标值。在`hyperparameter_search`中将返回Pareto Front(`List[BestRun]`),您应该参考[test_trainer](https://github.com/huggingface/transformers/blob/main/tests/trainer/test_trainer.py)中的测试用例`TrainerHyperParameterMultiObjectOptunaIntegrationTest`。它类似于以下内容: + +```py +>>> best_trials = trainer.hyperparameter_search( +... direction=["minimize", "maximize"], +... backend="optuna", +... hp_space=optuna_hp_space, +... n_trials=20, +... compute_objective=compute_objective, +... ) +``` + +对于raytune,可以参考raytune的[object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html),它类似于以下内容: + +```py +>>> def ray_hp_space(trial): +... return { +... "learning_rate": tune.loguniform(1e-6, 1e-4), +... "per_device_train_batch_size": tune.choice([16, 32, 64, 128]), +... } +``` + +对于wandb,可以参考wandb的[object_parameter](https://docs.wandb.ai/guides/sweeps/configuration),它类似于以下内容: + +```py +>>> def wandb_hp_space(trial): +... return { +... "method": "random", +... "metric": {"name": "objective", "goal": "minimize"}, +... "parameters": { +... "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, +... "per_device_train_batch_size": {"values": [16, 32, 64, 128]}, +... }, +... } +``` + +定义一个`model_init`函数并将其传递给[Trainer],作为示例: + +```py +>>> def model_init(trial): +... return AutoModelForSequenceClassification.from_pretrained( +... model_args.model_name_or_path, +... from_tf=bool(".ckpt" in model_args.model_name_or_path), +... config=config, +... cache_dir=model_args.cache_dir, +... revision=model_args.model_revision, +... use_auth_token=True if model_args.use_auth_token else None, +... ) +``` + +使用你的`model_init`函数、训练参数、训练和测试数据集以及评估函数创建一个[`Trainer`]。 + +```py +>>> trainer = Trainer( +... model=None, +... args=training_args, +... train_dataset=small_train_dataset, +... eval_dataset=small_eval_dataset, +... compute_metrics=compute_metrics, +... 
tokenizer=tokenizer, +... model_init=model_init, +... data_collator=data_collator, +... ) +``` + +调用超参数搜索,获取最佳试验参数,后端可以是`"optuna"`/`"sigopt"`/`"wandb"`/`"ray"`。方向可以是`"minimize"`或`"maximize"`,表示是否优化更大或更低的目标。 + +您可以定义自己的compute_objective函数,如果没有定义,将调用默认的compute_objective,并将评估指标(如f1)之和作为目标值返回。 + +```py +>>> best_trial = trainer.hyperparameter_search( +... direction="maximize", +... backend="optuna", +... hp_space=optuna_hp_space, +... n_trials=20, +... compute_objective=compute_objective, +... ) +``` + +## 针对DDP微调的超参数搜索 +目前,Optuna和Sigopt已启用针对DDP的超参数搜索。只有rank-zero进程会进行超参数搜索并将参数传递给其他进程。 \ No newline at end of file diff --git a/docs/source/zh/perf_hardware.md b/docs/source/zh/perf_hardware.md new file mode 100644 index 000000000000..f49e9a582963 --- /dev/null +++ b/docs/source/zh/perf_hardware.md @@ -0,0 +1,156 @@ + + + +# 训练用的定制硬件 + +您用来运行模型训练和推断的硬件可能会对性能产生重大影响。要深入了解 GPU,务必查看 Tim Dettmer 出色的[博文](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/)。 + +让我们来看一些关于 GPU 配置的实用建议。 + +## GPU +当你训练更大的模型时,基本上有三种选择: + +- 更大的 GPU +- 更多的 GPU +- 更多的 CPU 和 NVMe(通过[DeepSpeed-Infinity](main_classes/deepspeed#nvme-support)实现) + +让我们从只有一块GPU的情况开始。 + +### 供电和散热 + +如果您购买了昂贵的高端GPU,请确保为其提供正确的供电和足够的散热。 + +**供电**: + +一些高端消费者级GPU卡具有2个,有时甚至3个PCI-E-8针电源插口。请确保将与插口数量相同的独立12V PCI-E-8针线缆插入卡中。不要使用同一根线缆两端的2个分叉(也称为pigtail cable)。也就是说,如果您的GPU上有2个插口,您需要使用2条PCI-E-8针线缆连接电源和卡,而不是使用一条末端有2个PCI-E-8针连接器的线缆!否则,您无法充分发挥卡的性能。 + +每个PCI-E-8针电源线缆需要插入电源侧的12V轨上,并且可以提供最多150W的功率。 + +其他一些卡可能使用PCI-E-12针连接器,这些连接器可以提供最多500-600W的功率。 + +低端卡可能使用6针连接器,这些连接器可提供最多75W的功率。 + +此外,您需要选择具有稳定电压的高端电源。一些质量较低的电源可能无法为卡提供所需的稳定电压以发挥其最大性能。 + +当然,电源还需要有足够的未使用的瓦数来为卡供电。 + +**散热**: + +当GPU过热时,它将开始降频,不会提供完整的性能。如果温度过高,可能会缩短GPU的使用寿命。 + +当GPU负载很重时,很难确定最佳温度是多少,但任何低于+80度的温度都是好的,越低越好,也许在70-75度之间是一个非常好的范围。降频可能从大约84-90度开始。但是除了降频外,持续的高温可能会缩短GPU的使用寿命。 + +接下来让我们看一下拥有多个GPU时最重要的方面之一:连接。 + +### 多GPU连接 + +如果您使用多个GPU,则卡之间的互连方式可能会对总训练时间产生巨大影响。如果GPU位于同一物理节点上,您可以运行以下代码: + +``` +nvidia-smi topo -m +``` + +它将告诉您GPU如何互连。在具有双GPU并通过NVLink连接的机器上,您最有可能看到类似以下内容: + +``` + GPU0 GPU1 CPU Affinity NUMA Affinity +GPU0 X NV2 0-23 N/A +GPU1 NV2 X 0-23 N/A +``` + +在不同的机器上,如果没有NVLink,我们可能会看到: +``` + GPU0 GPU1 CPU Affinity NUMA Affinity +GPU0 X PHB 0-11 N/A +GPU1 PHB X 0-11 N/A +``` + +这个报告包括了这个输出: + +``` + X = Self + SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI) + NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node + PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU) + PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge) + PIX = Connection traversing at most a single PCIe bridge + NV# = Connection traversing a bonded set of # NVLinks +``` + +因此,第一个报告`NV2`告诉我们GPU通过2个NVLink互连,而第二个报告`PHB`展示了典型的消费者级PCIe+Bridge设置。 + +检查你的设置中具有哪种连接类型。其中一些会使卡之间的通信更快(例如NVLink),而其他则较慢(例如PHB)。 + +根据使用的扩展解决方案的类型,连接速度可能会产生重大或较小的影响。如果GPU很少需要同步,就像在DDP中一样,那么较慢的连接的影响将不那么显著。如果GPU经常需要相互发送消息,就像在ZeRO-DP中一样,那么更快的连接对于实现更快的训练变得非常重要。 + + +#### NVlink + +[NVLink](https://en.wikipedia.org/wiki/NVLink)是由Nvidia开发的一种基于线缆的串行多通道近程通信链接。 + +每个新一代提供更快的带宽,例如在[Nvidia Ampere GA102 GPU架构](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf)中有这样的引述: + +> Third-Generation NVLink® +> GA102 GPUs utilize NVIDIA’s third-generation NVLink interface, which includes four x4 links, +> with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. 
Four +> links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth +> between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink. +> (Note that 3-Way and 4-Way SLI configurations are not supported.) + +所以,在`nvidia-smi topo -m`输出的`NVX`报告中获取到的更高的`X`值意味着更好的性能。生成的结果将取决于您的GPU架构。 + +让我们比较在小样本wikitext上训练gpt2语言模型的执行结果。 + +结果是: + + +| NVlink | Time | +| ----- | ---: | +| Y | 101s | +| N | 131s | + + +可以看到,NVLink使训练速度提高了约23%。在第二个基准测试中,我们使用`NCCL_P2P_DISABLE=1`告诉GPU不要使用NVLink。 + +这里是完整的基准测试代码和输出: + +```bash +# DDP w/ NVLink + +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch \ +--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ +--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ +--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 + +{'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} + +# DDP w/o NVLink + +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 python -m torch.distributed.launch \ +--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ +--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train +--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 + +{'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69} +``` + +硬件: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`) +软件: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0` From fe472b1db44335213f9d6d99f5ff1f35a86ad424 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 14 Nov 2023 18:21:50 +0000 Subject: [PATCH 170/268] Generate: fix `ExponentialDecayLengthPenalty` doctest (#27485) fix exponential doctest --- src/transformers/generation/logits_process.py | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/transformers/generation/logits_process.py b/src/transformers/generation/logits_process.py index 60d50a7efa27..3d1801b24804 100644 --- a/src/transformers/generation/logits_process.py +++ b/src/transformers/generation/logits_process.py @@ -1327,22 +1327,26 @@ class ExponentialDecayLengthPenalty(LogitsProcessor): ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed - >>> set_seed(1) >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> text = "Just wanted to let you know, I" >>> inputs = tokenizer(text, return_tensors="pt") - >>> # Generate sequences without exponential penalty. We want short sentences, so we limit max_length=30 - >>> # see that the answer tends to end abruptly + >>> # Let's consider that we want short sentences, so we limit `max_length=30`. However, we observe that the answer + >>> # tends to end abruptly. + >>> set_seed(1) >>> outputs = model.generate(**inputs, do_sample=True, temperature=0.9, max_length=30, pad_token_id=50256) >>> print(tokenizer.batch_decode(outputs)[0]) - Just wanted to let you know, I'm not even a lawyer. I'm a man. I have no real knowledge of politics. 
I'm a - - >>> # Generate sequences with exponential penalty, we add the exponential_decay_length_penalty=(start_index, decay_factor) - >>> # We see that instead of cutting at max_tokens, the output comes to an end before (at 25 tokens) and with more meaning - >>> # What happens is that starting from `start_index` the EOS token score will be increased by decay_factor exponentially + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was + published in 2010. Although + + >>> # To promote the appearance of the EOS token at the right time, we add the `exponential_decay_length_penalty = + >>> # (start_index, decay_factor)`. Instead of cutting at max_tokens, the output comes to an end before and usually + >>> # with more meaning. What happens is that starting from `start_index` the EOS token score will be increased + >>> # by `decay_factor` exponentially. However, if you set a high decay factor, you may also end up with abruptly + >>> # ending sequences. + >>> set_seed(1) >>> outputs = model.generate( ... **inputs, ... do_sample=True, @@ -1352,19 +1356,22 @@ class ExponentialDecayLengthPenalty(LogitsProcessor): ... exponential_decay_length_penalty=(15, 1.6), ... ) >>> print(tokenizer.batch_decode(outputs)[0]) - Just wanted to let you know, I've got a very cool t-shirt educating people on how to use the Internet<|endoftext|> + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network + which<|endoftext|> - >>> # Generate sequences with smaller decay_factor, still improving the hard cutoff mid-sentence + >>> # With a small decay factor, you will have a higher chance of getting a meaningful sequence. + >>> set_seed(1) >>> outputs = model.generate( ... **inputs, ... do_sample=True, ... temperature=0.9, ... max_length=30, ... pad_token_id=50256, - ... exponential_decay_length_penalty=(15, 1.05), + ... exponential_decay_length_penalty=(15, 1.01), ... ) >>> print(tokenizer.batch_decode(outputs)[0]) - Just wanted to let you know, I've been working on it for about 6 months and now it's in Alpha.<|endoftext|> + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was + published in 2010.<|endoftext|> ``` """ From 5468ab355567063ede0cc6c685a8feed4bfb2d1b Mon Sep 17 00:00:00 2001 From: Matt Date: Tue, 14 Nov 2023 18:26:13 +0000 Subject: [PATCH 171/268] Update and reorder docs for chat templates (#27443) * Update and reorder docs for chat templates * Fix Mistral docstring * Add section link and small fixes * Remove unneeded line in Mistral example * Add comment on saving memory * Fix generation prompts linl * Fix code block languages --- docs/source/en/chat_templating.md | 286 ++++++++++++++++++++---------- 1 file changed, 196 insertions(+), 90 deletions(-) diff --git a/docs/source/en/chat_templating.md b/docs/source/en/chat_templating.md index 115c9f51677d..82bdf591ae5f 100644 --- a/docs/source/en/chat_templating.md +++ b/docs/source/en/chat_templating.md @@ -20,25 +20,11 @@ rendered properly in your Markdown viewer. An increasingly common use case for LLMs is **chat**. In a chat context, rather than continuing a single string of text (as is the case with a standard language model), the model instead continues a conversation that consists -of one or more **messages**, each of which includes a **role** as well as message text. +of one or more **messages**, each of which includes a **role**, like "user" or "assistant", as well as message text. 
-Most commonly, these roles are "user" for messages sent by the user, and "assistant" for messages sent by the model. -Some models also support a "system" role. System messages are usually sent at the beginning of the conversation -and include directives about how the model should behave in the subsequent chat. - -All language models, including models fine-tuned for chat, operate on linear sequences of tokens and do not intrinsically -have special handling for roles. This means that role information is usually injected by adding control tokens -between messages, to indicate both the message boundary and the relevant roles. - -Unfortunately, there isn't (yet!) a standard for which tokens to use, and so different models have been trained -with wildly different formatting and control tokens for chat. This can be a real problem for users - if you use the -wrong format, then the model will be confused by your input, and your performance will be a lot worse than it should be. -This is the problem that **chat templates** aim to resolve. - -Chat conversations are typically represented as a list of dictionaries, where each dictionary contains `role` -and `content` keys, and represents a single chat message. Chat templates are strings containing a Jinja template that -specifies how to format a conversation for a given model into a single tokenizable sequence. By storing this information -with the tokenizer, we can ensure that models get input data in the format they expect. +Much like tokenization, different models expect very different input formats for chat. This is the reason we added +**chat templates** as a feature. Chat templates are part of the tokenizer. They specify how to convert conversations, +represented as lists of messages, into a single tokenizable string in the format that the model expects. Let's make this concrete with a quick example using the `BlenderBot` model. BlenderBot has an extremely simple default template, which mostly just adds whitespace between rounds of dialogue: @@ -48,9 +34,9 @@ template, which mostly just adds whitespace between rounds of dialogue: >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> chat = [ -... {"role": "user", "content": "Hello, how are you?"}, -... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, -... {"role": "user", "content": "I'd like to show off how chat templating works!"}, +... {"role": "user", "content": "Hello, how are you?"}, +... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, +... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) @@ -59,28 +45,196 @@ template, which mostly just adds whitespace between rounds of dialogue: Notice how the entire chat is condensed into a single string. If we use `tokenize=True`, which is the default setting, that string will also be tokenized for us. To see a more complex template in action, though, let's use the -`meta-llama/Llama-2-7b-chat-hf` model. Note that this model has gated access, so you will have to -[request access on the repo](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) if you want to run this code yourself: +`mistralai/Mistral-7B-Instruct-v0.1` model. 
```python ->> from transformers import AutoTokenizer ->> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") +>>> from transformers import AutoTokenizer +>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1") ->> chat = [ +>>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] ->> tokenizer.use_default_system_prompt = False ->> tokenizer.apply_chat_template(chat, tokenize=False) -"[INST] Hello, how are you? [/INST] I'm doing great. How can I help you today? [INST] I'd like to show off how chat templating works! [/INST]" +>>> tokenizer.apply_chat_template(chat, tokenize=False) +"[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today? [INST] I'd like to show off how chat templating works! [/INST]" ``` Note that this time, the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of -user messages (but not assistant messages!) +user messages (but not assistant messages!). Mistral-instruct was trained with these tokens, but BlenderBot was not. + +## How do I use chat templates? + +As you can see in the example above, chat templates are easy to use. Simply build a list of messages, with `role` +and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_template`] method. Once you do that, +you'll get output that's ready to go! When using chat templates as input for model generation, it's also a good idea +to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts). + +Here's an example of preparing input for `model.generate()`, using the `Zephyr` assistant model: + +```python +from transformers import AutoModelForCausalLM, AutoTokenizer + +checkpoint = "HuggingFaceH4/zephyr-7b-beta" +tokenizer = AutoTokenizer.from_pretrained(checkpoint) +model = AutoModelForCausalLM.from_pretrained(checkpoint) # You may want to use bfloat16 and/or move to GPU here + +messages = [ + { + "role": "system", + "content": "You are a friendly chatbot who always responds in the style of a pirate", + }, + {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, + ] +tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") +print(tokenizer.decode(tokenized_chat[0])) +``` +This will yield a string in the input format that Zephyr expects. +```text +<|system|> +You are a friendly chatbot who always responds in the style of a pirate +<|user|> +How many helicopters can a human eat in one sitting? +<|assistant|> +``` + +Now that our input is formatted correctly for Zephyr, we can use the model to generate a response to the user's question: + +```python +outputs = model.generate(tokenized_chat, max_new_tokens=128) +print(tokenizer.decode(outputs[0])) +``` + +This will yield: + +```text +<|system|> +You are a friendly chatbot who always responds in the style of a pirate +<|user|> +How many helicopters can a human eat in one sitting? +<|assistant|> +Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. 
+``` + +Arr, 'twas easy after all! + +## Is there an automated pipeline for chat? + +Yes, there is: [`ConversationalPipeline`]. This pipeline is designed to make it easy to use chat models. Let's try +the `Zephyr` example again, but this time using the pipeline: + +```python +from transformers import pipeline + +pipe = pipeline("conversational", "HuggingFaceH4/zephyr-7b-beta") +messages = [ + { + "role": "system", + "content": "You are a friendly chatbot who always responds in the style of a pirate", + }, + {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, +] +print(pipe(messages)) +``` + +```text +Conversation id: 76d886a0-74bd-454e-9804-0467041a63dc +system: You are a friendly chatbot who always responds in the style of a pirate +user: How many helicopters can a human eat in one sitting? +assistant: Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. +``` + +[`ConversationalPipeline`] will take care of all the details of tokenization and calling `apply_chat_template` for you - +once the model has a chat template, all you need to do is initialize the pipeline and pass it the list of messages! + +## What are "generation prompts"? + +You may have noticed that the `apply_chat_template` method has an `add_generation_prompt` argument. This argument tells +the template to add tokens that indicate the start of a bot response. For example, consider the following chat: + +```python +messages = [ + {"role": "user", "content": "Hi there!"}, + {"role": "assistant", "content": "Nice to meet you!"}, + {"role": "user", "content": "Can I ask a question?"} +] +``` + +Here's what this will look like without a generation prompt, using the ChatML template we saw in the Zephyr example: + +```python +tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) +"""<|im_start|>user +Hi there!<|im_end|> +<|im_start|>assistant +Nice to meet you!<|im_end|> +<|im_start|>user +Can I ask a question?<|im_end|> +""" +``` + +And here's what it looks like **with** a generation prompt: + +```python +tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) +"""<|im_start|>user +Hi there!<|im_end|> +<|im_start|>assistant +Nice to meet you!<|im_end|> +<|im_start|>user +Can I ask a question?<|im_end|> +<|im_start|>assistant +""" +``` + +Note that this time, we've added the tokens that indicate the start of a bot response. This ensures that when the model +generates text it will write a bot response instead of doing something unexpected, like continuing the user's +message. Remember, chat models are still just language models - they're trained to continue text, and chat is just a +special kind of text to them! You need to guide them with the appropriate control tokens so they know what they're +supposed to be doing. + +Not all models require generation prompts. Some models, like BlenderBot and LLaMA, don't have any +special tokens before bot responses. In these cases, the `add_generation_prompt` argument will have no effect. The exact +effect that `add_generation_prompt` has will depend on the template being used. + +## Can I use chat templates in training? + +Yes! 
We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you +can simply continue like any other language model training task. When training, you should usually set +`add_generation_prompt=False`, because the added tokens to prompt an assistant response will not be helpful during +training. Let's see an example: + +```python +from transformers import AutoTokenizer +from datasets import Dataset + +tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") + +chat1 = [ + {"role": "user", "content": "Which is bigger, the moon or the sun?"}, + {"role": "assistant", "content": "The sun."} +] +chat2 = [ + {"role": "user", "content": "Which is bigger, a virus or a bacterium?"}, + {"role": "assistant", "content": "A bacterium."} +] + +dataset = Dataset.from_dict({"chat": [chat1, chat2]}) +dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}) +print(dataset['formatted_chat'][0]) +``` +And we get: +```text +<|user|> +Which is bigger, the moon or the sun? +<|assistant|> +The sun. +``` -## How do chat templates work? +From here, just continue training like you would with a standard language modelling task, using the `formatted_chat` column. + +## Advanced: How do chat templates work? The chat template for a model is stored on the `tokenizer.chat_template` attribute. If no chat template is set, the default template for that model class is used instead. Let's take a look at the template for `BlenderBot`: @@ -154,7 +308,9 @@ Hopefully if you stare at this for a little bit you can see what this template i on the "role" of each message, which represents who sent it. User, assistant and system messages are clearly distinguishable to the model because of the tokens they're wrapped in. -## How do I create a chat template? +## Advanced: Adding and editing chat templates + +### How do I create a chat template? Simple, just write a jinja template and set `tokenizer.chat_template`. You may find it easier to start with an existing template from another model and simply edit it for your needs! For example, we could take the LLaMA template @@ -187,7 +343,7 @@ tokenizer.push_to_hub("model_name") # Upload your new template to the Hub! The method [`~PreTrainedTokenizer.apply_chat_template`] which uses your chat template is called by the [`ConversationalPipeline`] class, so once you set the correct chat template, your model will automatically become compatible with [`ConversationalPipeline`]. -## What are "default" templates? +### What are "default" templates? Before the introduction of chat templates, chat handling was hardcoded at the model class level. For backwards compatibility, we have retained this class-specific handling as default templates, also set at the class level. If a @@ -200,7 +356,7 @@ the class template is appropriate for your model, we strongly recommend overridi setting the `chat_template` attribute explicitly to make it clear to users that your model has been correctly configured for chat, and to future-proof in case the default templates are ever altered or deprecated. -## What template should I use? +### What template should I use? 
When setting the template for a model that's already been trained for chat, you should ensure that the template exactly matches the message formatting that the model saw during training, or else you will probably experience @@ -229,7 +385,7 @@ tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set a This template wraps each message in `<|im_start|>` and `<|im_end|>` tokens, and simply writes the role as a string, which allows for flexibility in the roles you train with. The output looks like this: -``` +```text <|im_start|>system You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|> <|im_start|>user @@ -242,62 +398,12 @@ The "user", "system" and "assistant" roles are the standard for chat, and we rec particularly if you want your model to operate well with [`ConversationalPipeline`]. However, you are not limited to these roles - templating is extremely flexible, and any string can be a role. -## What are "generation prompts"? - -You may notice that the `apply_chat_template` method has an `add_generation_prompt` argument. This argument tells -the template to add tokens that indicate the start of a bot response. For example, consider the following chat: - -```python -messages = [ - {"role": "user", "content": "Hi there!"}, - {"role": "assistant", "content": "Nice to meet you!"}, - {"role": "user", "content": "Can I ask a question?"} -] -``` - -Here's what this will look like without a generation prompt, using the ChatML template we described above: - -```python ->> tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) -"""<|im_start|>user -Hi there!<|im_end|> -<|im_start|>assistant -Nice to meet you!<|im_end|> -<|im_start|>user -Can I ask a question?<|im_end|> -""" -``` - -And here's what it looks like **with** a generation prompt: - -```python ->> tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) -"""<|im_start|>user -Hi there!<|im_end|> -<|im_start|>assistant -Nice to meet you!<|im_end|> -<|im_start|>user -Can I ask a question?<|im_end|> -<|im_start|>assistant -""" -``` - -Note that this time, we've added the tokens that indicate the start of a bot response. This ensures that when the model -generates text it will write a bot response instead of doing something unexpected, like continuing the user's -message. Remember, chat models are still just language models - they're trained to continue text, and chat is just a -special kind of text to them! You need to guide them with the appropriate control tokens so they know what they're -supposed to be doing. - -Not all models require generation prompts. Some models, like BlenderBot and LLaMA, don't have any -special tokens before bot responses. In these cases, the `add_generation_prompt` argument will have no effect. The exact -effect that `add_generation_prompt` has will depend on the template being used. - -## I want to use chat templates! How should I get started? +### I want to add some chat templates! How should I get started? If you have any chat models, you should set their `tokenizer.chat_template` attribute and test it using -[`~PreTrainedTokenizer.apply_chat_template`]. 
This applies even if you're not the model owner - if you're using a model -with an empty chat template, or one that's still using the default class template, please open a [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) to -the model repository so that this attribute can be set properly! +[`~PreTrainedTokenizer.apply_chat_template`], then push the updated tokenizer to the Hub. This applies even if you're +not the model owner - if you're using a model with an empty chat template, or one that's still using the default class +template, please open a [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) to the model repository so that this attribute can be set properly! Once the attribute is set, that's it, you're done! `tokenizer.apply_chat_template` will now work correctly for that model, which means it is also automatically supported in places like `ConversationalPipeline`! @@ -306,7 +412,7 @@ By ensuring that models have this attribute, we can make sure that the whole com open-source models. Formatting mismatches have been haunting the field and silently harming performance for too long - it's time to put an end to them! -## Template writing tips +## Advanced: Template writing tips If you're unfamiliar with Jinja, we generally find that the easiest way to write a chat template is to first write a short Python script that formats messages the way you want, and then convert that script into a template. From a53a0c5159a77f19d7e1d27063aa5ebd2a335a26 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 14 Nov 2023 18:40:57 +0000 Subject: [PATCH 172/268] Generate: `GenerationConfig.from_pretrained` can return unused kwargs (#27488) --- src/transformers/generation/configuration_utils.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py index 04aa48364f6a..c53738f14abe 100644 --- a/src/transformers/generation/configuration_utils.py +++ b/src/transformers/generation/configuration_utils.py @@ -750,9 +750,14 @@ def from_pretrained( else: logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}") - config = cls.from_dict(config_dict, **kwargs) - config._original_object_hash = hash(config) # Hash to detect whether the instance was modified - return config + if kwargs.get("return_unused_kwargs") is True: + config, unused_kwargs = cls.from_dict(config_dict, **kwargs) + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config, unused_kwargs + else: + config = cls.from_dict(config_dict, **kwargs) + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config @classmethod def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): From 250032e974359ba8df38c7e5530020050240d305 Mon Sep 17 00:00:00 2001 From: Costa Huang Date: Tue, 14 Nov 2023 14:09:21 -0500 Subject: [PATCH 173/268] Minor type annotation fix (#27276) * Minor type annotation fix * Trigger Build --- src/transformers/training_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 9efe42c1353d..116ec6f15756 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1144,7 +1144,7 @@ class TrainingArguments: "help": "If True, use gradient checkpointing to save memory at the 
expense of slower backward pass." }, ) - gradient_checkpointing_kwargs: dict = field( + gradient_checkpointing_kwargs: Optional[dict] = field( default=None, metadata={ "help": "Gradient checkpointing key word arguments such as `use_reentrant`. Will be passed to `torch.utils.checkpoint.checkpoint` through `model.gradient_checkpointing_enable`." From 067c4a310dd36d0472d4a587145e94d20bf64964 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Tue, 14 Nov 2023 14:54:44 -0500 Subject: [PATCH 174/268] Have seq2seq just use gather (#27025) * Have seq2seq just use gather * Change * Reset after * Make slow * Apply suggestions from code review Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Clean * Simplify and just use gather * Update tests/trainer/test_trainer_seq2seq.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * gather always for seq2seq --------- Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/trainer.py | 12 ++++-- src/transformers/trainer_seq2seq.py | 4 +- tests/trainer/test_trainer_seq2seq.py | 61 ++++++++++++++++++++++++++- 3 files changed, 70 insertions(+), 7 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 40159d816348..51c60ce69020 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -3208,13 +3208,13 @@ def evaluation_loop( # Update containers on host if loss is not None: - losses = self.accelerator.gather_for_metrics((loss.repeat(batch_size))) + losses = self.gather_function((loss.repeat(batch_size))) losses_host = losses if losses_host is None else nested_concat(losses_host, losses, padding_index=-100) if labels is not None: labels = self.accelerator.pad_across_processes(labels, dim=1, pad_index=-100) if inputs_decode is not None: inputs_decode = self.accelerator.pad_across_processes(inputs_decode, dim=1, pad_index=-100) - inputs_decode = self.accelerator.gather_for_metrics((inputs_decode)) + inputs_decode = self.gather_function((inputs_decode)) inputs_host = ( inputs_decode if inputs_host is None @@ -3224,11 +3224,11 @@ def evaluation_loop( logits = self.accelerator.pad_across_processes(logits, dim=1, pad_index=-100) if self.preprocess_logits_for_metrics is not None: logits = self.preprocess_logits_for_metrics(logits, labels) - logits = self.accelerator.gather_for_metrics((logits)) + logits = self.gather_function((logits)) preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: - labels = self.accelerator.gather_for_metrics((labels)) + labels = self.gather_function((labels)) labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) @@ -3261,6 +3261,8 @@ def evaluation_loop( # Set back to None to begin a new accumulation losses_host, preds_host, inputs_host, labels_host = None, None, None, None + # After all calls to `.gather_function`, reset to `gather_for_metrics`: + self.gather_function = self.accelerator.gather_for_metrics if args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") @@ -3930,6 +3932,8 @@ def create_accelerator_and_postprocess(self): deepspeed_plugin=self.args.deepspeed_plugin, gradient_accumulation_plugin=gradient_accumulation_plugin, ) + # some Trainer classes need 
to use `gather` instead of `gather_for_metrics`, thus we store a flag + self.gather_function = self.accelerator.gather_for_metrics # deepspeed and accelerate flags covering both trainer args and accelerate launcher self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py index 13d407bec4a1..9f6bf1324556 100644 --- a/src/transformers/trainer_seq2seq.py +++ b/src/transformers/trainer_seq2seq.py @@ -160,8 +160,9 @@ def evaluate( gen_kwargs["max_length"] = self.args.generation_max_length if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: gen_kwargs["num_beams"] = self.args.generation_num_beams + # We don't want to drop samples in general + self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs - return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) def predict( @@ -223,6 +224,7 @@ def predict( gen_kwargs["max_length"] = self.args.generation_max_length if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: gen_kwargs["num_beams"] = self.args.generation_num_beams + self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py index 918c22155832..3f875e6d3657 100644 --- a/tests/trainer/test_trainer_seq2seq.py +++ b/tests/trainer/test_trainer_seq2seq.py @@ -12,8 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
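+# Note: the extra helpers imported below (AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, GenerationConfig,
+# T5Tokenizer) support the `test_return_sequences` test further down, which checks that evaluation keeps
+# `dataset_len * num_return_sequences` predictions when generating multiple sequences per sample.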
- -from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments +from transformers import ( + AutoModelForSeq2SeqLM, + BertTokenizer, + DataCollatorForSeq2Seq, + EncoderDecoderModel, + GenerationConfig, + Seq2SeqTrainer, + Seq2SeqTrainingArguments, + T5Tokenizer, +) from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available @@ -124,3 +132,52 @@ def _compute_metrics(pred): # start training trainer.train() + + @slow + @require_torch + def test_return_sequences(self): + # Tests that the number of generated sequences is correct when num_return_sequences > 1 + # and essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics` + INPUT_COLUMN = "question" + TARGET_COLUMN = "answer" + MAX_INPUT_LENGTH = 256 + MAX_TARGET_LENGTH = 256 + + dataset = datasets.load_dataset("gsm8k", "main", split="train[:38]") + model = AutoModelForSeq2SeqLM.from_pretrained("t5-small") + tokenizer = T5Tokenizer.from_pretrained("t5-small") + data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest") + gen_config = GenerationConfig.from_pretrained( + "t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5 + ) + + training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True) + + trainer = Seq2SeqTrainer( + model=model, + args=training_args, + tokenizer=tokenizer, + data_collator=data_collator, + compute_metrics=lambda x: {"samples": x[0].shape[0]}, + ) + + def prepare_data(examples): + # Remove pairs where at least one record is none + inputs = examples[INPUT_COLUMN] + targets = examples[TARGET_COLUMN] + + model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True) + labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True) + model_inputs["labels"] = labels["input_ids"] + + return model_inputs + + prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN]) + dataset_len = len(prepared_dataset) # 38 + + for num_return_sequences in range(3, 0, -1): + gen_config.num_return_sequences = num_return_sequences + metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config) + assert ( + metrics["eval_samples"] == dataset_len * num_return_sequences + ), f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}" From 303c1d69f3efa44b565bc5618ff819daf4a0e80f Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Tue, 14 Nov 2023 20:05:54 +0000 Subject: [PATCH 175/268] Update processor mapping for hub snippets (#27477) --- utils/update_metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/update_metadata.py b/utils/update_metadata.py index 9a233a082ff7..2104d53b6e6f 100644 --- a/utils/update_metadata.py +++ b/utils/update_metadata.py @@ -197,9 +197,9 @@ def get_frameworks_table() -> pd.DataFrame: processors[t] = "AutoProcessor" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: processors[t] = "AutoTokenizer" - elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: - processors[t] = "AutoFeatureExtractor" elif t in transformers_module.models.auto.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES: + processors[t] = "AutoImageProcessor" + elif t in 
transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: processors[t] = "AutoFeatureExtractor" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. From 2fc33ebead50383f7707b17f0e2a178d86347d10 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Tue, 14 Nov 2023 15:31:04 -0500 Subject: [PATCH 176/268] Track the number of tokens seen to metrics (#27274) * Add tokens seen * Address comments, add to TrainingArgs * Update log * Apply suggestions from code review Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Use self.args * Fix docstring Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/trainer.py | 13 +++++++++++++ src/transformers/trainer_callback.py | 3 +++ src/transformers/training_args.py | 13 +++++++++++++ 3 files changed, 29 insertions(+) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 51c60ce69020..25100dad69bc 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1838,6 +1838,17 @@ def _inner_training_loop( step = -1 for step, inputs in enumerate(epoch_iterator): total_batched_samples += 1 + + if self.args.include_num_input_tokens_seen: + main_input_name = getattr(self.model, "main_input_name", "input_ids") + if main_input_name not in inputs: + logger.warning( + "Tried to track the number of tokens seen, however the current model is " + "not configured properly to know what item is the input. To fix this, add " + "a `main_input_name` attribute to the model class you are using." + ) + else: + self.state.num_input_tokens_seen += self.accelerator.gather(inputs[main_input_name]).numel() if rng_to_sync: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False @@ -2640,6 +2651,8 @@ def log(self, logs: Dict[str, float]) -> None: """ if self.state.epoch is not None: logs["epoch"] = round(self.state.epoch, 2) + if self.args.include_num_input_tokens_seen: + logs["num_input_tokens_seen"] = self.state.num_input_tokens_seen output = {**logs, **{"step": self.state.global_step}} self.state.log_history.append(output) diff --git a/src/transformers/trainer_callback.py b/src/transformers/trainer_callback.py index 298b473850f4..5e0c75a1b841 100644 --- a/src/transformers/trainer_callback.py +++ b/src/transformers/trainer_callback.py @@ -59,6 +59,8 @@ class TrainerState: Run an evaluation every X steps. save_steps (`int`, *optional*, defaults to 500): Save checkpoint every X updates steps. + num_input_tokens_seen (`int`, *optional*, defaults to 0): + The number of tokens seen during training (number of input tokens, not the number of prediction tokens). total_flos (`float`, *optional*, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow). @@ -87,6 +89,7 @@ class TrainerState: eval_steps: int = 500 save_steps: int = 500 num_train_epochs: int = 0 + num_input_tokens_seen: int = 0 total_flos: float = 0 log_history: List[Dict[str, float]] = None best_metric: Optional[float] = None diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 116ec6f15756..b368d86e0ed8 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -637,6 +637,12 @@ class TrainingArguments: This will iterate over the entire training dataloader once beforehand, and will slow down the entire process. 
+ + include_num_input_tokens_seen (`bool`, *optional*): + Whether or not to track the number of input tokens seen throughout training. + + May be slower in distributed training as gather operations must be called. + neftune_noise_alpha (`Optional[float]`): If not `None`, this will activate NEFTune noise embeddings. This can drastically improve model performance for instruction fine-tuning. Check out the [original paper](https://arxiv.org/abs/2310.05914) and the @@ -1258,6 +1264,13 @@ class TrainingArguments: metadata={"help": "If set to `True`, the speed metrics will include `tgs` (tokens per second per device)."}, ) + include_num_input_tokens_seen: Optional[bool] = field( + default=False, + metadata={ + "help": "If set to `True`, will track the number of input tokens seen throughout training. (May be slower in distributed training)" + }, + ) + neftune_noise_alpha: float = field( default=None, metadata={ From 186c0775132fecdc571f3996f75c7e1377d5fb9b Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Wed, 15 Nov 2023 08:39:29 +0100 Subject: [PATCH 177/268] [`CI-test_torch`] skip test_tf_from_pt_safetensors and `test_assisted_decoding_sample` (#27508) * skip 4 tests * nits * style * wow it's not my day * skip new failing tests * style * skip for NLLB MoE as well --- tests/models/nllb_moe/test_modeling_nllb_moe.py | 4 ++++ tests/models/speech_to_text/test_modeling_speech_to_text.py | 4 ++++ .../switch_transformers/test_modeling_switch_transformers.py | 4 ++++ tests/models/t5/test_modeling_t5.py | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/tests/models/nllb_moe/test_modeling_nllb_moe.py b/tests/models/nllb_moe/test_modeling_nllb_moe.py index 0ba66ff6b338..c3bf17ed8e69 100644 --- a/tests/models/nllb_moe/test_modeling_nllb_moe.py +++ b/tests/models/nllb_moe/test_modeling_nllb_moe.py @@ -348,6 +348,10 @@ def test_get_loss(self): self.assertIsNotNone(model(**input_dict)["encoder_router_logits"][1]) self.assertIsNotNone(model(**input_dict)["decoder_router_logits"][0]) + @unittest.skip("Test does not fail individually but fails on the CI @ArthurZucker looking into it") + def test_assisted_decoding_sample(self): + pass + @require_torch @require_sentencepiece diff --git a/tests/models/speech_to_text/test_modeling_speech_to_text.py b/tests/models/speech_to_text/test_modeling_speech_to_text.py index 2c4fc268e8d9..602a73bacd78 100644 --- a/tests/models/speech_to_text/test_modeling_speech_to_text.py +++ b/tests/models/speech_to_text/test_modeling_speech_to_text.py @@ -759,6 +759,10 @@ def test_pt_tf_model_equivalence(self, allow_missing_keys=True): # Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) + @unittest.skip("Test failing, @RocketNight is looking into it") + def test_tf_from_pt_safetensors(self): + pass + @require_torch @require_torchaudio diff --git a/tests/models/switch_transformers/test_modeling_switch_transformers.py b/tests/models/switch_transformers/test_modeling_switch_transformers.py index 7c2fb88acda4..6a2c3f5525e4 100644 --- a/tests/models/switch_transformers/test_modeling_switch_transformers.py +++ b/tests/models/switch_transformers/test_modeling_switch_transformers.py @@ -726,6 +726,10 @@ def test_generate_with_head_masking(self): def test_disk_offload(self): pass + @unittest.skip("Test does not fail individually but fails on the CI @ArthurZucker looking into it") + def test_assisted_decoding_sample(self): + pass + class 
SwitchTransformersEncoderOnlyModelTester: def __init__( diff --git a/tests/models/t5/test_modeling_t5.py b/tests/models/t5/test_modeling_t5.py index 68b9f45e155b..fe0983047359 100644 --- a/tests/models/t5/test_modeling_t5.py +++ b/tests/models/t5/test_modeling_t5.py @@ -1036,6 +1036,10 @@ def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) + @unittest.skip("Test does not fail individually but fails on the CI @ArthurZucker looking into it") + def test_assisted_decoding_sample(self): + pass + def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) From cc0dc24bc9ecd4199ec102bde676d49588c7bb60 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:33:04 +0100 Subject: [PATCH 178/268] [Fuyu] Add tests (#27001) * Add tests * Add integration test * More improvements * Fix tests * Fix style * Skip gradient checkpointing tests * Update script * Remove scripts * Remove Fuyu from auto mapping * Fix integration test * More improvements * Remove file * Add Fuyu to slow documentation tests * Address comments * Clarify comment --- src/transformers/models/fuyu/modeling_fuyu.py | 78 ++++++++--- tests/models/fuyu/test_modeling_fuyu.py | 131 +++++++++++------- utils/slow_documentation_tests.txt | 3 +- 3 files changed, 139 insertions(+), 73 deletions(-) diff --git a/src/transformers/models/fuyu/modeling_fuyu.py b/src/transformers/models/fuyu/modeling_fuyu.py index 345d0a0e92a5..0d2a121edde2 100644 --- a/src/transformers/models/fuyu/modeling_fuyu.py +++ b/src/transformers/models/fuyu/modeling_fuyu.py @@ -19,10 +19,10 @@ import torch.utils.checkpoint from torch import nn -from ...modeling_outputs import BaseModelOutputWithPast +from ...modeling_outputs import CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...models.auto.modeling_auto import AutoModelForCausalLM -from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_fuyu import FuyuConfig @@ -101,6 +101,11 @@ def _init_weights(self, module): - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. + image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size_ x patch_size x num_channels)`, *optional*): + Image patches to be used as continuous embeddings. The patches are flattened and then projected to the + hidden size of the model. + image_patches_indices (`torch.LongTensor` of shape `(batch_size, num_total_patches + number_of_newline_tokens + number_of_text_tokens, patch_size_ x patch_size x num_channels )`, *optional*): + Indices indicating at which position the image_patches have to be inserted in input_embeds. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. 
@@ -136,17 +141,10 @@ def _init_weights(self, module): @add_start_docstrings( - "The bare Fuyu Model outputting raw hidden-states without any specific head on top.", + "Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.", FUYU_START_DOCSTRING, ) class FuyuForCausalLM(FuyuPreTrainedModel): - """ - Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`FuyuDecoderLayer`] - - Args: - config: FuyuConfig - """ - def __init__(self, config: FuyuConfig): super().__init__(config) self.padding_idx = config.pad_token_id @@ -178,12 +176,14 @@ def gather_continuous_embeddings( embeddings. Args: - word_embeddings: Tensor of word embeddings. Shape: [b, s, h] - continuous_embeddings: - Tensor of continuous embeddings. The length of the list is the batch size. Each entry is - shape [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative - indices in image_patch_input_indices for that batch element. - image_patch_input_indices: Tensor of indices of the image patches in the input_ids tensor. Shape: [b, s] + word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Tensor of word embeddings. + continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): + Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape + [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative + indices in image_patch_input_indices for that batch element. + image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Tensor of indices of the image patches in the input_ids tensor. """ if not (word_embeddings.shape[0] == len(continuous_embeddings)): raise ValueError( @@ -208,6 +208,7 @@ def gather_continuous_embeddings( return output_embeddings @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, @@ -218,10 +219,42 @@ def forward( past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, + labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPast]: + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
+ + Returns: + + Examples: + + ```python + >>> from transformers import FuyuProcessor, FuyuForCausalLM + >>> from PIL import Image + >>> import requests + + >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b") + >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + >>> prompt = "Generate a coco-style caption.\n" + + >>> inputs = processor(text=text_prompt, images=image, return_tensors="pt") + >>> outputs = model(**inputs) + + >>> generated_ids = model.generate(**model_inputs, max_new_tokens=7) + >>> generation_text = processor.batch_decode(generated_ids, skip_special_tokens=True) + >>> print(generation_text) + 'A bus parked on the side of a road.' + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states @@ -230,15 +263,14 @@ def forward( return_dict = return_dict if return_dict is not None else self.config.use_return_dict - # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: - raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + raise ValueError("You have to specify either input_is or inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 @@ -273,10 +305,12 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + labels=labels, use_cache=use_cache, + return_dict=return_dict, ) - if not return_dict: - return tuple(v for v in outputs if v is not None) + return outputs def prepare_inputs_for_generation( diff --git a/tests/models/fuyu/test_modeling_fuyu.py b/tests/models/fuyu/test_modeling_fuyu.py index 9fb6820e45ff..aaed47a2aea4 100644 --- a/tests/models/fuyu/test_modeling_fuyu.py +++ b/tests/models/fuyu/test_modeling_fuyu.py @@ -1,12 +1,29 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch Fuyu model. 
""" + import io import unittest import requests from transformers import FuyuConfig, is_torch_available, is_vision_available -from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device +from transformers.utils import cached_property -from ...test_modeling_common import ids_tensor, random_attention_mask +from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_vision_available(): @@ -23,19 +40,17 @@ from transformers import FuyuForCausalLM -# Copied from transformers.tests.llama.test_modelling_llama.LlamaModelTest with Llama->Fuyu class FuyuModelTester: def __init__( self, parent, batch_size=13, seq_length=7, - image_size=300, - patch_size=30, + image_size=30, + patch_size=15, num_channels=3, is_training=True, use_input_mask=True, - use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, @@ -62,7 +77,6 @@ def __init__( self.num_channels = num_channels self.is_training = is_training self.use_input_mask = use_input_mask - self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size @@ -88,21 +102,15 @@ def prepare_config_and_inputs(self): if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) - token_type_ids = None - if self.use_token_type_ids: - token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) - sequence_labels = None token_labels = None - choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) - choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() - return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + return config, input_ids, input_mask, sequence_labels, token_labels def get_config(self): return FuyuConfig( @@ -122,7 +130,12 @@ def get_config(self): ) def create_and_check_model( - self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + self, + config, + input_ids, + input_mask, + sequence_labels, + token_labels, ): model = FuyuForCausalLM(config=config) model.to(torch_device) @@ -135,11 +148,9 @@ def create_and_check_model_as_decoder( self, config, input_ids, - token_type_ids, input_mask, sequence_labels, token_labels, - choice_labels, encoder_hidden_states, encoder_attention_mask, ): @@ -165,11 +176,9 @@ def create_and_check_for_causal_lm( self, config, input_ids, - token_type_ids, input_mask, sequence_labels, token_labels, - choice_labels, encoder_hidden_states, encoder_attention_mask, ): @@ -183,11 +192,9 @@ def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, - token_type_ids, input_mask, sequence_labels, token_labels, - choice_labels, encoder_hidden_states, encoder_attention_mask, ): @@ -246,49 +253,73 @@ def prepare_config_and_inputs_for_common(self): ( config, input_ids, - token_type_ids, input_mask, sequence_labels, token_labels, - choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch -@require_torch_accelerator -@slow -class FuyuIntegrationTest(unittest.TestCase): # , ModelTesterMixin) - """ - Currently, all these tests depend on a value of 
max_tokens_to_generate of 10. - """ +class FuyuModelTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = (FuyuForCausalLM,) if is_torch_available() else () + pipeline_model_mapping = {"image-to-text": FuyuForCausalLM} if is_torch_available() else {} - all_model_classes = ("FuyuForCausalLM") if is_torch_available() else () + test_head_masking = False + test_pruning = False + test_cpu_offload = False + test_disk_offload = False + test_model_parallel = False def setUp(self): - self.pretrained_model_name = "adept/fuyu-8b" - self.processor = FuyuProcessor.from_pretrained(self.pretrained_model_name) - self.model = FuyuForCausalLM.from_pretrained(self.pretrained_model_name) - self.bus_image_url = ( - "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" - ) - self.bus_image_pil = Image.open(io.BytesIO(requests.get(self.bus_image_url).content)) + self.model_tester = FuyuModelTester(self) + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + + +@slow +@require_torch_gpu +class FuyuModelIntegrationTest(unittest.TestCase): + @cached_property + def default_processor(self): + return FuyuProcessor.from_pretrained("adept/fuyu-8b") + + @cached_property + def default_model(self): + return FuyuForCausalLM.from_pretrained("adept/fuyu-8b") + + def test_greedy_generation(self): + processor = self.default_processor + model = self.default_model + + url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" + image = Image.open(io.BytesIO(requests.get(url).content)) - @slow - def test_model_8b_chat_greedy_generation_bus_captioning(self): - EXPECTED_TEXT_COMPLETION = """A blue bus parked on the side of a road.|ENDOFTEXT|""" text_prompt_coco_captioning = "Generate a coco-style caption.\n" - model_inputs_bus_captioning = self.processor(text=text_prompt_coco_captioning, images=self.bus_image_pil) - generated_tokens = self.model.generate(**model_inputs_bus_captioning, max_new_tokens=10) - text = self.processor.tokenizer.batch_decode(generated_tokens) - end_sequence = text[0].split("\x04")[1] - clean_sequence = ( - end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] - if "|ENDOFTEXT|" in end_sequence - else end_sequence - ) - self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence[1:]) + + inputs = processor(text=text_prompt_coco_captioning, images=image, return_tensors="pt") + generated_ids = model.generate(**inputs, max_new_tokens=10) + + # take the last 8 tokens (in order to skip special \n\x04 characters) and decode them + generated_text = processor.batch_decode(generated_ids[:, -8:], skip_special_tokens=True)[0] + self.assertEqual(generated_text, "A blue bus parked on the side of a road.") """ diff --git a/utils/slow_documentation_tests.txt b/utils/slow_documentation_tests.txt index 98b3156e1d23..64841a19cb6c 100644 --- a/utils/slow_documentation_tests.txt 
+++ b/utils/slow_documentation_tests.txt @@ -6,4 +6,5 @@ docs/source/en/task_summary.md docs/source/en/tasks/prompting.md src/transformers/models/blip_2/modeling_blip_2.py src/transformers/models/ctrl/modeling_ctrl.py -src/transformers/models/kosmos2/modeling_kosmos2.py +src/transformers/models/fuyu/modeling_fuyu.py +src/transformers/models/kosmos2/modeling_kosmos2.py \ No newline at end of file From 72f531ab6b1aa229b6199ada993954fa2efb0425 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:35:53 +0100 Subject: [PATCH 179/268] [Table Transformer] Add Transformers-native checkpoints (#26928) * Improve conversion scripts * Fix paths * Fix style --- ....py => convert_table_transformer_to_hf.py} | 2 +- ...convert_table_transformer_to_hf_no_timm.py | 435 ++++++++++++++++++ utils/not_doctested.txt | 5 +- 3 files changed, 439 insertions(+), 3 deletions(-) rename src/transformers/models/table_transformer/{convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py => convert_table_transformer_to_hf.py} (99%) create mode 100644 src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py diff --git a/src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/table_transformer/convert_table_transformer_to_hf.py similarity index 99% rename from src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/table_transformer/convert_table_transformer_to_hf.py index d351473e2224..d06c3eb26b61 100644 --- a/src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/table_transformer/convert_table_transformer_to_hf.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Convert Table Transformer checkpoints. +"""Convert Table Transformer checkpoints with timm-backbone. URL: https://github.com/microsoft/table-transformer """ diff --git a/src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py b/src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py new file mode 100644 index 000000000000..0a2b7b87fe97 --- /dev/null +++ b/src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py @@ -0,0 +1,435 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Table Transformer checkpoints with native (Transformers) backbone. 
+ +URL: https://github.com/microsoft/table-transformer +""" + + +import argparse +from pathlib import Path + +import torch +from huggingface_hub import hf_hub_download +from PIL import Image +from torchvision.transforms import functional as F + +from transformers import DetrImageProcessor, ResNetConfig, TableTransformerConfig, TableTransformerForObjectDetection +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def create_rename_keys(config): + # here we list all keys to be renamed (original name on the left, our name on the right) + rename_keys = [] + + # stem + # fmt: off + rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")) + rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight")) + rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias")) + rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean")) + rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var")) + # stages + for stage_idx in range(len(config.backbone_config.depths)): + for layer_idx in range(config.backbone_config.depths[stage_idx]): + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv1.weight", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.convolution.weight", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.weight", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.weight", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.bias", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.bias", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_mean", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_mean", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_var", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_var", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv2.weight", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.convolution.weight", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.weight", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.weight", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.bias", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.bias", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_mean", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_mean", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_var", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_var", + ) + ) + # 
all ResNet stages except the first one have a downsample as first layer + if stage_idx != 0 and layer_idx == 0: + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", + ) + ) + rename_keys.append( + ( + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", + ) + ) + rename_keys.append( + ( + # "backbone.conv_encoder.model.encoder.stages.3.layers.0.shortcut.normalization.running_var" + f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", + f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", + ) + ) + # fmt: on + + for i in range(config.encoder_layers): + # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms + rename_keys.append( + ( + f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", + f"encoder.layers.{i}.self_attn.out_proj.weight", + ) + ) + rename_keys.append( + (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") + ) + rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) + rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) + rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) + rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) + rename_keys.append( + (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") + ) + rename_keys.append( + (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") + ) + rename_keys.append( + (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") + ) + rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) + # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms + rename_keys.append( + ( + f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", + f"decoder.layers.{i}.self_attn.out_proj.weight", + ) + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") + ) + rename_keys.append( + ( + f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", + f"decoder.layers.{i}.encoder_attn.out_proj.weight", + ) + ) + rename_keys.append( + ( + f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", + f"decoder.layers.{i}.encoder_attn.out_proj.bias", + ) + ) + rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) + rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) + 
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) + rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") + ) + rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) + + # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads + rename_keys.extend( + [ + ("input_proj.weight", "input_projection.weight"), + ("input_proj.bias", "input_projection.bias"), + ("query_embed.weight", "query_position_embeddings.weight"), + ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), + ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), + ("class_embed.weight", "class_labels_classifier.weight"), + ("class_embed.bias", "class_labels_classifier.bias"), + ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), + ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), + ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), + ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), + ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), + ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), + ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), + ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), + ] + ) + + return rename_keys + + +def rename_key(state_dict, old, new): + val = state_dict.pop(old) + state_dict[new] = val + + +def read_in_q_k_v(state_dict, is_panoptic=False): + prefix = "" + if is_panoptic: + prefix = "detr." 
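+
+    # The original checkpoint stores the attention projections as a single stacked in_proj_weight of shape
+    # (3 * hidden_size, hidden_size) — the torch.nn.MultiheadAttention layout — with the query, key and value
+    # weights on top of each other (hidden_size is 256 for this model). The loops below slice that stacked
+    # matrix (and the matching in_proj_bias) into thirds and assign them to the separate q_proj / k_proj /
+    # v_proj parameters used by the Transformers implementation.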
+ + # first: transformer encoder + for i in range(6): + # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") + in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] + state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] + state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] + state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] + state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] + state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] + # next: transformer decoder (which is a bit more complex because it also includes cross-attention) + for i in range(6): + # read in weights + bias of input projection layer of self-attention + in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight") + in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] + state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] + state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] + state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] + state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] + state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] + # read in weights + bias of input projection layer of cross-attention + in_proj_weight_cross_attn = state_dict.pop( + f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" + ) + in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias") + # next, add query, keys and values (in that order) of cross-attention to the state dict + state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :] + state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256] + state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :] + state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512] + state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :] + state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:] + + +def resize(image, checkpoint_url): + width, height = image.size + current_max_size = max(width, height) + target_max_size = 800 if "detection" in checkpoint_url else 1000 + scale = target_max_size / current_max_size + resized_image = image.resize((int(round(scale * width)), int(round(scale * height)))) + + return resized_image + + +def normalize(image): + image = F.to_tensor(image) + image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + return image + + +@torch.no_grad() +def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub): + """ + Copy/paste/tweak model's weights to 
our DETR structure. + """ + + logger.info("Converting model...") + + # create HuggingFace model and load state dict + backbone_config = ResNetConfig.from_pretrained( + "microsoft/resnet-18", out_features=["stage1", "stage2", "stage3", "stage4"] + ) + + config = TableTransformerConfig( + backbone_config=backbone_config, + use_timm_backbone=False, + mask_loss_coefficient=1, + dice_loss_coefficient=1, + ce_loss_coefficient=1, + bbox_loss_coefficient=5, + giou_loss_coefficient=2, + eos_coefficient=0.4, + class_cost=1, + bbox_cost=5, + giou_cost=2, + ) + + # load original state dict + state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") + + # rename keys + for src, dest in create_rename_keys(config): + rename_key(state_dict, src, dest) + # query, key and value matrices need special treatment + read_in_q_k_v(state_dict) + # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them + prefix = "model." + for key in state_dict.copy().keys(): + if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): + val = state_dict.pop(key) + state_dict[prefix + key] = val + + if "detection" in checkpoint_url: + config.num_queries = 15 + config.num_labels = 2 + id2label = {0: "table", 1: "table rotated"} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + else: + config.num_queries = 125 + config.num_labels = 6 + id2label = { + 0: "table", + 1: "table column", + 2: "table row", + 3: "table column header", + 4: "table projected row header", + 5: "table spanning cell", + } + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + image_processor = DetrImageProcessor(format="coco_detection", size={"longest_edge": 800}) + model = TableTransformerForObjectDetection(config) + model.load_state_dict(state_dict) + model.eval() + + # verify our conversion + filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" + file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename) + image = Image.open(file_path).convert("RGB") + pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0) + + outputs = model(pixel_values) + + if "detection" in checkpoint_url: + expected_shape = (1, 15, 3) + expected_logits = torch.tensor( + [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] + ) + expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]]) + + else: + expected_shape = (1, 125, 7) + expected_logits = torch.tensor( + [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] + ) + expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]]) + + assert outputs.logits.shape == expected_shape + assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4) + assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + # Save model and image processor + logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + model.save_pretrained(pytorch_dump_folder_path) + image_processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + # Push model to HF hub + logger.info("Pushing model to the hub...") + model_name = 
( + "microsoft/table-transformer-detection" + if "detection" in checkpoint_url + else "microsoft/table-transformer-structure-recognition" + ) + model.push_to_hub(model_name, revision="no_timm") + image_processor.push_to_hub(model_name, revision="no_timm") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "--checkpoint_url", + default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", + type=str, + choices=[ + "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", + "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", + ], + help="URL of the Table Transformer checkpoint you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + args = parser.parse_args() + convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index 744caff11279..07775fe823a4 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -809,7 +809,8 @@ src/transformers/models/t5/modeling_flax_t5.py src/transformers/models/t5/modeling_t5.py src/transformers/models/t5/modeling_tf_t5.py src/transformers/models/table_transformer/configuration_table_transformer.py -src/transformers/models/table_transformer/convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py +src/transformers/models/table_transformer/convert_table_transformer_to_hf.py +src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py src/transformers/models/tapas/configuration_tapas.py src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py src/transformers/models/tapas/modeling_tapas.py @@ -989,4 +990,4 @@ src/transformers/utils/peft_utils.py src/transformers/utils/quantization_config.py src/transformers/utils/sentencepiece_model_pb2.py src/transformers/utils/sentencepiece_model_pb2_new.py -src/transformers/utils/versions.py +src/transformers/utils/versions.py \ No newline at end of file From 7ddb21b4dbc0dd96520d0bbe72584e436cfff6a7 Mon Sep 17 00:00:00 2001 From: Phyzer Date: Wed, 15 Nov 2023 16:50:45 +0800 Subject: [PATCH 180/268] Update spelling mistake (#27506) thoroughly was misspelled thouroughly --- src/transformers/models/llama/tokenization_llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/llama/tokenization_llama.py b/src/transformers/models/llama/tokenization_llama.py index b5d58d82d36f..a7c2155b0da2 100644 --- a/src/transformers/models/llama/tokenization_llama.py +++ b/src/transformers/models/llama/tokenization_llama.py @@ -165,7 +165,7 @@ def __init__( f"You are using the default legacy behaviour of the {self.__class__}. This is" " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you." " If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it" - " means, and thouroughly read the reason why this was added as explained in" + " means, and thoroughly read the reason why this was added as explained in" " https://github.com/huggingface/transformers/pull/24565" ) legacy = True From 1e0e2dd37606ade3e3e5a8e247a5f23d4c24fe48 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Wed, 15 Nov 2023 10:17:51 +0100 Subject: [PATCH 181/268] [`CircleCI`] skip test_assisted_decoding_sample for everyone (#27511) * skip 4 tests * nits * style * wow it's not my day * skip new failing tests * style * skip for NLLB MoE as well * skip `test_assisted_decoding_sample` for everyone --- tests/generation/test_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 7531502be289..1705142f8f1f 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1576,6 +1576,7 @@ def test_assisted_decoding_matches_greedy_search(self): for output in (output_greedy, output_assisted): self._check_outputs(output, input_ids, model.config, use_cache=True) + @unittest.skip("Failing for a lot of models du to attention mask size missmatch. Works well when standalone.") def test_assisted_decoding_sample(self): # Seeded assisted decoding will not match sample for the same seed, as the forward pass does not return the # exact same logits (the forward pass of the main model, now with several tokens at once, has causal masking). From 64e21ca2a4f6dc463ba1472984ca0d4332213ac6 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 15 Nov 2023 10:43:16 +0100 Subject: [PATCH 182/268] Make some jobs run on the GitHub Actions runners (#27512) fix Co-authored-by: ydshieh --- .github/workflows/add-model-like.yml | 2 +- .github/workflows/build-docker-images.yml | 12 ++++++------ .github/workflows/build-nightly-ci-docker-images.yml | 4 ++-- .github/workflows/build-past-ci-docker-images.yml | 4 ++-- .github/workflows/check_runner_status.yml | 4 ++-- .github/workflows/check_tiny_models.yml | 2 +- .github/workflows/doctests.yml | 2 +- .github/workflows/model-templates.yml | 2 +- .github/workflows/release-conda.yml | 2 +- .github/workflows/self-nightly-scheduled.yml | 2 +- .github/workflows/self-past.yml | 2 +- .github/workflows/self-push-amd.yml | 4 ++-- .github/workflows/self-push-caller.yml | 4 ++-- .github/workflows/self-push.yml | 2 +- .github/workflows/self-scheduled.yml | 4 ++-- .github/workflows/stale.yml | 2 +- .github/workflows/update_metdata.yml | 2 +- 17 files changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/workflows/add-model-like.yml b/.github/workflows/add-model-like.yml index 68133a7e2243..8bdd66e4466d 100644 --- a/.github/workflows/add-model-like.yml +++ b/.github/workflows/add-model-like.yml @@ -14,7 +14,7 @@ on: jobs: run_tests_templates_like: name: "Add new model like template tests" - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml index 710ad7afe77a..48db1a55aa57 100644 --- a/.github/workflows/build-docker-images.yml +++ b/.github/workflows/build-docker-images.yml @@ -20,7 +20,7 @@ concurrency: jobs: latest-docker: name: "Latest PyTorch + TensorFlow [dev]" - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cleanup disk run: | @@ -69,7 +69,7 @@ jobs: latest-torch-deepspeed-docker: name: "Latest 
PyTorch + DeepSpeed" - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cleanup disk run: | @@ -106,7 +106,7 @@ jobs: # Can't build 2 images in a single job `latest-torch-deepspeed-docker` (for `nvcr.io/nvidia`) latest-torch-deepspeed-docker-for-push-ci-daily-build: name: "Latest PyTorch + DeepSpeed (Push CI - Daily Build)" - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cleanup disk run: | @@ -148,7 +148,7 @@ jobs: name: "Doc builder" # Push CI doesn't need this image if: inputs.image_postfix != '-push-ci' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Set up Docker Buildx @@ -174,7 +174,7 @@ jobs: name: "Latest PyTorch [dev]" # Push CI doesn't need this image if: inputs.image_postfix != '-push-ci' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cleanup disk run: | @@ -247,7 +247,7 @@ jobs: name: "Latest TensorFlow [dev]" # Push CI doesn't need this image if: inputs.image_postfix != '-push-ci' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Set up Docker Buildx diff --git a/.github/workflows/build-nightly-ci-docker-images.yml b/.github/workflows/build-nightly-ci-docker-images.yml index 1b8cab864d92..63bc7daa7434 100644 --- a/.github/workflows/build-nightly-ci-docker-images.yml +++ b/.github/workflows/build-nightly-ci-docker-images.yml @@ -13,7 +13,7 @@ concurrency: jobs: latest-with-torch-nightly-docker: name: "Nightly PyTorch + Stable TensorFlow" - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cleanup disk run: | @@ -50,7 +50,7 @@ jobs: nightly-torch-deepspeed-docker: name: "Nightly PyTorch + DeepSpeed" - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cleanup disk run: | diff --git a/.github/workflows/build-past-ci-docker-images.yml b/.github/workflows/build-past-ci-docker-images.yml index aa47dfd08c2d..21028568c963 100644 --- a/.github/workflows/build-past-ci-docker-images.yml +++ b/.github/workflows/build-past-ci-docker-images.yml @@ -16,7 +16,7 @@ jobs: fail-fast: false matrix: version: ["1.13", "1.12", "1.11", "1.10"] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Set up Docker Buildx @@ -60,7 +60,7 @@ jobs: fail-fast: false matrix: version: ["2.11", "2.10", "2.9", "2.8", "2.7", "2.6", "2.5"] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Set up Docker Buildx diff --git a/.github/workflows/check_runner_status.yml b/.github/workflows/check_runner_status.yml index 7d0e3853b5df..328d284223a8 100644 --- a/.github/workflows/check_runner_status.yml +++ b/.github/workflows/check_runner_status.yml @@ -18,7 +18,7 @@ env: jobs: check_runner_status: name: Check Runner Status - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 outputs: offline_runners: ${{ steps.set-offline_runners.outputs.offline_runners }} steps: @@ -39,7 +39,7 @@ jobs: send_results: name: Send results to webhook - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: check_runner_status if: ${{ failure() }} steps: diff --git a/.github/workflows/check_tiny_models.yml b/.github/workflows/check_tiny_models.yml index 5a4cb9622f06..898e441a4234 100644 --- a/.github/workflows/check_tiny_models.yml +++ b/.github/workflows/check_tiny_models.yml @@ -14,7 +14,7 @@ env: jobs: check_tiny_models: name: Check tiny models - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout transformers uses: actions/checkout@v3 diff --git a/.github/workflows/doctests.yml b/.github/workflows/doctests.yml index 82944ed2dfe8..0384144ceac7 100644 --- a/.github/workflows/doctests.yml +++ 
b/.github/workflows/doctests.yml @@ -66,7 +66,7 @@ jobs: send_results: name: Send results to webhook - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: always() needs: [run_doctests] steps: diff --git a/.github/workflows/model-templates.yml b/.github/workflows/model-templates.yml index 3830c23fe048..eb77d9dcbe1e 100644 --- a/.github/workflows/model-templates.yml +++ b/.github/workflows/model-templates.yml @@ -7,7 +7,7 @@ on: jobs: run_tests_templates: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout repository uses: actions/checkout@v3 diff --git a/.github/workflows/release-conda.yml b/.github/workflows/release-conda.yml index 4cc0b662fcc8..7a1990eec6b3 100644 --- a/.github/workflows/release-conda.yml +++ b/.github/workflows/release-conda.yml @@ -12,7 +12,7 @@ env: jobs: build_and_package: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 defaults: run: shell: bash -l {0} diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 07a6197584a4..e4b4f7f77cf0 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -246,7 +246,7 @@ jobs: send_results: name: Send results to webhook - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: always() needs: [ setup, diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index 568b6a9b64f8..6a154544df8b 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -289,7 +289,7 @@ jobs: send_results: name: Send results to webhook - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: always() needs: [ setup, diff --git a/.github/workflows/self-push-amd.yml b/.github/workflows/self-push-amd.yml index 875169827768..c72f224a300c 100644 --- a/.github/workflows/self-push-amd.yml +++ b/.github/workflows/self-push-amd.yml @@ -19,7 +19,7 @@ env: jobs: check_runner_status: name: Check Runner Status - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout transformers uses: actions/checkout@v3 @@ -241,7 +241,7 @@ jobs: send_results: name: Send results to webhook - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: always() needs: [ check_runner_status, diff --git a/.github/workflows/self-push-caller.yml b/.github/workflows/self-push-caller.yml index 994567c5cdbd..9247848b89ec 100644 --- a/.github/workflows/self-push-caller.yml +++ b/.github/workflows/self-push-caller.yml @@ -14,7 +14,7 @@ on: jobs: check-for-setup: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 name: Check if setup was changed outputs: changed: ${{ steps.was_changed.outputs.changed }} @@ -46,7 +46,7 @@ jobs: run_push_ci: name: Trigger Push CI - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: ${{ always() }} needs: build-docker-containers steps: diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index 423b0c8c6932..a6ea5b1e04b9 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -491,7 +491,7 @@ jobs: send_results: name: Send results to webhook - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: always() needs: [ setup, diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 13d6a3806985..4a04cb14ac7b 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -400,7 +400,7 @@ jobs: run_extract_warnings: name: Extract warnings in CI artifacts - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: always() needs: [ setup, @@ -448,7 +448,7 @@ jobs: send_results: name: 
Send results to webhook - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 if: always() needs: [ setup, diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 1211d71a32e2..4a7e94bac429 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -8,7 +8,7 @@ jobs: close_stale_issues: name: Close Stale Issues if: github.repository == 'huggingface/transformers' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: diff --git a/.github/workflows/update_metdata.yml b/.github/workflows/update_metdata.yml index de25fe0a08a2..a2269e32e4d3 100644 --- a/.github/workflows/update_metdata.yml +++ b/.github/workflows/update_metdata.yml @@ -8,7 +8,7 @@ on: jobs: build_and_package: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 defaults: run: shell: bash -l {0} From 3d1a7bf4761e332e78e07d147bacb0d26a522187 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Wed, 15 Nov 2023 10:46:02 +0100 Subject: [PATCH 183/268] [`tokenizers`] update `tokenizers` version pin (#27494) * update `tokenizers` version pin * force tokenizers>=0.15 * use 0.14 Co-authored-by: Lysandre --------- Co-authored-by: Lysandre --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 78763b23242b..e42a29585f97 100644 --- a/setup.py +++ b/setup.py @@ -175,7 +175,7 @@ "tf2onnx", "timeout-decorator", "timm", - "tokenizers>=0.14,<0.15", + "tokenizers>=0.14,<0.19", "torch>=1.10,!=1.12.0", "torchaudio", "torchvision", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 398e787684b1..736b3128100f 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -80,7 +80,7 @@ "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", - "tokenizers": "tokenizers>=0.14,<0.15", + "tokenizers": "tokenizers>=0.14,<0.19", "torch": "torch>=1.10,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", From 453079c7f843e7eb920a47fcdaa431413ac0fe72 Mon Sep 17 00:00:00 2001 From: Xin Qiu Date: Wed, 15 Nov 2023 20:49:14 +0800 Subject: [PATCH 184/268] =?UTF-8?q?=F0=9F=9A=A8=F0=9F=9A=A8=20Fix=20beam?= =?UTF-8?q?=20score=20calculation=20issue=20for=20decoder-only=20models=20?= =?UTF-8?q?(#27351)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix beam score calculation issue for decoder-only models * Update beam search test and fix code quality issue * Fix beam_sample, group_beam_search and constrained_beam_search * Split test for pytorch and TF, add documentation --------- Co-authored-by: Xin Qiu --- src/transformers/generation/beam_search.py | 42 +++++++++++++++++---- src/transformers/generation/utils.py | 16 ++++++++ tests/generation/test_framework_agnostic.py | 6 ++- 3 files changed, 55 insertions(+), 9 deletions(-) diff --git a/src/transformers/generation/beam_search.py b/src/transformers/generation/beam_search.py index 03334b6b6145..a46859c88cbd 100644 --- a/src/transformers/generation/beam_search.py +++ b/src/transformers/generation/beam_search.py @@ -222,8 +222,10 @@ def process( eos_token_id: Optional[Union[int, List[int]]] = None, beam_indices: Optional[torch.LongTensor] = None, group_index: Optional[int] = 0, + decoder_prompt_len: Optional[int] = 0, ) -> Dict[str, torch.Tensor]: - cur_len = input_ids.shape[-1] + 1 # add up to the 
length which the next_scores is calculated on + # add up to the length which the next_scores is calculated on + cur_len = input_ids.shape[-1] - decoder_prompt_len + 1 batch_size = len(self._beam_hyps) // self.num_beam_groups if not (batch_size == (input_ids.shape[0] // self.group_size)): @@ -277,10 +279,15 @@ def process( else: beam_index = None + # skip the corner case where the very first generated token is eos_token + if decoder_prompt_len == input_ids.shape[-1]: + continue + self._beam_hyps[batch_group_idx].add( input_ids[batch_beam_idx].clone(), next_score.item(), beam_indices=beam_index, + decoder_prompt_len=decoder_prompt_len, ) else: # add next predicted token since it is not eos_token @@ -322,6 +329,7 @@ def finalize( pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, ) -> Tuple[torch.LongTensor]: batch_size = len(self._beam_hyps) // self.num_beam_groups @@ -340,7 +348,7 @@ def finalize( final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None - beam_hyp.add(final_tokens, final_score, beam_indices=beam_index) + beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, decoder_prompt_len=decoder_prompt_len) # select the best hypotheses sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) @@ -511,6 +519,7 @@ def process( pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, ) -> Tuple[torch.Tensor]: r""" Args: @@ -535,7 +544,8 @@ def process( The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. beam_indices (`torch.LongTensor`, *optional*): Beam indices indicating to which beam hypothesis each token correspond. - + decoder_prompt_len (`int`, *optional*): + The length of prompt that is included in the input to decoder. Return: `UserDict`: A dictionary composed of the fields as defined above: @@ -550,7 +560,8 @@ def process( indicating to which beam the next tokens shall be added. 
""" - cur_len = input_ids.shape[-1] + 1 # add up to the length which the next_scores is calculated on + # add up to the length which the next_scores is calculated on + cur_len = input_ids.shape[-1] - decoder_prompt_len + 1 batch_size = len(self._beam_hyps) if not (batch_size == (input_ids.shape[0] // self.group_size)): if self.num_beam_groups > 1: @@ -606,10 +617,16 @@ def process( else: beam_index = None + # skip the corner case where the only constraint token is + # eos_token and the very first generated token is eos_token + if decoder_prompt_len == input_ids.shape[-1]: + continue + beam_hyp.add( input_ids[batch_beam_idx].clone(), next_score.item(), beam_indices=beam_index, + decoder_prompt_len=decoder_prompt_len, ) else: # add next predicted token since it is not eos_token @@ -805,6 +822,7 @@ def finalize( pad_token_id: Optional[int] = None, eos_token_id: Optional[Union[int, List[int]]] = None, beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, ) -> Tuple[torch.LongTensor]: batch_size = len(self._beam_hyps) @@ -828,7 +846,9 @@ def finalize( completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist()) if completes_constraint: beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None - beam_hyp.add(final_tokens, final_score, beam_indices=beam_index) + beam_hyp.add( + final_tokens, final_score, beam_indices=beam_index, decoder_prompt_len=decoder_prompt_len + ) ids_collect.append(beam_id) # due to overly complex constraints or other factors, sometimes we can't gaurantee a successful @@ -839,7 +859,7 @@ def finalize( batch_beam_idx = batch_idx * self.num_beams + beam_id final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] - beam_hyp.add(final_tokens, final_score) + beam_hyp.add(final_tokens, final_score, decoder_prompt_len=decoder_prompt_len) if len(ids_collect) >= self.num_beam_hyps_to_keep: break @@ -931,11 +951,17 @@ def __len__(self): """ return len(self.beams) - def add(self, hyp: torch.LongTensor, sum_logprobs: float, beam_indices: Optional[torch.LongTensor] = None): + def add( + self, + hyp: torch.LongTensor, + sum_logprobs: float, + beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, + ): """ Add a new hypothesis to the list. """ - score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) + score = sum_logprobs / ((hyp.shape[-1] - decoder_prompt_len) ** self.length_penalty) if len(self) < self.num_beams or score > self.worst_score: self.beams.append((score, hyp, beam_indices)) if len(self) > self.num_beams: diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 4dbfc367064b..10ffffc37ca8 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -3172,6 +3172,8 @@ def beam_search( beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
@@ -3246,6 +3248,7 @@ def beam_search( pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, ) beam_scores = beam_outputs["next_beam_scores"] @@ -3281,6 +3284,7 @@ def beam_search( eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, ) if return_dict_in_generate: @@ -3500,6 +3504,8 @@ def beam_sample( beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. @@ -3578,6 +3584,7 @@ def beam_sample( pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] @@ -3612,6 +3619,7 @@ def beam_sample( eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, ) if return_dict_in_generate: @@ -3837,6 +3845,8 @@ def group_beam_search( beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. @@ -3924,6 +3934,7 @@ def group_beam_search( eos_token_id=eos_token_id, beam_indices=process_beam_indices, group_index=beam_group_idx, + decoder_prompt_len=decoder_prompt_len, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] @@ -3993,6 +4004,7 @@ def group_beam_search( eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=final_beam_indices, + decoder_prompt_len=decoder_prompt_len, ) if return_dict_in_generate: @@ -4220,6 +4232,8 @@ def constrained_beam_search( beam_scores = beam_scores.view((batch_size * num_beams,)) this_peer_finished = False # used by synced_gpus only + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder while True: if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. 
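For completeness, here is a hedged usage sketch (again, not part of the patch) showing where the change becomes visible to callers. It reuses the tiny test checkpoint and prompt that appear in the updated test further below; with `return_dict_in_generate=True` and `output_scores=True`, the per-beam `sequences_scores` are now normalized by the number of generated tokens only.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello, my dog is cute and", return_tensors="pt")
out = model.generate(
    **inputs,
    num_beams=3,
    max_new_tokens=10,
    return_dict_in_generate=True,
    output_scores=True,
)

# Beam scores reflect the new normalization (generated length, not prompt + generated).
print(out.sequences_scores)
```

This is also why the PyTorch expectation in `test_eos_token_id_int_and_list_beam_search` changes in this patch, while the TensorFlow value keeps the old behaviour for now.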
@@ -4298,6 +4312,7 @@ def constrained_beam_search( pad_token_id=pad_token_id, eos_token_id=eos_token_id, beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] @@ -4331,6 +4346,7 @@ def constrained_beam_search( eos_token_id=eos_token_id, max_length=stopping_criteria.max_length, beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, ) if return_dict_in_generate: diff --git a/tests/generation/test_framework_agnostic.py b/tests/generation/test_framework_agnostic.py index 306cb15168e5..8a269801640e 100644 --- a/tests/generation/test_framework_agnostic.py +++ b/tests/generation/test_framework_agnostic.py @@ -633,7 +633,11 @@ def test_eos_token_id_int_and_list_beam_search(self): "do_sample": False, "num_beams": 3, } - expectation = 13 + if is_pt: + expectation = 20 + else: + # TODO (joao): fix me + expectation = 13 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" From 48ba1e074f3c335451e6fcae19760c30aea8d70a Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Wed, 15 Nov 2023 14:10:39 +0100 Subject: [PATCH 185/268] [ `PretrainedConfig`] Improve messaging (#27438) * import hf error * nits * fixup * catch the error at the correct place * style * improve message a tiny bit * Update src/transformers/utils/hub.py Co-authored-by: Lucain * add a test --------- Co-authored-by: Lucain --- src/transformers/utils/hub.py | 6 +++++- tests/test_configuration_common.py | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 0d58211da835..f6cf0a852ed7 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -43,6 +43,7 @@ from huggingface_hub.utils import ( EntryNotFoundError, GatedRepoError, + HFValidationError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, @@ -491,7 +492,10 @@ def cached_file( return None raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}") - + except HFValidationError as e: + raise EnvironmentError( + f"Incorrect path_or_model_id: '{path_or_repo_id}'. Please provide either the path to a local folder or the repo_id of a model on the Hub." 
+ ) from e return resolved_file diff --git a/tests/test_configuration_common.py b/tests/test_configuration_common.py index 5fb93f71eb04..57521a3e7c53 100644 --- a/tests/test_configuration_common.py +++ b/tests/test_configuration_common.py @@ -96,6 +96,9 @@ def create_and_test_config_from_and_save_pretrained(self): self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) + with self.parent.assertRaises(OSError): + self.config_class.from_pretrained(f".{tmpdirname}") + def create_and_test_config_from_and_save_pretrained_subfolder(self): config_first = self.config_class(**self.inputs_dict) From a85ea4b19a47824a8e85d3304a698e2d5c8325ec Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Wed, 15 Nov 2023 09:24:03 -0500 Subject: [PATCH 186/268] Fix wav2vec2 params (#27515) Fix test --- examples/pytorch/test_pytorch_examples.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/pytorch/test_pytorch_examples.py b/examples/pytorch/test_pytorch_examples.py index 7d27804a7330..a0781b356595 100644 --- a/examples/pytorch/test_pytorch_examples.py +++ b/examples/pytorch/test_pytorch_examples.py @@ -550,9 +550,6 @@ def test_run_wav2vec2_pretraining(self): --seed 42 """.split() - if is_torch_fp16_available_on_device(torch_device): - testargs.append("--fp16") - with patch.object(sys, "argv", testargs): run_wav2vec2_pretraining_no_trainer.main() model = Wav2Vec2ForPreTraining.from_pretrained(tmp_dir) From a0633c44834e18bdd6a073c4d427e5e870476636 Mon Sep 17 00:00:00 2001 From: Yuki-Imajuku <72183189+Yuki-Imajuku@users.noreply.github.com> Date: Thu, 16 Nov 2023 03:13:52 +0900 Subject: [PATCH 187/268] Translating `en/model_doc` docs to Japanese. (#27401) * update _toctree.yml & add albert-autoformer * Fixed typo in docs/source/ja/model_doc/audio-spectrogram-transformer.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Delete duplicated sentence docs/source/ja/model_doc/autoformer.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Reflect reviews * delete untranslated models from toctree * delete all comments * add abstract translation --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- .../audio-spectrogram-transformer.md | 4 +- docs/source/ja/_toctree.yml | 95 ++--- docs/source/ja/model_doc/albert.md | 193 +++++++++ docs/source/ja/model_doc/align.md | 104 +++++ docs/source/ja/model_doc/altclip.md | 97 +++++ .../audio-spectrogram-transformer.md | 69 ++++ docs/source/ja/model_doc/auto.md | 370 ++++++++++++++++++ docs/source/ja/model_doc/autoformer.md | 50 +++ 8 files changed, 912 insertions(+), 70 deletions(-) create mode 100644 docs/source/ja/model_doc/albert.md create mode 100644 docs/source/ja/model_doc/align.md create mode 100644 docs/source/ja/model_doc/altclip.md create mode 100644 docs/source/ja/model_doc/audio-spectrogram-transformer.md create mode 100644 docs/source/ja/model_doc/auto.md create mode 100644 docs/source/ja/model_doc/autoformer.md diff --git a/docs/source/en/model_doc/audio-spectrogram-transformer.md b/docs/source/en/model_doc/audio-spectrogram-transformer.md index 587ec85d09b6..3eac3781667e 100644 --- a/docs/source/en/model_doc/audio-spectrogram-transformer.md +++ b/docs/source/en/model_doc/audio-spectrogram-transformer.md @@ -29,7 +29,7 @@ The abstract from the paper is the following: drawing - Audio pectrogram Transformer architecture. Taken from the original paper. + Audio Spectrogram Transformer architecture. Taken from the original paper. 
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/YuanGongND/ast). @@ -72,4 +72,4 @@ If you're interested in submitting a resource to be included here, please feel f ## ASTForAudioClassification [[autodoc]] ASTForAudioClassification - - forward \ No newline at end of file + - forward diff --git a/docs/source/ja/_toctree.yml b/docs/source/ja/_toctree.yml index 69169d6fffc0..df686b475dab 100644 --- a/docs/source/ja/_toctree.yml +++ b/docs/source/ja/_toctree.yml @@ -29,77 +29,12 @@ title: LLM を使用した生成 title: Tutorials - sections: - - isExpanded: false - # sections: - # - local: tasks/sequence_classification - # title: Text classification - # - local: tasks/token_classification - # title: Token classification - # - local: tasks/question_answering - # title: Question answering - # - local: tasks/language_modeling - # title: Causal language modeling - # - local: tasks/masked_language_modeling - # title: Masked language modeling - # - local: tasks/translation - # title: Translation - # - local: tasks/summarization - # title: Summarization - # - local: tasks/multiple_choice - # title: Multiple choice - # title: Natural Language Processing - # - isExpanded: false - # sections: - # - local: tasks/audio_classification - # title: Audio classification - # - local: tasks/asr - # title: Automatic speech recognition - # title: Audio - # - isExpanded: false - # sections: - # - local: tasks/image_classification - # title: Image classification - # - local: tasks/semantic_segmentation - # title: Semantic segmentation - # - local: tasks/video_classification - # title: Video classification - # - local: tasks/object_detection - # title: Object detection - # - local: tasks/zero_shot_object_detection - # title: Zero-shot object detection - # - local: tasks/zero_shot_image_classification - # title: Zero-shot image classification - # - local: tasks/monocular_depth_estimation - # title: Depth estimation - # - local: tasks/image_to_image - # title: Image-to-Image - # - local: tasks/knowledge_distillation_for_image_classification - # title: Knowledge Distillation for Computer Vision - # title: Computer Vision - # - isExpanded: false - # sections: - # - local: tasks/image_captioning - # title: Image captioning - # - local: tasks/document_question_answering - # title: Document Question Answering - # - local: tasks/visual_question_answering - # title: Visual Question Answering - # - local: tasks/text-to-speech - # title: Text to speech - # title: Multimodal - isExpanded: false sections: - local: generation_strategies title: 生成戦略をカスタマイズする title: Generation - # - isExpanded: false - # sections: - # - local: tasks/idefics - # title: Image tasks with IDEFICS - # - local: tasks/prompting - # title: LLM prompting guide - # title: Prompting - title: Task Guides + title: Task Guides - sections: - local: fast_tokenizers title: 🤗 トークナイザーの高速トークナイザーを使用する @@ -206,8 +141,8 @@ - sections: - local: main_classes/agent title: エージェントとツール - # - local: model_doc/auto - # title: Auto Classes + - local: model_doc/auto + title: Auto Classes - local: main_classes/callback title: コールバック - local: main_classes/configuration @@ -245,6 +180,30 @@ - local: main_classes/image_processor title: 画像処理プロセッサ title: 主要なクラス + - sections: + - isExpanded: false + sections: + - local: model_doc/albert + title: ALBERT + title: 文章モデル + - isExpanded: false + sections: + - local: model_doc/audio-spectrogram-transformer + title: Audio Spectrogram Transformer + title: 音声モデル + - isExpanded: 
false + sections: + - local: model_doc/align + title: ALIGN + - local: model_doc/altclip + title: AltCLIP + title: マルチモーダルモデル + - isExpanded: false + sections: + - local: model_doc/autoformer + title: Autoformer + title: 時系列モデル + title: モデル - sections: - local: internal/modeling_utils title: カスタムレイヤーとユーティリティ diff --git a/docs/source/ja/model_doc/albert.md b/docs/source/ja/model_doc/albert.md new file mode 100644 index 000000000000..00403ea53765 --- /dev/null +++ b/docs/source/ja/model_doc/albert.md @@ -0,0 +1,193 @@ + + +# ALBERT + +
+ +Models + + +Spaces + +
+ +## 概要 + +ALBERTモデルは、「[ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942)」という論文でZhenzhong Lan、Mingda Chen、Sebastian Goodman、Kevin Gimpel、Piyush Sharma、Radu Soricutによって提案されました。BERTのメモリ消費を減らしトレーニングを高速化するためのパラメータ削減技術を2つ示しています: + +- 埋め込み行列を2つの小さな行列に分割する。 +- グループ間で分割された繰り返し層を使用する。 + +論文の要旨は以下の通りです: + +*自然言語表現の事前学習時にモデルのサイズを増やすと、下流タスクのパフォーマンスが向上することがしばしばあります。しかし、ある時点でさらなるモデルの増大は、GPU/TPUのメモリ制限、長い訓練時間、予期せぬモデルの劣化といった問題のために困難になります。これらの問題に対処するために、我々はBERTのメモリ消費を低減し、訓練速度を高めるための2つのパラメータ削減技術を提案します。包括的な実証的証拠は、我々の提案方法が元のBERTに比べてはるかによくスケールするモデルを生み出すことを示しています。また、文間の一貫性をモデリングに焦点を当てた自己教師あり損失を使用し、複数の文が含まれる下流タスクに一貫して助けとなることを示します。その結果、我々の最良のモデルは、BERT-largeに比べてパラメータが少ないにもかかわらず、GLUE、RACE、SQuADベンチマークで新たな最先端の結果を確立します。* + +このモデルは[lysandre](https://huggingface.co/lysandre)により提供されました。このモデルのjaxバージョンは[kamalkraj](https://huggingface.co/kamalkraj)により提供されました。オリジナルのコードは[こちら](https://github.com/google-research/ALBERT)で見ることができます。 + +## 使用上のヒント + +- ALBERTは絶対位置埋め込みを使用するモデルなので、通常、入力を左側ではなく右側にパディングすることが推奨されます。 +- ALBERTは繰り返し層を使用するためメモリ使用量は小さくなりますが、同じ数の(繰り返し)層を反復しなければならないため、隠れ層の数が同じであればBERTのようなアーキテクチャと同様の計算コストがかかります。 +- 埋め込みサイズEは隠れサイズHと異なりますが、これは埋め込みが文脈に依存しない(一つの埋め込みベクトルが一つのトークンを表す)のに対し、隠れ状態は文脈に依存する(1つの隠れ状態がトークン系列を表す)ため、H >> Eとすることがより論理的です。また、埋め込み行列のサイズはV x Eと大きいです(Vは語彙サイズ)。E < Hであれば、パラメータは少なくなります。 +- 層はパラメータを共有するグループに分割されています(メモリ節約のため)。次文予測(NSP: Next Sentence Prediction)は文の順序予測に置き換えられます:入力では、2つの文AとB(それらは連続している)があり、Aに続いてBを与えるか、Bに続いてAを与えます。モデルはそれらが入れ替わっているかどうかを予測する必要があります。 + +## 参考資料 + +- [テキスト分類タスクガイド](../tasks/sequence_classification) +- [トークン分類タスクガイド](../tasks/token_classification) +- [質問応答タスクガイド](../tasks/question_answering) +- [マスクされた言語モデルタスクガイド](../tasks/masked_language_modeling) +- [多肢選択タスクガイド](../tasks/multiple_choice) + +## AlbertConfig + +[[autodoc]] AlbertConfig + +## AlbertTokenizer + +[[autodoc]] AlbertTokenizer + - build_inputs_with_special_tokens + - get_special_tokens_mask + - create_token_type_ids_from_sequences + - save_vocabulary + +## AlbertTokenizerFast + +[[autodoc]] AlbertTokenizerFast + +## Albert specific outputs + +[[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput + +[[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput + + + + +## AlbertModel + +[[autodoc]] AlbertModel + - forward + +## AlbertForPreTraining + +[[autodoc]] AlbertForPreTraining + - forward + +## AlbertForMaskedLM + +[[autodoc]] AlbertForMaskedLM + - forward + +## AlbertForSequenceClassification + +[[autodoc]] AlbertForSequenceClassification + - forward + +## AlbertForMultipleChoice + +[[autodoc]] AlbertForMultipleChoice + +## AlbertForTokenClassification + +[[autodoc]] AlbertForTokenClassification + - forward + +## AlbertForQuestionAnswering + +[[autodoc]] AlbertForQuestionAnswering + - forward + + + + + +## TFAlbertModel + +[[autodoc]] TFAlbertModel + - call + +## TFAlbertForPreTraining + +[[autodoc]] TFAlbertForPreTraining + - call + +## TFAlbertForMaskedLM + +[[autodoc]] TFAlbertForMaskedLM + - call + +## TFAlbertForSequenceClassification + +[[autodoc]] TFAlbertForSequenceClassification + - call + +## TFAlbertForMultipleChoice + +[[autodoc]] TFAlbertForMultipleChoice + - call + +## TFAlbertForTokenClassification + +[[autodoc]] TFAlbertForTokenClassification + - call + +## TFAlbertForQuestionAnswering + +[[autodoc]] TFAlbertForQuestionAnswering + - call + + + + +## FlaxAlbertModel + +[[autodoc]] FlaxAlbertModel + - __call__ + +## FlaxAlbertForPreTraining + +[[autodoc]] FlaxAlbertForPreTraining + - __call__ + +## FlaxAlbertForMaskedLM + 
+[[autodoc]] FlaxAlbertForMaskedLM + - __call__ + +## FlaxAlbertForSequenceClassification + +[[autodoc]] FlaxAlbertForSequenceClassification + - __call__ + +## FlaxAlbertForMultipleChoice + +[[autodoc]] FlaxAlbertForMultipleChoice + - __call__ + +## FlaxAlbertForTokenClassification + +[[autodoc]] FlaxAlbertForTokenClassification + - __call__ + +## FlaxAlbertForQuestionAnswering + +[[autodoc]] FlaxAlbertForQuestionAnswering + - __call__ + + + diff --git a/docs/source/ja/model_doc/align.md b/docs/source/ja/model_doc/align.md new file mode 100644 index 000000000000..6e62c3d9f4ca --- /dev/null +++ b/docs/source/ja/model_doc/align.md @@ -0,0 +1,104 @@ + + +# ALIGN + +## 概要 + +ALIGNモデルは、「[Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918)」という論文でChao Jia、Yinfei Yang、Ye Xia、Yi-Ting Chen、Zarana Parekh、Hieu Pham、Quoc V. Le、Yunhsuan Sung、Zhen Li、Tom Duerigによって提案されました。ALIGNはマルチモーダルな視覚言語モデルです。これは画像とテキストの類似度や、ゼロショット画像分類に使用できます。ALIGNは[EfficientNet](efficientnet)を視覚エンコーダーとして、[BERT](bert)をテキストエンコーダーとして搭載したデュアルエンコーダー構造を特徴とし、対照学習によって視覚とテキストの表現を整合させることを学びます。それまでの研究とは異なり、ALIGNは巨大でノイジーなデータセットを活用し、コーパスのスケールを利用して単純な方法ながら最先端の表現を達成できることを示しています。 + +論文の要旨は以下の通りです: + +*事前学習された表現は、多くの自然言語処理(NLP)および知覚タスクにとって重要になっています。NLPにおける表現学習は、人間のアノテーションのない生のテキストでの学習へと移行していますが、視覚および視覚言語の表現は依然として精巧な学習データセットに大きく依存しており、これは高価であったり専門知識を必要としたりします。視覚アプリケーションの場合、ImageNetやOpenImagesのような明示的なクラスラベルを持つデータセットを使用して学習されることがほとんどです。視覚言語の場合、Conceptual Captions、MSCOCO、CLIPなどの人気のあるデータセットはすべて、それぞれ無視できないデータ収集(およびクリーニング)プロセスを含みます。このコストのかかるキュレーションプロセスはデータセットのサイズを制限し、訓練されたモデルのスケーリングを妨げます。本論文では、Conceptual Captionsデータセットの高価なフィルタリングや後処理ステップなしで得られた、10億を超える画像alt-textペアのノイズの多いデータセットを活用します。シンプルなデュアルエンコーダーアーキテクチャは、対照損失を使用して画像とテキストペアの視覚的および言語的表現を整合させることを学習します。我々は、コーパスの規模がそのノイズを補い、このような単純な学習スキームでも最先端の表現につながることを示します。我々の視覚表現は、ImageNetやVTABなどの分類タスクへの転移において強力な性能を発揮します。整合した視覚的および言語的表現は、ゼロショット画像分類を可能にし、また、より洗練されたクロスアテンションモデルと比較しても、Flickr30KおよびMSCOCO画像テキスト検索ベンチマークにおいて新たな最先端の結果を達成します。また、これらの表現は、複雑なテキストおよびテキスト+画像のクエリを用いたクロスモーダル検索を可能にします。* + +このモデルは[Alara Dirik](https://huggingface.co/adirik)により提供されました。 +オリジナルのコードは公開されておらず、この実装は元論文に基づいたKakao Brainの実装をベースにしています。 + +## 使用例 + +ALIGNはEfficientNetを使用して視覚的特徴を、BERTを使用してテキスト特徴を取得します。テキストと視覚の両方の特徴は、同一の次元を持つ潜在空間に射影されます。射影された画像とテキスト特徴間のドット積が類似度スコアとして使用されます。 + +[`AlignProcessor`]は、テキストのエンコードと画像の前処理を両方行うために、[`EfficientNetImageProcessor`]と[`BertTokenizer`]を単一のインスタンスにラップします。以下の例は、[`AlignProcessor`]と[`AlignModel`]を使用して画像-テキスト類似度スコアを取得する方法を示しています。 + +```python +import requests +import torch +from PIL import Image +from transformers import AlignProcessor, AlignModel + +processor = AlignProcessor.from_pretrained("kakaobrain/align-base") +model = AlignModel.from_pretrained("kakaobrain/align-base") + +url = "http://images.cocodataset.org/val2017/000000039769.jpg" +image = Image.open(requests.get(url, stream=True).raw) +candidate_labels = ["an image of a cat", "an image of a dog"] + +inputs = processor(text=candidate_labels, images=image, return_tensors="pt") + +with torch.no_grad(): + outputs = model(**inputs) + +# これは画像-テキスト類似度スコア +logits_per_image = outputs.logits_per_image + +# Softmaxを取ることで各ラベルの確率を得られる +probs = logits_per_image.softmax(dim=1) +print(probs) +``` + +## 参考資料 + +ALIGNの使用を開始するのに役立つ公式のHugging Faceとコミュニティ(🌎で示されている)の参考資料の一覧です。 + +- [ALIGNとCOYO-700Mデータセット](https://huggingface.co/blog/vit-align)に関するブログ投稿。 +- ゼロショット画像分類[デモ](https://huggingface.co/spaces/adirik/ALIGN-zero-shot-image-classification)。 +- `kakaobrain/align-base` 
モデルの[モデルカード](https://huggingface.co/kakaobrain/align-base)。 + +ここに参考資料を提出したい場合は、気兼ねなくPull Requestを開いてください。私たちはそれをレビューいたします!参考資料は、既存のものを複製するのではなく、何か新しいことを示すことが理想的です。 + +## AlignConfig + +[[autodoc]] AlignConfig + - from_text_vision_configs + +## AlignTextConfig + +[[autodoc]] AlignTextConfig + +## AlignVisionConfig + +[[autodoc]] AlignVisionConfig + +## AlignProcessor + +[[autodoc]] AlignProcessor + +## AlignModel + +[[autodoc]] AlignModel + - forward + - get_text_features + - get_image_features + +## AlignTextModel + +[[autodoc]] AlignTextModel + - forward + +## AlignVisionModel + +[[autodoc]] AlignVisionModel + - forward diff --git a/docs/source/ja/model_doc/altclip.md b/docs/source/ja/model_doc/altclip.md new file mode 100644 index 000000000000..232b3645544f --- /dev/null +++ b/docs/source/ja/model_doc/altclip.md @@ -0,0 +1,97 @@ + + +# AltCLIP + +## 概要 + + +AltCLIPモデルは、「[AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679v2)」という論文でZhongzhi Chen、Guang Liu、Bo-Wen Zhang、Fulong Ye、Qinghong Yang、Ledell Wuによって提案されました。AltCLIP(CLIPの言語エンコーダーの代替)は、様々な画像-テキストペアおよびテキスト-テキストペアでトレーニングされたニューラルネットワークです。CLIPのテキストエンコーダーを事前学習済みの多言語テキストエンコーダーXLM-Rに置き換えることで、ほぼ全てのタスクでCLIPに非常に近い性能を得られ、オリジナルのCLIPの能力を多言語理解などに拡張しました。 + +論文の要旨は以下の通りです: + +*この研究では、強力なバイリンガルマルチモーダル表現モデルを訓練するための概念的に単純で効果的な方法を提案します。OpenAIによってリリースされたマルチモーダル表現モデルCLIPから開始し、そのテキストエンコーダを事前学習済みの多言語テキストエンコーダXLM-Rに交換し、教師学習と対照学習からなる2段階のトレーニングスキーマを用いて言語と画像の表現を整合させました。幅広いタスクの評価を通じて、我々の方法を検証します。ImageNet-CN、Flicker30k-CN、COCO-CNを含む多くのタスクで新たな最先端の性能を達成しました。さらに、ほぼすべてのタスクでCLIPに非常に近い性能を得ており、これはCLIPのテキストエンコーダを変更するだけで、多言語理解などの拡張を実現できることを示唆しています。* + +このモデルは[jongjyh](https://huggingface.co/jongjyh)により提供されました。 + +## 使用上のヒントと使用例 + +AltCLIPの使用方法はCLIPに非常に似ています。CLIPとの違いはテキストエンコーダーにあります。私たちはカジュアルアテンションではなく双方向アテンションを使用し、XLM-Rの[CLS]トークンをテキスト埋め込みを表すものとして取ることに留意してください。 + +AltCLIPはマルチモーダルな視覚言語モデルです。これは画像とテキストの類似度や、ゼロショット画像分類に使用できます。AltCLIPはViTのようなTransformerを使用して視覚的特徴を、双方向言語モデルを使用してテキスト特徴を取得します。テキストと視覚の両方の特徴は、同一の次元を持つ潜在空間に射影されます。射影された画像とテキスト特徴間のドット積が類似度スコアとして使用されます。 + +Transformerエンコーダーに画像を与えるには、各画像を固定サイズの重複しないパッチの系列に分割し、それらを線形に埋め込みます。画像全体を表現するための[CLS]トークンが追加されます。著者は絶対位置埋め込みも追加し、結果として得られるベクトルの系列を標準的なTransformerエンコーダーに供給します。[`CLIPImageProcessor`]を使用して、モデルのために画像のサイズ変更(または拡大縮小)と正規化を行うことができます。 + +[`AltCLIPProcessor`]は、テキストのエンコードと画像の前処理を両方行うために、[`CLIPImageProcessor`]と[`XLMRobertaTokenizer`]を単一のインスタンスにラップします。以下の例は、[`AltCLIPProcessor`]と[`AltCLIPModel`]を使用して画像-テキスト類似スコアを取得する方法を示しています。 + +```python +>>> from PIL import Image +>>> import requests + +>>> from transformers import AltCLIPModel, AltCLIPProcessor + +>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") +>>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP") + +>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" +>>> image = Image.open(requests.get(url, stream=True).raw) + +>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) + +>>> outputs = model(**inputs) +>>> logits_per_image = outputs.logits_per_image # これは画像-テキスト類似度スコア +>>> probs = logits_per_image.softmax(dim=1) # Softmaxを取ることで各ラベルの確率を得られる +``` + + + +このモデルは`CLIPModel`をベースにしており、オリジナルの[CLIP](clip)と同じように使用してください。 + + + +## AltCLIPConfig + +[[autodoc]] AltCLIPConfig + - from_text_vision_configs + +## AltCLIPTextConfig + +[[autodoc]] AltCLIPTextConfig + +## AltCLIPVisionConfig + +[[autodoc]] AltCLIPVisionConfig + +## AltCLIPProcessor + +[[autodoc]] AltCLIPProcessor + +## AltCLIPModel + +[[autodoc]] AltCLIPModel + - forward 
+ - get_text_features + - get_image_features + +## AltCLIPTextModel + +[[autodoc]] AltCLIPTextModel + - forward + +## AltCLIPVisionModel + +[[autodoc]] AltCLIPVisionModel + - forward diff --git a/docs/source/ja/model_doc/audio-spectrogram-transformer.md b/docs/source/ja/model_doc/audio-spectrogram-transformer.md new file mode 100644 index 000000000000..efbadbd4bae6 --- /dev/null +++ b/docs/source/ja/model_doc/audio-spectrogram-transformer.md @@ -0,0 +1,69 @@ + + +# Audio Spectrogram Transformer + +## 概要 + +Audio Spectrogram Transformerモデルは、「[AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778)」という論文でYuan Gong、Yu-An Chung、James Glassによって提案されました。これは、音声を画像(スペクトログラム)に変換することで、音声に[Vision Transformer](vit)を適用します。このモデルは音声分類において最先端の結果を得ています。 + +論文の要旨は以下の通りです: + +*過去10年間で、畳み込みニューラルネットワーク(CNN)は、音声スペクトログラムから対応するラベルへの直接的なマッピングを学習することを目指す、エンドツーエンドの音声分類モデルの主要な構成要素として広く採用されてきました。長距離のグローバルなコンテキストをより良く捉えるため、最近の傾向として、CNNの上にセルフアテンション機構を追加し、CNN-アテンションハイブリッドモデルを形成することがあります。しかし、CNNへの依存が必要かどうか、そして純粋にアテンションに基づくニューラルネットワークだけで音声分類において良いパフォーマンスを得ることができるかどうかは明らかではありません。本論文では、これらの問いに答えるため、音声分類用では最初の畳み込みなしで純粋にアテンションベースのモデルであるAudio Spectrogram Transformer(AST)を紹介します。我々はASTを様々なオーディオ分類ベンチマークで評価し、AudioSetで0.485 mAP、ESC-50で95.6%の正解率、Speech Commands V2で98.1%の正解率という新たな最先端の結果を達成しました。* + + + + Audio Spectrogram Transformerのアーキテクチャ。元論文より抜粋。 + +このモデルは[nielsr](https://huggingface.co/nielsr)より提供されました。 +オリジナルのコードは[こちら](https://github.com/YuanGongND/ast)で見ることができます。 + +## 使用上のヒント + +- 独自のデータセットでAudio Spectrogram Transformer(AST)をファインチューニングする場合、入力の正規化(入力の平均を0、標準偏差を0.5にすること)処理することが推奨されます。[`ASTFeatureExtractor`]はこれを処理します。デフォルトではAudioSetの平均と標準偏差を使用していることに注意してください。著者が下流のデータセットの統計をどのように計算しているかは、[`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py)で確認することができます。 +- ASTは低い学習率が必要であり(著者は[PSLA論文](https://arxiv.org/abs/2102.01243)で提案されたCNNモデルに比べて10倍小さい学習率を使用しています)、素早く収束するため、タスクに適した学習率と学習率スケジューラーを探すことをお勧めします。 + +## 参考資料 + +Audio Spectrogram Transformerの使用を開始するのに役立つ公式のHugging Faceおよびコミュニティ(🌎で示されている)の参考資料の一覧です。 + + + +- ASTを用いた音声分類の推論を説明するノートブックは[こちら](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/AST)で見ることができます。 +- [`ASTForAudioClassification`]は、この[例示スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification)と[ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)によってサポートされています。 +- こちらも参照:[音声分類タスク](../tasks/audio_classification)。 + +ここに参考資料を提出したい場合は、気兼ねなくPull Requestを開いてください。私たちはそれをレビューいたします!参考資料は、既存のものを複製するのではなく、何か新しいことを示すことが理想的です。 + +## ASTConfig + +[[autodoc]] ASTConfig + +## ASTFeatureExtractor + +[[autodoc]] ASTFeatureExtractor + - __call__ + +## ASTModel + +[[autodoc]] ASTModel + - forward + +## ASTForAudioClassification + +[[autodoc]] ASTForAudioClassification + - forward diff --git a/docs/source/ja/model_doc/auto.md b/docs/source/ja/model_doc/auto.md new file mode 100644 index 000000000000..b104e4c99fe3 --- /dev/null +++ b/docs/source/ja/model_doc/auto.md @@ -0,0 +1,370 @@ + + +# Auto Classes + +多くの場合、`from_pretrained()`メソッドに与えられた事前学習済みモデルの名前やパスから、使用したいアーキテクチャを推測することができます。自動クラスはこの仕事をあなたに代わって行うためにここにありますので、事前学習済みの重み/設定/語彙への名前/パスを与えると自動的に関連するモデルを取得できます。 + +[`AutoConfig`]、[`AutoModel`]、[`AutoTokenizer`]のいずれかをインスタンス化すると、関連するアーキテクチャのクラスが直接作成されます。例えば、 + +```python +model = AutoModel.from_pretrained("bert-base-cased") +``` + +これは[`BertModel`]のインスタンスであるモデルを作成します。 + +各タスクごと、そして各バックエンド(PyTorch、TensorFlow、またはFlax)ごとに`AutoModel`のクラスが存在します。 + +## 自動クラスの拡張 + 
+それぞれの自動クラスには、カスタムクラスで拡張するためのメソッドがあります。例えば、`NewModel`というモデルのカスタムクラスを定義した場合、`NewModelConfig`を確保しておけばこのようにして自動クラスに追加することができます: + +```python +from transformers import AutoConfig, AutoModel + +AutoConfig.register("new-model", NewModelConfig) +AutoModel.register(NewModelConfig, NewModel) +``` + +その後、通常どおりauto classesを使用することができるようになります! + + + +あなたの`NewModelConfig`が[`~transformer.PretrainedConfig`]のサブクラスである場合、その`model_type`属性がコンフィグを登録するときに使用するキー(ここでは`"new-model"`)と同じに設定されていることを確認してください。 + +同様に、あなたの`NewModel`が[`PreTrainedModel`]のサブクラスである場合、その`config_class`属性がモデルを登録する際に使用するクラス(ここでは`NewModelConfig`)と同じに設定されていることを確認してください。 + + + +## AutoConfig + +[[autodoc]] AutoConfig + +## AutoTokenizer + +[[autodoc]] AutoTokenizer + +## AutoFeatureExtractor + +[[autodoc]] AutoFeatureExtractor + +## AutoImageProcessor + +[[autodoc]] AutoImageProcessor + +## AutoProcessor + +[[autodoc]] AutoProcessor + +## Generic model classes + +以下の自動クラスは、特定のヘッドを持たないベースモデルクラスをインスタンス化するために利用可能です。 + +### AutoModel + +[[autodoc]] AutoModel + +### TFAutoModel + +[[autodoc]] TFAutoModel + +### FlaxAutoModel + +[[autodoc]] FlaxAutoModel + +## Generic pretraining classes + +以下の自動クラスは、事前学習ヘッドを持つモデルをインスタンス化するために利用可能です。 + +### AutoModelForPreTraining + +[[autodoc]] AutoModelForPreTraining + +### TFAutoModelForPreTraining + +[[autodoc]] TFAutoModelForPreTraining + +### FlaxAutoModelForPreTraining + +[[autodoc]] FlaxAutoModelForPreTraining + +## Natural Language Processing + +以下の自動クラスは、次の自然言語処理タスクに利用可能です。 + +### AutoModelForCausalLM + +[[autodoc]] AutoModelForCausalLM + +### TFAutoModelForCausalLM + +[[autodoc]] TFAutoModelForCausalLM + +### FlaxAutoModelForCausalLM + +[[autodoc]] FlaxAutoModelForCausalLM + +### AutoModelForMaskedLM + +[[autodoc]] AutoModelForMaskedLM + +### TFAutoModelForMaskedLM + +[[autodoc]] TFAutoModelForMaskedLM + +### FlaxAutoModelForMaskedLM + +[[autodoc]] FlaxAutoModelForMaskedLM + +### AutoModelForMaskGeneration + +[[autodoc]] AutoModelForMaskGeneration + +### TFAutoModelForMaskGeneration + +[[autodoc]] TFAutoModelForMaskGeneration + +### AutoModelForSeq2SeqLM + +[[autodoc]] AutoModelForSeq2SeqLM + +### TFAutoModelForSeq2SeqLM + +[[autodoc]] TFAutoModelForSeq2SeqLM + +### FlaxAutoModelForSeq2SeqLM + +[[autodoc]] FlaxAutoModelForSeq2SeqLM + +### AutoModelForSequenceClassification + +[[autodoc]] AutoModelForSequenceClassification + +### TFAutoModelForSequenceClassification + +[[autodoc]] TFAutoModelForSequenceClassification + +### FlaxAutoModelForSequenceClassification + +[[autodoc]] FlaxAutoModelForSequenceClassification + +### AutoModelForMultipleChoice + +[[autodoc]] AutoModelForMultipleChoice + +### TFAutoModelForMultipleChoice + +[[autodoc]] TFAutoModelForMultipleChoice + +### FlaxAutoModelForMultipleChoice + +[[autodoc]] FlaxAutoModelForMultipleChoice + +### AutoModelForNextSentencePrediction + +[[autodoc]] AutoModelForNextSentencePrediction + +### TFAutoModelForNextSentencePrediction + +[[autodoc]] TFAutoModelForNextSentencePrediction + +### FlaxAutoModelForNextSentencePrediction + +[[autodoc]] FlaxAutoModelForNextSentencePrediction + +### AutoModelForTokenClassification + +[[autodoc]] AutoModelForTokenClassification + +### TFAutoModelForTokenClassification + +[[autodoc]] TFAutoModelForTokenClassification + +### FlaxAutoModelForTokenClassification + +[[autodoc]] FlaxAutoModelForTokenClassification + +### AutoModelForQuestionAnswering + +[[autodoc]] AutoModelForQuestionAnswering + +### TFAutoModelForQuestionAnswering + +[[autodoc]] TFAutoModelForQuestionAnswering + +### FlaxAutoModelForQuestionAnswering + 
+[[autodoc]] FlaxAutoModelForQuestionAnswering
+
+### AutoModelForTextEncoding
+
+[[autodoc]] AutoModelForTextEncoding
+
+### TFAutoModelForTextEncoding
+
+[[autodoc]] TFAutoModelForTextEncoding
+
+## Computer vision
+
+以下の自動クラスは、次のコンピュータービジョンタスクに利用可能です。
+
+### AutoModelForDepthEstimation
+
+[[autodoc]] AutoModelForDepthEstimation
+
+### AutoModelForImageClassification
+
+[[autodoc]] AutoModelForImageClassification
+
+### TFAutoModelForImageClassification
+
+[[autodoc]] TFAutoModelForImageClassification
+
+### FlaxAutoModelForImageClassification
+
+[[autodoc]] FlaxAutoModelForImageClassification
+
+### AutoModelForVideoClassification
+
+[[autodoc]] AutoModelForVideoClassification
+
+### AutoModelForMaskedImageModeling
+
+[[autodoc]] AutoModelForMaskedImageModeling
+
+### TFAutoModelForMaskedImageModeling
+
+[[autodoc]] TFAutoModelForMaskedImageModeling
+
+### AutoModelForObjectDetection
+
+[[autodoc]] AutoModelForObjectDetection
+
+### AutoModelForImageSegmentation
+
+[[autodoc]] AutoModelForImageSegmentation
+
+### AutoModelForImageToImage
+
+[[autodoc]] AutoModelForImageToImage
+
+### AutoModelForSemanticSegmentation
+
+[[autodoc]] AutoModelForSemanticSegmentation
+
+### TFAutoModelForSemanticSegmentation
+
+[[autodoc]] TFAutoModelForSemanticSegmentation
+
+### AutoModelForInstanceSegmentation
+
+[[autodoc]] AutoModelForInstanceSegmentation
+
+### AutoModelForUniversalSegmentation
+
+[[autodoc]] AutoModelForUniversalSegmentation
+
+### AutoModelForZeroShotImageClassification
+
+[[autodoc]] AutoModelForZeroShotImageClassification
+
+### TFAutoModelForZeroShotImageClassification
+
+[[autodoc]] TFAutoModelForZeroShotImageClassification
+
+### AutoModelForZeroShotObjectDetection
+
+[[autodoc]] AutoModelForZeroShotObjectDetection
+
+## Audio
+
+以下の自動クラスは、次の音声タスクに利用可能です。
+
+### AutoModelForAudioClassification
+
+[[autodoc]] AutoModelForAudioClassification
+
+### TFAutoModelForAudioClassification
+
+[[autodoc]] TFAutoModelForAudioClassification
+
+### AutoModelForAudioFrameClassification
+
+[[autodoc]] AutoModelForAudioFrameClassification
+
+### AutoModelForCTC
+
+[[autodoc]] AutoModelForCTC
+
+### AutoModelForSpeechSeq2Seq
+
+[[autodoc]] AutoModelForSpeechSeq2Seq
+
+### TFAutoModelForSpeechSeq2Seq
+
+[[autodoc]] TFAutoModelForSpeechSeq2Seq
+
+### FlaxAutoModelForSpeechSeq2Seq
+
+[[autodoc]] FlaxAutoModelForSpeechSeq2Seq
+
+### AutoModelForAudioXVector
+
+[[autodoc]] AutoModelForAudioXVector
+
+### AutoModelForTextToSpectrogram
+
+[[autodoc]] AutoModelForTextToSpectrogram
+
+### AutoModelForTextToWaveform
+
+[[autodoc]] AutoModelForTextToWaveform
+
+## Multimodal
+
+以下の自動クラスは、次のマルチモーダルタスクに利用可能です。
+
+### AutoModelForTableQuestionAnswering
+
+[[autodoc]] AutoModelForTableQuestionAnswering
+
+### TFAutoModelForTableQuestionAnswering
+
+[[autodoc]] TFAutoModelForTableQuestionAnswering
+
+### AutoModelForDocumentQuestionAnswering
+
+[[autodoc]] AutoModelForDocumentQuestionAnswering
+
+### TFAutoModelForDocumentQuestionAnswering
+
+[[autodoc]] TFAutoModelForDocumentQuestionAnswering
+
+### AutoModelForVisualQuestionAnswering
+
+[[autodoc]] AutoModelForVisualQuestionAnswering
+
+### AutoModelForVision2Seq
+
+[[autodoc]] AutoModelForVision2Seq
+
+### TFAutoModelForVision2Seq
+
+[[autodoc]] TFAutoModelForVision2Seq
+
+### FlaxAutoModelForVision2Seq
+
+[[autodoc]] FlaxAutoModelForVision2Seq
diff --git a/docs/source/ja/model_doc/autoformer.md b/docs/source/ja/model_doc/autoformer.md
new file mode 100644
index 000000000000..b8b0948b960d
--- /dev/null
+++ b/docs/source/ja/model_doc/autoformer.md
@@ 
-0,0 +1,50 @@ + + +# Autoformer + +## 概要 + +Autoformerモデルは、「[Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008)」という論文でHaixu Wu、Jiehui Xu、Jianmin Wang、Mingsheng Longによって提案されました。 + +このモデルは、予測プロセス中にトレンドと季節性成分を逐次的に分解できる深層分解アーキテクチャとしてTransformerを増強します。 + +論文の要旨は以下の通りです: + +*例えば異常気象の早期警告や長期的なエネルギー消費計画といった実応用において、予測時間を延長することは重要な要求です。本論文では、時系列の長期予測問題を研究しています。以前のTransformerベースのモデルは、長距離依存関係を発見するために様々なセルフアテンション機構を採用しています。しかし、長期未来の複雑な時間的パターンによってモデルが信頼できる依存関係を見つけることを妨げられます。また、Transformerは、長い系列の効率化のためにポイントワイズなセルフアテンションのスパースバージョンを採用する必要があり、情報利用のボトルネックとなります。Transformerを超えて、我々は自己相関機構を持つ新しい分解アーキテクチャとしてAutoformerを設計しました。系列分解の事前処理の慣行を破り、それを深層モデルの基本的な内部ブロックとして革新します。この設計は、複雑な時系列に対するAutoformerの進行的な分解能力を強化します。さらに、確率過程理論に触発されて、系列の周期性に基づいた自己相関機構を設計し、サブ系列レベルでの依存関係の発見と表現の集約を行います。自己相関は効率と精度の両方でセルフアテンションを上回ります。長期予測において、Autoformerは、エネルギー、交通、経済、気象、疾病の5つの実用的な応用をカバーする6つのベンチマークで38%の相対的な改善をもたらし、最先端の精度を達成します。* + +このモデルは[elisim](https://huggingface.co/elisim)と[kashif](https://huggingface.co/kashif)より提供されました。 +オリジナルのコードは[こちら](https://github.com/thuml/Autoformer)で見ることができます。 + +## 参考資料 + +Autoformerの使用を開始するのに役立つ公式のHugging Faceおよびコミュニティ(🌎で示されている)の参考資料の一覧です。ここに参考資料を提出したい場合は、気兼ねなくPull Requestを開いてください。私たちはそれをレビューいたします!参考資料は、既存のものを複製するのではなく、何か新しいことを示すことが理想的です。 + +- HuggingFaceブログでAutoformerに関するブログ記事をチェックしてください:[はい、Transformersは時系列予測に効果的です(+ Autoformer)](https://huggingface.co/blog/autoformer) + +## AutoformerConfig + +[[autodoc]] AutoformerConfig + +## AutoformerModel + +[[autodoc]] AutoformerModel + - forward + +## AutoformerForPrediction + +[[autodoc]] AutoformerForPrediction + - forward From e6522e49a7ff8a58793d13653aa94adfe56b44ce Mon Sep 17 00:00:00 2001 From: Adam Louly Date: Wed, 15 Nov 2023 10:16:42 -0800 Subject: [PATCH 188/268] Fixing the failure of models without max_position_embeddings attribute. (#27499) fix max pos issue Co-authored-by: Adam Louly --- examples/pytorch/language-modeling/run_clm.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 4e153e72fe48..7c668919253b 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -497,15 +497,20 @@ def tokenize_function(examples): batched=True, remove_columns=column_names, ) + if hasattr(config, "max_position_embeddings"): + max_pos_embeddings = config.max_position_embeddings + else: + # Define a default value if the attribute is missing in the config. + max_pos_embeddings = 1024 if data_args.block_size is None: block_size = tokenizer.model_max_length - if block_size > config.max_position_embeddings: + if block_size > max_pos_embeddings: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " - f"Using block_size={min(1024, config.max_position_embeddings)} instead. You can change that default value by passing --block_size xxx." + f"Using block_size={min(1024, max_pos_embeddings)} instead. You can change that default value by passing --block_size xxx." 
) - block_size = min(1024, config.max_position_embeddings) + block_size = min(1024, max_pos_embeddings) else: if data_args.block_size > tokenizer.model_max_length: logger.warning( From 2e72bbab2cd169903b1e77b439718c1bdc5d50b2 Mon Sep 17 00:00:00 2001 From: Matt Date: Wed, 15 Nov 2023 18:18:54 +0000 Subject: [PATCH 189/268] Incorrect setting for num_beams in translation and summarization examples (#27519) * Remove the torch main_process_first context manager from TF examples * Correctly set num_beams=1 in our examples, and add a guard in GenerationConfig.validate() * Update src/transformers/generation/configuration_utils.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../summarization/run_summarization_flax.py | 2 +- .../summarization/run_summarization.py | 2 +- .../pytorch/translation/run_translation.py | 2 +- .../tensorflow/multiple-choice/run_swag.py | 26 +++++++------- .../summarization/run_summarization.py | 36 +++++++++---------- .../tensorflow/translation/run_translation.py | 36 +++++++++---------- .../generation/configuration_utils.py | 4 +++ 7 files changed, 53 insertions(+), 55 deletions(-) diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index f47c11e4ec36..a7d6633f64f8 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -312,7 +312,7 @@ class DataTrainingArguments: default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) num_beams: Optional[int] = field( - default=None, + default=1, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to `model.generate`, " diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index d7f8b9f1c559..d7f543c24868 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -249,7 +249,7 @@ class DataTrainingArguments: }, ) num_beams: Optional[int] = field( - default=None, + default=1, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index 92af72ccd209..6edbe6a995c3 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -217,7 +217,7 @@ class DataTrainingArguments: }, ) num_beams: Optional[int] = field( - default=None, + default=1, metadata={ "help": ( "Number of beams to use for evaluation. 
This argument will be passed to ``model.generate``, " diff --git a/examples/tensorflow/multiple-choice/run_swag.py b/examples/tensorflow/multiple-choice/run_swag.py index e78becda89d5..db73e137b33d 100644 --- a/examples/tensorflow/multiple-choice/run_swag.py +++ b/examples/tensorflow/multiple-choice/run_swag.py @@ -415,13 +415,12 @@ def preprocess_function(examples): if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) - with training_args.main_process_first(desc="train dataset map pre-processing"): - train_dataset = train_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) + train_dataset = train_dataset.map( + preprocess_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + load_from_cache_file=not data_args.overwrite_cache, + ) if training_args.do_eval: if "validation" not in raw_datasets: @@ -430,13 +429,12 @@ def preprocess_function(examples): if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) - with training_args.main_process_first(desc="validation dataset map pre-processing"): - eval_dataset = eval_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) + eval_dataset = eval_dataset.map( + preprocess_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + load_from_cache_file=not data_args.overwrite_cache, + ) if data_args.pad_to_max_length: data_collator = DefaultDataCollator(return_tensors="np") diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py index 3ca57b033ccf..c60893399340 100644 --- a/examples/tensorflow/summarization/run_summarization.py +++ b/examples/tensorflow/summarization/run_summarization.py @@ -238,7 +238,7 @@ class DataTrainingArguments: }, ) num_beams: Optional[int] = field( - default=None, + default=1, metadata={ "help": ( "Number of beams to use for evaluation. 
This argument will be passed to ``model.generate``, " @@ -488,15 +488,14 @@ def preprocess_function(examples): if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) - with training_args.main_process_first(desc="train dataset map pre-processing"): - train_dataset = train_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on train dataset", - ) + train_dataset = train_dataset.map( + preprocess_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on train dataset", + ) else: train_dataset = None @@ -508,15 +507,14 @@ def preprocess_function(examples): if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) - with training_args.main_process_first(desc="validation dataset map pre-processing"): - eval_dataset = eval_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on validation dataset", - ) + eval_dataset = eval_dataset.map( + preprocess_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on validation dataset", + ) else: eval_dataset = None # endregion diff --git a/examples/tensorflow/translation/run_translation.py b/examples/tensorflow/translation/run_translation.py index e7fc47b3388b..7dd926774ece 100644 --- a/examples/tensorflow/translation/run_translation.py +++ b/examples/tensorflow/translation/run_translation.py @@ -226,7 +226,7 @@ class DataTrainingArguments: }, ) num_beams: Optional[int] = field( - default=None, + default=1, metadata={ "help": ( "Number of beams to use for evaluation. 
This argument will be passed to ``model.generate``, " @@ -454,15 +454,14 @@ def preprocess_function(examples): if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) - with training_args.main_process_first(desc="train dataset map pre-processing"): - train_dataset = train_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on train dataset", - ) + train_dataset = train_dataset.map( + preprocess_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on train dataset", + ) else: train_dataset = None @@ -474,15 +473,14 @@ def preprocess_function(examples): if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) - with training_args.main_process_first(desc="validation dataset map pre-processing"): - eval_dataset = eval_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on validation dataset", - ) + eval_dataset = eval_dataset.map( + preprocess_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on validation dataset", + ) else: eval_dataset = None # endregion diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py index c53738f14abe..6b0b434ec726 100644 --- a/src/transformers/generation/configuration_utils.py +++ b/src/transformers/generation/configuration_utils.py @@ -409,6 +409,10 @@ def validate(self, is_init=False): ) # 2. detect beam-only parameterization when not in beam mode + if self.num_beams is None: + logging.warning("`num_beams` is set to None - defaulting to 1.", UserWarning) + self.num_beams = 1 + if self.num_beams == 1: single_beam_wrong_parameter_msg = ( "`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used " From b71c38a094ee91140f82d5285be9fcb0ea43839e Mon Sep 17 00:00:00 2001 From: JiangZhongqing <49048129+JamesJiang97@users.noreply.github.com> Date: Thu, 16 Nov 2023 04:00:22 +0900 Subject: [PATCH 190/268] Fix bug for T5x to PyTorch convert script with varying encoder and decoder layers (#27448) * Fix bug in handling varying encoder and decoder layers This commit resolves an issue where the script failed to convert T5x models to PyTorch models when the number of decoder layers differed from the number of encoder layers. I've addressed this issue by passing an additional 'num_decoder_layers' parameter to the relevant function. 
* Fix bug in handling varying encoder and decoder layers --- .../models/t5/convert_t5x_checkpoint_to_pytorch.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py b/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py index d69e14057fc5..5e7d9ef33d3e 100755 --- a/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py +++ b/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py @@ -69,7 +69,7 @@ def t5x_layer_norm_lookup(params, i, prefix, layer_name): return params[f"{prefix}/layers_{i}/{layer_name}/scale"] -def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool): +def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, num_decoder_layers: int, is_encoder_only: bool): """Converts the parameters from T5X-Flax to Transformers-PyTorch.""" old = traverse_util.flatten_dict(variables["target"]) old = {"/".join(k): v for k, v in old.items()} @@ -112,7 +112,7 @@ def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: if not is_encoder_only: # Decoder. - for i in range(num_layers): + for i in range(num_decoder_layers): # Block i, layer 0 (Self Attention). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention") @@ -177,7 +177,12 @@ def make_state_dict(converted_params, is_encoder_only: bool): def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only): """Replaces the params in model witht the T5X converted params.""" variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) - converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only) + converted = convert_t5x_to_pytorch( + variables, + num_layers=config.num_layers, + num_decoder_layers=config.num_decoder_layers, + is_encoder_only=is_encoder_only, + ) state_dict = make_state_dict(converted, is_encoder_only) model.load_state_dict(state_dict, strict=True) From 1ac599d90f740ce28f637ad32ff5f59c40cd5a0a Mon Sep 17 00:00:00 2001 From: Marc Sun <57196510+SunMarc@users.noreply.github.com> Date: Wed, 15 Nov 2023 20:58:08 +0100 Subject: [PATCH 191/268] Fix offload disk for loading derivated model checkpoint into base model (#27253) * fix * style * add test --- src/transformers/modeling_utils.py | 22 ++++++++++------ tests/test_modeling_utils.py | 40 ++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 8 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index fcb51e6a56be..57eb08a4159b 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -3793,8 +3793,7 @@ def _find_mismatched_keys( else: folder = None if device_map is not None and is_safetensors: - param_device_map = expand_device_map(device_map, original_loaded_keys) - + param_device_map = expand_device_map(device_map, original_loaded_keys, start_prefix) str_dtype = str(dtype).replace("torch.", "") if dtype is not None else "float32" if sharded_metadata is None: archive_file = ( @@ -3806,9 +3805,9 @@ def _find_mismatched_keys( else: weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata["weight_map"].items()} offload_index = { - p: {"safetensors_file": f, "weight_name": p, "dtype": str_dtype} + p[len(start_prefix) :]: {"safetensors_file": f, "weight_name": p, "dtype": str_dtype} for p, f in weight_map.items() - if param_device_map[p] == 
"disk" + if p.startswith(start_prefix) and param_device_map[p[len(start_prefix) :]] == "disk" } if state_dict is not None: @@ -3842,7 +3841,9 @@ def _find_mismatched_keys( state_dict_index = None if is_sharded_safetensors: - disk_only_shard_files = get_disk_only_shard_files(device_map, sharded_metadata=sharded_metadata) + disk_only_shard_files = get_disk_only_shard_files( + device_map, sharded_metadata=sharded_metadata, start_prefix=start_prefix + ) disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files] else: disk_only_shard_files = [] @@ -4576,11 +4577,12 @@ def unwrap_model(model: nn.Module) -> nn.Module: return model -def expand_device_map(device_map, param_names): +def expand_device_map(device_map, param_names, start_prefix): """ Expand a device map to return the correspondance parameter name to device. """ new_device_map = {} + param_names = [p[len(start_prefix) :] for p in param_names if p.startswith(start_prefix)] for module, device in device_map.items(): new_device_map.update( {p: device for p in param_names if p == module or p.startswith(f"{module}.") or module == ""} @@ -4588,12 +4590,16 @@ def expand_device_map(device_map, param_names): return new_device_map -def get_disk_only_shard_files(device_map, sharded_metadata): +def get_disk_only_shard_files(device_map, sharded_metadata, start_prefix): """ Returns the list of shard files containing only weights offloaded to disk. """ + + weight_map = { + p[len(start_prefix) :]: v for p, v in sharded_metadata["weight_map"].items() if p.startswith(start_prefix) + } files_content = collections.defaultdict(list) - for weight_name, filename in sharded_metadata["weight_map"].items(): + for weight_name, filename in weight_map.items(): while len(weight_name) > 0 and weight_name not in device_map: weight_name = ".".join(weight_name.split(".")[:-1]) files_content[filename].append(device_map[weight_name]) diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index e457dc07a9fb..62a639a95f56 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -750,6 +750,46 @@ def test_from_pretrained_disk_offload_task_model(self): self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) + @require_accelerate + @mark.accelerate_tests + @require_torch_accelerator + def test_from_pretrained_disk_offload_derived_to_base_model(self): + derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") + + device_map = { + "wte": 0, + "wpe": 0, + "h.0": "cpu", + "h.1": "cpu", + "h.2": "cpu", + "h.3": "disk", + "h.4": "disk", + "ln_f": 0, + } + with tempfile.TemporaryDirectory() as tmp_dir: + inputs = torch.tensor([[1, 2, 3]]).to(0) + derived_model.save_pretrained(tmp_dir, use_safetensors=True) + base_model = AutoModel.from_pretrained(tmp_dir) + outputs1 = base_model.to(0)(inputs) + + # with disk offload + offload_folder = os.path.join(tmp_dir, "offload") + base_model_with_offload = AutoModel.from_pretrained( + tmp_dir, device_map=device_map, offload_folder=offload_folder + ) + outputs2 = base_model_with_offload(inputs) + self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) + + # With state dict temp offload + new_model_with_offload = AutoModel.from_pretrained( + tmp_dir, + device_map=device_map, + offload_folder=offload_folder, + offload_state_dict=True, + ) + outputs2 = new_model_with_offload(inputs) + self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) + def test_cached_files_are_used_when_internet_is_down(self): # A 
mock response for an HTTP head request to emulate server down response_mock = mock.Mock() From 06343b06335a1f8417bd32d3ffc7cf2cca9a24ac Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Thu, 16 Nov 2023 08:59:03 +0800 Subject: [PATCH 192/268] translate model.md to chinese (#27518) * translate model.md to chinese * apply review suggestion Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/zh/_toctree.yml | 8 +- docs/source/zh/main_classes/model.md | 137 +++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 1 deletion(-) create mode 100644 docs/source/zh/main_classes/model.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index c3c914186733..914ce68fd26d 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -65,4 +65,10 @@ title: 🤗Transformers能做什么 - local: tokenizer_summary title: 分词器的摘要 - title: 概念指南 \ No newline at end of file + title: 概念指南 +- sections: + - sections: + - local: main_classes/model + title: 模型 + title: 主要类 + title: 应用程序接口 (API) \ No newline at end of file diff --git a/docs/source/zh/main_classes/model.md b/docs/source/zh/main_classes/model.md new file mode 100644 index 000000000000..d1b1c98f59a3 --- /dev/null +++ b/docs/source/zh/main_classes/model.md @@ -0,0 +1,137 @@ + + +# 模型 + +基类 [`PreTrainedModel`]、[`TFPreTrainedModel`] 和 [`FlaxPreTrainedModel`] 实现了从本地文件或目录加载/保存模型的常用方法,或者从库上提供的预训练模型配置(从 HuggingFace 的 AWS S3 存储库下载)加载模型。 + +[`PreTrainedModel`] 和 [`TFPreTrainedModel`] 还实现了一些所有模型共有的方法: + +- 在向量词嵌入增加新词汇时调整输入标记(token)的大小 +- 对模型的注意力头进行修剪。 + +其他的通用方法在 [`~modeling_utils.ModuleUtilsMixin`](用于 PyTorch 模型)和 [`~modeling_tf_utils.TFModuleUtilsMixin`](用于 TensorFlow 模型)中定义;文本生成方面的方法则定义在 [`~generation.GenerationMixin`](用于 PyTorch 模型)、[`~generation.TFGenerationMixin`](用于 TensorFlow 模型)和 [`~generation.FlaxGenerationMixin`](用于 Flax/JAX 模型)中。 + +## PreTrainedModel + +[[autodoc]] PreTrainedModel + - push_to_hub + - all + + + +### 大模型加载 + +在 Transformers 4.20.0 中,[`~PreTrainedModel.from_pretrained`] 方法已重新设计,以适应使用 [Accelerate](https://huggingface.co/docs/accelerate/big_modeling) 加载大型模型的场景。这需要您使用的 Accelerate 和 PyTorch 版本满足: Accelerate >= 0.9.0, PyTorch >= 1.9.0。除了创建完整模型,然后在其中加载预训练权重(这会占用两倍于模型大小的内存空间,一个用于随机初始化模型,一个用于预训练权重),我们提供了一种选项,将模型创建为空壳,然后只有在加载预训练权重时才实例化其参数。 + +您可以使用 `low_cpu_mem_usage=True` 激活此选项。首先,在 Meta 设备上创建模型(带有空权重),然后将状态字典加载到其中(在分片检查点的情况下逐片加载)。这样,最大使用的内存占用仅为模型的完整大小。 + +```python +from transformers import AutoModelForSeq2SeqLM + +t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", low_cpu_mem_usage=True) +``` + +此外,如果内存不足以放下加载整个模型(目前仅适用于推理),您可以直接将模型放置在不同的设备上。使用 `device_map="auto"`,Accelerate 将确定将每一层放置在哪个设备上,以最大化使用最快的设备(GPU),并将其余部分卸载到 CPU,甚至硬盘上(如果您没有足够的 GPU 内存 或 CPU 内存)。即使模型分布在几个设备上,它也将像您通常期望的那样运行。 + +在传递 `device_map` 时,`low_cpu_mem_usage` 会自动设置为 `True`,因此您不需要指定它: + +```python +from transformers import AutoModelForSeq2SeqLM + +t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") +``` + +您可以通过 `hf_device_map` 属性来查看模型是如何在设备上分割的: + +```python +t0pp.hf_device_map +{'shared': 0, + 'decoder.embed_tokens': 0, + 'encoder': 0, + 'decoder.block.0': 0, + 'decoder.block.1': 1, + 'decoder.block.2': 1, + 'decoder.block.3': 1, + 'decoder.block.4': 1, + 'decoder.block.5': 1, + 'decoder.block.6': 1, + 'decoder.block.7': 1, + 'decoder.block.8': 1, + 'decoder.block.9': 1, + 'decoder.block.10': 1, + 'decoder.block.11': 1, + 'decoder.block.12': 1, + 'decoder.block.13': 1, + 'decoder.block.14': 
1, + 'decoder.block.15': 1, + 'decoder.block.16': 1, + 'decoder.block.17': 1, + 'decoder.block.18': 1, + 'decoder.block.19': 1, + 'decoder.block.20': 1, + 'decoder.block.21': 1, + 'decoder.block.22': 'cpu', + 'decoder.block.23': 'cpu', + 'decoder.final_layer_norm': 'cpu', + 'decoder.dropout': 'cpu', + 'lm_head': 'cpu'} +``` + +您还可以按照相同的格式(一个层名称到设备的映射关系的字典)编写自己的设备映射规则。它应该将模型的所有参数映射到给定的设备上,如果该层的所有子模块都在同一设备上,您不必详细说明其中所有子模块的位置。例如,以下设备映射对于 T0pp 将正常工作(只要您有 GPU 内存): + +```python +device_map = {"shared": 0, "encoder": 0, "decoder": 1, "lm_head": 1} +``` + +另一种减少模型内存影响的方法是以较低精度的 dtype(例如 `torch.float16`)实例化它,或者使用下面介绍的直接量化技术。 + +### 模型实例化 dtype + +在 PyTorch 下,模型通常以 `torch.float32` 格式实例化。如果尝试加载权重为 fp16 的模型,这可能会导致问题,因为它将需要两倍的内存。为了克服此限制,您可以使用 `torch_dtype` 参数显式传递所需的 `dtype`: + +```python +model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype=torch.float16) +``` +或者,如果您希望模型始终以最优的内存模式加载,则可以使用特殊值 `"auto"`,然后 `dtype` 将自动从模型的权重中推导出: +```python +model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype="auto") +``` + +也可以通过以下方式告知从头开始实例化的模型要使用哪种 `dtype`: + +```python +config = T5Config.from_pretrained("t5") +model = AutoModel.from_config(config) +``` + +由于 PyTorch 的设计,此功能仅适用于浮点类型。 + + +## ModuleUtilsMixin + +[[autodoc]] modeling_utils.ModuleUtilsMixin + +TFPreTrainedModel +[[autodoc]] TFPreTrainedModel +- push_to_hub +- all + +## TFModelUtilsMixin +[[autodoc]] modeling_tf_utils.TFModelUtilsMixin + +FlaxPreTrainedModel +[[autodoc]] FlaxPreTrainedModel +- push_to_hub +- all + +## 推送到 Hub +[[autodoc]] utils.PushToHubMixin + +## 分片检查点 +[[autodoc]] modeling_utils.load_sharded_checkpoint From 1394e08cf099d16515c1889ab9507946489f5afe Mon Sep 17 00:00:00 2001 From: Dean Wyatte <2512762+dwyatte@users.noreply.github.com> Date: Thu, 16 Nov 2023 02:56:34 -0700 Subject: [PATCH 193/268] Support ONNX export for causal LM sequence classifiers (#27450) support onnx for causal lm sequence classification --- src/transformers/models/ctrl/modeling_ctrl.py | 2 +- .../models/deprecated/open_llama/modeling_open_llama.py | 2 +- src/transformers/models/gpt2/modeling_gpt2.py | 2 +- src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | 2 +- src/transformers/models/gpt_neo/modeling_gpt_neo.py | 2 +- src/transformers/models/gpt_neox/modeling_gpt_neox.py | 2 +- src/transformers/models/gptj/modeling_gptj.py | 2 +- src/transformers/models/llama/modeling_llama.py | 2 +- src/transformers/models/mistral/modeling_mistral.py | 2 +- src/transformers/models/openai/modeling_openai.py | 2 +- src/transformers/models/opt/modeling_opt.py | 2 +- src/transformers/models/persimmon/modeling_persimmon.py | 2 +- src/transformers/models/phi/modeling_phi.py | 2 +- src/transformers/models/transfo_xl/modeling_transfo_xl.py | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index cec68de07dda..489cd7564b7b 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -796,7 +796,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py index f0558edf6b53..e4607d56f706 100644 --- 
a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py +++ b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py @@ -924,7 +924,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index bbae7a6c5553..b5adf39ecee7 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -1451,7 +1451,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index 533c471aa95f..2d12d5909742 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -1184,7 +1184,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index aa1f1295157d..368920f3769c 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -1090,7 +1090,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index cc62d7fe4963..33cc00cc2f42 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -948,7 +948,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index 45c7114943b7..b4989a98002e 100644 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -1001,7 +1001,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index fd4bd16fd2df..b9625dd92139 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ 
b/src/transformers/models/llama/modeling_llama.py @@ -1204,7 +1204,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 445b7d95cb6b..3aefb03d8c6d 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -1174,7 +1174,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 2d56272721e2..26bc0ee8a037 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -814,7 +814,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index c62c517f9e2c..5b56ff9e618d 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -1030,7 +1030,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/persimmon/modeling_persimmon.py b/src/transformers/models/persimmon/modeling_persimmon.py index 36be8761871f..f1b7ae320539 100644 --- a/src/transformers/models/persimmon/modeling_persimmon.py +++ b/src/transformers/models/persimmon/modeling_persimmon.py @@ -925,7 +925,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/phi/modeling_phi.py b/src/transformers/models/phi/modeling_phi.py index 7009b59c8318..7f0f8caecb0a 100644 --- a/src/transformers/models/phi/modeling_phi.py +++ b/src/transformers/models/phi/modeling_phi.py @@ -938,7 +938,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_transfo_xl.py index 2bb7d3b91238..1e944c335ae7 100644 --- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py @@ -1247,7 +1247,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - 
sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to( + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( logits.device ) else: From 85fde09c97213bf7e8625f83096bb2a9e183f987 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Thu, 16 Nov 2023 11:13:07 +0100 Subject: [PATCH 194/268] [`pytest`] Avoid flash attn test marker warning (#27509) add flash attn markers --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 37bc1d7efe16..d13ee54afcf1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,3 +21,6 @@ known-first-party = ["transformers"] [tool.pytest.ini_options] doctest_optionflags="NUMBER NORMALIZE_WHITESPACE ELLIPSIS" doctest_glob="**/*.md" +markers = [ + "flash_attn_test: marks tests related to flash attention (deselect with '-m \"not flash_attn_test\"')", +] \ No newline at end of file From 69c9b89fcb2a45875fbf3fd6594ed65403b95170 Mon Sep 17 00:00:00 2001 From: Phuc Van Phan Date: Thu, 16 Nov 2023 20:16:19 +0700 Subject: [PATCH 195/268] docs: add docs for map, and add num procs to load_dataset (#27520) --- .../speech-recognition/run_flax_speech_recognition_seq2seq.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py b/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py index 67de00c00e19..9f602e1c8586 100644 --- a/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py +++ b/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py @@ -439,6 +439,7 @@ def main(): data_args.dataset_config_name, split=data_args.train_split_name, cache_dir=data_args.dataset_cache_dir, + num_proc=data_args.preprocessing_num_workers, token=True if model_args.use_auth_token else None, ) @@ -448,6 +449,7 @@ def main(): data_args.dataset_config_name, split=data_args.eval_split_name, cache_dir=data_args.dataset_cache_dir, + num_proc=data_args.preprocessing_num_workers, token=True if model_args.use_auth_token else None, ) @@ -551,7 +553,7 @@ def prepare_dataset(batch): prepare_dataset, remove_columns=next(iter(raw_datasets.values())).column_names, num_proc=num_workers, - desc="preprocess train dataset", + desc="preprocess train and eval dataset", ) # filter training data with inputs longer than max_input_length From 4989e73e2f86f991a7c2aec4852d873903c71c14 Mon Sep 17 00:00:00 2001 From: Matt Date: Thu, 16 Nov 2023 13:47:43 +0000 Subject: [PATCH 196/268] Update the TF pin for 2.15 (#27375) * Move the TF pin for 2.15 * make fixup --- examples/tensorflow/_tests_requirements.txt | 4 ++-- setup.py | 8 ++++---- src/transformers/dependency_versions_table.py | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/examples/tensorflow/_tests_requirements.txt b/examples/tensorflow/_tests_requirements.txt index e9351b2b4220..161a045bd51e 100644 --- a/examples/tensorflow/_tests_requirements.txt +++ b/examples/tensorflow/_tests_requirements.txt @@ -1,5 +1,5 @@ -tensorflow<2.15 -keras<2.15 +tensorflow<2.16 +keras<2.16 tensorboard scikit-learn seqeval diff --git a/setup.py b/setup.py index e42a29585f97..764411dab31a 100644 --- a/setup.py +++ b/setup.py @@ -127,7 +127,7 @@ "jieba", "kenlm", # Keras pin - this is to make sure Keras 3 doesn't destroy us. Remove or change when we have proper support. 
- "keras<2.15", + "keras<2.16", "keras-nlp>=0.3.1", "librosa", "nltk", @@ -169,9 +169,9 @@ "sudachidict_core>=20220729", "tensorboard", # TensorFlow pin. When changing this value, update examples/tensorflow/_tests_requirements.txt accordingly - "tensorflow-cpu>=2.6,<2.15", - "tensorflow>=2.6,<2.15", - "tensorflow-text<2.15", + "tensorflow-cpu>=2.6,<2.16", + "tensorflow>=2.6,<2.16", + "tensorflow-text<2.16", "tf2onnx", "timeout-decorator", "timm", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 736b3128100f..d460790c8ec5 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -33,7 +33,7 @@ "jaxlib": "jaxlib>=0.4.1,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", - "keras": "keras<2.15", + "keras": "keras<2.16", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", @@ -74,9 +74,9 @@ "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorboard": "tensorboard", - "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.15", - "tensorflow": "tensorflow>=2.6,<2.15", - "tensorflow-text": "tensorflow-text<2.15", + "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.16", + "tensorflow": "tensorflow>=2.6,<2.16", + "tensorflow-text": "tensorflow-text<2.16", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", From 5603fad2479ad22ca4689f6a4dbf56ef2f1f0973 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 16 Nov 2023 14:50:39 +0100 Subject: [PATCH 197/268] Revert "add attention_mask and position_ids in assisted model" (#27523) * Revert "add attention_mask and position_ids in assisted model (#26892)" This reverts commit 184f60dcec6f7f664687a9e211e8d2216052b05d. * more debug --- src/transformers/generation/utils.py | 90 +++++++++-------- tests/models/whisper/test_modeling_whisper.py | 97 +++++++++++++++++++ 2 files changed, 141 insertions(+), 46 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 10ffffc37ca8..14e4b1012911 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -4504,6 +4504,11 @@ def assisted_decoding( else: num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens + # check if assistant model accepts encoder_outputs + assistant_accepts_encoder_outputs = "encoder_outputs" in set( + inspect.signature(assistant_model.forward).parameters.keys() + ) + # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() @@ -4546,6 +4551,15 @@ def assisted_decoding( # other auxiliary variables max_len = stopping_criteria[0].max_length + assistant_kv_indexing = ( + 1 + if "bloom" in assistant_model.__class__.__name__.lower() + or ( + assistant_model.config.architectures is not None + and "bloom" in assistant_model.config.architectures[0].lower() + ) + else 0 + ) this_peer_finished = False # used by synced_gpus only while True: @@ -4566,28 +4580,42 @@ def assisted_decoding( # `.generate()` call if we decide to add `past_key_values` as a possible output of generate, as we # need access to the assistant cache to secure strong speedups. 
candidate_input_ids = input_ids - assistant_attention_mask = model_kwargs.get("attention_mask", None) - assistant_decoder_attention_mask = model_kwargs.get("decoder_attention_mask", None) - assistant_encoder_outputs = (model_kwargs.get("assistant_encoder_outputs", None),) for _ in range(int(num_assistant_tokens)): # 1.1. use the assistant model to obtain the next candidate logits - assistant_inputs = assistant_model.prepare_inputs_for_generation( - candidate_input_ids, - attention_mask=assistant_attention_mask, - decoder_attention_mask=assistant_decoder_attention_mask, - encoder_outputs=assistant_encoder_outputs, - past_key_values=model_kwargs.get("assistant_past_key_values", None), - ) - if assistant_inputs.get("past_key_values", None) is not None: + if "assistant_past_key_values" in model_kwargs: + prev_seq_len = model_kwargs["assistant_past_key_values"][0][assistant_kv_indexing].shape[-2] + # `new_token_len` can be 1 or 2 (next token in assistant + last token picked by the larger model) + new_token_len = candidate_input_ids.shape[1] - prev_seq_len + assist_inputs = candidate_input_ids[:, -new_token_len:] + # TODO (joao): make it compatible with models that use unconventional fwd pass logic, like blip2 + if assistant_model.config.is_encoder_decoder: + assistant_model_outputs = assistant_model( + decoder_input_ids=assist_inputs, + past_key_values=model_kwargs["assistant_past_key_values"], + encoder_outputs=model_kwargs["assistant_encoder_outputs"], + ) + else: + encoder_kwargs = {} + + if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: + encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] + + assistant_model_outputs = assistant_model( + assist_inputs, past_key_values=model_kwargs["assistant_past_key_values"], **encoder_kwargs + ) + else: if assistant_model.config.is_encoder_decoder: - input_ids_len = assistant_inputs["decoder_input_ids"].shape[-1] + assistant_model_outputs = assistant_model( + decoder_input_ids=candidate_input_ids, + encoder_outputs=model_kwargs["assistant_encoder_outputs"], + ) else: - input_ids_len = assistant_inputs["input_ids"].shape[-1] + encoder_kwargs = {} - if input_ids_len not in (1, 2): - raise ValueError("The length of the input ids in assistant inputs should be 1 or 2") + if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: + encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] - assistant_model_outputs = assistant_model(**assistant_inputs) + assistant_model_outputs = assistant_model(candidate_input_ids, **encoder_kwargs) # 1.2. 
greedily select the next candidate token model_kwargs["assistant_past_key_values"] = assistant_model_outputs.past_key_values @@ -4595,31 +4623,8 @@ def assisted_decoding( assistant_model_outputs.logits[:, -1, :] = logits_processor( candidate_input_ids, assistant_model_outputs.logits[:, -1, :] ) - new_token = assistant_model_outputs.logits[:, -1, :].argmax(dim=-1) candidate_input_ids = torch.cat((candidate_input_ids, new_token[:, None]), dim=-1) - if assistant_model.config.is_encoder_decoder and assistant_decoder_attention_mask is not None: - assistant_decoder_attention_mask = torch.cat( - ( - assistant_decoder_attention_mask, - torch.ones( - [1, 1], - dtype=assistant_decoder_attention_mask.dtype, - device=assistant_decoder_attention_mask.device, - ), - ), - dim=-1, - ) - elif not assistant_model.config.is_encoder_decoder and assistant_attention_mask is not None: - assistant_attention_mask = torch.cat( - ( - assistant_attention_mask, - torch.ones( - [1, 1], dtype=assistant_attention_mask.dtype, device=assistant_attention_mask.device - ), - ), - dim=-1, - ) # 1.3. stop assistant generation on EOS if eos_token_id_tensor is not None: @@ -4755,13 +4760,6 @@ def assisted_decoding( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) - # Update attention_mask for the assistant's next round of generations - if n_matches > 0 and model_kwargs.get("attention_mask", None) is not None: - attention_mask = model_kwargs["attention_mask"] - model_kwargs["attention_mask"] = torch.cat( - [attention_mask, attention_mask.new_ones((attention_mask.shape[0], n_matches))], dim=-1 - ) - # if eos_token was found in one sentence, set sentence to finished if eos_token_id_tensor is not None: unfinished_sequences = unfinished_sequences.mul( diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 22290bab6691..c7d6fb692654 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -18,6 +18,7 @@ import inspect import os import tempfile +import time import unittest import numpy as np @@ -1736,6 +1737,102 @@ def test_generate_with_prompt_ids_and_no_non_prompt_forced_decoder_ids(self): self.assertTrue(prompt in text) + @slow + @require_torch_gpu + def test_speculative_decoding_distil(self): + torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 + model_id = "openai/whisper-large-v2" + model = WhisperForConditionalGeneration.from_pretrained( + model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True + ) + model.to(torch_device) + + processor = WhisperProcessor.from_pretrained(model_id) + + assistant_model_id = "distil-whisper/distil-large-v2" + assistant_model = WhisperForCausalLM.from_pretrained( + assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True + ) + assistant_model.to(torch_device) + + dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + sample = dataset[0]["audio"] + + input_features = processor(sample["array"], return_tensors="pt").input_features.to("cuda").to(torch.float16) + + # warm up assisted decoding + _ = model.generate(input_features, assistant_model=assistant_model) + # warm up non-assisted decoding + _ = model.generate(input_features) + + # assisted decoding + start_time = time.time() + tokens = model.generate(input_features, assistant_model=assistant_model) + total_time_assist = time.time() - start_time + + transcription_ass = 
processor.batch_decode(tokens, skip_special_tokens=True) + + # non-assisted decoding + start_time = time.time() + tokens = model.generate(input_features) + total_time_non_assist = time.time() - start_time + + transcription_non_ass = processor.batch_decode(tokens, skip_special_tokens=True) + + assert transcription_ass == transcription_non_ass + assert transcription_ass == [ + " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel." + ] + assert total_time_non_assist > total_time_assist, "Make sure that assistant decoding is faster" + + @slow + @require_torch_gpu + def test_speculative_decoding_non_distil(self): + torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 + model_id = "openai/whisper-large-v2" + model = WhisperForConditionalGeneration.from_pretrained( + model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True + ) + model.to(torch_device) + + processor = WhisperProcessor.from_pretrained(model_id) + + assistant_model_id = "openai/whisper-tiny" + assistant_model = WhisperForConditionalGeneration.from_pretrained( + assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True + ) + assistant_model.to(torch_device) + + dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + sample = dataset[0]["audio"] + + input_features = processor(sample["array"], return_tensors="pt").input_features.to("cuda").to(torch.float16) + + # warm up assisted decoding + _ = model.generate(input_features, assistant_model=assistant_model) + # warm up non-assisted decoding + _ = model.generate(input_features) + + # assisted decoding + start_time = time.time() + tokens = model.generate(input_features, assistant_model=assistant_model) + total_time_assist = time.time() - start_time + + transcription_ass = processor.batch_decode(tokens, skip_special_tokens=True) + + # non-assisted decoding + start_time = time.time() + tokens = model.generate(input_features) + total_time_non_assist = time.time() - start_time + + transcription_non_ass = processor.batch_decode(tokens, skip_special_tokens=True) + + assert transcription_ass == transcription_non_ass + assert transcription_ass == [ + " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel." 
+ ] + assert total_time_non_assist > total_time_assist, "Make sure that assistant decoding is faster" + def prepare_whisper_encoder_inputs_dict(config, input_features, head_mask=None): if head_mask is None: From fd65aa981868cd1066738658ebd3efdc9c43df55 Mon Sep 17 00:00:00 2001 From: Lucain Date: Thu, 16 Nov 2023 15:29:53 +0100 Subject: [PATCH 198/268] Set `usedforsecurity=False` in hashlib methods (FIPS compliance) (#27483) * Set usedforsecurity=False in hashlib methods (FIPS compliance) * trigger ci * tokenizers version * deps * bump hfh version * let's try this --- .../research_projects/codeparrot/scripts/preprocessing.py | 4 ++-- examples/research_projects/lxmert/utils.py | 6 +++--- examples/research_projects/visual_bert/utils.py | 6 +++--- setup.py | 3 ++- src/transformers/dependency_versions_table.py | 2 +- src/transformers/models/whisper/convert_openai_to_hf.py | 6 +++--- tests/pipelines/test_pipelines_depth_estimation.py | 5 +++-- tests/pipelines/test_pipelines_image_segmentation.py | 4 ++-- tests/pipelines/test_pipelines_mask_generation.py | 4 ++-- 9 files changed, 21 insertions(+), 19 deletions(-) diff --git a/examples/research_projects/codeparrot/scripts/preprocessing.py b/examples/research_projects/codeparrot/scripts/preprocessing.py index aecc37223f0d..f3b9efa9bed1 100644 --- a/examples/research_projects/codeparrot/scripts/preprocessing.py +++ b/examples/research_projects/codeparrot/scripts/preprocessing.py @@ -1,5 +1,4 @@ import gzip -import hashlib import json import multiprocessing import os @@ -11,6 +10,7 @@ import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset +from huggingface_hub.utils import insecure_hashlib from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser @@ -21,7 +21,7 @@ def get_hash(example): """Get hash of content field.""" - return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()} + return {"hash": insecure_hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()} def line_stats(example): diff --git a/examples/research_projects/lxmert/utils.py b/examples/research_projects/lxmert/utils.py index 2fc6ea2062ef..c75f523a08ea 100644 --- a/examples/research_projects/lxmert/utils.py +++ b/examples/research_projects/lxmert/utils.py @@ -28,7 +28,6 @@ from collections import OrderedDict from contextlib import contextmanager from functools import partial -from hashlib import sha256 from io import BytesIO from pathlib import Path from urllib.parse import urlparse @@ -39,6 +38,7 @@ import requests import wget from filelock import FileLock +from huggingface_hub.utils import insecure_hashlib from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load @@ -402,12 +402,12 @@ def _resumable_file_manager(): def url_to_filename(url, etag=None): url_bytes = url.encode("utf-8") - url_hash = sha256(url_bytes) + url_hash = insecure_hashlib.sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") - etag_hash = sha256(etag_bytes) + etag_hash = insecure_hashlib.sha256(etag_bytes) filename += "." 
+ etag_hash.hexdigest() if url.endswith(".h5"): diff --git a/examples/research_projects/visual_bert/utils.py b/examples/research_projects/visual_bert/utils.py index 2fc6ea2062ef..c75f523a08ea 100644 --- a/examples/research_projects/visual_bert/utils.py +++ b/examples/research_projects/visual_bert/utils.py @@ -28,7 +28,6 @@ from collections import OrderedDict from contextlib import contextmanager from functools import partial -from hashlib import sha256 from io import BytesIO from pathlib import Path from urllib.parse import urlparse @@ -39,6 +38,7 @@ import requests import wget from filelock import FileLock +from huggingface_hub.utils import insecure_hashlib from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load @@ -402,12 +402,12 @@ def _resumable_file_manager(): def url_to_filename(url, etag=None): url_bytes = url.encode("utf-8") - url_hash = sha256(url_bytes) + url_hash = insecure_hashlib.sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") - etag_hash = sha256(etag_bytes) + etag_hash = insecure_hashlib.sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".h5"): diff --git a/setup.py b/setup.py index 764411dab31a..86c8a8a5bffb 100644 --- a/setup.py +++ b/setup.py @@ -118,7 +118,7 @@ "fugashi>=1.0", "GitPython<3.1.19", "hf-doc-builder>=0.3.0", - "huggingface-hub>=0.16.4,<1.0", + "huggingface-hub>=0.19.3,<1.0", "importlib_metadata", "ipadic>=1.0.0,<2.0", "isort>=5.5.4", @@ -321,6 +321,7 @@ def run(self): "rjieba", "beautifulsoup4", "tensorboard", + "pydantic", ) + extras["retrieval"] + extras["modelcreation"] diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index d460790c8ec5..30e902d6989c 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -25,7 +25,7 @@ "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", - "huggingface-hub": "huggingface-hub>=0.16.4,<1.0", + "huggingface-hub": "huggingface-hub>=0.19.3,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", diff --git a/src/transformers/models/whisper/convert_openai_to_hf.py b/src/transformers/models/whisper/convert_openai_to_hf.py index 0db555c4aab2..0d6cdaa95882 100755 --- a/src/transformers/models/whisper/convert_openai_to_hf.py +++ b/src/transformers/models/whisper/convert_openai_to_hf.py @@ -15,7 +15,6 @@ # limitations under the License. 
import argparse -import hashlib import io import json import os @@ -24,6 +23,7 @@ import warnings import torch +from huggingface_hub.utils import insecure_hashlib from torch import nn from tqdm import tqdm @@ -114,7 +114,7 @@ def _download(url: str, root: str) -> io.BytesIO: if os.path.isfile(download_target): model_bytes = open(download_target, "rb").read() - if hashlib.sha256(model_bytes).hexdigest() == expected_sha256: + if insecure_hashlib.sha256(model_bytes).hexdigest() == expected_sha256: return torch.load(io.BytesIO(model_bytes)) else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") @@ -132,7 +132,7 @@ def _download(url: str, root: str) -> io.BytesIO: loop.update(len(buffer)) model_bytes = open(download_target, "rb").read() - if hashlib.sha256(model_bytes).hexdigest() != expected_sha256: + if insecure_hashlib.sha256(model_bytes).hexdigest() != expected_sha256: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) diff --git a/tests/pipelines/test_pipelines_depth_estimation.py b/tests/pipelines/test_pipelines_depth_estimation.py index 054574b4fd80..009aa1c942d2 100644 --- a/tests/pipelines/test_pipelines_depth_estimation.py +++ b/tests/pipelines/test_pipelines_depth_estimation.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import hashlib import unittest +from huggingface_hub.utils import insecure_hashlib + from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( @@ -44,7 +45,7 @@ def open(*args, **kwargs): def hashimage(image: Image) -> str: - m = hashlib.md5(image.tobytes()) + m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest() diff --git a/tests/pipelines/test_pipelines_image_segmentation.py b/tests/pipelines/test_pipelines_image_segmentation.py index dbc0c0db809a..9c5c8fdfd4a8 100644 --- a/tests/pipelines/test_pipelines_image_segmentation.py +++ b/tests/pipelines/test_pipelines_image_segmentation.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import hashlib import tempfile import unittest from typing import Dict @@ -21,6 +20,7 @@ import numpy as np import requests from datasets import load_dataset +from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, @@ -59,7 +59,7 @@ def open(*args, **kwargs): def hashimage(image: Image) -> str: - m = hashlib.md5(image.tobytes()) + m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] diff --git a/tests/pipelines/test_pipelines_mask_generation.py b/tests/pipelines/test_pipelines_mask_generation.py index cf1703906515..c9a44a535483 100644 --- a/tests/pipelines/test_pipelines_mask_generation.py +++ b/tests/pipelines/test_pipelines_mask_generation.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import hashlib import unittest from typing import Dict import numpy as np +from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, @@ -46,7 +46,7 @@ def open(*args, **kwargs): def hashimage(image: Image) -> str: - m = hashlib.md5(image.tobytes()) + m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] From 6b39470b74e916f9698f91bfc1fda23bed0a89d1 Mon Sep 17 00:00:00 2001 From: Marc Sun <57196510+SunMarc@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:35:40 +0100 Subject: [PATCH 199/268] Raise error when quantizing a quantized model (#27500) add error msg --- src/transformers/modeling_utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 57eb08a4159b..6d3a4d34dfbd 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -2781,7 +2781,12 @@ def from_pretrained( quantization_method_from_config = config.quantization_config.get( "quant_method", QuantizationMethod.BITS_AND_BYTES ) - + if quantization_method_from_config is not None and quantization_method_from_args is not None: + if quantization_method_from_config != quantization_method_from_args: + raise ValueError( + f"The model is already quantized with {quantization_method_from_config}. " + f"You can't quantize it again with {quantization_method_from_args}" + ) if quantization_method_from_config == QuantizationMethod.GPTQ and quantization_method_from_args is not None: loading_attr_dict = quantization_config.get_loading_attributes() for attr, val in loading_attr_dict.items(): From acb5b4aff5ce4c4a9929d3c9bb658b1f64637295 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 16 Nov 2023 17:00:46 +0100 Subject: [PATCH 200/268] Disable docker image build job `latest-pytorch-amd` for now (#27541) fix Co-authored-by: ydshieh --- .github/workflows/build-docker-images.yml | 69 ++++++++++++----------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml index 48db1a55aa57..b267ad7882d8 100644 --- a/.github/workflows/build-docker-images.yml +++ b/.github/workflows/build-docker-images.yml @@ -208,40 +208,41 @@ jobs: push: true tags: huggingface/transformers-pytorch-gpu - latest-pytorch-amd: - name: "Latest PyTorch (AMD) [dev]" - runs-on: [self-hosted, docker-gpu, amd-gpu, single-gpu, mi210] - steps: - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Check out code - uses: actions/checkout@v3 - - name: Login to DockerHub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - name: Build and push - uses: docker/build-push-action@v5 - with: - context: ./docker/transformers-pytorch-amd-gpu - build-args: | - REF=main - push: true - tags: huggingface/transformers-pytorch-amd-gpu${{ inputs.image_postfix }} - # Push CI images still need to be re-built daily - - - name: Build and push (for Push CI) in a daily basis - # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`. - # The later case is useful for manual image building for debugging purpose. Use another tag in this case! 
- if: inputs.image_postfix != '-push-ci' - uses: docker/build-push-action@v5 - with: - context: ./docker/transformers-pytorch-amd-gpu - build-args: | - REF=main - push: true - tags: huggingface/transformers-pytorch-amd-gpu-push-ci +# Need to be fixed with the help from Guillaume. +# latest-pytorch-amd: +# name: "Latest PyTorch (AMD) [dev]" +# runs-on: [self-hosted, docker-gpu, amd-gpu, single-gpu, mi210] +# steps: +# - name: Set up Docker Buildx +# uses: docker/setup-buildx-action@v3 +# - name: Check out code +# uses: actions/checkout@v3 +# - name: Login to DockerHub +# uses: docker/login-action@v3 +# with: +# username: ${{ secrets.DOCKERHUB_USERNAME }} +# password: ${{ secrets.DOCKERHUB_PASSWORD }} +# - name: Build and push +# uses: docker/build-push-action@v5 +# with: +# context: ./docker/transformers-pytorch-amd-gpu +# build-args: | +# REF=main +# push: true +# tags: huggingface/transformers-pytorch-amd-gpu${{ inputs.image_postfix }} +# # Push CI images still need to be re-built daily +# - +# name: Build and push (for Push CI) in a daily basis +# # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`. +# # The later case is useful for manual image building for debugging purpose. Use another tag in this case! +# if: inputs.image_postfix != '-push-ci' +# uses: docker/build-push-action@v5 +# with: +# context: ./docker/transformers-pytorch-amd-gpu +# build-args: | +# REF=main +# push: true +# tags: huggingface/transformers-pytorch-amd-gpu-push-ci latest-tensorflow: name: "Latest TensorFlow [dev]" From 651408a077f842e76e75bfc7d02b8ac38eeb6480 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Thu, 16 Nov 2023 17:43:19 +0100 Subject: [PATCH 201/268] [`Styling`] stylify using ruff (#27144) * try to stylify using ruff * might need to remove these changes? 
* use ruff format and ruff check
* use isinstance instead of type comparison
* use # fmt: skip
* use # fmt: skip
* nits
* some styling changes
* update ci job
* nits isinstance
* more files update
* nits
* more nits
* small nits
* check and format
* revert wrong changes
* actually use formatter instead of checker
* nits
* well docbuilder is overwriting this commit
* revert notebook changes
* try to nuke docbuilder
* style
* fix feature extraction test
* remove `indent-width = 4`
* fixup
* more nits
* update the ruff version that we use
* style
* nuke docbuilder styling
* leave the print for detected changes
* nits
* Remove file I/O
Co-authored-by: charliermarsh 
* style
* nits
* revert notebook changes
* Add # fmt skip when possible
* Add # fmt skip when possible
* Fix
* More ` # fmt: skip` usage
* More ` # fmt: skip` usage
* More ` # fmt: skip` usage
* Nits
* more fixes
* fix tapas
* Another way to skip
* Recommended way
* Fix two more files
* Remove asynch
Remove asynch
---------
Co-authored-by: charliermarsh 
---
 .circleci/config.yml | 5 +-
 .circleci/create_circleci_config.py | 5 +-
 Makefile | 14 +++---
 docs/source/_config.py | 2 +-
 docs/source/en/_config.py | 2 +-
 docs/source/en/tasks/semantic_segmentation.md | 2 +-
 docs/source/ko/_config.py | 2 +-
 docs/source/ko/tasks/semantic_segmentation.md | 2 +-
 docs/source/pt/_config.py | 2 +-
 .../flax/text-classification/run_flax_glue.py | 2 +-
 examples/legacy/pytorch-lightning/run_glue.py | 2 +-
 examples/legacy/pytorch-lightning/run_ner.py | 2 +-
 .../deebert/src/modeling_highway_bert.py | 6 +--
 .../research_projects/longform-qa/eli5_app.py | 4 +-
 .../lxmert/modeling_frcnn.py | 4 +-
 .../emmental/modeling_bert_masked.py | 4 +-
 .../movement-pruning/masked_run_glue.py | 3 +-
 .../quantization-qdqbert/quant_trainer.py | 2 +-
 .../visual_bert/modeling_frcnn.py | 4 +-
 hubconf.py | 1 +
 pyproject.toml | 19 +++++--
 scripts/check_tokenizers.py | 8 +--
 scripts/fsmt/fsmt-make-super-tiny-model.py | 9 ++--
 scripts/fsmt/fsmt-make-tiny-model.py | 14 +++---
 scripts/fsmt/gen-card-allenai-wmt16.py | 1 +
 scripts/fsmt/gen-card-allenai-wmt19.py | 1 +
 scripts/fsmt/gen-card-facebook-wmt19.py | 3 +-
 .../pegasus/build_test_sample_spm_no_bos.py | 5 +-
 scripts/stale.py | 4 +-
 setup.py | 7 ++-
 src/transformers/configuration_utils.py | 1 +
 src/transformers/convert_slow_tokenizer.py | 14 ++----
 src/transformers/data/data_collator.py | 4 +-
 src/transformers/dependency_versions_table.py | 3 +-
 src/transformers/image_utils.py | 6 +--
 src/transformers/modeling_flax_utils.py | 1 +
 src/transformers/modeling_tf_utils.py | 2 +
 src/transformers/modeling_utils.py | 1 +
 .../models/align/configuration_align.py | 1 +
 .../models/altclip/configuration_altclip.py | 1 +
 .../models/altclip/modeling_altclip.py | 7 +--
 .../models/altclip/processing_altclip.py | 1 +
 ...iguration_audio_spectrogram_transformer.py | 1 +
 .../autoformer/configuration_autoformer.py | 1 +
 .../models/bark/processing_bark.py | 1 +
 .../models/bart/configuration_bart.py | 1 +
 .../models/bart/tokenization_bart_fast.py | 1 +
 .../models/beit/configuration_beit.py | 1 +
 .../models/bert/configuration_bert.py | 1 +
 .../configuration_bert_generation.py | 1 +
 .../models/big_bird/configuration_big_bird.py | 1 +
 .../models/big_bird/modeling_big_bird.py | 8 +--
 .../configuration_bigbird_pegasus.py | 1 +
 .../modeling_bigbird_pegasus.py | 8 +--
 .../models/biogpt/configuration_biogpt.py | 1 +
 .../models/bit/configuration_bit.py | 1 +
 .../blenderbot/configuration_blenderbot.py | 1 +
 .../models/blenderbot/modeling_blenderbot.py | 4 +-
.../blenderbot/tokenization_blenderbot.py | 4 +- .../tokenization_blenderbot_fast.py | 8 +-- .../configuration_blenderbot_small.py | 1 + .../modeling_blenderbot_small.py | 4 +- .../models/blip/configuration_blip.py | 1 + .../models/blip/modeling_blip_text.py | 4 +- .../models/blip/modeling_tf_blip_text.py | 4 +- .../models/blip/processing_blip.py | 1 + .../models/blip_2/configuration_blip_2.py | 1 + .../models/blip_2/modeling_blip_2.py | 4 +- .../models/blip_2/processing_blip_2.py | 5 +- .../bridgetower/configuration_bridgetower.py | 3 ++ .../bridgetower/modeling_bridgetower.py | 2 +- .../bridgetower/processing_bridgetower.py | 1 + .../models/bros/configuration_bros.py | 1 + .../models/bros/processing_bros.py | 1 + .../models/canine/configuration_canine.py | 1 + .../models/canine/modeling_canine.py | 2 +- .../configuration_chinese_clip.py | 1 + .../chinese_clip/modeling_chinese_clip.py | 4 +- .../chinese_clip/processing_chinese_clip.py | 1 + .../models/clap/configuration_clap.py | 1 + .../models/clap/processing_clap.py | 1 + .../models/clip/configuration_clip.py | 1 + src/transformers/models/clip/modeling_clip.py | 4 +- .../models/clip/processing_clip.py | 1 + .../models/clipseg/configuration_clipseg.py | 1 + .../models/clipseg/modeling_clipseg.py | 7 +-- .../models/clipseg/processing_clipseg.py | 1 + src/transformers/models/clvp/modeling_clvp.py | 4 +- .../models/clvp/processing_clvp.py | 9 ++-- .../models/codegen/configuration_codegen.py | 1 + .../configuration_conditional_detr.py | 1 + .../image_processing_conditional_detr.py | 13 ++--- .../modeling_conditional_detr.py | 12 ++--- .../models/convbert/configuration_convbert.py | 1 + .../models/convbert/tokenization_convbert.py | 4 +- .../convbert/tokenization_convbert_fast.py | 4 +- .../models/convnext/configuration_convnext.py | 1 + .../convnextv2/configuration_convnextv2.py | 1 + .../models/cpmant/configuration_cpmant.py | 1 + .../models/cvt/configuration_cvt.py | 1 + .../data2vec/configuration_data2vec_audio.py | 1 + .../data2vec/configuration_data2vec_text.py | 1 + .../data2vec/configuration_data2vec_vision.py | 1 + .../data2vec/modeling_data2vec_vision.py | 4 +- .../models/deberta/configuration_deberta.py | 3 +- .../deberta_v2/configuration_deberta_v2.py | 3 +- .../models/deberta_v2/modeling_deberta_v2.py | 8 +-- .../configuration_deformable_detr.py | 1 + .../models/deit/configuration_deit.py | 1 + .../deprecated/mctct/configuration_mctct.py | 1 + .../deprecated/mctct/processing_mctct.py | 1 + .../open_llama/configuration_open_llama.py | 1 + .../retribert/configuration_retribert.py | 1 + .../configuration_trajectory_transformer.py | 1 + .../deprecated/van/configuration_van.py | 1 + .../models/deta/configuration_deta.py | 1 + src/transformers/models/deta/modeling_deta.py | 4 +- .../models/detr/configuration_detr.py | 1 + .../models/dinat/configuration_dinat.py | 1 + .../models/dinov2/configuration_dinov2.py | 1 + .../distilbert/configuration_distilbert.py | 1 + .../models/donut/configuration_donut_swin.py | 1 + .../models/donut/processing_donut.py | 1 + .../models/dpr/configuration_dpr.py | 1 + .../models/dpt/configuration_dpt.py | 1 + .../configuration_efficientnet.py | 1 + .../models/electra/configuration_electra.py | 1 + .../models/electra/modeling_flax_electra.py | 1 + .../models/electra/tokenization_electra.py | 4 +- .../electra/tokenization_electra_fast.py | 4 +- .../models/encodec/configuration_encodec.py | 1 + .../configuration_encoder_decoder.py | 1 + .../modeling_encoder_decoder.py | 1 + .../modeling_flax_encoder_decoder.py 
| 1 + .../modeling_tf_encoder_decoder.py | 1 + .../models/ernie/configuration_ernie.py | 1 + .../models/ernie_m/configuration_ernie_m.py | 1 + .../models/esm/configuration_esm.py | 1 + .../models/esm/modeling_esmfold.py | 8 +-- .../models/falcon/configuration_falcon.py | 1 + .../models/flaubert/modeling_tf_flaubert.py | 21 ++++---- .../models/flava/configuration_flava.py | 1 + .../models/flava/modeling_flava.py | 16 ++---- .../models/flava/processing_flava.py | 1 + .../models/fnet/configuration_fnet.py | 3 +- src/transformers/models/fnet/modeling_fnet.py | 2 +- .../models/focalnet/configuration_focalnet.py | 1 + .../models/fsmt/configuration_fsmt.py | 2 + src/transformers/models/fsmt/modeling_fsmt.py | 8 +-- .../models/funnel/configuration_funnel.py | 1 + .../models/fuyu/configuration_fuyu.py | 1 + .../models/fuyu/processing_fuyu.py | 1 + .../models/git/configuration_git.py | 1 + src/transformers/models/git/processing_git.py | 1 + .../models/glpn/configuration_glpn.py | 1 + src/transformers/models/gpt2/modeling_gpt2.py | 15 +++++- .../models/gpt_neo/configuration_gpt_neo.py | 1 + .../models/gpt_neox/configuration_gpt_neox.py | 1 + .../configuration_gpt_neox_japanese.py | 1 + .../models/gptj/configuration_gptj.py | 1 + .../configuration_gptsan_japanese.py | 1 + .../modeling_gptsan_japanese.py | 2 +- .../tokenization_gptsan_japanese.py | 2 +- .../graphormer/configuration_graphormer.py | 1 + .../models/groupvit/configuration_groupvit.py | 1 + .../models/groupvit/modeling_groupvit.py | 4 +- .../models/hubert/configuration_hubert.py | 1 + .../models/idefics/configuration_idefics.py | 3 ++ .../models/idefics/processing_idefics.py | 1 + .../models/informer/configuration_informer.py | 1 + .../models/informer/modeling_informer.py | 7 ++- .../configuration_instructblip.py | 1 + .../instructblip/modeling_instructblip.py | 4 +- .../instructblip/processing_instructblip.py | 5 +- .../models/kosmos2/configuration_kosmos2.py | 2 + .../models/kosmos2/modeling_kosmos2.py | 4 +- .../models/kosmos2/processing_kosmos2.py | 5 +- .../models/layoutlm/configuration_layoutlm.py | 1 + .../models/layoutlm/tokenization_layoutlm.py | 4 +- .../layoutlm/tokenization_layoutlm_fast.py | 4 +- .../layoutlmv2/configuration_layoutlmv2.py | 1 + .../layoutlmv2/processing_layoutlmv2.py | 1 + .../layoutlmv3/configuration_layoutlmv3.py | 1 + .../models/layoutlmv3/modeling_layoutlmv3.py | 5 +- .../layoutlmv3/processing_layoutlmv3.py | 1 + .../layoutlmv3/tokenization_layoutlmv3.py | 1 + .../models/led/configuration_led.py | 1 + .../models/levit/configuration_levit.py | 1 + .../models/lilt/configuration_lilt.py | 1 + .../models/llama/configuration_llama.py | 1 + .../longformer/configuration_longformer.py | 1 + .../longformer/tokenization_longformer.py | 4 +- .../tokenization_longformer_fast.py | 8 +-- .../models/longt5/configuration_longt5.py | 1 + .../models/luke/configuration_luke.py | 1 + .../models/lxmert/tokenization_lxmert.py | 4 +- .../models/lxmert/tokenization_lxmert_fast.py | 4 +- .../models/m2m_100/configuration_m2m_100.py | 1 + .../models/marian/configuration_marian.py | 1 + .../models/markuplm/configuration_markuplm.py | 1 + .../markuplm/feature_extraction_markuplm.py | 2 +- .../models/markuplm/processing_markuplm.py | 1 + .../mask2former/configuration_mask2former.py | 1 + .../mask2former/modeling_mask2former.py | 2 +- .../maskformer/configuration_maskformer.py | 1 + .../configuration_maskformer_swin.py | 1 + .../models/mbart/configuration_mbart.py | 1 + .../models/mbart/tokenization_mbart.py | 4 +- 
.../models/mbart/tokenization_mbart_fast.py | 4 +- .../models/mbart50/tokenization_mbart50.py | 4 +- .../mbart50/tokenization_mbart50_fast.py | 4 +- .../models/mega/configuration_mega.py | 1 + src/transformers/models/mega/modeling_mega.py | 4 +- .../configuration_megatron_bert.py | 1 + .../models/mgp_str/configuration_mgp_str.py | 1 + .../models/mgp_str/processing_mgp_str.py | 1 + .../mobilebert/configuration_mobilebert.py | 1 + .../mobilebert/tokenization_mobilebert.py | 4 +- .../tokenization_mobilebert_fast.py | 4 +- .../configuration_mobilenet_v1.py | 1 + .../configuration_mobilenet_v2.py | 1 + .../image_processing_mobilenet_v2.py | 3 +- .../mobilevit/configuration_mobilevit.py | 1 + .../mobilevit/image_processing_mobilevit.py | 3 +- .../mobilevitv2/configuration_mobilevitv2.py | 1 + .../models/mpnet/configuration_mpnet.py | 1 + src/transformers/models/mpt/modeling_mpt.py | 2 +- .../models/mra/configuration_mra.py | 1 + .../models/mt5/configuration_mt5.py | 1 + .../models/mt5/modeling_flax_mt5.py | 2 + src/transformers/models/mt5/modeling_mt5.py | 1 + .../models/mt5/modeling_tf_mt5.py | 1 + .../models/musicgen/configuration_musicgen.py | 1 + .../models/musicgen/processing_musicgen.py | 1 + .../models/mvp/configuration_mvp.py | 1 + .../models/mvp/tokenization_mvp_fast.py | 1 + .../models/nat/configuration_nat.py | 1 + .../models/nezha/configuration_nezha.py | 1 + .../models/nllb/tokenization_nllb.py | 4 +- .../models/nllb/tokenization_nllb_fast.py | 4 +- .../models/nllb_moe/configuration_nllb_moe.py | 1 + .../models/nougat/processing_nougat.py | 1 + .../models/nougat/tokenization_nougat_fast.py | 2 +- .../configuration_nystromformer.py | 1 + .../oneformer/configuration_oneformer.py | 1 + .../models/oneformer/modeling_oneformer.py | 2 +- .../models/oneformer/processing_oneformer.py | 1 + .../models/opt/configuration_opt.py | 1 + .../models/owlv2/configuration_owlv2.py | 20 ++++---- .../models/owlv2/modeling_owlv2.py | 12 ++--- .../models/owlv2/processing_owlv2.py | 1 + .../models/owlvit/configuration_owlvit.py | 1 + .../models/owlvit/image_processing_owlvit.py | 1 + .../models/owlvit/modeling_owlvit.py | 4 +- .../models/owlvit/processing_owlvit.py | 1 + .../models/pegasus/configuration_pegasus.py | 1 + .../models/pegasus/tokenization_pegasus.py | 1 + .../pegasus/tokenization_pegasus_fast.py | 1 + .../pegasus_x/configuration_pegasus_x.py | 1 + .../perceiver/configuration_perceiver.py | 1 + .../persimmon/configuration_persimmon.py | 1 + .../models/phi/configuration_phi.py | 1 + .../pix2struct/configuration_pix2struct.py | 1 + .../pix2struct/processing_pix2struct.py | 1 + .../models/plbart/configuration_plbart.py | 1 + .../poolformer/configuration_poolformer.py | 1 + .../pop2piano/feature_extraction_pop2piano.py | 1 + .../models/pop2piano/processing_pop2piano.py | 1 + .../prophetnet/configuration_prophetnet.py | 1 + .../models/pvt/configuration_pvt.py | 1 + .../models/qdqbert/configuration_qdqbert.py | 1 + src/transformers/models/rag/modeling_rag.py | 1 + .../models/rag/modeling_tf_rag.py | 1 + .../models/realm/configuration_realm.py | 1 + .../models/reformer/configuration_reformer.py | 1 + .../models/regnet/configuration_regnet.py | 1 + .../regnet/convert_regnet_to_pytorch.py | 2 +- .../models/rembert/configuration_rembert.py | 1 + .../models/resnet/configuration_resnet.py | 1 + .../models/roberta/configuration_roberta.py | 1 + .../configuration_roberta_prelayernorm.py | 16 +++--- .../models/roc_bert/configuration_roc_bert.py | 1 + .../models/roc_bert/modeling_roc_bert.py | 4 +- 
.../models/roformer/configuration_roformer.py | 1 + .../models/roformer/modeling_flax_roformer.py | 2 +- .../models/roformer/modeling_roformer.py | 2 +- .../models/roformer/modeling_tf_roformer.py | 2 +- .../models/roformer/tokenization_roformer.py | 1 + src/transformers/models/sam/processing_sam.py | 1 + .../configuration_seamless_m4t.py | 1 + .../seamless_m4t/convert_fairseq2_to_hf.py | 21 ++------ .../seamless_m4t/processing_seamless_m4t.py | 1 + .../segformer/configuration_segformer.py | 1 + .../segformer/image_processing_segformer.py | 3 +- .../models/sew/configuration_sew.py | 1 + .../models/sew_d/configuration_sew_d.py | 1 + .../models/sew_d/modeling_sew_d.py | 8 +-- .../configuration_speech_encoder_decoder.py | 1 + .../modeling_speech_encoder_decoder.py | 1 + .../configuration_speech_to_text.py | 1 + .../processing_speech_to_text.py | 1 + .../configuration_speech_to_text_2.py | 1 + .../processing_speech_to_text_2.py | 1 + .../models/speecht5/configuration_speecht5.py | 2 + .../models/speecht5/processing_speecht5.py | 1 + .../models/splinter/configuration_splinter.py | 1 + .../squeezebert/configuration_squeezebert.py | 1 + .../squeezebert/tokenization_squeezebert.py | 4 +- .../tokenization_squeezebert_fast.py | 4 +- .../swiftformer/configuration_swiftformer.py | 1 + .../models/swin/configuration_swin.py | 1 + .../models/swin2sr/configuration_swin2sr.py | 1 + .../models/swinv2/configuration_swinv2.py | 1 + .../configuration_switch_transformers.py | 1 + .../models/t5/configuration_t5.py | 1 + .../configuration_table_transformer.py | 1 + .../modeling_table_transformer.py | 36 +++++++------ .../configuration_time_series_transformer.py | 1 + .../timesformer/configuration_timesformer.py | 1 + .../configuration_timm_backbone.py | 1 + .../transfo_xl/tokenization_transfo_xl.py | 2 +- .../models/trocr/configuration_trocr.py | 1 + .../models/trocr/processing_trocr.py | 1 + .../models/tvlt/configuration_tvlt.py | 1 + .../models/tvlt/processing_tvlt.py | 1 + .../models/umt5/configuration_umt5.py | 1 + src/transformers/models/umt5/modeling_umt5.py | 1 + .../unispeech/configuration_unispeech.py | 1 + .../configuration_unispeech_sat.py | 1 + .../models/upernet/configuration_upernet.py | 1 + .../models/videomae/configuration_videomae.py | 1 + .../models/vilt/configuration_vilt.py | 1 + .../models/vilt/processing_vilt.py | 1 + .../configuration_vision_encoder_decoder.py | 1 + .../modeling_flax_vision_encoder_decoder.py | 1 + .../modeling_tf_vision_encoder_decoder.py | 1 + .../modeling_vision_encoder_decoder.py | 1 + .../processing_vision_text_dual_encoder.py | 1 + .../visual_bert/configuration_visual_bert.py | 2 +- .../visual_bert/modeling_visual_bert.py | 2 +- .../models/vit/configuration_vit.py | 1 + .../vit_hybrid/configuration_vit_hybrid.py | 1 + .../models/vit_mae/configuration_vit_mae.py | 1 + .../models/vit_msn/configuration_vit_msn.py | 1 + .../models/vitdet/configuration_vitdet.py | 1 + .../models/vitmatte/configuration_vitmatte.py | 1 + .../models/vits/configuration_vits.py | 1 + src/transformers/models/vits/modeling_vits.py | 4 +- .../models/vivit/configuration_vivit.py | 1 + .../vivit/convert_vivit_flax_to_pytorch.py | 23 ++++----- .../models/wav2vec2/configuration_wav2vec2.py | 1 + .../models/wav2vec2/processing_wav2vec2.py | 1 + .../configuration_wav2vec2_conformer.py | 1 + .../modeling_wav2vec2_conformer.py | 5 +- .../processing_wav2vec2_with_lm.py | 1 + .../models/wavlm/configuration_wavlm.py | 1 + .../models/whisper/configuration_whisper.py | 1 + 
.../models/whisper/processing_whisper.py | 1 + .../models/x_clip/configuration_x_clip.py | 1 + .../models/x_clip/modeling_x_clip.py | 4 +- .../models/x_clip/processing_x_clip.py | 1 + .../models/xglm/configuration_xglm.py | 1 + .../models/xlm/modeling_tf_xlm.py | 21 ++++---- .../configuration_xlm_prophetnet.py | 1 + .../xlm_prophetnet/modeling_xlm_prophetnet.py | 8 +-- .../xlm_roberta/configuration_xlm_roberta.py | 1 + .../configuration_xlm_roberta_xl.py | 1 + .../models/xmod/configuration_xmod.py | 1 + .../models/yolos/configuration_yolos.py | 1 + .../models/yolos/modeling_yolos.py | 6 +-- .../models/yoso/configuration_yoso.py | 1 + src/transformers/models/yoso/modeling_yoso.py | 2 +- src/transformers/tokenization_utils_base.py | 4 +- src/transformers/trainer_pt_utils.py | 2 +- src/transformers/utils/fx.py | 2 +- .../utils/sentencepiece_model_pb2.py | 12 ++--- ...on_{{cookiecutter.lowercase_modelname}}.py | 2 +- .../run_{{cookiecutter.example_shortcut}}.py | 4 +- ...on_{{cookiecutter.lowercase_modelname}}.py | 3 +- ...ax_{{cookiecutter.lowercase_modelname}}.py | 2 +- .../models/albert/test_tokenization_albert.py | 4 +- tests/models/altclip/test_modeling_altclip.py | 2 +- tests/models/bark/test_modeling_bark.py | 12 ++--- .../barthez/test_tokenization_barthez.py | 4 +- .../test_tokenization_bert_generation.py | 4 +- .../test_tokenization_bert_japanese.py | 50 +++++-------------- .../big_bird/test_tokenization_big_bird.py | 8 +-- tests/models/bloom/test_modeling_bloom.py | 12 ++--- tests/models/byt5/test_tokenization_byt5.py | 10 ++-- .../camembert/test_tokenization_camembert.py | 4 +- .../models/canine/test_tokenization_canine.py | 4 +- .../test_modeling_chinese_clip.py | 6 +-- tests/models/clip/test_processor_clip.py | 4 +- tests/models/clip/test_tokenization_clip.py | 4 +- .../models/clipseg/test_processor_clipseg.py | 4 +- .../test_tokenization_code_llama.py | 8 ++- .../test_tokenization_deberta_v2.py | 4 +- .../test_modeling_tf_encoder_decoder.py | 2 +- .../ernie_m/test_tokenization_ernie_m.py | 4 +- tests/models/esm/test_tokenization_esm.py | 4 +- tests/models/flava/test_processor_flava.py | 4 +- tests/models/fnet/test_modeling_fnet.py | 4 +- tests/models/fnet/test_tokenization_fnet.py | 16 +++--- tests/models/fuyu/test_modeling_fuyu.py | 4 +- tests/models/gpt2/test_modeling_gpt2.py | 6 +-- tests/models/gpt_neo/test_modeling_gpt_neo.py | 4 +- .../test_modeling_gpt_neox_japanese.py | 2 +- .../gpt_sw3/test_tokenization_gpt_sw3.py | 4 +- tests/models/gptj/test_modeling_gptj.py | 4 +- tests/models/gptj/test_modeling_tf_gptj.py | 4 +- .../test_modeling_gptsan_japanese.py | 9 ++-- .../test_tokenization_gptsan_japanese.py | 4 +- .../test_modeling_instructblip.py | 8 +-- .../layoutlmv2/test_processor_layoutlmv2.py | 16 ++---- .../test_tokenization_layoutlmv2.py | 24 +++------ .../layoutlmv3/test_processor_layoutlmv3.py | 16 ++---- .../test_tokenization_layoutlmv3.py | 24 +++------ .../layoutxlm/test_processor_layoutxlm.py | 16 ++---- .../layoutxlm/test_tokenization_layoutxlm.py | 25 +++------- tests/models/llama/test_modeling_llama.py | 16 ++---- tests/models/llama/test_tokenization_llama.py | 8 ++- tests/models/lxmert/test_modeling_lxmert.py | 2 +- .../models/lxmert/test_modeling_tf_lxmert.py | 2 +- .../m2m_100/test_tokenization_m2m_100.py | 12 ++--- .../models/marian/test_tokenization_marian.py | 5 +- .../markuplm/test_processor_markuplm.py | 24 +++------ .../markuplm/test_tokenization_markuplm.py | 26 +++------- .../test_image_processing_mask2former.py | 8 +-- 
.../test_image_processing_maskformer.py | 8 +-- .../mbart50/test_tokenization_mbart50.py | 18 ++----- .../models/mgp_str/test_processor_mgp_str.py | 4 +- .../mgp_str/test_tokenization_mgp_str.py | 4 +- tests/models/mistral/test_modeling_mistral.py | 4 +- tests/models/mvp/test_modeling_mvp.py | 4 +- tests/models/nllb/test_tokenization_nllb.py | 4 +- .../models/nllb_moe/test_modeling_nllb_moe.py | 9 ++-- .../oneformer/test_processor_oneformer.py | 24 +++------ tests/models/owlv2/test_modeling_owlv2.py | 4 +- tests/models/owlvit/test_processor_owlvit.py | 4 +- .../pegasus/test_tokenization_pegasus.py | 4 +- .../perceiver/test_tokenization_perceiver.py | 4 +- tests/models/phi/test_modeling_phi.py | 8 +-- .../reformer/test_tokenization_reformer.py | 4 +- .../test_modeling_roberta_prelayernorm.py | 8 +-- tests/models/sam/test_modeling_sam.py | 4 +- tests/models/sam/test_modeling_tf_sam.py | 4 +- .../test_modeling_seamless_m4t.py | 28 +++-------- .../test_tokenization_seamless_m4t.py | 8 +-- .../test_tokenization_speech_to_text.py | 18 ++----- .../speecht5/test_tokenization_speecht5.py | 22 ++------ .../test_modeling_switch_transformers.py | 1 + tests/models/t5/test_tokenization_t5.py | 4 +- tests/models/tapas/test_tokenization_tapas.py | 8 +-- .../transfo_xl/test_modeling_tf_transfo_xl.py | 34 ++++++------- .../transfo_xl/test_modeling_transfo_xl.py | 8 +-- ...test_modeling_tf_vision_encoder_decoder.py | 2 +- .../test_modeling_vision_encoder_decoder.py | 4 +- ...test_processor_vision_text_dual_encoder.py | 4 +- .../wav2vec2/test_modeling_tf_wav2vec2.py | 29 ++++++----- .../whisper/test_modeling_flax_whisper.py | 4 +- tests/models/whisper/test_modeling_whisper.py | 4 +- .../whisper/test_tokenization_whisper.py | 20 +++----- tests/models/xglm/test_modeling_tf_xglm.py | 4 +- tests/models/xglm/test_modeling_xglm.py | 4 +- tests/models/xglm/test_tokenization_xglm.py | 4 +- .../test_modeling_xlm_prophetnet.py | 4 +- .../test_tokenization_xlm_prophetnet.py | 4 +- .../test_tokenization_xlm_roberta.py | 4 +- tests/models/xlnet/test_tokenization_xlnet.py | 4 +- ..._pipelines_automatic_speech_recognition.py | 16 ++---- .../test_pipelines_conversational.py | 4 +- .../test_pipelines_question_answering.py | 9 ++-- .../test_pipelines_token_classification.py | 20 +++----- tests/test_modeling_common.py | 2 +- tests/test_modeling_tf_common.py | 2 +- tests/test_tokenization_common.py | 16 ++++-- tests/tokenization/test_tokenization_fast.py | 8 +-- tests/trainer/test_trainer.py | 4 +- utils/check_copies.py | 25 ++++++---- utils/tests_fetcher.py | 4 +- 480 files changed, 868 insertions(+), 1060 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4e6718005ab7..1b3a57c76f75 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -157,11 +157,10 @@ jobs: command: pip freeze | tee installed.txt - store_artifacts: path: ~/transformers/installed.txt - - run: black --check examples tests src utils - - run: ruff examples tests src utils + - run: ruff check examples tests src utils + - run: ruff format tests src utils --check - run: python utils/custom_init_isort.py --check_only - run: python utils/sort_auto_mappings.py --check_only - - run: doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source - run: python utils/check_doc_toc.py check_repository_consistency: diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index e326e324b65c..41e83d87438e 100644 --- a/.circleci/create_circleci_config.py +++ 
b/.circleci/create_circleci_config.py @@ -15,7 +15,6 @@ import argparse import copy -import glob import os import random from dataclasses import dataclass @@ -239,7 +238,7 @@ def to_dict(self): py_command = f'import os; fp = open("reports/{self.job_name}/summary_short.txt"); failed = os.linesep.join([x for x in fp.read().split(os.linesep) if x.startswith("ERROR ")]); fp.close(); fp = open("summary_short.txt", "w"); fp.write(failed); fp.close()' check_test_command += f"$(python3 -c '{py_command}'); " - check_test_command += f'cat summary_short.txt; echo ""; exit -1; ' + check_test_command += 'cat summary_short.txt; echo ""; exit -1; ' # Deeal with failed tests check_test_command += f'elif [ -s reports/{self.job_name}/failures_short.txt ]; ' @@ -249,7 +248,7 @@ def to_dict(self): py_command = f'import os; fp = open("reports/{self.job_name}/summary_short.txt"); failed = os.linesep.join([x for x in fp.read().split(os.linesep) if x.startswith("FAILED ")]); fp.close(); fp = open("summary_short.txt", "w"); fp.write(failed); fp.close()' check_test_command += f"$(python3 -c '{py_command}'); " - check_test_command += f'cat summary_short.txt; echo ""; exit -1; ' + check_test_command += 'cat summary_short.txt; echo ""; exit -1; ' check_test_command += f'elif [ -s reports/{self.job_name}/stats.txt ]; then echo "All tests pass!"; ' diff --git a/Makefile b/Makefile index 0c51598594c0..befdce08a26c 100644 --- a/Makefile +++ b/Makefile @@ -9,8 +9,8 @@ modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ - black $(modified_py_files); \ - ruff $(modified_py_files) --fix; \ + ruff check $(modified_py_files) --fix; \ + ruff format $(modified_py_files);\ else \ echo "No library .py files were modified"; \ fi @@ -48,11 +48,10 @@ repo-consistency: # this target runs checks on all files quality: - black --check $(check_dirs) setup.py conftest.py + ruff check $(check_dirs) setup.py conftest.py + ruff format --check $(check_dirs) setup.py conftest.py python utils/custom_init_isort.py --check_only python utils/sort_auto_mappings.py --check_only - ruff $(check_dirs) setup.py conftest.py - doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source python utils/check_doc_toc.py # Format source code automatically and check is there are any problems left that need manual fixing @@ -60,14 +59,13 @@ quality: extra_style_checks: python utils/custom_init_isort.py python utils/sort_auto_mappings.py - doc-builder style src/transformers docs/source --max_len 119 --path_to_docs docs/source python utils/check_doc_toc.py --fix_and_overwrite # this target runs checks on all files and potentially modifies some of them style: - black $(check_dirs) setup.py conftest.py - ruff $(check_dirs) setup.py conftest.py --fix + ruff check $(check_dirs) setup.py conftest.py --fix + ruff format $(check_dirs) setup.py conftest.py ${MAKE} autogenerate_code ${MAKE} extra_style_checks diff --git a/docs/source/_config.py b/docs/source/_config.py index 4a7a86cc23d8..d26d908aa29e 100644 --- a/docs/source/_config.py +++ b/docs/source/_config.py @@ -10,5 +10,5 @@ black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", - "{object_class}": "FakeObjectClass", + "{object_class}": "FakeObjectClass", } diff --git a/docs/source/en/_config.py b/docs/source/en/_config.py index cd76263e9a5c..a6d75853f572 100644 --- 
a/docs/source/en/_config.py +++ b/docs/source/en/_config.py @@ -10,5 +10,5 @@ black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", - "{object_class}": "FakeObjectClass", + "{object_class}": "FakeObjectClass", } diff --git a/docs/source/en/tasks/semantic_segmentation.md b/docs/source/en/tasks/semantic_segmentation.md index c3ad3e00f61a..2895a1977721 100644 --- a/docs/source/en/tasks/semantic_segmentation.md +++ b/docs/source/en/tasks/semantic_segmentation.md @@ -245,7 +245,7 @@ logits first, and then reshaped to match the size of the labels before you can c ... reduce_labels=False, ... ) ... for key, value in metrics.items(): -... if type(value) is np.ndarray: +... if isinstance(value, np.ndarray): ... metrics[key] = value.tolist() ... return metrics ``` diff --git a/docs/source/ko/_config.py b/docs/source/ko/_config.py index 5d966e8c40f0..9bdfef7af94b 100644 --- a/docs/source/ko/_config.py +++ b/docs/source/ko/_config.py @@ -10,5 +10,5 @@ black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", - "{object_class}": "FakeObjectClass", + "{object_class}": "FakeObjectClass", } diff --git a/docs/source/ko/tasks/semantic_segmentation.md b/docs/source/ko/tasks/semantic_segmentation.md index fe27b8f0ba8c..4b6109d692bf 100644 --- a/docs/source/ko/tasks/semantic_segmentation.md +++ b/docs/source/ko/tasks/semantic_segmentation.md @@ -242,7 +242,7 @@ pip install -q datasets transformers evaluate ... reduce_labels=False, ... ) ... for key, value in metrics.items(): -... if type(value) is np.ndarray: +... if isinstance(value, np.ndarray): ... metrics[key] = value.tolist() ... return metrics ``` diff --git a/docs/source/pt/_config.py b/docs/source/pt/_config.py index cd76263e9a5c..a6d75853f572 100644 --- a/docs/source/pt/_config.py +++ b/docs/source/pt/_config.py @@ -10,5 +10,5 @@ black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", - "{object_class}": "FakeObjectClass", + "{object_class}": "FakeObjectClass", } diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py index 37f16459d3b9..632a66841b36 100755 --- a/examples/flax/text-classification/run_flax_glue.py +++ b/examples/flax/text-classification/run_flax_glue.py @@ -212,7 +212,7 @@ def __post_init__(self): if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
- self.task_name = self.task_name.lower() if type(self.task_name) == str else self.task_name + self.task_name = self.task_name.lower() if isinstance(self.task_name, str) else self.task_name def create_train_state( diff --git a/examples/legacy/pytorch-lightning/run_glue.py b/examples/legacy/pytorch-lightning/run_glue.py index 5f22e2fc7a13..681f633fcd6d 100644 --- a/examples/legacy/pytorch-lightning/run_glue.py +++ b/examples/legacy/pytorch-lightning/run_glue.py @@ -23,7 +23,7 @@ class GLUETransformer(BaseTransformer): mode = "sequence-classification" def __init__(self, hparams): - if type(hparams) == dict: + if isinstance(hparams, dict): hparams = Namespace(**hparams) hparams.glue_output_mode = glue_output_modes[hparams.task] num_labels = glue_tasks_num_labels[hparams.task] diff --git a/examples/legacy/pytorch-lightning/run_ner.py b/examples/legacy/pytorch-lightning/run_ner.py index 7f6b00854d99..fc6f812275ea 100644 --- a/examples/legacy/pytorch-lightning/run_ner.py +++ b/examples/legacy/pytorch-lightning/run_ner.py @@ -25,7 +25,7 @@ class NERTransformer(BaseTransformer): mode = "token-classification" def __init__(self, hparams): - if type(hparams) == dict: + if isinstance(hparams, dict): hparams = Namespace(**hparams) module = import_module("tasks") try: diff --git a/examples/research_projects/deebert/src/modeling_highway_bert.py b/examples/research_projects/deebert/src/modeling_highway_bert.py index 2a881decbbd5..b866ef0869c7 100644 --- a/examples/research_projects/deebert/src/modeling_highway_bert.py +++ b/examples/research_projects/deebert/src/modeling_highway_bert.py @@ -32,7 +32,7 @@ def __init__(self, config): self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] def set_early_exit_entropy(self, x): - if (type(x) is float) or (type(x) is int): + if isinstance(x, (float, int)): for i in range(len(self.early_exit_entropy)): self.early_exit_entropy[i] = x else: @@ -232,9 +232,7 @@ def forward( outputs = ( sequence_output, pooled_output, - ) + encoder_outputs[ - 1: - ] # add hidden_states and attentions if they are here + ) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits diff --git a/examples/research_projects/longform-qa/eli5_app.py b/examples/research_projects/longform-qa/eli5_app.py index 1bcb6fd20d25..ae8d8f91568d 100644 --- a/examples/research_projects/longform-qa/eli5_app.py +++ b/examples/research_projects/longform-qa/eli5_app.py @@ -158,9 +158,7 @@ def answer_question( -""" % ( - header_html, -) +""" % (header_html,) st.sidebar.markdown( header_full, unsafe_allow_html=True, diff --git a/examples/research_projects/lxmert/modeling_frcnn.py b/examples/research_projects/lxmert/modeling_frcnn.py index 943588a5ed8c..499de532070c 100644 --- a/examples/research_projects/lxmert/modeling_frcnn.py +++ b/examples/research_projects/lxmert/modeling_frcnn.py @@ -1706,9 +1706,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): - assert ( - from_tf - ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( + assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index" ) 
archive_file = pretrained_model_name_or_path + ".index" diff --git a/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py b/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py index d404bf49aaa6..f47395bb000b 100644 --- a/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py +++ b/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py @@ -652,9 +652,7 @@ def forward( outputs = ( sequence_output, pooled_output, - ) + encoder_outputs[ - 1: - ] # add hidden_states and attentions if they are here + ) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions) diff --git a/examples/research_projects/movement-pruning/masked_run_glue.py b/examples/research_projects/movement-pruning/masked_run_glue.py index f440e627bbe0..e2090c431e3d 100644 --- a/examples/research_projects/movement-pruning/masked_run_glue.py +++ b/examples/research_projects/movement-pruning/masked_run_glue.py @@ -311,8 +311,7 @@ def train(args, train_dataset, model, tokenizer, teacher=None): tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps - len(epoch_iterator) <= args.gradient_accumulation_steps - and (step + 1) == len(epoch_iterator) + len(epoch_iterator) <= args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator) ): if args.fp16: nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) diff --git a/examples/research_projects/quantization-qdqbert/quant_trainer.py b/examples/research_projects/quantization-qdqbert/quant_trainer.py index 73e880ebf676..09bac19e921a 100755 --- a/examples/research_projects/quantization-qdqbert/quant_trainer.py +++ b/examples/research_projects/quantization-qdqbert/quant_trainer.py @@ -239,7 +239,7 @@ def print_model_summary(model, name_width=25, line_width=180, ignore=None): continue if type(mod) in ignore: continue - if [True for s in ignore if type(s) is str and s in name]: + if [True for s in ignore if isinstance(s, str) and s in name]: continue act_str = f"Act:{input_q.extra_repr()}" wgt_str = f"Wgt:{weight_q.extra_repr()}" diff --git a/examples/research_projects/visual_bert/modeling_frcnn.py b/examples/research_projects/visual_bert/modeling_frcnn.py index 943588a5ed8c..499de532070c 100644 --- a/examples/research_projects/visual_bert/modeling_frcnn.py +++ b/examples/research_projects/visual_bert/modeling_frcnn.py @@ -1706,9 +1706,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): - assert ( - from_tf - ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( + assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index" ) archive_file = pretrained_model_name_or_path + ".index" diff --git a/hubconf.py b/hubconf.py index 6c60cd4213d5..f2ef70b73db7 100644 --- a/hubconf.py +++ b/hubconf.py @@ -15,6 +15,7 @@ import os import sys + SRC_DIR = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) diff --git a/pyproject.toml b/pyproject.toml index d13ee54afcf1..a7e172002214 
100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,6 @@ -[tool.black] -line-length = 119 -target-version = ['py37'] - [tool.ruff] # Never enforce `E501` (line length violations). -ignore = ["C901", "E501", "E741"] +ignore = ["C901", "E501", "E741", "F402", "F823" ] select = ["C", "E", "F", "I", "W"] line-length = 119 @@ -18,6 +14,19 @@ line-length = 119 lines-after-imports = 2 known-first-party = ["transformers"] +[tool.ruff.format] +# Like Black, use double quotes for strings. +quote-style = "double" + +# Like Black, indent with spaces, rather than tabs. +indent-style = "space" + +# Like Black, respect magic trailing commas. +skip-magic-trailing-comma = false + +# Like Black, automatically detect the appropriate line ending. +line-ending = "auto" + [tool.pytest.ini_options] doctest_optionflags="NUMBER NORMALIZE_WHITESPACE ELLIPSIS" doctest_glob="**/*.md" diff --git a/scripts/check_tokenizers.py b/scripts/check_tokenizers.py index cfd0a7f3a1de..ea0d0bc21850 100644 --- a/scripts/check_tokenizers.py +++ b/scripts/check_tokenizers.py @@ -1,10 +1,12 @@ from collections import Counter + import datasets + import transformers from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS - from transformers.utils import logging + logging.set_verbosity_info() TOKENIZER_CLASSES = { @@ -101,8 +103,8 @@ def check_details(line, spm_ids, tok_ids, slow, fast): except Exception: pass - ok_start = fast.decode(spm_ids[:first]) - ok_end = fast.decode(spm_ids[last:]) + fast.decode(spm_ids[:first]) + fast.decode(spm_ids[last:]) wrong = fast.decode(spm_ids[first:last]) print() print(wrong) diff --git a/scripts/fsmt/fsmt-make-super-tiny-model.py b/scripts/fsmt/fsmt-make-super-tiny-model.py index 4a6b8e0c1b4c..a70f40ee6ca4 100755 --- a/scripts/fsmt/fsmt-make-super-tiny-model.py +++ b/scripts/fsmt/fsmt-make-super-tiny-model.py @@ -24,18 +24,19 @@ # # It will be used then as "stas/tiny-wmt19-en-ru" -from pathlib import Path import json import tempfile +from pathlib import Path -from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration +from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES + mname_tiny = "tiny-wmt19-en-ru" # Build -# borrowed from a test +# borrowed from a test vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w", "r", "t", "lo", "low", "er", "low", "lowest", "newer", "wider", "", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r 1789", ""] @@ -57,7 +58,7 @@ tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) - + config = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1000, tgt_vocab_size=1000, diff --git a/scripts/fsmt/fsmt-make-tiny-model.py b/scripts/fsmt/fsmt-make-tiny-model.py index 431942c05ddb..b737cc61cea3 100755 --- a/scripts/fsmt/fsmt-make-tiny-model.py +++ b/scripts/fsmt/fsmt-make-tiny-model.py @@ -27,16 +27,18 @@ # It will be used then as "stas/tiny-wmt19-en-de" # Build -from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration +from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer + + mname = "facebook/wmt19-en-de" tokenizer = FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. 
from the master model config = FSMTConfig.from_pretrained(mname) -config.update(dict( - d_model=4, - encoder_layers=1, decoder_layers=1, - encoder_ffn_dim=4, decoder_ffn_dim=4, - encoder_attention_heads=1, decoder_attention_heads=1)) +config.update({ + "d_model": 4, + "encoder_layers": 1, "decoder_layers": 1, + "encoder_ffn_dim": 4, "decoder_ffn_dim": 4, + "encoder_attention_heads": 1, "decoder_attention_heads": 1}) tiny_model = FSMTForConditionalGeneration(config) print(f"num of params {tiny_model.num_parameters()}") diff --git a/scripts/fsmt/gen-card-allenai-wmt16.py b/scripts/fsmt/gen-card-allenai-wmt16.py index b910cb05b1bb..1b5fe1cda8b2 100755 --- a/scripts/fsmt/gen-card-allenai-wmt16.py +++ b/scripts/fsmt/gen-card-allenai-wmt16.py @@ -19,6 +19,7 @@ import os from pathlib import Path + def write_model_card(model_card_dir, src_lang, tgt_lang, model_name): texts = { diff --git a/scripts/fsmt/gen-card-allenai-wmt19.py b/scripts/fsmt/gen-card-allenai-wmt19.py index df0f5851c82e..b7d727ff2a14 100755 --- a/scripts/fsmt/gen-card-allenai-wmt19.py +++ b/scripts/fsmt/gen-card-allenai-wmt19.py @@ -19,6 +19,7 @@ import os from pathlib import Path + def write_model_card(model_card_dir, src_lang, tgt_lang, model_name): texts = { diff --git a/scripts/fsmt/gen-card-facebook-wmt19.py b/scripts/fsmt/gen-card-facebook-wmt19.py index e75406b261dc..58df676cbc94 100755 --- a/scripts/fsmt/gen-card-facebook-wmt19.py +++ b/scripts/fsmt/gen-card-facebook-wmt19.py @@ -19,6 +19,7 @@ import os from pathlib import Path + def write_model_card(model_card_dir, src_lang, tgt_lang): texts = { @@ -39,7 +40,7 @@ def write_model_card(model_card_dir, src_lang, tgt_lang): readme = f""" --- -language: +language: - {src_lang} - {tgt_lang} thumbnail: diff --git a/scripts/pegasus/build_test_sample_spm_no_bos.py b/scripts/pegasus/build_test_sample_spm_no_bos.py index 324db02ef710..f223304a7717 100755 --- a/scripts/pegasus/build_test_sample_spm_no_bos.py +++ b/scripts/pegasus/build_test_sample_spm_no_bos.py @@ -13,15 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# this script builds a small sample spm file tests/fixtures/test_sentencepiece_no_bos.model, with features needed by pegasus +# this script builds a small sample spm file tests/fixtures/test_sentencepiece_no_bos.model, with features needed by pegasus # 1. pip install sentencepiece -# +# # 2. wget https://raw.githubusercontent.com/google/sentencepiece/master/data/botchan.txt # 3. build import sentencepiece as spm + # pegasus: # 1. no bos # 2. eos_id is 1 diff --git a/scripts/stale.py b/scripts/stale.py index c7fc6aa221a7..bf7c6670c431 100644 --- a/scripts/stale.py +++ b/scripts/stale.py @@ -15,8 +15,8 @@ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. 
""" -from datetime import datetime as dt import os +from datetime import datetime as dt import github.GithubException from github import Github @@ -39,7 +39,7 @@ def main(): for i, issue in enumerate(open_issues): print(i, issue) - comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True) + comments = sorted(list(issue.get_comments()), key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" diff --git a/setup.py b/setup.py index 86c8a8a5bffb..deccac468a8a 100644 --- a/setup.py +++ b/setup.py @@ -99,7 +99,6 @@ "accelerate>=0.20.3", "av==9.2.0", # Latest version of PyAV (10.0.0) has issues with audio stream. "beautifulsoup4", - "black~=23.1", "codecarbon==1.2.0", "cookiecutter==1.7.3", "dataclasses", @@ -156,7 +155,7 @@ "rhoknp>=1.1.0,<1.3.1", "rjieba", "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", - "ruff>=0.0.241,<=0.0.259", + "ruff>=0.1.5,<=0.2", "sacrebleu>=1.4.12,<2.0.0", "sacremoses", "safetensors>=0.3.1", @@ -310,7 +309,7 @@ def run(self): "dill", "evaluate", "pytest-timeout", - "black", + "ruff", "sacrebleu", "rouge-score", "nltk", @@ -329,7 +328,7 @@ def run(self): extras["deepspeed-testing"] = extras["deepspeed"] + extras["testing"] + extras["optuna"] + extras["sentencepiece"] -extras["quality"] = deps_list("black", "datasets", "isort", "ruff", "GitPython", "hf-doc-builder", "urllib3") +extras["quality"] = deps_list("datasets", "isort", "ruff", "GitPython", "hf-doc-builder", "urllib3") extras["all"] = ( extras["tf"] diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index d4f59020434e..22ea0abbd601 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -246,6 +246,7 @@ class PretrainedConfig(PushToHubMixin): not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers v5. 
""" + model_type: str = "" is_composition: bool = False attribute_map: Dict[str, str] = {} diff --git a/src/transformers/convert_slow_tokenizer.py b/src/transformers/convert_slow_tokenizer.py index 04f3e7e1c587..76ac66ceb9ef 100644 --- a/src/transformers/convert_slow_tokenizer.py +++ b/src/transformers/convert_slow_tokenizer.py @@ -724,9 +724,7 @@ def vocab(self, proto): ("", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] - # fmt: off - vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] - # fmt: on + vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: skip vocab += [("", 0.0)] return vocab @@ -753,11 +751,7 @@ def vocab(self, proto): ("", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] - vocab += [ - # fmt: off - ('ace_Arab', 0.0), ('ace_Latn', 0.0), ('acm_Arab', 0.0), ('acq_Arab', 0.0), ('aeb_Arab', 0.0), ('afr_Latn', 0.0), ('ajp_Arab', 0.0), ('aka_Latn', 0.0), ('amh_Ethi', 0.0), ('apc_Arab', 0.0), ('arb_Arab', 0.0), ('ars_Arab', 0.0), ('ary_Arab', 0.0), ('arz_Arab', 0.0), ('asm_Beng', 0.0), ('ast_Latn', 0.0), ('awa_Deva', 0.0), ('ayr_Latn', 0.0), ('azb_Arab', 0.0), ('azj_Latn', 0.0), ('bak_Cyrl', 0.0), ('bam_Latn', 0.0), ('ban_Latn', 0.0), ('bel_Cyrl', 0.0), ('bem_Latn', 0.0), ('ben_Beng', 0.0), ('bho_Deva', 0.0), ('bjn_Arab', 0.0), ('bjn_Latn', 0.0), ('bod_Tibt', 0.0), ('bos_Latn', 0.0), ('bug_Latn', 0.0), ('bul_Cyrl', 0.0), ('cat_Latn', 0.0), ('ceb_Latn', 0.0), ('ces_Latn', 0.0), ('cjk_Latn', 0.0), ('ckb_Arab', 0.0), ('crh_Latn', 0.0), ('cym_Latn', 0.0), ('dan_Latn', 0.0), ('deu_Latn', 0.0), ('dik_Latn', 0.0), ('dyu_Latn', 0.0), ('dzo_Tibt', 0.0), ('ell_Grek', 0.0), ('eng_Latn', 0.0), ('epo_Latn', 0.0), ('est_Latn', 0.0), ('eus_Latn', 0.0), ('ewe_Latn', 0.0), ('fao_Latn', 0.0), ('pes_Arab', 0.0), ('fij_Latn', 0.0), ('fin_Latn', 0.0), ('fon_Latn', 0.0), ('fra_Latn', 0.0), ('fur_Latn', 0.0), ('fuv_Latn', 0.0), ('gla_Latn', 0.0), ('gle_Latn', 0.0), ('glg_Latn', 0.0), 
('grn_Latn', 0.0), ('guj_Gujr', 0.0), ('hat_Latn', 0.0), ('hau_Latn', 0.0), ('heb_Hebr', 0.0), ('hin_Deva', 0.0), ('hne_Deva', 0.0), ('hrv_Latn', 0.0), ('hun_Latn', 0.0), ('hye_Armn', 0.0), ('ibo_Latn', 0.0), ('ilo_Latn', 0.0), ('ind_Latn', 0.0), ('isl_Latn', 0.0), ('ita_Latn', 0.0), ('jav_Latn', 0.0), ('jpn_Jpan', 0.0), ('kab_Latn', 0.0), ('kac_Latn', 0.0), ('kam_Latn', 0.0), ('kan_Knda', 0.0), ('kas_Arab', 0.0), ('kas_Deva', 0.0), ('kat_Geor', 0.0), ('knc_Arab', 0.0), ('knc_Latn', 0.0), ('kaz_Cyrl', 0.0), ('kbp_Latn', 0.0), ('kea_Latn', 0.0), ('khm_Khmr', 0.0), ('kik_Latn', 0.0), ('kin_Latn', 0.0), ('kir_Cyrl', 0.0), ('kmb_Latn', 0.0), ('kon_Latn', 0.0), ('kor_Hang', 0.0), ('kmr_Latn', 0.0), ('lao_Laoo', 0.0), ('lvs_Latn', 0.0), ('lij_Latn', 0.0), ('lim_Latn', 0.0), ('lin_Latn', 0.0), ('lit_Latn', 0.0), ('lmo_Latn', 0.0), ('ltg_Latn', 0.0), ('ltz_Latn', 0.0), ('lua_Latn', 0.0), ('lug_Latn', 0.0), ('luo_Latn', 0.0), ('lus_Latn', 0.0), ('mag_Deva', 0.0), ('mai_Deva', 0.0), ('mal_Mlym', 0.0), ('mar_Deva', 0.0), ('min_Latn', 0.0), ('mkd_Cyrl', 0.0), ('plt_Latn', 0.0), ('mlt_Latn', 0.0), ('mni_Beng', 0.0), ('khk_Cyrl', 0.0), ('mos_Latn', 0.0), ('mri_Latn', 0.0), ('zsm_Latn', 0.0), ('mya_Mymr', 0.0), ('nld_Latn', 0.0), ('nno_Latn', 0.0), ('nob_Latn', 0.0), ('npi_Deva', 0.0), ('nso_Latn', 0.0), ('nus_Latn', 0.0), ('nya_Latn', 0.0), ('oci_Latn', 0.0), ('gaz_Latn', 0.0), ('ory_Orya', 0.0), ('pag_Latn', 0.0), ('pan_Guru', 0.0), ('pap_Latn', 0.0), ('pol_Latn', 0.0), ('por_Latn', 0.0), ('prs_Arab', 0.0), ('pbt_Arab', 0.0), ('quy_Latn', 0.0), ('ron_Latn', 0.0), ('run_Latn', 0.0), ('rus_Cyrl', 0.0), ('sag_Latn', 0.0), ('san_Deva', 0.0), ('sat_Beng', 0.0), ('scn_Latn', 0.0), ('shn_Mymr', 0.0), ('sin_Sinh', 0.0), ('slk_Latn', 0.0), ('slv_Latn', 0.0), ('smo_Latn', 0.0), ('sna_Latn', 0.0), ('snd_Arab', 0.0), ('som_Latn', 0.0), ('sot_Latn', 0.0), ('spa_Latn', 0.0), ('als_Latn', 0.0), ('srd_Latn', 0.0), ('srp_Cyrl', 0.0), ('ssw_Latn', 0.0), ('sun_Latn', 0.0), ('swe_Latn', 0.0), ('swh_Latn', 0.0), ('szl_Latn', 0.0), ('tam_Taml', 0.0), ('tat_Cyrl', 0.0), ('tel_Telu', 0.0), ('tgk_Cyrl', 0.0), ('tgl_Latn', 0.0), ('tha_Thai', 0.0), ('tir_Ethi', 0.0), ('taq_Latn', 0.0), ('taq_Tfng', 0.0), ('tpi_Latn', 0.0), ('tsn_Latn', 0.0), ('tso_Latn', 0.0), ('tuk_Latn', 0.0), ('tum_Latn', 0.0), ('tur_Latn', 0.0), ('twi_Latn', 0.0), ('tzm_Tfng', 0.0), ('uig_Arab', 0.0), ('ukr_Cyrl', 0.0), ('umb_Latn', 0.0), ('urd_Arab', 0.0), ('uzn_Latn', 0.0), ('vec_Latn', 0.0), ('vie_Latn', 0.0), ('war_Latn', 0.0), ('wol_Latn', 0.0), ('xho_Latn', 0.0), ('ydd_Hebr', 0.0), ('yor_Latn', 0.0), ('yue_Hant', 0.0), ('zho_Hans', 0.0), ('zho_Hant', 0.0), ('zul_Latn', 0.0) - # fmt: on - ] + vocab += [('ace_Arab', 0.0), ('ace_Latn', 0.0), ('acm_Arab', 0.0), ('acq_Arab', 0.0), ('aeb_Arab', 0.0), ('afr_Latn', 0.0), ('ajp_Arab', 0.0), ('aka_Latn', 0.0), ('amh_Ethi', 0.0), ('apc_Arab', 0.0), ('arb_Arab', 0.0), ('ars_Arab', 0.0), ('ary_Arab', 0.0), ('arz_Arab', 0.0), ('asm_Beng', 0.0), ('ast_Latn', 0.0), ('awa_Deva', 0.0), ('ayr_Latn', 0.0), ('azb_Arab', 0.0), ('azj_Latn', 0.0), ('bak_Cyrl', 0.0), ('bam_Latn', 0.0), ('ban_Latn', 0.0), ('bel_Cyrl', 0.0), ('bem_Latn', 0.0), ('ben_Beng', 0.0), ('bho_Deva', 0.0), ('bjn_Arab', 0.0), ('bjn_Latn', 0.0), ('bod_Tibt', 0.0), ('bos_Latn', 0.0), ('bug_Latn', 0.0), ('bul_Cyrl', 0.0), ('cat_Latn', 0.0), ('ceb_Latn', 0.0), ('ces_Latn', 0.0), ('cjk_Latn', 0.0), ('ckb_Arab', 0.0), ('crh_Latn', 0.0), ('cym_Latn', 0.0), ('dan_Latn', 0.0), ('deu_Latn', 0.0), ('dik_Latn', 0.0), ('dyu_Latn', 0.0), ('dzo_Tibt', 0.0), ('ell_Grek', 
0.0), ('eng_Latn', 0.0), ('epo_Latn', 0.0), ('est_Latn', 0.0), ('eus_Latn', 0.0), ('ewe_Latn', 0.0), ('fao_Latn', 0.0), ('pes_Arab', 0.0), ('fij_Latn', 0.0), ('fin_Latn', 0.0), ('fon_Latn', 0.0), ('fra_Latn', 0.0), ('fur_Latn', 0.0), ('fuv_Latn', 0.0), ('gla_Latn', 0.0), ('gle_Latn', 0.0), ('glg_Latn', 0.0), ('grn_Latn', 0.0), ('guj_Gujr', 0.0), ('hat_Latn', 0.0), ('hau_Latn', 0.0), ('heb_Hebr', 0.0), ('hin_Deva', 0.0), ('hne_Deva', 0.0), ('hrv_Latn', 0.0), ('hun_Latn', 0.0), ('hye_Armn', 0.0), ('ibo_Latn', 0.0), ('ilo_Latn', 0.0), ('ind_Latn', 0.0), ('isl_Latn', 0.0), ('ita_Latn', 0.0), ('jav_Latn', 0.0), ('jpn_Jpan', 0.0), ('kab_Latn', 0.0), ('kac_Latn', 0.0), ('kam_Latn', 0.0), ('kan_Knda', 0.0), ('kas_Arab', 0.0), ('kas_Deva', 0.0), ('kat_Geor', 0.0), ('knc_Arab', 0.0), ('knc_Latn', 0.0), ('kaz_Cyrl', 0.0), ('kbp_Latn', 0.0), ('kea_Latn', 0.0), ('khm_Khmr', 0.0), ('kik_Latn', 0.0), ('kin_Latn', 0.0), ('kir_Cyrl', 0.0), ('kmb_Latn', 0.0), ('kon_Latn', 0.0), ('kor_Hang', 0.0), ('kmr_Latn', 0.0), ('lao_Laoo', 0.0), ('lvs_Latn', 0.0), ('lij_Latn', 0.0), ('lim_Latn', 0.0), ('lin_Latn', 0.0), ('lit_Latn', 0.0), ('lmo_Latn', 0.0), ('ltg_Latn', 0.0), ('ltz_Latn', 0.0), ('lua_Latn', 0.0), ('lug_Latn', 0.0), ('luo_Latn', 0.0), ('lus_Latn', 0.0), ('mag_Deva', 0.0), ('mai_Deva', 0.0), ('mal_Mlym', 0.0), ('mar_Deva', 0.0), ('min_Latn', 0.0), ('mkd_Cyrl', 0.0), ('plt_Latn', 0.0), ('mlt_Latn', 0.0), ('mni_Beng', 0.0), ('khk_Cyrl', 0.0), ('mos_Latn', 0.0), ('mri_Latn', 0.0), ('zsm_Latn', 0.0), ('mya_Mymr', 0.0), ('nld_Latn', 0.0), ('nno_Latn', 0.0), ('nob_Latn', 0.0), ('npi_Deva', 0.0), ('nso_Latn', 0.0), ('nus_Latn', 0.0), ('nya_Latn', 0.0), ('oci_Latn', 0.0), ('gaz_Latn', 0.0), ('ory_Orya', 0.0), ('pag_Latn', 0.0), ('pan_Guru', 0.0), ('pap_Latn', 0.0), ('pol_Latn', 0.0), ('por_Latn', 0.0), ('prs_Arab', 0.0), ('pbt_Arab', 0.0), ('quy_Latn', 0.0), ('ron_Latn', 0.0), ('run_Latn', 0.0), ('rus_Cyrl', 0.0), ('sag_Latn', 0.0), ('san_Deva', 0.0), ('sat_Beng', 0.0), ('scn_Latn', 0.0), ('shn_Mymr', 0.0), ('sin_Sinh', 0.0), ('slk_Latn', 0.0), ('slv_Latn', 0.0), ('smo_Latn', 0.0), ('sna_Latn', 0.0), ('snd_Arab', 0.0), ('som_Latn', 0.0), ('sot_Latn', 0.0), ('spa_Latn', 0.0), ('als_Latn', 0.0), ('srd_Latn', 0.0), ('srp_Cyrl', 0.0), ('ssw_Latn', 0.0), ('sun_Latn', 0.0), ('swe_Latn', 0.0), ('swh_Latn', 0.0), ('szl_Latn', 0.0), ('tam_Taml', 0.0), ('tat_Cyrl', 0.0), ('tel_Telu', 0.0), ('tgk_Cyrl', 0.0), ('tgl_Latn', 0.0), ('tha_Thai', 0.0), ('tir_Ethi', 0.0), ('taq_Latn', 0.0), ('taq_Tfng', 0.0), ('tpi_Latn', 0.0), ('tsn_Latn', 0.0), ('tso_Latn', 0.0), ('tuk_Latn', 0.0), ('tum_Latn', 0.0), ('tur_Latn', 0.0), ('twi_Latn', 0.0), ('tzm_Tfng', 0.0), ('uig_Arab', 0.0), ('ukr_Cyrl', 0.0), ('umb_Latn', 0.0), ('urd_Arab', 0.0), ('uzn_Latn', 0.0), ('vec_Latn', 0.0), ('vie_Latn', 0.0), ('war_Latn', 0.0), ('wol_Latn', 0.0), ('xho_Latn', 0.0), ('ydd_Hebr', 0.0), ('yor_Latn', 0.0), ('yue_Hant', 0.0), ('zho_Hans', 0.0), ('zho_Hant', 0.0), ('zul_Latn', 0.0)] # fmt: skip vocab += [("", 0.0)] return vocab @@ -1128,9 +1122,7 @@ def vocab(self, proto): ("", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] - # fmt: off - vocab += [("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0)] - # fmt: on + vocab += [("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0), ("", 0.0)] # fmt: skip return vocab def unk_id(self, proto): diff --git a/src/transformers/data/data_collator.py b/src/transformers/data/data_collator.py index 57879ec3e2d8..7954c828fee9 100644 --- 
a/src/transformers/data/data_collator.py +++ b/src/transformers/data/data_collator.py @@ -121,7 +121,7 @@ def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any if isinstance(first["label_ids"], torch.Tensor): batch["labels"] = torch.stack([f["label_ids"] for f in features]) else: - dtype = torch.long if type(first["label_ids"][0]) is int else torch.float + dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. @@ -196,7 +196,7 @@ def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any if isinstance(first["label_ids"], np.ndarray): batch["labels"] = np.stack([f["label_ids"] for f in features]) else: - dtype = np.int64 if type(first["label_ids"][0]) is int else np.float32 + dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32 batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 30e902d6989c..93e21ab2d3e5 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -6,7 +6,6 @@ "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", - "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", @@ -62,7 +61,7 @@ "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", - "ruff": "ruff>=0.0.241,<=0.0.259", + "ruff": "ruff>=0.1.5,<=0.2", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py index 628fe5dea72f..bfb88d03d3fc 100644 --- a/src/transformers/image_utils.py +++ b/src/transformers/image_utils.py @@ -245,8 +245,7 @@ def is_valid_annotation_coco_detection(annotation: Dict[str, Union[List, Tuple]] and isinstance(annotation["annotations"], (list, tuple)) and ( # an image can have no annotations - len(annotation["annotations"]) == 0 - or isinstance(annotation["annotations"][0], dict) + len(annotation["annotations"]) == 0 or isinstance(annotation["annotations"][0], dict) ) ): return True @@ -262,8 +261,7 @@ def is_valid_annotation_coco_panoptic(annotation: Dict[str, Union[List, Tuple]]) and isinstance(annotation["segments_info"], (list, tuple)) and ( # an image can have no segments - len(annotation["segments_info"]) == 0 - or isinstance(annotation["segments_info"][0], dict) + len(annotation["segments_info"]) == 0 or isinstance(annotation["segments_info"][0], dict) ) ): return True diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index 37567d3d8432..d4617c111bb2 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -179,6 +179,7 @@ class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin): - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). 
""" + config_class = None base_model_prefix = "" main_input_name = "input_ids" diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index c342b5059c4f..bfd928a9011f 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1075,6 +1075,7 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ + config_class = None base_model_prefix = "" main_input_name = "input_ids" @@ -3242,6 +3243,7 @@ class TFSharedEmbeddings(tf.keras.layers.Layer): kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`. """ + # TODO (joao): flagged for delection due to embeddings refactor def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 6d3a4d34dfbd..d7e0580e4359 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1095,6 +1095,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ + config_class = None base_model_prefix = "" main_input_name = "input_ids" diff --git a/src/transformers/models/align/configuration_align.py b/src/transformers/models/align/configuration_align.py index 74cfbfbe3380..b7f377d48136 100644 --- a/src/transformers/models/align/configuration_align.py +++ b/src/transformers/models/align/configuration_align.py @@ -97,6 +97,7 @@ class AlignTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "align_text_model" def __init__( diff --git a/src/transformers/models/altclip/configuration_altclip.py b/src/transformers/models/altclip/configuration_altclip.py index 032006452099..e557d270dfb2 100755 --- a/src/transformers/models/altclip/configuration_altclip.py +++ b/src/transformers/models/altclip/configuration_altclip.py @@ -100,6 +100,7 @@ class AltCLIPTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "altclip_text_model" def __init__( diff --git a/src/transformers/models/altclip/modeling_altclip.py b/src/transformers/models/altclip/modeling_altclip.py index 048c18edcc83..2f511bace5fa 100755 --- a/src/transformers/models/altclip/modeling_altclip.py +++ b/src/transformers/models/altclip/modeling_altclip.py @@ -174,8 +174,7 @@ class AltCLIPOutput(ModelOutput): text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`]. image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): - The image embeddings obtained by applying the projection layer to the pooled output of - [`AltCLIPVisionModel`]. + The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`]. text_model_output(`BaseModelOutputWithPooling`): The output of the [`AltCLIPTextModel`]. 
vision_model_output(`BaseModelOutputWithPooling`): @@ -1049,9 +1048,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, AltCLIPMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/altclip/processing_altclip.py b/src/transformers/models/altclip/processing_altclip.py index 102535bc5b0e..e9b4f45269ca 100644 --- a/src/transformers/models/altclip/processing_altclip.py +++ b/src/transformers/models/altclip/processing_altclip.py @@ -35,6 +35,7 @@ class AltCLIPProcessor(ProcessorMixin): tokenizer ([`XLMRobertaTokenizerFast`], *optional*): The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "CLIPImageProcessor" tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") diff --git a/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py b/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py index 23a2d83e78ac..81a087f07f69 100644 --- a/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +++ b/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py @@ -86,6 +86,7 @@ class ASTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "audio-spectrogram-transformer" def __init__( diff --git a/src/transformers/models/autoformer/configuration_autoformer.py b/src/transformers/models/autoformer/configuration_autoformer.py index ced76448cd1e..7604233e3273 100644 --- a/src/transformers/models/autoformer/configuration_autoformer.py +++ b/src/transformers/models/autoformer/configuration_autoformer.py @@ -131,6 +131,7 @@ class AutoformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "autoformer" attribute_map = { "hidden_size": "d_model", diff --git a/src/transformers/models/bark/processing_bark.py b/src/transformers/models/bark/processing_bark.py index ef8ed03224be..b322615ae233 100644 --- a/src/transformers/models/bark/processing_bark.py +++ b/src/transformers/models/bark/processing_bark.py @@ -46,6 +46,7 @@ class BarkProcessor(ProcessorMixin): a list of `voice_preset_names`. 
""" + tokenizer_class = "AutoTokenizer" attributes = ["tokenizer"] diff --git a/src/transformers/models/bart/configuration_bart.py b/src/transformers/models/bart/configuration_bart.py index 2a04657f4199..8c03be9a6202 100644 --- a/src/transformers/models/bart/configuration_bart.py +++ b/src/transformers/models/bart/configuration_bart.py @@ -107,6 +107,7 @@ class BartConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "bart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/bart/tokenization_bart_fast.py b/src/transformers/models/bart/tokenization_bart_fast.py index dfbf493af266..850c9636833a 100644 --- a/src/transformers/models/bart/tokenization_bart_fast.py +++ b/src/transformers/models/bart/tokenization_bart_fast.py @@ -147,6 +147,7 @@ class BartTokenizerFast(PreTrainedTokenizerFast): trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES diff --git a/src/transformers/models/beit/configuration_beit.py b/src/transformers/models/beit/configuration_beit.py index e554f45f7910..102718e17ff1 100644 --- a/src/transformers/models/beit/configuration_beit.py +++ b/src/transformers/models/beit/configuration_beit.py @@ -115,6 +115,7 @@ class BeitConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "beit" def __init__( diff --git a/src/transformers/models/bert/configuration_bert.py b/src/transformers/models/bert/configuration_bert.py index 589c2b026185..e0db2c9f1bb2 100644 --- a/src/transformers/models/bert/configuration_bert.py +++ b/src/transformers/models/bert/configuration_bert.py @@ -136,6 +136,7 @@ class BertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "bert" def __init__( diff --git a/src/transformers/models/bert_generation/configuration_bert_generation.py b/src/transformers/models/bert_generation/configuration_bert_generation.py index e3e1b30e56fd..841aec5c0fb7 100644 --- a/src/transformers/models/bert_generation/configuration_bert_generation.py +++ b/src/transformers/models/bert_generation/configuration_bert_generation.py @@ -84,6 +84,7 @@ class BertGenerationConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "bert-generation" def __init__( diff --git a/src/transformers/models/big_bird/configuration_big_bird.py b/src/transformers/models/big_bird/configuration_big_bird.py index 53bf1ee6f44b..e71d3ea44460 100644 --- a/src/transformers/models/big_bird/configuration_big_bird.py +++ b/src/transformers/models/big_bird/configuration_big_bird.py @@ -104,6 +104,7 @@ class BigBirdConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "big_bird" def __init__( diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index 4383d210cd80..d90aa443dad2 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -896,15 +896,11 @@ def 
bigbird_block_sparse_attention( # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size - ].view( - bsz, n_heads, -1, to_block_size - ) # first_band_product + ].view(bsz, n_heads, -1, to_block_size) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: - ].view( - bsz, n_heads, -1, to_block_size - ) # last_band_product + ].view(bsz, n_heads, -1, to_block_size) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch diff --git a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py index a7f198a735b3..1c78803c4b11 100644 --- a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py @@ -120,6 +120,7 @@ class BigBirdPegasusConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "bigbird_pegasus" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index 222873ac852b..bc9682786942 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -683,15 +683,11 @@ def bigbird_block_sparse_attention( # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size - ].view( - bsz, n_heads, -1, to_block_size - ) # first_band_product + ].view(bsz, n_heads, -1, to_block_size) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: - ].view( - bsz, n_heads, -1, to_block_size - ) # last_band_product + ].view(bsz, n_heads, -1, to_block_size) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch diff --git a/src/transformers/models/biogpt/configuration_biogpt.py b/src/transformers/models/biogpt/configuration_biogpt.py index b6911e2ef903..e8635490bf36 100644 --- a/src/transformers/models/biogpt/configuration_biogpt.py +++ b/src/transformers/models/biogpt/configuration_biogpt.py @@ -93,6 +93,7 @@ class BioGptConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "biogpt" def __init__( diff --git a/src/transformers/models/bit/configuration_bit.py b/src/transformers/models/bit/configuration_bit.py index 1e5ded1e1913..495d348f0530 100644 --- a/src/transformers/models/bit/configuration_bit.py +++ b/src/transformers/models/bit/configuration_bit.py @@ -85,6 +85,7 @@ class BitConfig(BackboneConfigMixin, PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "bit" layer_types = ["preactivation", "bottleneck"] supported_padding = ["SAME", "VALID"] diff --git a/src/transformers/models/blenderbot/configuration_blenderbot.py b/src/transformers/models/blenderbot/configuration_blenderbot.py index 93ee92813645..4f55a96bf62b 100644 --- a/src/transformers/models/blenderbot/configuration_blenderbot.py +++ b/src/transformers/models/blenderbot/configuration_blenderbot.py @@ -104,6 +104,7 @@ class BlenderbotConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "blenderbot" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index f49f90f794fc..2fbd4621361e 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -1511,9 +1511,7 @@ def forward( >>> from transformers import AutoTokenizer, BlenderbotForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") - >>> model = BlenderbotForCausalLM.from_pretrained( - ... "facebook/blenderbot-400M-distill", add_cross_attention=False - ... ) + >>> model = BlenderbotForCausalLM.from_pretrained("facebook/blenderbot-400M-distill", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) diff --git a/src/transformers/models/blenderbot/tokenization_blenderbot.py b/src/transformers/models/blenderbot/tokenization_blenderbot.py index 7c1ef43bccb2..29386c1233ad 100644 --- a/src/transformers/models/blenderbot/tokenization_blenderbot.py +++ b/src/transformers/models/blenderbot/tokenization_blenderbot.py @@ -376,8 +376,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does - not make use of token type ids, therefore a list of zeros is returned. + Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not + make use of token type ids, therefore a list of zeros is returned. 
Args: token_ids_0 (`List[int]`): diff --git a/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py b/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py index 1c0d8f3fab75..6245025b503d 100644 --- a/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py +++ b/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py @@ -212,8 +212,8 @@ def mask_token(self) -> str: `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. - Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will - greedily comprise the space before the *<mask>*. + Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily + comprise the space before the *<mask>*. """ if self._mask_token is None: if self.verbose: @@ -264,8 +264,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does - not make use of token type ids, therefore a list of zeros is returned. + Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not + make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): diff --git a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py index fbc23435d66f..b41330656d39 100644 --- a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py @@ -104,6 +104,7 @@ class BlenderbotSmallConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "blenderbot-small" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index 292b5a8c6e8b..1669602832d8 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -1478,9 +1478,7 @@ def forward( >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") - >>> model = BlenderbotSmallForCausalLM.from_pretrained( - ... "facebook/blenderbot_small-90M", add_cross_attention=False - ... ) + >>> model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) diff --git a/src/transformers/models/blip/configuration_blip.py b/src/transformers/models/blip/configuration_blip.py index 2482fcaf9ec2..0b3dfb4a121c 100644 --- a/src/transformers/models/blip/configuration_blip.py +++ b/src/transformers/models/blip/configuration_blip.py @@ -109,6 +109,7 @@ class BlipTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "blip_text_model" def __init__( diff --git a/src/transformers/models/blip/modeling_blip_text.py b/src/transformers/models/blip/modeling_blip_text.py index 00c6a85ee61f..353c0f486a56 100644 --- a/src/transformers/models/blip/modeling_blip_text.py +++ b/src/transformers/models/blip/modeling_blip_text.py @@ -742,13 +742,13 @@ def forward( # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: + if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - if type(encoder_attention_mask) == list: + if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) diff --git a/src/transformers/models/blip/modeling_tf_blip_text.py b/src/transformers/models/blip/modeling_tf_blip_text.py index 9873c292b7af..b7307c062f79 100644 --- a/src/transformers/models/blip/modeling_tf_blip_text.py +++ b/src/transformers/models/blip/modeling_tf_blip_text.py @@ -741,13 +741,13 @@ def call( # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: + if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states[0]) else: encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states) encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - if type(encoder_attention_mask) == list: + if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = tf.ones(encoder_hidden_shape) diff --git a/src/transformers/models/blip/processing_blip.py b/src/transformers/models/blip/processing_blip.py index c4df8ddffaba..3b9d5c369a44 100644 --- a/src/transformers/models/blip/processing_blip.py +++ b/src/transformers/models/blip/processing_blip.py @@ -37,6 +37,7 @@ class BlipProcessor(ProcessorMixin): tokenizer (`BertTokenizerFast`): An instance of ['BertTokenizerFast`]. The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "BlipImageProcessor" tokenizer_class = ("BertTokenizer", "BertTokenizerFast") diff --git a/src/transformers/models/blip_2/configuration_blip_2.py b/src/transformers/models/blip_2/configuration_blip_2.py index 1b375e147f78..85749888a54b 100644 --- a/src/transformers/models/blip_2/configuration_blip_2.py +++ b/src/transformers/models/blip_2/configuration_blip_2.py @@ -190,6 +190,7 @@ class Blip2QFormerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "blip_2_qformer" def __init__( diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index 10a37c79b863..00433f3ea349 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -1123,13 +1123,13 @@ def forward( # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: + if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - if type(encoder_attention_mask) == list: + if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) diff --git a/src/transformers/models/blip_2/processing_blip_2.py b/src/transformers/models/blip_2/processing_blip_2.py index 837056f88891..ff7044c82aed 100644 --- a/src/transformers/models/blip_2/processing_blip_2.py +++ b/src/transformers/models/blip_2/processing_blip_2.py @@ -37,6 +37,7 @@ class Blip2Processor(ProcessorMixin): tokenizer (`AutoTokenizer`): An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "BlipImageProcessor" tokenizer_class = "AutoTokenizer" @@ -141,8 +142,8 @@ def batch_decode(self, *args, **kwargs): # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer def decode(self, *args, **kwargs): """ - This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer - to the docstring of this method for more information. + This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/bridgetower/configuration_bridgetower.py b/src/transformers/models/bridgetower/configuration_bridgetower.py index e44373cba59c..c12c1600e9b4 100644 --- a/src/transformers/models/bridgetower/configuration_bridgetower.py +++ b/src/transformers/models/bridgetower/configuration_bridgetower.py @@ -73,6 +73,7 @@ class BridgeTowerVisionConfig(PretrainedConfig): >>> # Accessing the configuration >>> configuration ```""" + model_type = "bridgetower_vision_model" def __init__( @@ -179,6 +180,7 @@ class BridgeTowerTextConfig(PretrainedConfig): >>> # Accessing the configuration >>> configuration ```""" + model_type = "bridgetower_text_model" def __init__( @@ -291,6 +293,7 @@ class BridgeTowerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "bridgetower" def __init__( diff --git a/src/transformers/models/bridgetower/modeling_bridgetower.py b/src/transformers/models/bridgetower/modeling_bridgetower.py index 89655db7f048..f5822070db6a 100644 --- a/src/transformers/models/bridgetower/modeling_bridgetower.py +++ b/src/transformers/models/bridgetower/modeling_bridgetower.py @@ -46,7 +46,7 @@ BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "BridgeTower/bridgetower-base", - "BridgeTower/bridgetower-base-itm-mlm" + "BridgeTower/bridgetower-base-itm-mlm", # See all bridgetower models at https://huggingface.co/BridgeTower ] diff --git a/src/transformers/models/bridgetower/processing_bridgetower.py b/src/transformers/models/bridgetower/processing_bridgetower.py index c268d7c26f43..7718c3bf833f 100644 --- a/src/transformers/models/bridgetower/processing_bridgetower.py +++ b/src/transformers/models/bridgetower/processing_bridgetower.py @@ -38,6 +38,7 @@ class BridgeTowerProcessor(ProcessorMixin): tokenizer (`RobertaTokenizerFast`): An instance of ['RobertaTokenizerFast`]. The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "BridgeTowerImageProcessor" tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast") diff --git a/src/transformers/models/bros/configuration_bros.py b/src/transformers/models/bros/configuration_bros.py index f0a5dbff86ed..4384810a55a0 100644 --- a/src/transformers/models/bros/configuration_bros.py +++ b/src/transformers/models/bros/configuration_bros.py @@ -90,6 +90,7 @@ class BrosConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "bros" def __init__( diff --git a/src/transformers/models/bros/processing_bros.py b/src/transformers/models/bros/processing_bros.py index 77b73e48b90a..9c2e0642d8cd 100644 --- a/src/transformers/models/bros/processing_bros.py +++ b/src/transformers/models/bros/processing_bros.py @@ -34,6 +34,7 @@ class BrosProcessor(ProcessorMixin): tokenizer (`BertTokenizerFast`, *optional*): An instance of ['BertTokenizerFast`]. The tokenizer is a required input. 
""" + attributes = ["tokenizer"] tokenizer_class = ("BertTokenizer", "BertTokenizerFast") diff --git a/src/transformers/models/canine/configuration_canine.py b/src/transformers/models/canine/configuration_canine.py index e218044b553b..9cd86c6ac0e6 100644 --- a/src/transformers/models/canine/configuration_canine.py +++ b/src/transformers/models/canine/configuration_canine.py @@ -95,6 +95,7 @@ class CanineConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "canine" def __init__( diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index ead9619d926b..378a5775256f 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -54,7 +54,7 @@ CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/canine-s", - "google/canine-r" + "google/canine-r", # See all CANINE models at https://huggingface.co/models?filter=canine ] diff --git a/src/transformers/models/chinese_clip/configuration_chinese_clip.py b/src/transformers/models/chinese_clip/configuration_chinese_clip.py index 0e91200ce601..e00077399117 100644 --- a/src/transformers/models/chinese_clip/configuration_chinese_clip.py +++ b/src/transformers/models/chinese_clip/configuration_chinese_clip.py @@ -106,6 +106,7 @@ class ChineseCLIPTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "chinese_clip_text_model" def __init__( diff --git a/src/transformers/models/chinese_clip/modeling_chinese_clip.py b/src/transformers/models/chinese_clip/modeling_chinese_clip.py index ec2086bf67cf..a16fb081b193 100644 --- a/src/transformers/models/chinese_clip/modeling_chinese_clip.py +++ b/src/transformers/models/chinese_clip/modeling_chinese_clip.py @@ -718,9 +718,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, ChineseCLIPVisionMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/chinese_clip/processing_chinese_clip.py b/src/transformers/models/chinese_clip/processing_chinese_clip.py index fbd4d579df90..832f44102abf 100644 --- a/src/transformers/models/chinese_clip/processing_chinese_clip.py +++ b/src/transformers/models/chinese_clip/processing_chinese_clip.py @@ -36,6 +36,7 @@ class ChineseCLIPProcessor(ProcessorMixin): tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "ChineseCLIPImageProcessor" tokenizer_class = ("BertTokenizer", "BertTokenizerFast") diff --git a/src/transformers/models/clap/configuration_clap.py b/src/transformers/models/clap/configuration_clap.py index fca9b0087c8f..f940ee15f973 100644 --- a/src/transformers/models/clap/configuration_clap.py +++ b/src/transformers/models/clap/configuration_clap.py @@ -97,6 +97,7 @@ class ClapTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "clap_text_model" def __init__( diff --git a/src/transformers/models/clap/processing_clap.py b/src/transformers/models/clap/processing_clap.py index 7492f102b4b2..87799899945f 100644 --- a/src/transformers/models/clap/processing_clap.py +++ b/src/transformers/models/clap/processing_clap.py @@ -33,6 +33,7 @@ class ClapProcessor(ProcessorMixin): tokenizer ([`RobertaTokenizerFast`]): The tokenizer is a required input. """ + feature_extractor_class = "ClapFeatureExtractor" tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast") diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index f9ecf5f7d463..e9d2b132b812 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -96,6 +96,7 @@ class CLIPTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "clip_text_model" def __init__( diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index ccc253cc98dc..9e708e51056a 100644 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -421,9 +421,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, CLIPMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/clip/processing_clip.py b/src/transformers/models/clip/processing_clip.py index f083380e6ad1..31351f31efc5 100644 --- a/src/transformers/models/clip/processing_clip.py +++ b/src/transformers/models/clip/processing_clip.py @@ -35,6 +35,7 @@ class CLIPProcessor(ProcessorMixin): tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "CLIPImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") diff --git a/src/transformers/models/clipseg/configuration_clipseg.py b/src/transformers/models/clipseg/configuration_clipseg.py index cb178514b293..5f8ba12723a0 100644 --- a/src/transformers/models/clipseg/configuration_clipseg.py +++ b/src/transformers/models/clipseg/configuration_clipseg.py @@ -86,6 +86,7 @@ class CLIPSegTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "clipseg_text_model" def __init__( diff --git a/src/transformers/models/clipseg/modeling_clipseg.py b/src/transformers/models/clipseg/modeling_clipseg.py index 4ef9395eb259..c0cf6b3b1657 100644 --- a/src/transformers/models/clipseg/modeling_clipseg.py +++ b/src/transformers/models/clipseg/modeling_clipseg.py @@ -77,8 +77,7 @@ class CLIPSegOutput(ModelOutput): text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegTextModel`]. image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): - The image embeddings obtained by applying the projection layer to the pooled output of - [`CLIPSegVisionModel`]. + The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegVisionModel`]. text_model_output(`BaseModelOutputWithPooling`): The output of the [`CLIPSegTextModel`]. vision_model_output(`BaseModelOutputWithPooling`): @@ -443,9 +442,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, CLIPSegMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/clipseg/processing_clipseg.py b/src/transformers/models/clipseg/processing_clipseg.py index bc1d36a1c668..e57021f213ab 100644 --- a/src/transformers/models/clipseg/processing_clipseg.py +++ b/src/transformers/models/clipseg/processing_clipseg.py @@ -35,6 +35,7 @@ class CLIPSegProcessor(ProcessorMixin): tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "ViTImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") diff --git a/src/transformers/models/clvp/modeling_clvp.py b/src/transformers/models/clvp/modeling_clvp.py index 045ac33ffb82..db2bbe3f00ea 100644 --- a/src/transformers/models/clvp/modeling_clvp.py +++ b/src/transformers/models/clvp/modeling_clvp.py @@ -684,9 +684,7 @@ def _init_weights(self, module): module.bias.data.zero_() elif isinstance(module, ClvpEncoderMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.proj.weight if getattr(module.fc1, "proj") else module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/clvp/processing_clvp.py b/src/transformers/models/clvp/processing_clvp.py index cf4bd4de8f78..0723986db975 100644 --- a/src/transformers/models/clvp/processing_clvp.py +++ b/src/transformers/models/clvp/processing_clvp.py @@ -34,6 +34,7 @@ class ClvpProcessor(ProcessorMixin): tokenizer (`ClvpTokenizer`): An instance of [`ClvpTokenizer`]. The tokenizer is a required input. """ + feature_extractor_class = "ClvpFeatureExtractor" tokenizer_class = "ClvpTokenizer" model_input_names = [ @@ -76,15 +77,15 @@ def __call__(self, *args, **kwargs): # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp def batch_decode(self, *args, **kwargs): """ - This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer - to the docstring of this method for more information. + This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp def decode(self, *args, **kwargs): """ - This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the - docstring of this method for more information. + This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/codegen/configuration_codegen.py b/src/transformers/models/codegen/configuration_codegen.py index d960b21b6afb..73c019870f1f 100644 --- a/src/transformers/models/codegen/configuration_codegen.py +++ b/src/transformers/models/codegen/configuration_codegen.py @@ -105,6 +105,7 @@ class CodeGenConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "codegen" attribute_map = { "max_position_embeddings": "n_positions", diff --git a/src/transformers/models/conditional_detr/configuration_conditional_detr.py b/src/transformers/models/conditional_detr/configuration_conditional_detr.py index 356e5c0a574b..5f9e49db6cc2 100644 --- a/src/transformers/models/conditional_detr/configuration_conditional_detr.py +++ b/src/transformers/models/conditional_detr/configuration_conditional_detr.py @@ -134,6 +134,7 @@ class ConditionalDetrConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "conditional_detr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py index 998cb7419174..c46527971f1a 100644 --- a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py +++ b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py @@ -478,8 +478,7 @@ def post_process_panoptic_sample( threshold=0.85, ) -> Dict: """ - Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single - sample. + Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single sample. Args: out_logits (`torch.Tensor`): @@ -1454,8 +1453,7 @@ def post_process_object_detection( # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ - Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports - PyTorch. + Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): @@ -1511,8 +1509,7 @@ def post_process_instance_segmentation( return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ - Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports - PyTorch. + Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): @@ -1596,8 +1593,8 @@ def post_process_panoptic_segmentation( target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ - Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only - supports PyTorch. + Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports + PyTorch. 
Args: outputs ([`ConditionalDetrForSegmentation`]): diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py index 961febc480ba..fd9a9d7e6f20 100644 --- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -153,8 +153,8 @@ class ConditionalDetrObjectDetectionOutput(ModelOutput): pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding - possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve - the unnormalized bounding boxes. + possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the + unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and @@ -217,14 +217,14 @@ class ConditionalDetrSegmentationOutput(ModelOutput): pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding - possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve - the unnormalized bounding boxes. + possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the + unnormalized bounding boxes. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): Segmentation masks logits for all queries. See also [`~ConditionalDetrImageProcessor.post_process_semantic_segmentation`] or [`~ConditionalDetrImageProcessor.post_process_instance_segmentation`] - [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and - panoptic segmentation masks respectively. + [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic + segmentation masks respectively. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. 
It is a list of dictionaries containing the two above keys (`logits` and diff --git a/src/transformers/models/convbert/configuration_convbert.py b/src/transformers/models/convbert/configuration_convbert.py index 4c1032f4ffa0..bbdbd26349b4 100644 --- a/src/transformers/models/convbert/configuration_convbert.py +++ b/src/transformers/models/convbert/configuration_convbert.py @@ -96,6 +96,7 @@ class ConvBertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "convbert" def __init__( diff --git a/src/transformers/models/convbert/tokenization_convbert.py b/src/transformers/models/convbert/tokenization_convbert.py index 439beb7abb4d..8c359886cf74 100644 --- a/src/transformers/models/convbert/tokenization_convbert.py +++ b/src/transformers/models/convbert/tokenization_convbert.py @@ -263,8 +263,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/convbert/tokenization_convbert_fast.py b/src/transformers/models/convbert/tokenization_convbert_fast.py index 7ccc21b3e058..14909876ded8 100644 --- a/src/transformers/models/convbert/tokenization_convbert_fast.py +++ b/src/transformers/models/convbert/tokenization_convbert_fast.py @@ -168,8 +168,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A ConvBERT sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/convnext/configuration_convnext.py b/src/transformers/models/convnext/configuration_convnext.py index 0cba78040579..24fb822b8460 100644 --- a/src/transformers/models/convnext/configuration_convnext.py +++ b/src/transformers/models/convnext/configuration_convnext.py @@ -87,6 +87,7 @@ class ConvNextConfig(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "convnext" def __init__( diff --git a/src/transformers/models/convnextv2/configuration_convnextv2.py b/src/transformers/models/convnextv2/configuration_convnextv2.py index 14dfcf85124e..952760eb9a26 100644 --- a/src/transformers/models/convnextv2/configuration_convnextv2.py +++ b/src/transformers/models/convnextv2/configuration_convnextv2.py @@ -79,6 +79,7 @@ class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "convnextv2" def __init__( diff --git a/src/transformers/models/cpmant/configuration_cpmant.py b/src/transformers/models/cpmant/configuration_cpmant.py index bd85244c81f3..7013b8dde73b 100644 --- a/src/transformers/models/cpmant/configuration_cpmant.py +++ b/src/transformers/models/cpmant/configuration_cpmant.py @@ -84,6 +84,7 @@ class CpmAntConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "cpmant" def __init__( diff --git a/src/transformers/models/cvt/configuration_cvt.py b/src/transformers/models/cvt/configuration_cvt.py index a540c0f4807c..f1d96fc17ea5 100644 --- a/src/transformers/models/cvt/configuration_cvt.py +++ b/src/transformers/models/cvt/configuration_cvt.py @@ -96,6 +96,7 @@ class CvtConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "cvt" def __init__( diff --git a/src/transformers/models/data2vec/configuration_data2vec_audio.py b/src/transformers/models/data2vec/configuration_data2vec_audio.py index 6c24f3effbaa..e37def379fbb 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_audio.py +++ b/src/transformers/models/data2vec/configuration_data2vec_audio.py @@ -168,6 +168,7 @@ class Data2VecAudioConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "data2vec-audio" def __init__( diff --git a/src/transformers/models/data2vec/configuration_data2vec_text.py b/src/transformers/models/data2vec/configuration_data2vec_text.py index 305a3ea5e4ff..01a81e95b412 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_text.py +++ b/src/transformers/models/data2vec/configuration_data2vec_text.py @@ -95,6 +95,7 @@ class Data2VecTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "data2vec-text" def __init__( diff --git a/src/transformers/models/data2vec/configuration_data2vec_vision.py b/src/transformers/models/data2vec/configuration_data2vec_vision.py index b45f8420ca00..5d8e4a252a7c 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_vision.py +++ b/src/transformers/models/data2vec/configuration_data2vec_vision.py @@ -111,6 +111,7 @@ class Data2VecVisionConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = 
"data2vec-vision" def __init__( diff --git a/src/transformers/models/data2vec/modeling_data2vec_vision.py b/src/transformers/models/data2vec/modeling_data2vec_vision.py index 49f8c411c337..b749b7bf1548 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_data2vec_vision.py @@ -289,8 +289,8 @@ def forward( # Copied from transformers.models.beit.modeling_beit.BeitSelfOutput with Beit->Data2VecVision class Data2VecVisionSelfOutput(nn.Module): """ - The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due - to the layernorm applied before each block. + The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due to the + layernorm applied before each block. """ def __init__(self, config: Data2VecVisionConfig) -> None: diff --git a/src/transformers/models/deberta/configuration_deberta.py b/src/transformers/models/deberta/configuration_deberta.py index 94ea91cd3a08..f6db66f0d8d9 100644 --- a/src/transformers/models/deberta/configuration_deberta.py +++ b/src/transformers/models/deberta/configuration_deberta.py @@ -105,6 +105,7 @@ class DebertaConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "deberta" def __init__( @@ -148,7 +149,7 @@ def __init__( self.position_biased_input = position_biased_input # Backwards compatibility - if type(pos_att_type) == str: + if isinstance(pos_att_type, str): pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")] self.pos_att_type = pos_att_type diff --git a/src/transformers/models/deberta_v2/configuration_deberta_v2.py b/src/transformers/models/deberta_v2/configuration_deberta_v2.py index d55486cd5633..68f2112754a4 100644 --- a/src/transformers/models/deberta_v2/configuration_deberta_v2.py +++ b/src/transformers/models/deberta_v2/configuration_deberta_v2.py @@ -107,6 +107,7 @@ class DebertaV2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "deberta-v2" def __init__( @@ -150,7 +151,7 @@ def __init__( self.position_biased_input = position_biased_input # Backwards compatibility - if type(pos_att_type) == str: + if isinstance(pos_att_type, str): pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")] self.pos_att_type = pos_att_type diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index 0d3ed94aeab2..a8f064369268 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -780,15 +780,11 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ if "c2p" in self.pos_att_type: pos_key_layer = self.transpose_for_scores( self.pos_key_proj(rel_embeddings), self.num_attention_heads - ).repeat( - query_layer.size(0) // self.num_attention_heads, 1, 1 - ) # .split(self.all_head_size, dim=-1) + ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1) if "p2c" in self.pos_att_type: pos_query_layer = self.transpose_for_scores( self.pos_query_proj(rel_embeddings), self.num_attention_heads - ).repeat( - query_layer.size(0) // self.num_attention_heads, 1, 1 - ) # .split(self.all_head_size, dim=-1) + ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1) score = 0 # 
content->position diff --git a/src/transformers/models/deformable_detr/configuration_deformable_detr.py b/src/transformers/models/deformable_detr/configuration_deformable_detr.py index dbe5fd7f0a78..a6161061d9a7 100644 --- a/src/transformers/models/deformable_detr/configuration_deformable_detr.py +++ b/src/transformers/models/deformable_detr/configuration_deformable_detr.py @@ -143,6 +143,7 @@ class DeformableDetrConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "deformable_detr" attribute_map = { "hidden_size": "d_model", diff --git a/src/transformers/models/deit/configuration_deit.py b/src/transformers/models/deit/configuration_deit.py index 905473c13eb5..ef346637ba7d 100644 --- a/src/transformers/models/deit/configuration_deit.py +++ b/src/transformers/models/deit/configuration_deit.py @@ -91,6 +91,7 @@ class DeiTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "deit" def __init__( diff --git a/src/transformers/models/deprecated/mctct/configuration_mctct.py b/src/transformers/models/deprecated/mctct/configuration_mctct.py index e91104112b68..aea085cc5a61 100644 --- a/src/transformers/models/deprecated/mctct/configuration_mctct.py +++ b/src/transformers/models/deprecated/mctct/configuration_mctct.py @@ -114,6 +114,7 @@ class MCTCTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mctct" def __init__( diff --git a/src/transformers/models/deprecated/mctct/processing_mctct.py b/src/transformers/models/deprecated/mctct/processing_mctct.py index 764ed8d3db50..4e0cbe27dd9b 100644 --- a/src/transformers/models/deprecated/mctct/processing_mctct.py +++ b/src/transformers/models/deprecated/mctct/processing_mctct.py @@ -34,6 +34,7 @@ class MCTCTProcessor(ProcessorMixin): tokenizer (`AutoTokenizer`): An instance of [`AutoTokenizer`]. The tokenizer is a required input. 
""" + feature_extractor_class = "MCTCTFeatureExtractor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/deprecated/open_llama/configuration_open_llama.py b/src/transformers/models/deprecated/open_llama/configuration_open_llama.py index 1d5756cd38a3..5786abac850d 100644 --- a/src/transformers/models/deprecated/open_llama/configuration_open_llama.py +++ b/src/transformers/models/deprecated/open_llama/configuration_open_llama.py @@ -90,6 +90,7 @@ class OpenLlamaConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "open-llama" def __init__( diff --git a/src/transformers/models/deprecated/retribert/configuration_retribert.py b/src/transformers/models/deprecated/retribert/configuration_retribert.py index 11d19193b360..3861b9c90f33 100644 --- a/src/transformers/models/deprecated/retribert/configuration_retribert.py +++ b/src/transformers/models/deprecated/retribert/configuration_retribert.py @@ -72,6 +72,7 @@ class RetriBertConfig(PretrainedConfig): projection_dim (`int`, *optional*, defaults to 128): Final dimension of the query and document representation after projection """ + model_type = "retribert" def __init__( diff --git a/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py b/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py index a64a0cbd89e1..cfad075c6ae8 100644 --- a/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py +++ b/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py @@ -100,6 +100,7 @@ class TrajectoryTransformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "trajectory_transformer" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/deprecated/van/configuration_van.py b/src/transformers/models/deprecated/van/configuration_van.py index 70942ad645b4..85f228193c45 100644 --- a/src/transformers/models/deprecated/van/configuration_van.py +++ b/src/transformers/models/deprecated/van/configuration_van.py @@ -77,6 +77,7 @@ class VanConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "van" def __init__( diff --git a/src/transformers/models/deta/configuration_deta.py b/src/transformers/models/deta/configuration_deta.py index 8abe077ae126..0d8e59e96081 100644 --- a/src/transformers/models/deta/configuration_deta.py +++ b/src/transformers/models/deta/configuration_deta.py @@ -124,6 +124,7 @@ class DetaConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "deta" attribute_map = { "hidden_size": "d_model", diff --git a/src/transformers/models/deta/modeling_deta.py b/src/transformers/models/deta/modeling_deta.py index db607b183b79..0f245f2a3058 100644 --- a/src/transformers/models/deta/modeling_deta.py +++ b/src/transformers/models/deta/modeling_deta.py @@ -70,8 +70,8 @@ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderOutput with DeformableDetr->Deta class DetaDecoderOutput(ModelOutput): """ - Base class for outputs of the DetaDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions, - namely: + Base class for outputs of the DetaDecoder. 
This class adds two attributes to + BaseModelOutputWithCrossAttentions, namely: - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) - a stacked tensor of intermediate reference points. diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index 3b6ac3624f10..fadd9ce0872a 100644 --- a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -132,6 +132,7 @@ class DetrConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "detr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/dinat/configuration_dinat.py b/src/transformers/models/dinat/configuration_dinat.py index b70797b55c34..4baa92f469fd 100644 --- a/src/transformers/models/dinat/configuration_dinat.py +++ b/src/transformers/models/dinat/configuration_dinat.py @@ -94,6 +94,7 @@ class DinatConfig(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "dinat" attribute_map = { diff --git a/src/transformers/models/dinov2/configuration_dinov2.py b/src/transformers/models/dinov2/configuration_dinov2.py index 4c3c26623a3a..741007f30767 100644 --- a/src/transformers/models/dinov2/configuration_dinov2.py +++ b/src/transformers/models/dinov2/configuration_dinov2.py @@ -105,6 +105,7 @@ class Dinov2Config(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "dinov2" def __init__( diff --git a/src/transformers/models/distilbert/configuration_distilbert.py b/src/transformers/models/distilbert/configuration_distilbert.py index 3dabb3d3e234..97b5b7c86906 100644 --- a/src/transformers/models/distilbert/configuration_distilbert.py +++ b/src/transformers/models/distilbert/configuration_distilbert.py @@ -98,6 +98,7 @@ class DistilBertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "distilbert" attribute_map = { "hidden_size": "dim", diff --git a/src/transformers/models/donut/configuration_donut_swin.py b/src/transformers/models/donut/configuration_donut_swin.py index c7d6792467fe..9de3181b55bc 100644 --- a/src/transformers/models/donut/configuration_donut_swin.py +++ b/src/transformers/models/donut/configuration_donut_swin.py @@ -85,6 +85,7 @@ class DonutSwinConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "donut-swin" attribute_map = { diff --git a/src/transformers/models/donut/processing_donut.py b/src/transformers/models/donut/processing_donut.py index f797aec18ed4..5636ecb9435c 100644 --- a/src/transformers/models/donut/processing_donut.py +++ b/src/transformers/models/donut/processing_donut.py @@ -37,6 +37,7 @@ class DonutProcessor(ProcessorMixin): tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*): An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/dpr/configuration_dpr.py b/src/transformers/models/dpr/configuration_dpr.py index 06433f57daa9..3b6785c6b540 100644 --- a/src/transformers/models/dpr/configuration_dpr.py +++ b/src/transformers/models/dpr/configuration_dpr.py @@ -109,6 +109,7 @@ class DPRConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "dpr" def __init__( diff --git a/src/transformers/models/dpt/configuration_dpt.py b/src/transformers/models/dpt/configuration_dpt.py index 99994c5cb2d8..e668bb7f0217 100644 --- a/src/transformers/models/dpt/configuration_dpt.py +++ b/src/transformers/models/dpt/configuration_dpt.py @@ -126,6 +126,7 @@ class DPTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "dpt" def __init__( diff --git a/src/transformers/models/efficientnet/configuration_efficientnet.py b/src/transformers/models/efficientnet/configuration_efficientnet.py index e6b6a1c261ca..49e50a45e115 100644 --- a/src/transformers/models/efficientnet/configuration_efficientnet.py +++ b/src/transformers/models/efficientnet/configuration_efficientnet.py @@ -100,6 +100,7 @@ class EfficientNetConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "efficientnet" def __init__( diff --git a/src/transformers/models/electra/configuration_electra.py b/src/transformers/models/electra/configuration_electra.py index d8e1de0fc97f..d45f62930212 100644 --- a/src/transformers/models/electra/configuration_electra.py +++ b/src/transformers/models/electra/configuration_electra.py @@ -130,6 +130,7 @@ class ElectraConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "electra" def __init__( diff --git a/src/transformers/models/electra/modeling_flax_electra.py b/src/transformers/models/electra/modeling_flax_electra.py index 8fced6ff1ea2..64d49eb17a46 100644 --- a/src/transformers/models/electra/modeling_flax_electra.py +++ b/src/transformers/models/electra/modeling_flax_electra.py @@ -1196,6 +1196,7 @@ class FlaxElectraSequenceSummary(nn.Module): - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. """ + config: ElectraConfig dtype: jnp.dtype = jnp.float32 diff --git a/src/transformers/models/electra/tokenization_electra.py b/src/transformers/models/electra/tokenization_electra.py index fb9bf9dfa13c..6ea9a600a6e9 100644 --- a/src/transformers/models/electra/tokenization_electra.py +++ b/src/transformers/models/electra/tokenization_electra.py @@ -280,8 +280,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Electra - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A Electra sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/electra/tokenization_electra_fast.py b/src/transformers/models/electra/tokenization_electra_fast.py index 81704317f869..e76082de174d 100644 --- a/src/transformers/models/electra/tokenization_electra_fast.py +++ b/src/transformers/models/electra/tokenization_electra_fast.py @@ -201,8 +201,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ELECTRA - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ELECTRA sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/encodec/configuration_encodec.py b/src/transformers/models/encodec/configuration_encodec.py index e75711d9264e..af493c325bec 100644 --- a/src/transformers/models/encodec/configuration_encodec.py +++ b/src/transformers/models/encodec/configuration_encodec.py @@ -108,6 +108,7 @@ class EncodecConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "encodec" def __init__( diff --git a/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py index 15fed4dbd1bb..9f373ea45442 100644 --- a/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py @@ -68,6 +68,7 @@ class EncoderDecoderConfig(PretrainedConfig): >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model") >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config) ```""" + model_type = "encoder-decoder" is_composition = True diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py index 27a213707c76..12959f8f200a 100644 --- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py @@ -174,6 +174,7 @@ class EncoderDecoderModel(PreTrainedModel): :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder. """ + config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" main_input_name = "input_ids" diff --git a/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py index 3d9679f26a1c..93cac0b3f657 100644 --- a/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py @@ -306,6 +306,7 @@ class FlaxEncoderDecoderModel(FlaxPreTrainedModel): decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method for the encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder. 
""" + config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" module_class = FlaxEncoderDecoderModule diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py index 14653410b029..5b4fc5884c10 100644 --- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py @@ -197,6 +197,7 @@ class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss): [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class method for the decoder. """ + config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" load_weight_prefix = "tf_encoder_decoder_model" diff --git a/src/transformers/models/ernie/configuration_ernie.py b/src/transformers/models/ernie/configuration_ernie.py index 91253ab1384b..143fb8cc5870 100644 --- a/src/transformers/models/ernie/configuration_ernie.py +++ b/src/transformers/models/ernie/configuration_ernie.py @@ -109,6 +109,7 @@ class ErnieConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "ernie" def __init__( diff --git a/src/transformers/models/ernie_m/configuration_ernie_m.py b/src/transformers/models/ernie_m/configuration_ernie_m.py index d23d616b8190..eb7eaad83721 100644 --- a/src/transformers/models/ernie_m/configuration_ernie_m.py +++ b/src/transformers/models/ernie_m/configuration_ernie_m.py @@ -78,6 +78,7 @@ class ErnieMConfig(PretrainedConfig): A normal_initializer initializes weight matrices as normal distributions. See `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`. 
""" + model_type = "ernie_m" attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} diff --git a/src/transformers/models/esm/configuration_esm.py b/src/transformers/models/esm/configuration_esm.py index e51c5d01f155..75f8609ab0ff 100644 --- a/src/transformers/models/esm/configuration_esm.py +++ b/src/transformers/models/esm/configuration_esm.py @@ -97,6 +97,7 @@ class EsmConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "esm" def __init__( diff --git a/src/transformers/models/esm/modeling_esmfold.py b/src/transformers/models/esm/modeling_esmfold.py index a0e9ebcc2d6e..4843d688dab8 100644 --- a/src/transformers/models/esm/modeling_esmfold.py +++ b/src/transformers/models/esm/modeling_esmfold.py @@ -229,7 +229,7 @@ def dict_multimap(fn, dicts): new_dict = {} for k, v in first.items(): all_v = [d[k] for d in dicts] - if type(v) is dict: + if isinstance(v, dict): new_dict[k] = dict_multimap(fn, all_v) else: new_dict[k] = fn(all_v) @@ -1060,7 +1060,7 @@ def __init__(self, r: float, batch_dim: Union[int, List[int]]): super().__init__() self.r = r - if type(batch_dim) == int: + if isinstance(batch_dim, int): batch_dim = [batch_dim] self.batch_dim = batch_dim self.dropout = nn.Dropout(self.r) @@ -2254,7 +2254,7 @@ def infer( seqs: Union[str, List[str]], position_ids=None, ): - if type(seqs) is str: + if isinstance(seqs, str): lst = [seqs] else: lst = seqs @@ -2312,7 +2312,7 @@ def output_to_pdb(output: Dict) -> List[str]: def infer_pdb(self, seqs, *args, **kwargs) -> str: """Returns the pdb (file) string from the model given an input sequence.""" - assert type(seqs) is str + assert isinstance(seqs, str) output = self.infer(seqs, *args, **kwargs) return self.output_to_pdb(output)[0] diff --git a/src/transformers/models/falcon/configuration_falcon.py b/src/transformers/models/falcon/configuration_falcon.py index 251dd7f2d571..fe0a450a24eb 100644 --- a/src/transformers/models/falcon/configuration_falcon.py +++ b/src/transformers/models/falcon/configuration_falcon.py @@ -104,6 +104,7 @@ class FalconConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "falcon" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index 068119d35f17..375e19360f2a 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -518,9 +518,10 @@ def call( # check inputs # assert shape_list(lengths)[0] == bs - tf.debugging.assert_equal( - shape_list(lengths)[0], bs - ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" + ( + tf.debugging.assert_equal(shape_list(lengths)[0], bs), + f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched", + ) # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) @@ -539,17 +540,19 @@ def call( position_ids = tf.tile(position_ids, (bs, 1)) # assert shape_list(position_ids) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(position_ids), [bs, slen] - ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" + ( + tf.debugging.assert_equal(shape_list(position_ids), [bs, slen]), + f"Position id shape 
{shape_list(position_ids)} and input shape {[bs, slen]} mismatched", + ) # position_ids = position_ids.transpose(0, 1) # langs if langs is not None: # assert shape_list(langs) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(langs), [bs, slen] - ), f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched" + ( + tf.debugging.assert_equal(shape_list(langs), [bs, slen]), + f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched", + ) # langs = langs.transpose(0, 1) # Prepare head mask if needed diff --git a/src/transformers/models/flava/configuration_flava.py b/src/transformers/models/flava/configuration_flava.py index 4125d9126220..fee98ecfa9c4 100644 --- a/src/transformers/models/flava/configuration_flava.py +++ b/src/transformers/models/flava/configuration_flava.py @@ -218,6 +218,7 @@ class FlavaTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "flava_text_model" def __init__( diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py index de5ec177ae4b..64ede9c89ed8 100644 --- a/src/transformers/models/flava/modeling_flava.py +++ b/src/transformers/models/flava/modeling_flava.py @@ -1254,9 +1254,7 @@ def get_text_features( ... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt" ... ) >>> text_features = model.get_text_features(**inputs) - ```""".format( - _CHECKPOINT_FOR_DOC - ) + ```""".format(_CHECKPOINT_FOR_DOC) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, @@ -1305,9 +1303,7 @@ def get_image_features( >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) - ```""".format( - _CHECKPOINT_FOR_DOC - ) + ```""".format(_CHECKPOINT_FOR_DOC) image_outputs = self.image_model( pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, @@ -1583,9 +1579,7 @@ def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor: >>> outputs = model.get_codebook_indices(**inputs) ``` - """.format( - _CHECKPOINT_FOR_CODEBOOK_DOC - ) + """.format(_CHECKPOINT_FOR_CODEBOOK_DOC) z_logits = self.blocks(pixel_values) return torch.argmax(z_logits, axis=1) @@ -1620,9 +1614,7 @@ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: >>> print(outputs.shape) (1, 196) ``` - """.format( - _CHECKPOINT_FOR_CODEBOOK_DOC - ) + """.format(_CHECKPOINT_FOR_CODEBOOK_DOC) if len(pixel_values.shape) != 4: raise ValueError(f"input shape {pixel_values.shape} is not 4d") if pixel_values.shape[1] != self.input_channels: diff --git a/src/transformers/models/flava/processing_flava.py b/src/transformers/models/flava/processing_flava.py index 1736257a3555..7f439b040a8f 100644 --- a/src/transformers/models/flava/processing_flava.py +++ b/src/transformers/models/flava/processing_flava.py @@ -36,6 +36,7 @@ class FlavaProcessor(ProcessorMixin): image_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "FlavaImageProcessor" tokenizer_class = ("BertTokenizer", "BertTokenizerFast") diff --git a/src/transformers/models/fnet/configuration_fnet.py b/src/transformers/models/fnet/configuration_fnet.py index 9efa06487756..c2cf25615bb2 100644 --- a/src/transformers/models/fnet/configuration_fnet.py +++ b/src/transformers/models/fnet/configuration_fnet.py @@ -22,7 +22,7 @@ FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", - "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" + "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json", # See all FNet models at https://huggingface.co/models?filter=fnet } @@ -84,6 +84,7 @@ class FNetConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "fnet" def __init__( diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index 2784880f3c79..dac75178d5f4 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -61,7 +61,7 @@ FNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/fnet-base", - "google/fnet-large" + "google/fnet-large", # See all FNet models at https://huggingface.co/models?filter=fnet ] diff --git a/src/transformers/models/focalnet/configuration_focalnet.py b/src/transformers/models/focalnet/configuration_focalnet.py index 83540c0f3491..95ee92de1bfc 100644 --- a/src/transformers/models/focalnet/configuration_focalnet.py +++ b/src/transformers/models/focalnet/configuration_focalnet.py @@ -104,6 +104,7 @@ class FocalNetConfig(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "focalnet" def __init__( diff --git a/src/transformers/models/fsmt/configuration_fsmt.py b/src/transformers/models/fsmt/configuration_fsmt.py index afd97f137dc3..493e6b6bf5d6 100644 --- a/src/transformers/models/fsmt/configuration_fsmt.py +++ b/src/transformers/models/fsmt/configuration_fsmt.py @@ -28,6 +28,7 @@ class DecoderConfig(PretrainedConfig): r""" Configuration class for FSMT's decoder specific things. 
note: this is a private helper class """ + model_type = "fsmt_decoder" def __init__(self, vocab_size=0, bos_token_id=0): @@ -132,6 +133,7 @@ class FSMTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "fsmt" attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 1e566b150f76..b0dd427a6814 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -472,9 +472,7 @@ def __init__(self, config: FSMTConfig, embed_tokens): self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) - self.layers = nn.ModuleList( - [EncoderLayer(config) for _ in range(config.encoder_layers)] - ) # type: List[EncoderLayer] + self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) # type: List[EncoderLayer] def forward( self, @@ -682,9 +680,7 @@ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding): self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) - self.layers = nn.ModuleList( - [DecoderLayer(config) for _ in range(config.decoder_layers)] - ) # type: List[DecoderLayer] + self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)]) # type: List[DecoderLayer] if is_deepspeed_zero3_enabled(): import deepspeed diff --git a/src/transformers/models/funnel/configuration_funnel.py b/src/transformers/models/funnel/configuration_funnel.py index d049b15911b0..228216163b24 100644 --- a/src/transformers/models/funnel/configuration_funnel.py +++ b/src/transformers/models/funnel/configuration_funnel.py @@ -96,6 +96,7 @@ class FunnelConfig(PretrainedConfig): pool_q_only (`bool`, *optional*, defaults to `True`): Whether or not to apply the pooling only to the query or to query, key and values for the attention layers. """ + model_type = "funnel" attribute_map = { "hidden_size": "d_model", diff --git a/src/transformers/models/fuyu/configuration_fuyu.py b/src/transformers/models/fuyu/configuration_fuyu.py index c385ad06130c..9376ccb5ef4e 100644 --- a/src/transformers/models/fuyu/configuration_fuyu.py +++ b/src/transformers/models/fuyu/configuration_fuyu.py @@ -102,6 +102,7 @@ class FuyuConfig(PretrainedConfig): >>> # Initializing a Fuyu fuyu-7b style configuration >>> configuration = FuyuConfig() ```""" + model_type = "fuyu" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/fuyu/processing_fuyu.py b/src/transformers/models/fuyu/processing_fuyu.py index e0f362a6c876..f7078554cbc0 100644 --- a/src/transformers/models/fuyu/processing_fuyu.py +++ b/src/transformers/models/fuyu/processing_fuyu.py @@ -319,6 +319,7 @@ class FuyuProcessor(ProcessorMixin): tokenizer ([`LlamaTokenizerFast`]): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "FuyuImageProcessor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/git/configuration_git.py b/src/transformers/models/git/configuration_git.py index 41f54612afdb..bfc2b4bf745b 100644 --- a/src/transformers/models/git/configuration_git.py +++ b/src/transformers/models/git/configuration_git.py @@ -188,6 +188,7 @@ class GitConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "git" def __init__( diff --git a/src/transformers/models/git/processing_git.py b/src/transformers/models/git/processing_git.py index 3e11be322b4a..2f0851c06274 100644 --- a/src/transformers/models/git/processing_git.py +++ b/src/transformers/models/git/processing_git.py @@ -33,6 +33,7 @@ class GitProcessor(ProcessorMixin): tokenizer ([`AutoTokenizer`]): The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/glpn/configuration_glpn.py b/src/transformers/models/glpn/configuration_glpn.py index 63056c4c04fe..5408ee94a8ad 100644 --- a/src/transformers/models/glpn/configuration_glpn.py +++ b/src/transformers/models/glpn/configuration_glpn.py @@ -90,6 +90,7 @@ class GLPNConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "glpn" def __init__( diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index b5adf39ecee7..bc95c774039f 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -1534,7 +1534,20 @@ def __init__(self, config): output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_loss=0.25, - expected_output=["Lead", "Lead", "Lead", "Position", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead"], + expected_output=[ + "Lead", + "Lead", + "Lead", + "Position", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + ], ) # fmt: on def forward( diff --git a/src/transformers/models/gpt_neo/configuration_gpt_neo.py b/src/transformers/models/gpt_neo/configuration_gpt_neo.py index 9b84b18e26c0..96c04cb87526 100644 --- a/src/transformers/models/gpt_neo/configuration_gpt_neo.py +++ b/src/transformers/models/gpt_neo/configuration_gpt_neo.py @@ -102,6 +102,7 @@ class GPTNeoConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "gpt_neo" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} diff --git a/src/transformers/models/gpt_neox/configuration_gpt_neox.py b/src/transformers/models/gpt_neox/configuration_gpt_neox.py index e263a8a9fee6..f24f185b6839 100644 --- a/src/transformers/models/gpt_neox/configuration_gpt_neox.py +++ b/src/transformers/models/gpt_neox/configuration_gpt_neox.py @@ -101,6 +101,7 @@ class GPTNeoXConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config # doctest: +SKIP ```""" + model_type = "gpt_neox" def __init__( diff --git a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py index 8d8519b9eae8..ddf3d4dec8b9 100644 --- 
a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py @@ -81,6 +81,7 @@ class GPTNeoXJapaneseConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "gpt_neox_japanese" def __init__( diff --git a/src/transformers/models/gptj/configuration_gptj.py b/src/transformers/models/gptj/configuration_gptj.py index b40861c354be..47b122427932 100644 --- a/src/transformers/models/gptj/configuration_gptj.py +++ b/src/transformers/models/gptj/configuration_gptj.py @@ -85,6 +85,7 @@ class GPTJConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "gptj" attribute_map = { "max_position_embeddings": "n_positions", diff --git a/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py index d20b79daacfd..c25e4b0e1ea2 100644 --- a/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py @@ -87,6 +87,7 @@ class GPTSanJapaneseConfig(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) """ + model_type = "gptsan-japanese" keys_to_ignore_at_inference = [ "past_key_values", diff --git a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py index dcbda1d7384f..2ac76be8d259 100644 --- a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py @@ -1288,7 +1288,7 @@ def prepare_inputs_for_generation( past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, **kwargs, ): - if type(spout) is list: + if isinstance(spout, list): spout = torch.tensor(spout).float() if input_ids is not None: spout = spout.to(input_ids.device) diff --git a/src/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py index 3c3132edb444..df3f94dc1e89 100644 --- a/src/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py @@ -375,7 +375,7 @@ def _batch_encode_plus( verbose: bool = True, ) -> BatchEncoding: # This tokenizer converts input text pairs into Prefix input and subsequent input - if type(batch_text_or_text_pairs[0]) is tuple or type(batch_text_or_text_pairs[0]) is list: + if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(batch_text_or_text_pairs[0], list): # As a single text with an explicit un-prefix position batch_prefix_texts = [] for pref, txt in batch_text_or_text_pairs: diff --git a/src/transformers/models/graphormer/configuration_graphormer.py b/src/transformers/models/graphormer/configuration_graphormer.py index 7f270f943434..9d49fbea2944 100644 --- a/src/transformers/models/graphormer/configuration_graphormer.py +++ b/src/transformers/models/graphormer/configuration_graphormer.py @@ -129,6 +129,7 @@ class GraphormerConfig(PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "graphormer" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/groupvit/configuration_groupvit.py
b/src/transformers/models/groupvit/configuration_groupvit.py index 8acf0d1c4e3b..3638ea06923c 100644 --- a/src/transformers/models/groupvit/configuration_groupvit.py +++ b/src/transformers/models/groupvit/configuration_groupvit.py @@ -88,6 +88,7 @@ class GroupViTTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "groupvit_text_model" def __init__( diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 3ed415741941..c99c96ec87f8 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -783,9 +783,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, GroupViTMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/hubert/configuration_hubert.py b/src/transformers/models/hubert/configuration_hubert.py index 8c9f3d6929e2..7e9f1d9f9046 100644 --- a/src/transformers/models/hubert/configuration_hubert.py +++ b/src/transformers/models/hubert/configuration_hubert.py @@ -159,6 +159,7 @@ class HubertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "hubert" def __init__( diff --git a/src/transformers/models/idefics/configuration_idefics.py b/src/transformers/models/idefics/configuration_idefics.py index 12d710d726dc..a61c96e0a418 100644 --- a/src/transformers/models/idefics/configuration_idefics.py +++ b/src/transformers/models/idefics/configuration_idefics.py @@ -72,6 +72,7 @@ class IdeficsVisionConfig(PretrainedConfig): initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. """ + model_type = "idefics" attribute_map = { "hidden_size": "embed_dim", @@ -134,6 +135,7 @@ class IdeficsPerceiverConfig(PretrainedConfig): qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`): Whether or not to use qk layer norms in perceiver """ + model_type = "idefics" def __init__( @@ -236,6 +238,7 @@ class IdeficsConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "idefics" is_composition = False diff --git a/src/transformers/models/idefics/processing_idefics.py b/src/transformers/models/idefics/processing_idefics.py index e6e0a9254aa1..590e2475ca62 100644 --- a/src/transformers/models/idefics/processing_idefics.py +++ b/src/transformers/models/idefics/processing_idefics.py @@ -119,6 +119,7 @@ class IdeficsProcessor(ProcessorMixin): An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input. 
image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image) """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "IdeficsImageProcessor" tokenizer_class = "LlamaTokenizerFast" diff --git a/src/transformers/models/informer/configuration_informer.py b/src/transformers/models/informer/configuration_informer.py index d8af8c793cdb..dedf09bb2bbb 100644 --- a/src/transformers/models/informer/configuration_informer.py +++ b/src/transformers/models/informer/configuration_informer.py @@ -133,6 +133,7 @@ class InformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "informer" attribute_map = { "hidden_size": "d_model", diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py index c0a5a2059502..3abf48eaec03 100644 --- a/src/transformers/models/informer/modeling_informer.py +++ b/src/transformers/models/informer/modeling_informer.py @@ -1222,7 +1222,8 @@ def forward( # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerDecoder with TimeSeriesTransformer->Informer,TimeSeriesTransformerConfig->InformerConfig,time-series-transformer->informer,Transformer->Informer,TimeSeries->Informer class InformerDecoder(InformerPreTrainedModel): """ - Informer decoder consisting of *config.decoder_layers* layers. Each layer is a [`InformerDecoderLayer`] + Informer decoder consisting of *config.decoder_layers* layers. Each layer is a + [`InformerDecoderLayer`] Args: config: InformerConfig @@ -1781,7 +1782,9 @@ def forward( ... ) >>> batch = torch.load(file) - >>> model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly") + >>> model = InformerForPrediction.from_pretrained( + ... "huggingface/informer-tourism-monthly" + ... 
) >>> # during training, one provides both past and future values >>> # as well as possible additional features diff --git a/src/transformers/models/instructblip/configuration_instructblip.py b/src/transformers/models/instructblip/configuration_instructblip.py index 78c7e4e8b65c..98c06d2fe899 100644 --- a/src/transformers/models/instructblip/configuration_instructblip.py +++ b/src/transformers/models/instructblip/configuration_instructblip.py @@ -192,6 +192,7 @@ class InstructBlipQFormerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "instructblip_qformer" def __init__( diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index 4e0173bd9970..e175cd57285a 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -1176,13 +1176,13 @@ def forward( # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: + if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - if type(encoder_attention_mask) == list: + if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) diff --git a/src/transformers/models/instructblip/processing_instructblip.py b/src/transformers/models/instructblip/processing_instructblip.py index ab4fa0f6753d..4d266d8b98e3 100644 --- a/src/transformers/models/instructblip/processing_instructblip.py +++ b/src/transformers/models/instructblip/processing_instructblip.py @@ -43,6 +43,7 @@ class InstructBlipProcessor(ProcessorMixin): qformer_tokenizer (`AutoTokenizer`): An instance of ['PreTrainedTokenizer`]. The Q-Former tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "BlipImageProcessor" tokenizer_class = "AutoTokenizer" @@ -142,8 +143,8 @@ def batch_decode(self, *args, **kwargs): # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer def decode(self, *args, **kwargs): """ - This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer - to the docstring of this method for more information. + This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/kosmos2/configuration_kosmos2.py b/src/transformers/models/kosmos2/configuration_kosmos2.py index d97269733ff7..198016a92871 100644 --- a/src/transformers/models/kosmos2/configuration_kosmos2.py +++ b/src/transformers/models/kosmos2/configuration_kosmos2.py @@ -77,6 +77,7 @@ class Kosmos2TextConfig(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). ```""" + model_type = "kosmos_2_text_model" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { @@ -271,6 +272,7 @@ class Kosmos2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "kosmos-2" is_composition = True diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 797523493eb2..e99be059f86b 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -1374,9 +1374,7 @@ def _init_weights(self, module): if module.out_proj.bias is not None: module.out_proj.bias.data.zero_() elif isinstance(module, Kosmos2VisionMLP): - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/kosmos2/processing_kosmos2.py b/src/transformers/models/kosmos2/processing_kosmos2.py index 6943c12c4871..a203ee4c506f 100644 --- a/src/transformers/models/kosmos2/processing_kosmos2.py +++ b/src/transformers/models/kosmos2/processing_kosmos2.py @@ -52,6 +52,7 @@ class Kosmos2Processor(ProcessorMixin): num_patch_index_tokens (`int`, *optional*, defaults to 1024): The number of tokens that represent patch indices. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "CLIPImageProcessor" tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") @@ -390,8 +391,8 @@ def batch_decode(self, *args, **kwargs): # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer def decode(self, *args, **kwargs): """ - This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer - to the docstring of this method for more information. + This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/layoutlm/configuration_layoutlm.py b/src/transformers/models/layoutlm/configuration_layoutlm.py index 0ca51e6d5790..77d62ded403b 100644 --- a/src/transformers/models/layoutlm/configuration_layoutlm.py +++ b/src/transformers/models/layoutlm/configuration_layoutlm.py @@ -101,6 +101,7 @@ class LayoutLMConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "layoutlm" def __init__( diff --git a/src/transformers/models/layoutlm/tokenization_layoutlm.py b/src/transformers/models/layoutlm/tokenization_layoutlm.py index de6bc4de953d..6105d5d77c15 100644 --- a/src/transformers/models/layoutlm/tokenization_layoutlm.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm.py @@ -262,8 +262,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A LayoutLM - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A LayoutLM sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py index afa92abaf877..c0bc1072f7f5 100644 --- a/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py @@ -175,8 +175,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A LayoutLM - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A LayoutLM sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py index 3cc8027c1dd5..1a8e94c2334a 100644 --- a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py @@ -114,6 +114,7 @@ class LayoutLMv2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "layoutlmv2" def __init__( diff --git a/src/transformers/models/layoutlmv2/processing_layoutlmv2.py b/src/transformers/models/layoutlmv2/processing_layoutlmv2.py index fe52c16fd250..1edf87465bbf 100644 --- a/src/transformers/models/layoutlmv2/processing_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/processing_layoutlmv2.py @@ -43,6 +43,7 @@ class LayoutLMv2Processor(ProcessorMixin): tokenizer (`LayoutLMv2Tokenizer` or `LayoutLMv2TokenizerFast`, *optional*): An instance of [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]. The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "LayoutLMv2ImageProcessor" tokenizer_class = ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast") diff --git a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py index 31ca2e00e471..1dfee1f29d79 100644 --- a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py @@ -119,6 +119,7 @@ class LayoutLMv3Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "layoutlmv3" def __init__( diff --git a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py index fe1cbcc2c5c9..3148155a4350 100644 --- a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py @@ -904,8 +904,9 @@ def forward( final_bbox = final_position_ids = None patch_height = patch_width = None if pixel_values is not None: - patch_height, patch_width = int(pixel_values.shape[2] / self.config.patch_size), int( - pixel_values.shape[3] / self.config.patch_size + patch_height, patch_width = ( + int(pixel_values.shape[2] / self.config.patch_size), + int(pixel_values.shape[3] / self.config.patch_size), ) visual_embeddings = self.forward_image(pixel_values) visual_attention_mask = torch.ones( diff --git a/src/transformers/models/layoutlmv3/processing_layoutlmv3.py b/src/transformers/models/layoutlmv3/processing_layoutlmv3.py index 31d0c5e60a54..369bd51bec28 100644 --- a/src/transformers/models/layoutlmv3/processing_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/processing_layoutlmv3.py @@ -43,6 +43,7 @@ class LayoutLMv3Processor(ProcessorMixin): tokenizer (`LayoutLMv3Tokenizer` or `LayoutLMv3TokenizerFast`, *optional*): An instance of [`LayoutLMv3Tokenizer`] or [`LayoutLMv3TokenizerFast`]. The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "LayoutLMv3ImageProcessor" tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") diff --git a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py index 4d3d1078db6a..351e811b814f 100644 --- a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py @@ -268,6 +268,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer): only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. 
""" + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES diff --git a/src/transformers/models/led/configuration_led.py b/src/transformers/models/led/configuration_led.py index 34c286ce1891..d9efc308fec3 100644 --- a/src/transformers/models/led/configuration_led.py +++ b/src/transformers/models/led/configuration_led.py @@ -97,6 +97,7 @@ class LEDConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "led" attribute_map = { "num_attention_heads": "encoder_attention_heads", diff --git a/src/transformers/models/levit/configuration_levit.py b/src/transformers/models/levit/configuration_levit.py index 06c7925a8f37..3a9546a652e6 100644 --- a/src/transformers/models/levit/configuration_levit.py +++ b/src/transformers/models/levit/configuration_levit.py @@ -87,6 +87,7 @@ class LevitConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "levit" def __init__( diff --git a/src/transformers/models/lilt/configuration_lilt.py b/src/transformers/models/lilt/configuration_lilt.py index d11899c94312..3db595e86e17 100644 --- a/src/transformers/models/lilt/configuration_lilt.py +++ b/src/transformers/models/lilt/configuration_lilt.py @@ -90,6 +90,7 @@ class LiltConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "lilt" def __init__( diff --git a/src/transformers/models/llama/configuration_llama.py b/src/transformers/models/llama/configuration_llama.py index 354bcb5b2e8f..cd16ec728115 100644 --- a/src/transformers/models/llama/configuration_llama.py +++ b/src/transformers/models/llama/configuration_llama.py @@ -110,6 +110,7 @@ class LlamaConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "llama" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/longformer/configuration_longformer.py b/src/transformers/models/longformer/configuration_longformer.py index 1542c497989f..2935dd4aaaae 100644 --- a/src/transformers/models/longformer/configuration_longformer.py +++ b/src/transformers/models/longformer/configuration_longformer.py @@ -104,6 +104,7 @@ class LongformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "longformer" def __init__( diff --git a/src/transformers/models/longformer/tokenization_longformer.py b/src/transformers/models/longformer/tokenization_longformer.py index 7661634a0009..4f76f16d5180 100644 --- a/src/transformers/models/longformer/tokenization_longformer.py +++ b/src/transformers/models/longformer/tokenization_longformer.py @@ -416,8 +416,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does - not make use of token type ids, therefore a list of zeros is returned. + Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not + make use of token type ids, therefore a list of zeros is returned. 
Args: token_ids_0 (`List[int]`): diff --git a/src/transformers/models/longformer/tokenization_longformer_fast.py b/src/transformers/models/longformer/tokenization_longformer_fast.py index 32c6f6c2deef..fb35a8b67bba 100644 --- a/src/transformers/models/longformer/tokenization_longformer_fast.py +++ b/src/transformers/models/longformer/tokenization_longformer_fast.py @@ -254,8 +254,8 @@ def mask_token(self) -> str: `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. - Longformer tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will - greedily comprise the space before the **. + Longformer tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily + comprise the space before the **. """ if self._mask_token is None: if self.verbose: @@ -309,8 +309,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does - not make use of token type ids, therefore a list of zeros is returned. + Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not + make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): diff --git a/src/transformers/models/longt5/configuration_longt5.py b/src/transformers/models/longt5/configuration_longt5.py index b9a67a970b14..0095af0e246c 100644 --- a/src/transformers/models/longt5/configuration_longt5.py +++ b/src/transformers/models/longt5/configuration_longt5.py @@ -82,6 +82,7 @@ class LongT5Config(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ + model_type = "longt5" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} diff --git a/src/transformers/models/luke/configuration_luke.py b/src/transformers/models/luke/configuration_luke.py index 099a5cf6d9b4..53ab1a352803 100644 --- a/src/transformers/models/luke/configuration_luke.py +++ b/src/transformers/models/luke/configuration_luke.py @@ -97,6 +97,7 @@ class LukeConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "luke" def __init__( diff --git a/src/transformers/models/lxmert/tokenization_lxmert.py b/src/transformers/models/lxmert/tokenization_lxmert.py index 17ff0ff8e7f8..1557be1add68 100644 --- a/src/transformers/models/lxmert/tokenization_lxmert.py +++ b/src/transformers/models/lxmert/tokenization_lxmert.py @@ -254,8 +254,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A Lxmert sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/lxmert/tokenization_lxmert_fast.py b/src/transformers/models/lxmert/tokenization_lxmert_fast.py index 0584f1fe83c3..7d9758a601b4 100644 --- a/src/transformers/models/lxmert/tokenization_lxmert_fast.py +++ b/src/transformers/models/lxmert/tokenization_lxmert_fast.py @@ -161,8 +161,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/m2m_100/configuration_m2m_100.py b/src/transformers/models/m2m_100/configuration_m2m_100.py index 07414c1b822f..1b15658c03d7 100644 --- a/src/transformers/models/m2m_100/configuration_m2m_100.py +++ b/src/transformers/models/m2m_100/configuration_m2m_100.py @@ -99,6 +99,7 @@ class M2M100Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "m2m_100" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/marian/configuration_marian.py b/src/transformers/models/marian/configuration_marian.py index a2fdd41d7442..201788673e6c 100644 --- a/src/transformers/models/marian/configuration_marian.py +++ b/src/transformers/models/marian/configuration_marian.py @@ -102,6 +102,7 @@ class MarianConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "marian" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/markuplm/configuration_markuplm.py b/src/transformers/models/markuplm/configuration_markuplm.py index 1455150598ac..ff0ab9691983 100644 --- a/src/transformers/models/markuplm/configuration_markuplm.py +++ b/src/transformers/models/markuplm/configuration_markuplm.py @@ -97,6 +97,7 @@ class MarkupLMConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "markuplm" def __init__( diff --git a/src/transformers/models/markuplm/feature_extraction_markuplm.py b/src/transformers/models/markuplm/feature_extraction_markuplm.py index b20349fafb0a..73c16bad302b 100644 --- a/src/transformers/models/markuplm/feature_extraction_markuplm.py +++ b/src/transformers/models/markuplm/feature_extraction_markuplm.py @@ -67,7 +67,7 @@ def get_three_from_single(self, html_string): string2xsubs_seq = [] for element in html_code.descendants: - if type(element) == bs4.element.NavigableString: + if isinstance(element, bs4.element.NavigableString): if type(element.parent) != bs4.element.Tag: continue diff --git a/src/transformers/models/markuplm/processing_markuplm.py b/src/transformers/models/markuplm/processing_markuplm.py index 51307d20eb5f..81aaca9e5cce 100644 --- a/src/transformers/models/markuplm/processing_markuplm.py +++ b/src/transformers/models/markuplm/processing_markuplm.py @@ -41,6 +41,7 @@ class MarkupLMProcessor(ProcessorMixin): parse_html (`bool`, 
*optional*, defaults to `True`): Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths. """ + feature_extractor_class = "MarkupLMFeatureExtractor" tokenizer_class = ("MarkupLMTokenizer", "MarkupLMTokenizerFast") parse_html = True diff --git a/src/transformers/models/mask2former/configuration_mask2former.py b/src/transformers/models/mask2former/configuration_mask2former.py index 1fe241473934..a7ca3dbc506a 100644 --- a/src/transformers/models/mask2former/configuration_mask2former.py +++ b/src/transformers/models/mask2former/configuration_mask2former.py @@ -119,6 +119,7 @@ class Mask2FormerConfig(PretrainedConfig): ``` """ + model_type = "mask2former" backbones_supported = ["swin"] attribute_map = {"hidden_size": "hidden_dim"} diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py index c62e3a04c138..08d43bd29604 100644 --- a/src/transformers/models/mask2former/modeling_mask2former.py +++ b/src/transformers/models/mask2former/modeling_mask2former.py @@ -2537,5 +2537,5 @@ def forward( if not return_dict: output = tuple(v for v in output.values() if v is not None) if loss is not None: - output = ((loss)) + output + output = (loss) + output return output diff --git a/src/transformers/models/maskformer/configuration_maskformer.py b/src/transformers/models/maskformer/configuration_maskformer.py index baf907ee53c0..f3d83a0bbf5e 100644 --- a/src/transformers/models/maskformer/configuration_maskformer.py +++ b/src/transformers/models/maskformer/configuration_maskformer.py @@ -94,6 +94,7 @@ class MaskFormerConfig(PretrainedConfig): ``` """ + model_type = "maskformer" attribute_map = {"hidden_size": "mask_feature_size"} backbones_supported = ["resnet", "swin"] diff --git a/src/transformers/models/maskformer/configuration_maskformer_swin.py b/src/transformers/models/maskformer/configuration_maskformer_swin.py index 7c3ac54bd80d..6a4d9c2c0a4c 100644 --- a/src/transformers/models/maskformer/configuration_maskformer_swin.py +++ b/src/transformers/models/maskformer/configuration_maskformer_swin.py @@ -90,6 +90,7 @@ class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "maskformer-swin" attribute_map = { diff --git a/src/transformers/models/mbart/configuration_mbart.py b/src/transformers/models/mbart/configuration_mbart.py index 1a775f57fdfb..176ce52dbfab 100644 --- a/src/transformers/models/mbart/configuration_mbart.py +++ b/src/transformers/models/mbart/configuration_mbart.py @@ -104,6 +104,7 @@ class MBartConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mbart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/mbart/tokenization_mbart.py b/src/transformers/models/mbart/tokenization_mbart.py index 9c0904496982..37f4c849ab9d 100644 --- a/src/transformers/models/mbart/tokenization_mbart.py +++ b/src/transformers/models/mbart/tokenization_mbart.py @@ -45,9 +45,7 @@ "facebook/mbart-large-cc25": 1024, } -# fmt: off -FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] -# fmt: on 
+FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip class MBartTokenizer(PreTrainedTokenizer): diff --git a/src/transformers/models/mbart/tokenization_mbart_fast.py b/src/transformers/models/mbart/tokenization_mbart_fast.py index ed0d0de9c864..8638ab974e2a 100644 --- a/src/transformers/models/mbart/tokenization_mbart_fast.py +++ b/src/transformers/models/mbart/tokenization_mbart_fast.py @@ -55,9 +55,7 @@ "facebook/mbart-large-cc25": 1024, } -# fmt: off -FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] -# fmt: on +FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip class MBartTokenizerFast(PreTrainedTokenizerFast): diff --git a/src/transformers/models/mbart50/tokenization_mbart50.py b/src/transformers/models/mbart50/tokenization_mbart50.py index 39986851b055..5fbeb6786749 100644 --- a/src/transformers/models/mbart50/tokenization_mbart50.py +++ b/src/transformers/models/mbart50/tokenization_mbart50.py @@ -41,9 +41,7 @@ "facebook/mbart-large-50-one-to-many-mmt": 1024, } -# fmt: off -FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] -# fmt: on +FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] # fmt: skip class MBart50Tokenizer(PreTrainedTokenizer): diff --git a/src/transformers/models/mbart50/tokenization_mbart50_fast.py b/src/transformers/models/mbart50/tokenization_mbart50_fast.py index 7bd302ee8c81..701e30d916d9 100644 --- a/src/transformers/models/mbart50/tokenization_mbart50_fast.py +++ b/src/transformers/models/mbart50/tokenization_mbart50_fast.py @@ -51,9 +51,7 @@ "facebook/mbart-large-50-one-to-many-mmt": 1024, } -# fmt: off -FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", 
"sl_SI"] -# fmt: on +FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] # fmt: skip class MBart50TokenizerFast(PreTrainedTokenizerFast): diff --git a/src/transformers/models/mega/configuration_mega.py b/src/transformers/models/mega/configuration_mega.py index cade307c84e5..34f858569cd5 100644 --- a/src/transformers/models/mega/configuration_mega.py +++ b/src/transformers/models/mega/configuration_mega.py @@ -145,6 +145,7 @@ class MegaConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mega" def __init__( diff --git a/src/transformers/models/mega/modeling_mega.py b/src/transformers/models/mega/modeling_mega.py index 45ce5242428f..60628fb5df81 100644 --- a/src/transformers/models/mega/modeling_mega.py +++ b/src/transformers/models/mega/modeling_mega.py @@ -539,9 +539,7 @@ def __init__(self, config: MegaConfig): self.config = config self.activation = ACT2FN[self.config.activation] self.attention_activation = self.config.attention_activation - self.scaling = ( - self.config.shared_representation_size**-0.5 if self.attention_activation == "softmax" else None - ) + self.scaling = self.config.shared_representation_size**-0.5 if self.attention_activation == "softmax" else None self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout) self.hidden_dropout = MegaDropout( diff --git a/src/transformers/models/megatron_bert/configuration_megatron_bert.py b/src/transformers/models/megatron_bert/configuration_megatron_bert.py index db9b67090ac7..874aaa331d7e 100644 --- a/src/transformers/models/megatron_bert/configuration_megatron_bert.py +++ b/src/transformers/models/megatron_bert/configuration_megatron_bert.py @@ -90,6 +90,7 @@ class MegatronBertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "megatron-bert" def __init__( diff --git a/src/transformers/models/mgp_str/configuration_mgp_str.py b/src/transformers/models/mgp_str/configuration_mgp_str.py index b553c6a0ff68..4644b4f0cc17 100644 --- a/src/transformers/models/mgp_str/configuration_mgp_str.py +++ b/src/transformers/models/mgp_str/configuration_mgp_str.py @@ -89,6 +89,7 @@ class MgpstrConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mgp-str" def __init__( diff --git a/src/transformers/models/mgp_str/processing_mgp_str.py b/src/transformers/models/mgp_str/processing_mgp_str.py index 6e18e2dd4855..71422e844d0f 100644 --- a/src/transformers/models/mgp_str/processing_mgp_str.py +++ b/src/transformers/models/mgp_str/processing_mgp_str.py @@ -49,6 +49,7 @@ class MgpstrProcessor(ProcessorMixin): tokenizer ([`MgpstrTokenizer`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "char_tokenizer"] image_processor_class = "ViTImageProcessor" char_tokenizer_class = "MgpstrTokenizer" diff --git a/src/transformers/models/mobilebert/configuration_mobilebert.py b/src/transformers/models/mobilebert/configuration_mobilebert.py index afe6c3b3d927..b14d25ea9ed5 100644 --- a/src/transformers/models/mobilebert/configuration_mobilebert.py +++ b/src/transformers/models/mobilebert/configuration_mobilebert.py @@ -108,6 +108,7 @@ class MobileBertConfig(PretrainedConfig): Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints. """ + pretrained_config_archive_map = MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP model_type = "mobilebert" diff --git a/src/transformers/models/mobilebert/tokenization_mobilebert.py b/src/transformers/models/mobilebert/tokenization_mobilebert.py index 398f054a9926..f27873e92fcf 100644 --- a/src/transformers/models/mobilebert/tokenization_mobilebert.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert.py @@ -252,8 +252,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py index f8d62158b22c..2b137d2ed60a 100644 --- a/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py @@ -159,8 +159,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A MobileBERT sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py index 2ee20cd2bafa..59f025c621d2 100644 --- a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py @@ -78,6 +78,7 @@ class MobileNetV1Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mobilenet_v1" def __init__( diff --git a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py index ab4eef23cfb4..161f0e6d8fff 100644 --- a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py @@ -96,6 +96,7 @@ class MobileNetV2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mobilenet_v2" def __init__( diff --git a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py index 8f19869e47f9..99791efe04c6 100644 --- a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py @@ -312,8 +312,7 @@ def preprocess( # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileNetV2 def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): """ - Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports - PyTorch. + Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MobileNetV2ForSemanticSegmentation`]): diff --git a/src/transformers/models/mobilevit/configuration_mobilevit.py b/src/transformers/models/mobilevit/configuration_mobilevit.py index a4aafe997eb2..48811c28ba0f 100644 --- a/src/transformers/models/mobilevit/configuration_mobilevit.py +++ b/src/transformers/models/mobilevit/configuration_mobilevit.py @@ -111,6 +111,7 @@ class MobileViTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mobilevit" def __init__( diff --git a/src/transformers/models/mobilevit/image_processing_mobilevit.py b/src/transformers/models/mobilevit/image_processing_mobilevit.py index ecd8cac9e950..79eaeac9007b 100644 --- a/src/transformers/models/mobilevit/image_processing_mobilevit.py +++ b/src/transformers/models/mobilevit/image_processing_mobilevit.py @@ -311,8 +311,7 @@ def preprocess( # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileViT def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): """ - Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports - PyTorch. + Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. 
Args: outputs ([`MobileViTForSemanticSegmentation`]): diff --git a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py index 0181d17c3517..c3bc44f38e04 100644 --- a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py +++ b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py @@ -97,6 +97,7 @@ class MobileViTV2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mobilevitv2" def __init__( diff --git a/src/transformers/models/mpnet/configuration_mpnet.py b/src/transformers/models/mpnet/configuration_mpnet.py index 5a11a3905038..fe492a963e5a 100644 --- a/src/transformers/models/mpnet/configuration_mpnet.py +++ b/src/transformers/models/mpnet/configuration_mpnet.py @@ -79,6 +79,7 @@ class MPNetConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mpnet" def __init__( diff --git a/src/transformers/models/mpt/modeling_mpt.py b/src/transformers/models/mpt/modeling_mpt.py index 2477b9065922..e2d12f0e777e 100644 --- a/src/transformers/models/mpt/modeling_mpt.py +++ b/src/transformers/models/mpt/modeling_mpt.py @@ -51,7 +51,7 @@ "mosaicml/mpt-7b-8k-chat", "mosaicml/mpt-30b", "mosaicml/mpt-30b-instruct", - "mosaicml/mpt-30b-chat" + "mosaicml/mpt-30b-chat", # See all MPT models at https://huggingface.co/models?filter=mpt ] diff --git a/src/transformers/models/mra/configuration_mra.py b/src/transformers/models/mra/configuration_mra.py index bc6aeebc907e..17b0f21ff4cc 100644 --- a/src/transformers/models/mra/configuration_mra.py +++ b/src/transformers/models/mra/configuration_mra.py @@ -90,6 +90,7 @@ class MraConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mra" def __init__( diff --git a/src/transformers/models/mt5/configuration_mt5.py b/src/transformers/models/mt5/configuration_mt5.py index 93b7c42d3457..aab93711dfc6 100644 --- a/src/transformers/models/mt5/configuration_mt5.py +++ b/src/transformers/models/mt5/configuration_mt5.py @@ -68,6 +68,7 @@ class MT5Config(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
""" + model_type = "mt5" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/mt5/modeling_flax_mt5.py b/src/transformers/models/mt5/modeling_flax_mt5.py index 0046e02ca730..98406439dfbf 100644 --- a/src/transformers/models/mt5/modeling_flax_mt5.py +++ b/src/transformers/models/mt5/modeling_flax_mt5.py @@ -61,6 +61,7 @@ class FlaxMT5Model(FlaxT5Model): >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=decoder_input_ids) >>> hidden_states = outputs.last_hidden_state ```""" + model_type = "mt5" config_class = MT5Config @@ -87,6 +88,7 @@ class FlaxMT5EncoderModel(FlaxT5EncoderModel): >>> outputs = model(input_ids=inputs["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" + model_type = "mt5" config_class = MT5Config diff --git a/src/transformers/models/mt5/modeling_mt5.py b/src/transformers/models/mt5/modeling_mt5.py index 799b4c54bffb..e4a217196a62 100644 --- a/src/transformers/models/mt5/modeling_mt5.py +++ b/src/transformers/models/mt5/modeling_mt5.py @@ -1331,6 +1331,7 @@ class MT5Model(MT5PreTrainedModel): >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" + model_type = "mt5" config_class = MT5Config _keys_to_ignore_on_load_missing = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] diff --git a/src/transformers/models/mt5/modeling_tf_mt5.py b/src/transformers/models/mt5/modeling_tf_mt5.py index ba7bd33c3447..f8350eb19798 100644 --- a/src/transformers/models/mt5/modeling_tf_mt5.py +++ b/src/transformers/models/mt5/modeling_tf_mt5.py @@ -44,6 +44,7 @@ class TFMT5Model(TFT5Model): >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" + model_type = "mt5" config_class = MT5Config diff --git a/src/transformers/models/musicgen/configuration_musicgen.py b/src/transformers/models/musicgen/configuration_musicgen.py index e954181242ed..c0f56626409b 100644 --- a/src/transformers/models/musicgen/configuration_musicgen.py +++ b/src/transformers/models/musicgen/configuration_musicgen.py @@ -79,6 +79,7 @@ class MusicgenDecoderConfig(PretrainedConfig): Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate audio stream for the left/right output channels. Mono models generate a single audio stream output. """ + model_type = "musicgen_decoder" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/musicgen/processing_musicgen.py b/src/transformers/models/musicgen/processing_musicgen.py index ed8d1277f2f7..847c542a6016 100644 --- a/src/transformers/models/musicgen/processing_musicgen.py +++ b/src/transformers/models/musicgen/processing_musicgen.py @@ -37,6 +37,7 @@ class MusicgenProcessor(ProcessorMixin): tokenizer (`T5Tokenizer`): An instance of [`T5Tokenizer`]. The tokenizer is a required input. 
""" + feature_extractor_class = "EncodecFeatureExtractor" tokenizer_class = ("T5Tokenizer", "T5TokenizerFast") diff --git a/src/transformers/models/mvp/configuration_mvp.py b/src/transformers/models/mvp/configuration_mvp.py index 0880985b7930..9f60c79efa6d 100644 --- a/src/transformers/models/mvp/configuration_mvp.py +++ b/src/transformers/models/mvp/configuration_mvp.py @@ -104,6 +104,7 @@ class MvpConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "mvp" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/mvp/tokenization_mvp_fast.py b/src/transformers/models/mvp/tokenization_mvp_fast.py index afe2a0a89e2a..a6ff13c08989 100644 --- a/src/transformers/models/mvp/tokenization_mvp_fast.py +++ b/src/transformers/models/mvp/tokenization_mvp_fast.py @@ -130,6 +130,7 @@ class MvpTokenizerFast(PreTrainedTokenizerFast): trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES diff --git a/src/transformers/models/nat/configuration_nat.py b/src/transformers/models/nat/configuration_nat.py index e24ad679995f..2afaa0646686 100644 --- a/src/transformers/models/nat/configuration_nat.py +++ b/src/transformers/models/nat/configuration_nat.py @@ -92,6 +92,7 @@ class NatConfig(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "nat" attribute_map = { diff --git a/src/transformers/models/nezha/configuration_nezha.py b/src/transformers/models/nezha/configuration_nezha.py index f41a9b2bf895..e47f6e721f61 100644 --- a/src/transformers/models/nezha/configuration_nezha.py +++ b/src/transformers/models/nezha/configuration_nezha.py @@ -63,6 +63,7 @@ class NezhaConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP model_type = "nezha" diff --git a/src/transformers/models/nllb/tokenization_nllb.py b/src/transformers/models/nllb/tokenization_nllb.py index f37eb69cc9e7..7daf729c132b 100644 --- a/src/transformers/models/nllb/tokenization_nllb.py +++ b/src/transformers/models/nllb/tokenization_nllb.py @@ -41,9 +41,7 @@ "facebook/nllb-200-distilled-600M": 1024, } -# fmt: off -FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 
'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] -# fmt: on +FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 
'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip class NllbTokenizer(PreTrainedTokenizer): diff --git a/src/transformers/models/nllb/tokenization_nllb_fast.py b/src/transformers/models/nllb/tokenization_nllb_fast.py index 2b4b09da8300..7240133e1d91 100644 --- a/src/transformers/models/nllb/tokenization_nllb_fast.py +++ b/src/transformers/models/nllb/tokenization_nllb_fast.py @@ -53,9 +53,7 @@ "facebook/nllb-200-distilled-600M": 1024, } -# fmt: off -FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] -# fmt: on +FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 
'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip class NllbTokenizerFast(PreTrainedTokenizerFast): diff --git a/src/transformers/models/nllb_moe/configuration_nllb_moe.py b/src/transformers/models/nllb_moe/configuration_nllb_moe.py index f2701e3781b3..435d7caa17c6 100644 --- a/src/transformers/models/nllb_moe/configuration_nllb_moe.py +++ b/src/transformers/models/nllb_moe/configuration_nllb_moe.py @@ -125,6 +125,7 @@ class NllbMoeConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "nllb-moe" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/nougat/processing_nougat.py b/src/transformers/models/nougat/processing_nougat.py index b63639e2dd1f..8f94c6718ba6 100644 --- a/src/transformers/models/nougat/processing_nougat.py +++ b/src/transformers/models/nougat/processing_nougat.py @@ -37,6 +37,7 @@ class NougatProcessor(ProcessorMixin): tokenizer ([`NougatTokenizerFast`]): An instance of [`NougatTokenizerFast`]. The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/nougat/tokenization_nougat_fast.py b/src/transformers/models/nougat/tokenization_nougat_fast.py index 9d95940875e1..d02aec757521 100644 --- a/src/transformers/models/nougat/tokenization_nougat_fast.py +++ b/src/transformers/models/nougat/tokenization_nougat_fast.py @@ -251,7 +251,7 @@ def remove_numbers(lines): def _clean(s): return re.sub(r"(?:[\d_]|\*\*)", "", s).strip() - if type(lines) is str: + if isinstance(lines, str): return _clean(lines) out = [] for l in lines: diff --git a/src/transformers/models/nystromformer/configuration_nystromformer.py b/src/transformers/models/nystromformer/configuration_nystromformer.py index 98b3e511ac0e..eeba112ebb41 100644 --- a/src/transformers/models/nystromformer/configuration_nystromformer.py +++ b/src/transformers/models/nystromformer/configuration_nystromformer.py @@ -89,6 +89,7 @@ class NystromformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "nystromformer" def __init__( diff --git a/src/transformers/models/oneformer/configuration_oneformer.py b/src/transformers/models/oneformer/configuration_oneformer.py index 06c75b92b1c0..672f1dcba8da 100644 --- a/src/transformers/models/oneformer/configuration_oneformer.py +++ b/src/transformers/models/oneformer/configuration_oneformer.py @@ -137,6 +137,7 @@ class OneFormerConfig(PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "oneformer" attribute_map = {"hidden_size": "hidden_dim"} diff --git a/src/transformers/models/oneformer/modeling_oneformer.py b/src/transformers/models/oneformer/modeling_oneformer.py index 33095e53e2f9..d0c0d405502e 100644 --- a/src/transformers/models/oneformer/modeling_oneformer.py +++ b/src/transformers/models/oneformer/modeling_oneformer.py @@ -3247,5 +3247,5 @@ def forward( if not return_dict: output = tuple(v for v in output.values()) if loss is not None: - output = ((loss)) + output + output = (loss) + output return output diff --git a/src/transformers/models/oneformer/processing_oneformer.py b/src/transformers/models/oneformer/processing_oneformer.py index c4479110ae77..dc20f48f68b0 100644 --- a/src/transformers/models/oneformer/processing_oneformer.py +++ b/src/transformers/models/oneformer/processing_oneformer.py @@ -42,6 +42,7 @@ class OneFormerProcessor(ProcessorMixin): task_seq_len (`int`, *optional*, defaults to 77): Sequence length for input task token. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "OneFormerImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") diff --git a/src/transformers/models/opt/configuration_opt.py b/src/transformers/models/opt/configuration_opt.py index d2b7a4347ea4..2918ee269aeb 100644 --- a/src/transformers/models/opt/configuration_opt.py +++ b/src/transformers/models/opt/configuration_opt.py @@ -93,6 +93,7 @@ class OPTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "opt" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/owlv2/configuration_owlv2.py b/src/transformers/models/owlv2/configuration_owlv2.py index 4b96cf1f142c..fd15c0e7972f 100644 --- a/src/transformers/models/owlv2/configuration_owlv2.py +++ b/src/transformers/models/owlv2/configuration_owlv2.py @@ -46,8 +46,8 @@ class Owlv2TextConfig(PretrainedConfig): Args: vocab_size (`int`, *optional*, defaults to 49408): - Vocabulary size of the OWLv2 text model. Defines the number of different tokens that can be represented by - the `inputs_ids` passed when calling [`Owlv2TextModel`]. + Vocabulary size of the OWLv2 text model. Defines the number of different tokens that can be represented + by the `inputs_ids` passed when calling [`Owlv2TextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): @@ -92,6 +92,7 @@ class Owlv2TextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "owlv2_text_model" def __init__( @@ -148,8 +149,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], # Copied from transformers.models.owlvit.configuration_owlvit.OwlViTVisionConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2, 32->16 class Owlv2VisionConfig(PretrainedConfig): r""" - This is the configuration class to store the configuration of an [`Owlv2VisionModel`]. It is used to instantiate an - OWLv2 image encoder according to the specified arguments, defining the model architecture. Instantiating a + This is the configuration class to store the configuration of an [`Owlv2VisionModel`]. It is used to instantiate + an OWLv2 image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. @@ -255,8 +256,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], class Owlv2Config(PretrainedConfig): r""" [`Owlv2Config`] is the configuration class to store the configuration of an [`Owlv2Model`]. It is used to - instantiate an OWLv2 model according to the specified arguments, defining the text model and vision model configs. - Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 + instantiate an OWLv2 model according to the specified arguments, defining the text model and vision model + configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. 
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the @@ -270,7 +271,8 @@ class Owlv2Config(PretrainedConfig): projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): - The inital value of the *logit_scale* parameter. Default is used as per the original OWLv2 implementation. + The inital value of the *logit_scale* parameter. Default is used as per the original OWLv2 + implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): @@ -323,8 +325,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], @classmethod def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs): r""" - Instantiate a [`Owlv2Config`] (or a derived class) from owlv2 text model configuration and owlv2 vision model - configuration. + Instantiate a [`Owlv2Config`] (or a derived class) from owlv2 text model configuration and owlv2 vision + model configuration. Returns: [`Owlv2Config`]: An instance of a configuration object diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index 0ee5372afbe6..9c7cede8fbf9 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -245,11 +245,11 @@ class Owlv2ImageGuidedObjectDetectionOutput(ModelOutput): (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`): - Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image - embeddings for each patch. + Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes + image embeddings for each patch. query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`): - Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image - embeddings for each patch. + Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes + image embeddings for each patch. class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. 
@@ -548,9 +548,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, Owlv2MLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/owlv2/processing_owlv2.py b/src/transformers/models/owlv2/processing_owlv2.py index 23fd14835b95..77493f6cb2de 100644 --- a/src/transformers/models/owlv2/processing_owlv2.py +++ b/src/transformers/models/owlv2/processing_owlv2.py @@ -37,6 +37,7 @@ class Owlv2Processor(ProcessorMixin): tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]): The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "Owlv2ImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") diff --git a/src/transformers/models/owlvit/configuration_owlvit.py b/src/transformers/models/owlvit/configuration_owlvit.py index f9d3914ed8b5..254619cccd15 100644 --- a/src/transformers/models/owlvit/configuration_owlvit.py +++ b/src/transformers/models/owlvit/configuration_owlvit.py @@ -96,6 +96,7 @@ class OwlViTTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "owlvit_text_model" def __init__( diff --git a/src/transformers/models/owlvit/image_processing_owlvit.py b/src/transformers/models/owlvit/image_processing_owlvit.py index 4242356cabd4..d190bc1d636e 100644 --- a/src/transformers/models/owlvit/image_processing_owlvit.py +++ b/src/transformers/models/owlvit/image_processing_owlvit.py @@ -124,6 +124,7 @@ class OwlViTImageProcessor(BaseImageProcessor): image_std (`List[int]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): The sequence of standard deviations for each channel, to be used when normalizing images. """ + model_input_names = ["pixel_values"] def __init__( diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 77446f6512af..8c502c410d3b 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -540,9 +540,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, OwlViTMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/owlvit/processing_owlvit.py b/src/transformers/models/owlvit/processing_owlvit.py index 088693a057f3..670f7206fd87 100644 --- a/src/transformers/models/owlvit/processing_owlvit.py +++ b/src/transformers/models/owlvit/processing_owlvit.py @@ -38,6 +38,7 @@ class OwlViTProcessor(ProcessorMixin): tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "OwlViTImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") diff --git a/src/transformers/models/pegasus/configuration_pegasus.py b/src/transformers/models/pegasus/configuration_pegasus.py index fd7de9a1a490..51b506c4e039 100644 --- a/src/transformers/models/pegasus/configuration_pegasus.py +++ b/src/transformers/models/pegasus/configuration_pegasus.py @@ -97,6 +97,7 @@ class PegasusConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "pegasus" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/pegasus/tokenization_pegasus.py b/src/transformers/models/pegasus/tokenization_pegasus.py index 9e2fd0d979a0..e1c8f6933ffc 100644 --- a/src/transformers/models/pegasus/tokenization_pegasus.py +++ b/src/transformers/models/pegasus/tokenization_pegasus.py @@ -96,6 +96,7 @@ class PegasusTokenizer(PreTrainedTokenizer): - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES diff --git a/src/transformers/models/pegasus/tokenization_pegasus_fast.py b/src/transformers/models/pegasus/tokenization_pegasus_fast.py index aadd3c32271d..3faeccd2500c 100644 --- a/src/transformers/models/pegasus/tokenization_pegasus_fast.py +++ b/src/transformers/models/pegasus/tokenization_pegasus_fast.py @@ -91,6 +91,7 @@ class PegasusTokenizerFast(PreTrainedTokenizerFast): tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66) that uses the tokens 2 - 104 only for pretraining """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES diff --git a/src/transformers/models/pegasus_x/configuration_pegasus_x.py b/src/transformers/models/pegasus_x/configuration_pegasus_x.py index f48e19bdcbca..be092c018a42 100644 --- a/src/transformers/models/pegasus_x/configuration_pegasus_x.py +++ b/src/transformers/models/pegasus_x/configuration_pegasus_x.py @@ -103,6 +103,7 @@ class PegasusXConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "pegasus_x" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/perceiver/configuration_perceiver.py b/src/transformers/models/perceiver/configuration_perceiver.py index 72b13a11e113..d741b287e5db 100644 --- a/src/transformers/models/perceiver/configuration_perceiver.py +++ b/src/transformers/models/perceiver/configuration_perceiver.py @@ -117,6 +117,7 @@ class PerceiverConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "perceiver" def __init__( diff --git a/src/transformers/models/persimmon/configuration_persimmon.py b/src/transformers/models/persimmon/configuration_persimmon.py index c48070a9da0c..6997e159d522 100644 --- a/src/transformers/models/persimmon/configuration_persimmon.py +++ b/src/transformers/models/persimmon/configuration_persimmon.py @@ 
-88,6 +88,7 @@ class PersimmonConfig(PretrainedConfig): >>> # Initializing a Persimmon persimmon-7b style configuration >>> configuration = PersimmonConfig() ```""" + model_type = "persimmon" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/phi/configuration_phi.py b/src/transformers/models/phi/configuration_phi.py index aec31054ce1d..5025ef798ff9 100644 --- a/src/transformers/models/phi/configuration_phi.py +++ b/src/transformers/models/phi/configuration_phi.py @@ -103,6 +103,7 @@ class PhiConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "phi" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/pix2struct/configuration_pix2struct.py b/src/transformers/models/pix2struct/configuration_pix2struct.py index c23d2cdcfe63..d0b81d105bd8 100644 --- a/src/transformers/models/pix2struct/configuration_pix2struct.py +++ b/src/transformers/models/pix2struct/configuration_pix2struct.py @@ -90,6 +90,7 @@ class Pix2StructTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "pix2struct_text_model" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/pix2struct/processing_pix2struct.py b/src/transformers/models/pix2struct/processing_pix2struct.py index bc54e14604f8..269fa8c62fb2 100644 --- a/src/transformers/models/pix2struct/processing_pix2struct.py +++ b/src/transformers/models/pix2struct/processing_pix2struct.py @@ -37,6 +37,7 @@ class Pix2StructProcessor(ProcessorMixin): tokenizer (Union[`T5TokenizerFast`, `T5Tokenizer`]): An instance of ['T5TokenizerFast`] or ['T5Tokenizer`]. The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "Pix2StructImageProcessor" tokenizer_class = ("T5Tokenizer", "T5TokenizerFast") diff --git a/src/transformers/models/plbart/configuration_plbart.py b/src/transformers/models/plbart/configuration_plbart.py index 25f4c31c5778..836cf5900c8e 100644 --- a/src/transformers/models/plbart/configuration_plbart.py +++ b/src/transformers/models/plbart/configuration_plbart.py @@ -102,6 +102,7 @@ class PLBartConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "plbart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/poolformer/configuration_poolformer.py b/src/transformers/models/poolformer/configuration_poolformer.py index 7444de8ec2b2..d859cefc90ef 100644 --- a/src/transformers/models/poolformer/configuration_poolformer.py +++ b/src/transformers/models/poolformer/configuration_poolformer.py @@ -91,6 +91,7 @@ class PoolFormerConfig(PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "poolformer" def __init__( diff --git a/src/transformers/models/pop2piano/feature_extraction_pop2piano.py b/src/transformers/models/pop2piano/feature_extraction_pop2piano.py index 0568fb74efba..9bf5326c0b6e 100644 --- a/src/transformers/models/pop2piano/feature_extraction_pop2piano.py +++ b/src/transformers/models/pop2piano/feature_extraction_pop2piano.py @@ -76,6 +76,7 @@ class Pop2PianoFeatureExtractor(SequenceFeatureExtractor): num_bars (`int`, *optional*, defaults to 2): Determines interval between each sequence. 
""" + model_input_names = ["input_features", "beatsteps", "extrapolated_beatstep"] def __init__( diff --git a/src/transformers/models/pop2piano/processing_pop2piano.py b/src/transformers/models/pop2piano/processing_pop2piano.py index 5ea579111ddb..639d2e7aea4b 100644 --- a/src/transformers/models/pop2piano/processing_pop2piano.py +++ b/src/transformers/models/pop2piano/processing_pop2piano.py @@ -39,6 +39,7 @@ class Pop2PianoProcessor(ProcessorMixin): tokenizer (`Pop2PianoTokenizer`): An instance of ['Pop2PianoTokenizer`]. The tokenizer is a required input. """ + attributes = ["feature_extractor", "tokenizer"] feature_extractor_class = "Pop2PianoFeatureExtractor" tokenizer_class = "Pop2PianoTokenizer" diff --git a/src/transformers/models/prophetnet/configuration_prophetnet.py b/src/transformers/models/prophetnet/configuration_prophetnet.py index 35988eaa1321..4072709af961 100644 --- a/src/transformers/models/prophetnet/configuration_prophetnet.py +++ b/src/transformers/models/prophetnet/configuration_prophetnet.py @@ -98,6 +98,7 @@ class ProphetNetConfig(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ + model_type = "prophetnet" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/pvt/configuration_pvt.py b/src/transformers/models/pvt/configuration_pvt.py index 12fb3a5b9a94..ac7d5add7f59 100644 --- a/src/transformers/models/pvt/configuration_pvt.py +++ b/src/transformers/models/pvt/configuration_pvt.py @@ -97,6 +97,7 @@ class PvtConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "pvt" def __init__( diff --git a/src/transformers/models/qdqbert/configuration_qdqbert.py b/src/transformers/models/qdqbert/configuration_qdqbert.py index c4f8c1559e61..eaa8af4af28f 100644 --- a/src/transformers/models/qdqbert/configuration_qdqbert.py +++ b/src/transformers/models/qdqbert/configuration_qdqbert.py @@ -85,6 +85,7 @@ class QDQBertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "qdqbert" def __init__( diff --git a/src/transformers/models/rag/modeling_rag.py b/src/transformers/models/rag/modeling_rag.py index 7048168a0642..df7c68cef076 100644 --- a/src/transformers/models/rag/modeling_rag.py +++ b/src/transformers/models/rag/modeling_rag.py @@ -229,6 +229,7 @@ class RagPreTrainedModel(PreTrainedModel): generator, the encoder and generator are trainable while the retriever is just an indexed dataset. """ + config_class = RagConfig base_model_prefix = "rag" diff --git a/src/transformers/models/rag/modeling_tf_rag.py b/src/transformers/models/rag/modeling_tf_rag.py index a58bdb6e7538..d1151bcd5a64 100644 --- a/src/transformers/models/rag/modeling_tf_rag.py +++ b/src/transformers/models/rag/modeling_tf_rag.py @@ -224,6 +224,7 @@ class TFRagPreTrainedModel(TFPreTrainedModel): generator, the encoder and generator are trainable while the retriever is just an indexed dataset. 
""" + config_class = RagConfig base_model_prefix = "rag" _keys_to_ignore_on_load_missing = [r"position_ids"] diff --git a/src/transformers/models/realm/configuration_realm.py b/src/transformers/models/realm/configuration_realm.py index bef2baf05f20..d70033021492 100644 --- a/src/transformers/models/realm/configuration_realm.py +++ b/src/transformers/models/realm/configuration_realm.py @@ -125,6 +125,7 @@ class RealmConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "realm" def __init__( diff --git a/src/transformers/models/reformer/configuration_reformer.py b/src/transformers/models/reformer/configuration_reformer.py index af712ced1eed..e01f25a5fbfe 100755 --- a/src/transformers/models/reformer/configuration_reformer.py +++ b/src/transformers/models/reformer/configuration_reformer.py @@ -158,6 +158,7 @@ class ReformerConfig(PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "reformer" keys_to_ignore_at_inference = ["past_buckets_states"] attribute_map = {} diff --git a/src/transformers/models/regnet/configuration_regnet.py b/src/transformers/models/regnet/configuration_regnet.py index 201354d1553c..4969e426bcb3 100644 --- a/src/transformers/models/regnet/configuration_regnet.py +++ b/src/transformers/models/regnet/configuration_regnet.py @@ -66,6 +66,7 @@ class RegNetConfig(PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "regnet" layer_types = ["x", "y"] diff --git a/src/transformers/models/regnet/convert_regnet_to_pytorch.py b/src/transformers/models/regnet/convert_regnet_to_pytorch.py index 14d01ae44525..d29077c1a729 100644 --- a/src/transformers/models/regnet/convert_regnet_to_pytorch.py +++ b/src/transformers/models/regnet/convert_regnet_to_pytorch.py @@ -192,7 +192,7 @@ def convert_weight_and_push( ) from_output = from_model(x) - from_output = from_output[-1] if type(from_output) is list else from_output + from_output = from_output[-1] if isinstance(from_output, list) else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: diff --git a/src/transformers/models/rembert/configuration_rembert.py b/src/transformers/models/rembert/configuration_rembert.py index 792a6dbcfadf..9dfa8cc6b245 100644 --- a/src/transformers/models/rembert/configuration_rembert.py +++ b/src/transformers/models/rembert/configuration_rembert.py @@ -96,6 +96,7 @@ class RemBertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "rembert" def __init__( diff --git a/src/transformers/models/resnet/configuration_resnet.py b/src/transformers/models/resnet/configuration_resnet.py index da465f5d039a..69117ff07abf 100644 --- a/src/transformers/models/resnet/configuration_resnet.py +++ b/src/transformers/models/resnet/configuration_resnet.py @@ -84,6 +84,7 @@ class ResNetConfig(BackboneConfigMixin, PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "resnet" layer_types = ["basic", "bottleneck"] diff --git a/src/transformers/models/roberta/configuration_roberta.py b/src/transformers/models/roberta/configuration_roberta.py index f82033f4588f..86334f0a224e 100644 --- a/src/transformers/models/roberta/configuration_roberta.py +++ b/src/transformers/models/roberta/configuration_roberta.py @@ -101,6 +101,7 @@ class RobertaConfig(PretrainedConfig): >>> # Accessing the model configuration 
>>> configuration = model.config ```""" + model_type = "roberta" def __init__( diff --git a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py index fca6763f274e..1957a30f41b2 100644 --- a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py @@ -34,10 +34,9 @@ # Copied from transformers.models.roberta.configuration_roberta.RobertaConfig with roberta-base->andreasmadsen/efficient_mlm_m0.40,RoBERTa->RoBERTa-PreLayerNorm,Roberta->RobertaPreLayerNorm,roberta->roberta-prelayernorm class RobertaPreLayerNormConfig(PretrainedConfig): r""" - This is the configuration class to store the configuration of a [`RobertaPreLayerNormModel`] or a - [`TFRobertaPreLayerNormModel`]. It is used to instantiate a RoBERTa-PreLayerNorm model according to the specified - arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar - configuration to that of the RoBERTa-PreLayerNorm + This is the configuration class to store the configuration of a [`RobertaPreLayerNormModel`] or a [`TFRobertaPreLayerNormModel`]. It is + used to instantiate a RoBERTa-PreLayerNorm model according to the specified arguments, defining the model architecture. + Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa-PreLayerNorm [andreasmadsen/efficient_mlm_m0.40](https://huggingface.co/andreasmadsen/efficient_mlm_m0.40) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the @@ -46,9 +45,8 @@ class RobertaPreLayerNormConfig(PretrainedConfig): Args: vocab_size (`int`, *optional*, defaults to 50265): - Vocabulary size of the RoBERTa-PreLayerNorm model. Defines the number of different tokens that can be - represented by the `inputs_ids` passed when calling [`RobertaPreLayerNormModel`] or - [`TFRobertaPreLayerNormModel`]. + Vocabulary size of the RoBERTa-PreLayerNorm model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): @@ -68,8 +66,7 @@ class RobertaPreLayerNormConfig(PretrainedConfig): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): - The vocabulary size of the `token_type_ids` passed when calling [`RobertaPreLayerNormModel`] or - [`TFRobertaPreLayerNormModel`]. + The vocabulary size of the `token_type_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
layer_norm_eps (`float`, *optional*, defaults to 1e-12): @@ -102,6 +99,7 @@ class RobertaPreLayerNormConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "roberta-prelayernorm" def __init__( diff --git a/src/transformers/models/roc_bert/configuration_roc_bert.py b/src/transformers/models/roc_bert/configuration_roc_bert.py index 2f0a0dd0e0f7..23a9e01be77b 100644 --- a/src/transformers/models/roc_bert/configuration_roc_bert.py +++ b/src/transformers/models/roc_bert/configuration_roc_bert.py @@ -109,6 +109,7 @@ class RoCBertConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "roc_bert" def __init__( diff --git a/src/transformers/models/roc_bert/modeling_roc_bert.py b/src/transformers/models/roc_bert/modeling_roc_bert.py index ff2900774faf..f3de92fed389 100644 --- a/src/transformers/models/roc_bert/modeling_roc_bert.py +++ b/src/transformers/models/roc_bert/modeling_roc_bert.py @@ -56,9 +56,7 @@ # Token Classification output _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "ArthurZ/dummy-rocbert-ner" -# fmt: off -_TOKEN_CLASS_EXPECTED_OUTPUT = ["S-EVENT", "S-FAC", "I-ORDINAL", "I-ORDINAL", "E-ORG", "E-LANGUAGE", "E-ORG", "E-ORG", "E-ORG", "E-ORG", "I-EVENT", "S-TIME", "S-TIME", "E-LANGUAGE", "S-TIME", "E-DATE", "I-ORDINAL", "E-QUANTITY", "E-LANGUAGE", "S-TIME", "B-ORDINAL", "S-PRODUCT", "E-LANGUAGE", "E-LANGUAGE", "E-ORG", "E-LOC", "S-TIME", "I-ORDINAL", "S-FAC", "O", "S-GPE", "I-EVENT", "S-GPE", "E-LANGUAGE", "E-ORG", "S-EVENT", "S-FAC", "S-FAC", "S-FAC", "E-ORG", "S-FAC", "E-ORG", "S-GPE"] -# fmt: on +_TOKEN_CLASS_EXPECTED_OUTPUT = ["S-EVENT", "S-FAC", "I-ORDINAL", "I-ORDINAL", "E-ORG", "E-LANGUAGE", "E-ORG", "E-ORG", "E-ORG", "E-ORG", "I-EVENT", "S-TIME", "S-TIME", "E-LANGUAGE", "S-TIME", "E-DATE", "I-ORDINAL", "E-QUANTITY", "E-LANGUAGE", "S-TIME", "B-ORDINAL", "S-PRODUCT", "E-LANGUAGE", "E-LANGUAGE", "E-ORG", "E-LOC", "S-TIME", "I-ORDINAL", "S-FAC", "O", "S-GPE", "I-EVENT", "S-GPE", "E-LANGUAGE", "E-ORG", "S-EVENT", "S-FAC", "S-FAC", "S-FAC", "E-ORG", "S-FAC", "E-ORG", "S-GPE"] # fmt: skip _TOKEN_CLASS_EXPECTED_LOSS = 3.62 # SequenceClassification docstring diff --git a/src/transformers/models/roformer/configuration_roformer.py b/src/transformers/models/roformer/configuration_roformer.py index a5af26aa5d61..5d8f9919b10c 100644 --- a/src/transformers/models/roformer/configuration_roformer.py +++ b/src/transformers/models/roformer/configuration_roformer.py @@ -106,6 +106,7 @@ class RoFormerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "roformer" def __init__( diff --git a/src/transformers/models/roformer/modeling_flax_roformer.py b/src/transformers/models/roformer/modeling_flax_roformer.py index cb7c2e4bb313..10a9bdece68c 100644 --- a/src/transformers/models/roformer/modeling_flax_roformer.py +++ b/src/transformers/models/roformer/modeling_flax_roformer.py @@ -49,7 +49,7 @@ "junnyu/roformer_chinese_char_small", "junnyu/roformer_chinese_char_base", "junnyu/roformer_small_discriminator", - "junnyu/roformer_small_generator" + "junnyu/roformer_small_generator", # See all RoFormer models at https://huggingface.co/models?filter=roformer ] diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index 95dc0c993941..7aa9a0b12d7d 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ 
b/src/transformers/models/roformer/modeling_roformer.py @@ -58,7 +58,7 @@ "junnyu/roformer_chinese_char_small", "junnyu/roformer_chinese_char_base", "junnyu/roformer_small_discriminator", - "junnyu/roformer_small_generator" + "junnyu/roformer_small_generator", # See all RoFormer models at https://huggingface.co/models?filter=roformer ] diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index f6067f9237f4..cea286c828b4 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -69,7 +69,7 @@ "junnyu/roformer_chinese_char_small", "junnyu/roformer_chinese_char_base", "junnyu/roformer_small_discriminator", - "junnyu/roformer_small_generator" + "junnyu/roformer_small_generator", # See all RoFormer models at https://huggingface.co/models?filter=roformer ] diff --git a/src/transformers/models/roformer/tokenization_roformer.py b/src/transformers/models/roformer/tokenization_roformer.py index 88c0f398b300..27a7281600a3 100644 --- a/src/transformers/models/roformer/tokenization_roformer.py +++ b/src/transformers/models/roformer/tokenization_roformer.py @@ -358,6 +358,7 @@ class RoFormerTokenizer(PreTrainedTokenizer): >>> tokenizer.tokenize("今天天气非常好。") ['今', '天', '天', '气', '非常', '好', '。'] ```""" + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES diff --git a/src/transformers/models/sam/processing_sam.py b/src/transformers/models/sam/processing_sam.py index 0ec47a995af1..7c632b2151cc 100644 --- a/src/transformers/models/sam/processing_sam.py +++ b/src/transformers/models/sam/processing_sam.py @@ -44,6 +44,7 @@ class SamProcessor(ProcessorMixin): image_processor (`SamImageProcessor`): An instance of [`SamImageProcessor`]. The image processor is a required input. 
""" + attributes = ["image_processor"] image_processor_class = "SamImageProcessor" diff --git a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py index c3296cfabc76..e1fc44b492d6 100644 --- a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py @@ -239,6 +239,7 @@ class SeamlessM4TConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "seamless_m4t" def __init__( diff --git a/src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py b/src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py index e97ca3046a98..a90a30f5795f 100644 --- a/src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py +++ b/src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py @@ -33,23 +33,10 @@ from transformers.utils import logging -# fmt: off -UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ] -# fmt: on - -# fmt: off -VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",] -# fmt: on - - -# fmt: off -MEDIUM_SUPPORTED_LANGUAGES = ["ace","ace_Latn","acm","acq","aeb","afr","ajp","aka","amh","apc","arb","ars","ary","arz","asm","ast","awa","ayr","azb","azj","bak","bam","ban","bel","bem","ben","bho","bjn","bjn_Latn","bod","bos","bug","bul","cat","ceb","ces","cjk","ckb","crh","cym","dan","deu","dik","dyu","dzo","ell","eng","epo","est","eus","ewe","fao","pes","fij","fin","fon","fra","fur","fuv","gla","gle","glg","grn","guj","hat","hau","heb","hin","hne","hrv","hun","hye","ibo","ilo","ind","isl","ita","jav","jpn","kab","kac","kam","kan","kas","kas_Deva","kat","knc","knc_Latn","kaz","kbp","kea","khm","kik","kin","kir","kmb","kon","kor","kmr","lao","lvs","lij","lim","lin","lit","lmo","ltg","ltz","lua","lug","luo","lus","mag","mai","mal","mar","min","mkd","plt","mlt","mni","khk","mos","mri","zsm","mya","nld","nno","nob","npi","nso","nus","nya","oci","gaz","ory","pag","pan","pap","pol","por","prs","pbt","quy","ron","run","rus","sag","san","sat","scn","shn","sin","slk","slv","smo","sna","snd","som","sot","spa","als","srd","srp","ssw","sun","swe","swh","szl","tam","tat","tel","tgk","tgl","tha","tir","taq","taq_Tfng","tpi","tsn","tso","tuk","tum","tur","twi","tzm","uig","ukr","umb","urd","uzn","vec","vie","war","wol","xho","ydd","yor","yue","cmn","cmn_Hant","zul",] -# fmt: on - - -# fmt: off -LARGE_SUPPORTED_LANGUAGES = 
["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",] -# fmt: on +UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ] # fmt: skip +VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",] # fmt: skip +MEDIUM_SUPPORTED_LANGUAGES = ["ace","ace_Latn","acm","acq","aeb","afr","ajp","aka","amh","apc","arb","ars","ary","arz","asm","ast","awa","ayr","azb","azj","bak","bam","ban","bel","bem","ben","bho","bjn","bjn_Latn","bod","bos","bug","bul","cat","ceb","ces","cjk","ckb","crh","cym","dan","deu","dik","dyu","dzo","ell","eng","epo","est","eus","ewe","fao","pes","fij","fin","fon","fra","fur","fuv","gla","gle","glg","grn","guj","hat","hau","heb","hin","hne","hrv","hun","hye","ibo","ilo","ind","isl","ita","jav","jpn","kab","kac","kam","kan","kas","kas_Deva","kat","knc","knc_Latn","kaz","kbp","kea","khm","kik","kin","kir","kmb","kon","kor","kmr","lao","lvs","lij","lim","lin","lit","lmo","ltg","ltz","lua","lug","luo","lus","mag","mai","mal","mar","min","mkd","plt","mlt","mni","khk","mos","mri","zsm","mya","nld","nno","nob","npi","nso","nus","nya","oci","gaz","ory","pag","pan","pap","pol","por","prs","pbt","quy","ron","run","rus","sag","san","sat","scn","shn","sin","slk","slv","smo","sna","snd","som","sot","spa","als","srd","srp","ssw","sun","swe","swh","szl","tam","tat","tel","tgk","tgl","tha","tir","taq","taq_Tfng","tpi","tsn","tso","tuk","tum","tur","twi","tzm","uig","ukr","umb","urd","uzn","vec","vie","war","wol","xho","ydd","yor","yue","cmn","cmn_Hant","zul",] # fmt: skip +LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",] # fmt: skip def assert_param_count(model_1, model_2): diff --git a/src/transformers/models/seamless_m4t/processing_seamless_m4t.py 
b/src/transformers/models/seamless_m4t/processing_seamless_m4t.py index 4f22e9e33d0c..7e838913ca14 100644 --- a/src/transformers/models/seamless_m4t/processing_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/processing_seamless_m4t.py @@ -34,6 +34,7 @@ class SeamlessM4TProcessor(ProcessorMixin): tokenizer ([`SeamlessM4TTokenizerFast`]): The tokenizer is a required input. """ + feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = ("SeamlessM4TTokenizer", "SeamlessM4TTokenizerFast") diff --git a/src/transformers/models/segformer/configuration_segformer.py b/src/transformers/models/segformer/configuration_segformer.py index 7f95657e1975..ad1c2053295b 100644 --- a/src/transformers/models/segformer/configuration_segformer.py +++ b/src/transformers/models/segformer/configuration_segformer.py @@ -100,6 +100,7 @@ class SegformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "segformer" def __init__( diff --git a/src/transformers/models/segformer/image_processing_segformer.py b/src/transformers/models/segformer/image_processing_segformer.py index 27687fde03fb..57f2628a9cd3 100644 --- a/src/transformers/models/segformer/image_processing_segformer.py +++ b/src/transformers/models/segformer/image_processing_segformer.py @@ -440,8 +440,7 @@ def preprocess( # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->Segformer def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): """ - Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps. Only supports - PyTorch. + Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. 
Args: outputs ([`SegformerForSemanticSegmentation`]): diff --git a/src/transformers/models/sew/configuration_sew.py b/src/transformers/models/sew/configuration_sew.py index 662eaff7f268..f5db6fd2c104 100644 --- a/src/transformers/models/sew/configuration_sew.py +++ b/src/transformers/models/sew/configuration_sew.py @@ -154,6 +154,7 @@ class SEWConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "sew" def __init__( diff --git a/src/transformers/models/sew_d/configuration_sew_d.py b/src/transformers/models/sew_d/configuration_sew_d.py index 78c0f2a71254..2f08ff81f50e 100644 --- a/src/transformers/models/sew_d/configuration_sew_d.py +++ b/src/transformers/models/sew_d/configuration_sew_d.py @@ -169,6 +169,7 @@ class SEWDConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "sew-d" def __init__( diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index cbb74dcfa243..8e890f207d41 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -847,15 +847,11 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ if "c2p" in self.pos_att_type: pos_key_layer = self.transpose_for_scores( self.pos_key_proj(rel_embeddings), self.num_attention_heads - ).repeat( - query_layer.size(0) // self.num_attention_heads, 1, 1 - ) # .split(self.all_head_size, dim=-1) + ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1) if "p2c" in self.pos_att_type: pos_query_layer = self.transpose_for_scores( self.pos_query_proj(rel_embeddings), self.num_attention_heads - ).repeat( - query_layer.size(0) // self.num_attention_heads, 1, 1 - ) # .split(self.all_head_size, dim=-1) + ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1) score = 0 # content->position diff --git a/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py index 4a144514fd3b..378f082e4b9c 100644 --- a/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +++ b/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py @@ -69,6 +69,7 @@ class SpeechEncoderDecoderConfig(PretrainedConfig): >>> encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained("my-model") >>> model = SpeechEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config) ```""" + model_type = "speech-encoder-decoder" is_composition = True diff --git a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py index 78a652e91d0c..5028e30344cc 100644 --- a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +++ b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py @@ -177,6 +177,7 @@ class SpeechEncoderDecoderModel(PreTrainedModel): :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder. 
""" + config_class = SpeechEncoderDecoderConfig base_model_prefix = "speech_encoder_decoder" main_input_name = "inputs" diff --git a/src/transformers/models/speech_to_text/configuration_speech_to_text.py b/src/transformers/models/speech_to_text/configuration_speech_to_text.py index 89d8e9a9105b..fb1a8e1b5ac2 100644 --- a/src/transformers/models/speech_to_text/configuration_speech_to_text.py +++ b/src/transformers/models/speech_to_text/configuration_speech_to_text.py @@ -121,6 +121,7 @@ class Speech2TextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "speech_to_text" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/speech_to_text/processing_speech_to_text.py b/src/transformers/models/speech_to_text/processing_speech_to_text.py index 29af8ae6b901..42e900633867 100644 --- a/src/transformers/models/speech_to_text/processing_speech_to_text.py +++ b/src/transformers/models/speech_to_text/processing_speech_to_text.py @@ -36,6 +36,7 @@ class Speech2TextProcessor(ProcessorMixin): tokenizer (`Speech2TextTokenizer`): An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input. """ + feature_extractor_class = "Speech2TextFeatureExtractor" tokenizer_class = "Speech2TextTokenizer" diff --git a/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py index 596f6bea0bbc..5dd34cb86baa 100644 --- a/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py @@ -86,6 +86,7 @@ class Speech2Text2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "speech_to_text_2" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py index 1472eb70be51..47a45d700f79 100644 --- a/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py @@ -35,6 +35,7 @@ class Speech2Text2Processor(ProcessorMixin): tokenizer (`Speech2Text2Tokenizer`): An instance of [`Speech2Text2Tokenizer`]. The tokenizer is a required input. 
""" + feature_extractor_class = "AutoFeatureExtractor" tokenizer_class = "Speech2Text2Tokenizer" diff --git a/src/transformers/models/speecht5/configuration_speecht5.py b/src/transformers/models/speecht5/configuration_speecht5.py index 8d6a61023c7c..c7cd7d2f62ff 100644 --- a/src/transformers/models/speecht5/configuration_speecht5.py +++ b/src/transformers/models/speecht5/configuration_speecht5.py @@ -194,6 +194,7 @@ class SpeechT5Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "speecht5" attribute_map = {"num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers"} @@ -398,6 +399,7 @@ class SpeechT5HifiGanConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "hifigan" def __init__( diff --git a/src/transformers/models/speecht5/processing_speecht5.py b/src/transformers/models/speecht5/processing_speecht5.py index 27353b4702b1..468a0c1d89ab 100644 --- a/src/transformers/models/speecht5/processing_speecht5.py +++ b/src/transformers/models/speecht5/processing_speecht5.py @@ -30,6 +30,7 @@ class SpeechT5Processor(ProcessorMixin): tokenizer (`SpeechT5Tokenizer`): An instance of [`SpeechT5Tokenizer`]. The tokenizer is a required input. """ + feature_extractor_class = "SpeechT5FeatureExtractor" tokenizer_class = "SpeechT5Tokenizer" diff --git a/src/transformers/models/splinter/configuration_splinter.py b/src/transformers/models/splinter/configuration_splinter.py index bdbe5f013143..38b33c45cba5 100644 --- a/src/transformers/models/splinter/configuration_splinter.py +++ b/src/transformers/models/splinter/configuration_splinter.py @@ -88,6 +88,7 @@ class SplinterConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "splinter" def __init__( diff --git a/src/transformers/models/squeezebert/configuration_squeezebert.py b/src/transformers/models/squeezebert/configuration_squeezebert.py index 5757b9410fce..4926a7317767 100644 --- a/src/transformers/models/squeezebert/configuration_squeezebert.py +++ b/src/transformers/models/squeezebert/configuration_squeezebert.py @@ -109,6 +109,7 @@ class SqueezeBertConfig(PretrainedConfig): Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints. """ + pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP model_type = "squeezebert" diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert.py b/src/transformers/models/squeezebert/tokenization_squeezebert.py index 0cefa03edf3e..c655ba8ddaa2 100644 --- a/src/transformers/models/squeezebert/tokenization_squeezebert.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert.py @@ -266,8 +266,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A SqueezeBERT sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py index 23faab71349f..a06aaf615e10 100644 --- a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py @@ -182,8 +182,8 @@ def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT - sequence pair mask has the following format: + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence + pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 diff --git a/src/transformers/models/swiftformer/configuration_swiftformer.py b/src/transformers/models/swiftformer/configuration_swiftformer.py index 21dfe4cd8c52..3e06b2feab24 100644 --- a/src/transformers/models/swiftformer/configuration_swiftformer.py +++ b/src/transformers/models/swiftformer/configuration_swiftformer.py @@ -85,6 +85,7 @@ class SwiftFormerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "swiftformer" def __init__( diff --git a/src/transformers/models/swin/configuration_swin.py b/src/transformers/models/swin/configuration_swin.py index 1b0efca1c47f..29f38f725579 100644 --- a/src/transformers/models/swin/configuration_swin.py +++ b/src/transformers/models/swin/configuration_swin.py @@ -105,6 +105,7 @@ class SwinConfig(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "swin" attribute_map = { diff --git a/src/transformers/models/swin2sr/configuration_swin2sr.py b/src/transformers/models/swin2sr/configuration_swin2sr.py index 622001f29fca..81c6af31e27f 100644 --- a/src/transformers/models/swin2sr/configuration_swin2sr.py +++ b/src/transformers/models/swin2sr/configuration_swin2sr.py @@ -97,6 +97,7 @@ class Swin2SRConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "swin2sr" attribute_map = { diff --git a/src/transformers/models/swinv2/configuration_swinv2.py b/src/transformers/models/swinv2/configuration_swinv2.py index 595d920c6b54..1dac62583caa 100644 --- a/src/transformers/models/swinv2/configuration_swinv2.py +++ b/src/transformers/models/swinv2/configuration_swinv2.py @@ -89,6 +89,7 @@ class Swinv2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "swinv2" attribute_map = { diff --git a/src/transformers/models/switch_transformers/configuration_switch_transformers.py b/src/transformers/models/switch_transformers/configuration_switch_transformers.py index 291a9f1f3ab9..9d8bfe8ba329 100644 --- a/src/transformers/models/switch_transformers/configuration_switch_transformers.py +++ b/src/transformers/models/switch_transformers/configuration_switch_transformers.py @@ -94,6 +94,7 @@ class SwitchTransformersConfig(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
""" + model_type = "switch_transformers" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} diff --git a/src/transformers/models/t5/configuration_t5.py b/src/transformers/models/t5/configuration_t5.py index 10afdea0c1ed..05d737d035af 100644 --- a/src/transformers/models/t5/configuration_t5.py +++ b/src/transformers/models/t5/configuration_t5.py @@ -77,6 +77,7 @@ class T5Config(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ + model_type = "t5" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} diff --git a/src/transformers/models/table_transformer/configuration_table_transformer.py b/src/transformers/models/table_transformer/configuration_table_transformer.py index 9cc903656a4c..d79734b383c0 100644 --- a/src/transformers/models/table_transformer/configuration_table_transformer.py +++ b/src/transformers/models/table_transformer/configuration_table_transformer.py @@ -132,6 +132,7 @@ class TableTransformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "table-transformer" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/table_transformer/modeling_table_transformer.py b/src/transformers/models/table_transformer/modeling_table_transformer.py index dbff6476a7c6..92aac58e7456 100644 --- a/src/transformers/models/table_transformer/modeling_table_transformer.py +++ b/src/transformers/models/table_transformer/modeling_table_transformer.py @@ -65,10 +65,9 @@ # Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions): """ - Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to - BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output - of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary - decoding losses. + Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, + namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them + gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): @@ -97,10 +96,9 @@ class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions): # Copied from transformers.models.detr.modeling_detr.DetrModelOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer class TableTransformerModelOutput(Seq2SeqModelOutput): """ - Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. This class adds one attribute to - Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder - layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding - losses. + Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. 
This class adds one attribute to Seq2SeqModelOutput, + namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them + gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): @@ -153,8 +151,8 @@ class TableTransformerObjectDetectionOutput(ModelOutput): pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding - possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to - retrieve the unnormalized bounding boxes. + possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to retrieve the + unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and @@ -1583,15 +1581,15 @@ def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: f # Copied from transformers.models.detr.modeling_detr.DetrLoss with Detr->TableTransformer,detr->table_transformer class TableTransformerLoss(nn.Module): """ - This class computes the losses for TableTransformerForObjectDetection/TableTransformerForSegmentation. The process - happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) - we supervise each pair of matched ground-truth / prediction (supervise class and box). - - A note on the `num_classes` argument (copied from original repo in table_transformer.py): "the naming of the - `num_classes` parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where - `max_obj_id` is the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass - `num_classes` to be 91. As another example, for a dataset that has a single class with `id` 1, you should pass - `num_classes` to be 2 (`max_obj_id` + 1). For more details on this, check the following discussion + This class computes the losses for TableTransformerForObjectDetection/TableTransformerForSegmentation. The process happens in two steps: 1) + we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair + of matched ground-truth / prediction (supervise class and box). + + A note on the `num_classes` argument (copied from original repo in table_transformer.py): "the naming of the `num_classes` + parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is + the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to + be 91. As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2 + (`max_obj_id` + 1). 
For more details on this, check the following discussion https://github.com/facebookresearch/table_transformer/issues/108#issuecomment-650269223" diff --git a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py index 9676b50ed0b9..a2e31ba48d3b 100644 --- a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py @@ -127,6 +127,7 @@ class TimeSeriesTransformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "time_series_transformer" attribute_map = { "hidden_size": "d_model", diff --git a/src/transformers/models/timesformer/configuration_timesformer.py b/src/transformers/models/timesformer/configuration_timesformer.py index dcd7b2a518aa..cb743ee29088 100644 --- a/src/transformers/models/timesformer/configuration_timesformer.py +++ b/src/transformers/models/timesformer/configuration_timesformer.py @@ -85,6 +85,7 @@ class TimesformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "timesformer" def __init__( diff --git a/src/transformers/models/timm_backbone/configuration_timm_backbone.py b/src/transformers/models/timm_backbone/configuration_timm_backbone.py index 23d2aa223704..0f2f1b0b6c31 100644 --- a/src/transformers/models/timm_backbone/configuration_timm_backbone.py +++ b/src/transformers/models/timm_backbone/configuration_timm_backbone.py @@ -60,6 +60,7 @@ class TimmBackboneConfig(PretrainedConfig): >>> configuration = model.config ``` """ + model_type = "timm_backbone" def __init__( diff --git a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py index 91f3d78aae76..8a2aba92f7a8 100644 --- a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py +++ b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py @@ -212,7 +212,7 @@ def __init__( # Loading a torch-saved transfo-xl vocab dict with pickle results in an integer # Entering this if statement means that we tried to load a torch-saved file with pickle, and we failed. # We therefore load it with torch, if it's available. - if type(vocab_dict) == int: + if isinstance(vocab_dict, int): if not is_torch_available(): raise ImportError( "Not trying to load dict with PyTorch as you need to install pytorch to load " diff --git a/src/transformers/models/trocr/configuration_trocr.py b/src/transformers/models/trocr/configuration_trocr.py index b3f033736184..4964ab27acb8 100644 --- a/src/transformers/models/trocr/configuration_trocr.py +++ b/src/transformers/models/trocr/configuration_trocr.py @@ -91,6 +91,7 @@ class TrOCRConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "trocr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/trocr/processing_trocr.py b/src/transformers/models/trocr/processing_trocr.py index 6b7723a975bb..e7ce7362d49a 100644 --- a/src/transformers/models/trocr/processing_trocr.py +++ b/src/transformers/models/trocr/processing_trocr.py @@ -35,6 +35,7 @@ class TrOCRProcessor(ProcessorMixin): tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*): An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. 
The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/tvlt/configuration_tvlt.py b/src/transformers/models/tvlt/configuration_tvlt.py index 013952dbb1ba..e37fd20912f8 100644 --- a/src/transformers/models/tvlt/configuration_tvlt.py +++ b/src/transformers/models/tvlt/configuration_tvlt.py @@ -110,6 +110,7 @@ class TvltConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "tvlt" def __init__( diff --git a/src/transformers/models/tvlt/processing_tvlt.py b/src/transformers/models/tvlt/processing_tvlt.py index b14a3437c285..c67a3a8c6d6d 100644 --- a/src/transformers/models/tvlt/processing_tvlt.py +++ b/src/transformers/models/tvlt/processing_tvlt.py @@ -32,6 +32,7 @@ class TvltProcessor(ProcessorMixin): feature_extractor (`TvltFeatureExtractor`): An instance of [`TvltFeatureExtractor`]. The feature extractor is a required input. """ + attributes = ["image_processor", "feature_extractor"] image_processor_class = "TvltImageProcessor" feature_extractor_class = "TvltFeatureExtractor" diff --git a/src/transformers/models/umt5/configuration_umt5.py b/src/transformers/models/umt5/configuration_umt5.py index 980686da6e70..93025c5bc4ad 100644 --- a/src/transformers/models/umt5/configuration_umt5.py +++ b/src/transformers/models/umt5/configuration_umt5.py @@ -73,6 +73,7 @@ class UMT5Config(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ + model_type = "umt5" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/umt5/modeling_umt5.py b/src/transformers/models/umt5/modeling_umt5.py index 220aff273bc6..be13cee193df 100644 --- a/src/transformers/models/umt5/modeling_umt5.py +++ b/src/transformers/models/umt5/modeling_umt5.py @@ -940,6 +940,7 @@ class UMT5Model(UMT5PreTrainedModel): >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" + model_type = "uumt5" config_class = UMT5Config _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] diff --git a/src/transformers/models/unispeech/configuration_unispeech.py b/src/transformers/models/unispeech/configuration_unispeech.py index 0cf270d1fa13..ef8da01e3255 100644 --- a/src/transformers/models/unispeech/configuration_unispeech.py +++ b/src/transformers/models/unispeech/configuration_unispeech.py @@ -187,6 +187,7 @@ class UniSpeechConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "unispeech" def __init__( diff --git a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py index 9d0a30611584..f0aa57141f55 100644 --- a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py @@ -197,6 +197,7 @@ class UniSpeechSatConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "unispeech-sat" def __init__( diff --git a/src/transformers/models/upernet/configuration_upernet.py b/src/transformers/models/upernet/configuration_upernet.py index f7ad5d04652c..ba4afad10fff 100644 --- 
a/src/transformers/models/upernet/configuration_upernet.py +++ b/src/transformers/models/upernet/configuration_upernet.py @@ -69,6 +69,7 @@ class UperNetConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "upernet" def __init__( diff --git a/src/transformers/models/videomae/configuration_videomae.py b/src/transformers/models/videomae/configuration_videomae.py index 8120bb23fc2a..61bfe1d6a890 100644 --- a/src/transformers/models/videomae/configuration_videomae.py +++ b/src/transformers/models/videomae/configuration_videomae.py @@ -94,6 +94,7 @@ class VideoMAEConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "videomae" def __init__( diff --git a/src/transformers/models/vilt/configuration_vilt.py b/src/transformers/models/vilt/configuration_vilt.py index 3db6535e5f07..1fc7aa58195a 100644 --- a/src/transformers/models/vilt/configuration_vilt.py +++ b/src/transformers/models/vilt/configuration_vilt.py @@ -96,6 +96,7 @@ class ViltConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vilt" def __init__( diff --git a/src/transformers/models/vilt/processing_vilt.py b/src/transformers/models/vilt/processing_vilt.py index e86aa34c0995..0ccb884ea00c 100644 --- a/src/transformers/models/vilt/processing_vilt.py +++ b/src/transformers/models/vilt/processing_vilt.py @@ -37,6 +37,7 @@ class ViltProcessor(ProcessorMixin): tokenizer (`BertTokenizerFast`, *optional*): An instance of ['BertTokenizerFast`]. The tokenizer is a required input. """ + attributes = ["image_processor", "tokenizer"] image_processor_class = "ViltImageProcessor" tokenizer_class = ("BertTokenizer", "BertTokenizerFast") diff --git a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py index 8a8fd2f0f631..ba380ed3ea3f 100644 --- a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py @@ -76,6 +76,7 @@ class VisionEncoderDecoderConfig(PretrainedConfig): >>> encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained("my-model") >>> model = VisionEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config) ```""" + model_type = "vision-encoder-decoder" is_composition = True diff --git a/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py index 3d914c9658da..899acd10703b 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py @@ -272,6 +272,7 @@ class FlaxVisionEncoderDecoderModel(FlaxPreTrainedModel): another one as decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method for the encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder. 
""" + config_class = VisionEncoderDecoderConfig base_model_prefix = "vision_encoder_decoder" main_input_name = "pixel_values" diff --git a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py index dea1aaaf59c0..65f55d55e821 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py @@ -180,6 +180,7 @@ class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLos decoder when created with the [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class method for the decoder. """ + config_class = VisionEncoderDecoderConfig base_model_prefix = "vision_encoder_decoder" load_weight_prefix = "tf_vision_encoder_decoder_model" diff --git a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py index f9c6c25cd8d1..f7134c94ff01 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -155,6 +155,7 @@ class VisionEncoderDecoderModel(PreTrainedModel): :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder. """ + config_class = VisionEncoderDecoderConfig base_model_prefix = "vision_encoder_decoder" main_input_name = "pixel_values" diff --git a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py index e6449914680b..322c13aadcca 100644 --- a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py @@ -37,6 +37,7 @@ class VisionTextDualEncoderProcessor(ProcessorMixin): tokenizer ([`PreTrainedTokenizer`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/visual_bert/configuration_visual_bert.py b/src/transformers/models/visual_bert/configuration_visual_bert.py index a7282ef2bb53..85020ba9ac91 100644 --- a/src/transformers/models/visual_bert/configuration_visual_bert.py +++ b/src/transformers/models/visual_bert/configuration_visual_bert.py @@ -35,7 +35,7 @@ "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", "uclanlp/visualbert-nlvr2-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json" - ) + ), # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index 30fe60ef7a13..f8a146ed2c4e 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -57,7 +57,7 @@ "uclanlp/visualbert-vcr-coco-pre", "uclanlp/visualbert-nlvr2", "uclanlp/visualbert-nlvr2-pre", - "uclanlp/visualbert-nlvr2-coco-pre" + "uclanlp/visualbert-nlvr2-coco-pre", # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert ] diff --git a/src/transformers/models/vit/configuration_vit.py b/src/transformers/models/vit/configuration_vit.py index cbf67a010934..5eda0385c30c 100644 --- a/src/transformers/models/vit/configuration_vit.py +++ b/src/transformers/models/vit/configuration_vit.py @@ -88,6 +88,7 @@ class ViTConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vit" def __init__( diff --git a/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py b/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py index 5e5db3600d78..0b8a0da75fff 100644 --- a/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py +++ b/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py @@ -86,6 +86,7 @@ class ViTHybridConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vit-hybrid" def __init__( diff --git a/src/transformers/models/vit_mae/configuration_vit_mae.py b/src/transformers/models/vit_mae/configuration_vit_mae.py index aed808d73251..fa57fbe4fb05 100644 --- a/src/transformers/models/vit_mae/configuration_vit_mae.py +++ b/src/transformers/models/vit_mae/configuration_vit_mae.py @@ -93,6 +93,7 @@ class ViTMAEConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vit_mae" def __init__( diff --git a/src/transformers/models/vit_msn/configuration_vit_msn.py b/src/transformers/models/vit_msn/configuration_vit_msn.py index 87d9a37a68e0..4ee05e3c393b 100644 --- a/src/transformers/models/vit_msn/configuration_vit_msn.py +++ b/src/transformers/models/vit_msn/configuration_vit_msn.py @@ -81,6 +81,7 @@ class ViTMSNConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vit_msn" def __init__( diff --git a/src/transformers/models/vitdet/configuration_vitdet.py b/src/transformers/models/vitdet/configuration_vitdet.py index 45dc9e9296f5..c2e7aff6d99a 100644 --- a/src/transformers/models/vitdet/configuration_vitdet.py +++ b/src/transformers/models/vitdet/configuration_vitdet.py @@ -100,6 
+100,7 @@ class VitDetConfig(BackboneConfigMixin, PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vitdet" def __init__( diff --git a/src/transformers/models/vitmatte/configuration_vitmatte.py b/src/transformers/models/vitmatte/configuration_vitmatte.py index aee3463dd90b..562abbe5e5ae 100644 --- a/src/transformers/models/vitmatte/configuration_vitmatte.py +++ b/src/transformers/models/vitmatte/configuration_vitmatte.py @@ -67,6 +67,7 @@ class VitMatteConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vitmatte" def __init__( diff --git a/src/transformers/models/vits/configuration_vits.py b/src/transformers/models/vits/configuration_vits.py index 2cadd39792b7..72f69e75a51b 100644 --- a/src/transformers/models/vits/configuration_vits.py +++ b/src/transformers/models/vits/configuration_vits.py @@ -150,6 +150,7 @@ class VitsConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vits" def __init__( diff --git a/src/transformers/models/vits/modeling_vits.py b/src/transformers/models/vits/modeling_vits.py index f347e900c384..33e02abe868c 100644 --- a/src/transformers/models/vits/modeling_vits.py +++ b/src/transformers/models/vits/modeling_vits.py @@ -809,9 +809,7 @@ def forward(self, inputs, padding_mask, global_conditioning=None, durations=None latents = torch.flip(latents, [1]) log_determinant_sum += log_determinant - nll = ( - torch.sum(0.5 * (math.log(2 * math.pi) + (latents**2)) * padding_mask, [1, 2]) - log_determinant_sum - ) + nll = torch.sum(0.5 * (math.log(2 * math.pi) + (latents**2)) * padding_mask, [1, 2]) - log_determinant_sum return nll + logq else: flows = list(reversed(self.flows)) diff --git a/src/transformers/models/vivit/configuration_vivit.py b/src/transformers/models/vivit/configuration_vivit.py index c554999b9064..0e367fcb9b79 100644 --- a/src/transformers/models/vivit/configuration_vivit.py +++ b/src/transformers/models/vivit/configuration_vivit.py @@ -83,6 +83,7 @@ class VivitConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "vivit" def __init__( diff --git a/src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py b/src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py index bcd2e37c0a6a..571dfe896ef5 100644 --- a/src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py +++ b/src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py @@ -100,31 +100,26 @@ def transform_state_encoder_block(state_dict, i): prefix + "layernorm_before.weight": state["LayerNorm_0"]["scale"], prefix + "layernorm_after.bias": state["LayerNorm_1"]["bias"], prefix + "layernorm_after.weight": state["LayerNorm_1"]["scale"], - prefix - + "attention.attention.query.bias": transform_attention( + prefix + "attention.attention.query.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["query"]["bias"] ), - prefix - + "attention.attention.query.weight": transform_attention( + prefix + "attention.attention.query.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["query"]["kernel"] ), - prefix - + "attention.attention.key.bias": transform_attention(state["MultiHeadDotProductAttention_0"]["key"]["bias"]), - prefix - + "attention.attention.key.weight": transform_attention( + prefix + "attention.attention.key.bias": transform_attention( + 
state["MultiHeadDotProductAttention_0"]["key"]["bias"] + ), + prefix + "attention.attention.key.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["key"]["kernel"] ), - prefix - + "attention.attention.value.bias": transform_attention( + prefix + "attention.attention.value.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["value"]["bias"] ), - prefix - + "attention.attention.value.weight": transform_attention( + prefix + "attention.attention.value.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["value"]["kernel"] ), prefix + "attention.output.dense.bias": state["MultiHeadDotProductAttention_0"]["out"]["bias"], - prefix - + "attention.output.dense.weight": transform_attention_output_weight( + prefix + "attention.output.dense.weight": transform_attention_output_weight( state["MultiHeadDotProductAttention_0"]["out"]["kernel"] ), } diff --git a/src/transformers/models/wav2vec2/configuration_wav2vec2.py b/src/transformers/models/wav2vec2/configuration_wav2vec2.py index 91be7cf85b60..32cdaa29d965 100644 --- a/src/transformers/models/wav2vec2/configuration_wav2vec2.py +++ b/src/transformers/models/wav2vec2/configuration_wav2vec2.py @@ -203,6 +203,7 @@ class Wav2Vec2Config(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "wav2vec2" def __init__( diff --git a/src/transformers/models/wav2vec2/processing_wav2vec2.py b/src/transformers/models/wav2vec2/processing_wav2vec2.py index d6585a4f4dd6..dc6e9d14ee66 100644 --- a/src/transformers/models/wav2vec2/processing_wav2vec2.py +++ b/src/transformers/models/wav2vec2/processing_wav2vec2.py @@ -37,6 +37,7 @@ class Wav2Vec2Processor(ProcessorMixin): tokenizer ([`PreTrainedTokenizer`]): An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input. """ + feature_extractor_class = "Wav2Vec2FeatureExtractor" tokenizer_class = "AutoTokenizer" diff --git a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py index f408338b457d..7e78d8d85e41 100644 --- a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py @@ -209,6 +209,7 @@ class Wav2Vec2ConformerConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "wav2vec2-conformer" def __init__( diff --git a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py index f7b519185b78..324cbf0a71eb 100644 --- a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py @@ -1445,10 +1445,7 @@ def forward( ```python >>> import torch >>> from transformers import AutoFeatureExtractor, Wav2Vec2ConformerForPreTraining - >>> from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import ( - ... _compute_mask_indices, - ... _sample_negative_indices, - ... 
) + >>> from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import _compute_mask_indices, _sample_negative_indices >>> from datasets import load_dataset >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") diff --git a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py index fdc06a806880..916cca51a989 100644 --- a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +++ b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py @@ -77,6 +77,7 @@ class Wav2Vec2ProcessorWithLM(ProcessorMixin): decoder (`pyctcdecode.BeamSearchDecoderCTC`): An instance of [`pyctcdecode.BeamSearchDecoderCTC`]. The decoder is a required input. """ + feature_extractor_class = "Wav2Vec2FeatureExtractor" tokenizer_class = "Wav2Vec2CTCTokenizer" diff --git a/src/transformers/models/wavlm/configuration_wavlm.py b/src/transformers/models/wavlm/configuration_wavlm.py index 831b85f24c65..589741c520fa 100644 --- a/src/transformers/models/wavlm/configuration_wavlm.py +++ b/src/transformers/models/wavlm/configuration_wavlm.py @@ -192,6 +192,7 @@ class WavLMConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "wavlm" def __init__( diff --git a/src/transformers/models/whisper/configuration_whisper.py b/src/transformers/models/whisper/configuration_whisper.py index 6ff5e529b196..5af324214180 100644 --- a/src/transformers/models/whisper/configuration_whisper.py +++ b/src/transformers/models/whisper/configuration_whisper.py @@ -189,6 +189,7 @@ class WhisperConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "whisper" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} diff --git a/src/transformers/models/whisper/processing_whisper.py b/src/transformers/models/whisper/processing_whisper.py index b0d0d6c95450..cae72aa0e13f 100644 --- a/src/transformers/models/whisper/processing_whisper.py +++ b/src/transformers/models/whisper/processing_whisper.py @@ -34,6 +34,7 @@ class WhisperProcessor(ProcessorMixin): tokenizer (`WhisperTokenizer`): An instance of [`WhisperTokenizer`]. The tokenizer is a required input. 
""" + feature_extractor_class = "WhisperFeatureExtractor" tokenizer_class = "WhisperTokenizer" diff --git a/src/transformers/models/x_clip/configuration_x_clip.py b/src/transformers/models/x_clip/configuration_x_clip.py index fbccdf1167d9..f95bc0a5a0b2 100644 --- a/src/transformers/models/x_clip/configuration_x_clip.py +++ b/src/transformers/models/x_clip/configuration_x_clip.py @@ -81,6 +81,7 @@ class XCLIPTextConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "xclip_text_model" def __init__( diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py index 754358b352f1..e341b9639d87 100644 --- a/src/transformers/models/x_clip/modeling_x_clip.py +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -492,9 +492,7 @@ def _init_weights(self, module): nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, XCLIPMLP): factor = self.config.initializer_factor - in_proj_std = ( - (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor - ) + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) diff --git a/src/transformers/models/x_clip/processing_x_clip.py b/src/transformers/models/x_clip/processing_x_clip.py index 6e54c9e7876a..a11aeb18dc4f 100644 --- a/src/transformers/models/x_clip/processing_x_clip.py +++ b/src/transformers/models/x_clip/processing_x_clip.py @@ -35,6 +35,7 @@ class XCLIPProcessor(ProcessorMixin): tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. 
""" + attributes = ["image_processor", "tokenizer"] image_processor_class = "VideoMAEImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") diff --git a/src/transformers/models/xglm/configuration_xglm.py b/src/transformers/models/xglm/configuration_xglm.py index 8a59ee6682d6..9377bbce6f01 100644 --- a/src/transformers/models/xglm/configuration_xglm.py +++ b/src/transformers/models/xglm/configuration_xglm.py @@ -85,6 +85,7 @@ class XGLMConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "xglm" keys_to_ignore_at_inference = ["past_key_values"] diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index 63d214da0c54..9343f6cb524b 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -387,9 +387,10 @@ def call( # check inputs # assert shape_list(lengths)[0] == bs - tf.debugging.assert_equal( - shape_list(lengths)[0], bs - ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" + ( + tf.debugging.assert_equal(shape_list(lengths)[0], bs), + f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched", + ) # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) @@ -408,17 +409,19 @@ def call( position_ids = tf.tile(position_ids, (bs, 1)) # assert shape_list(position_ids) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(position_ids), [bs, slen] - ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" + ( + tf.debugging.assert_equal(shape_list(position_ids), [bs, slen]), + f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched", + ) # position_ids = position_ids.transpose(0, 1) # langs if langs is not None: # assert shape_list(langs) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(langs), [bs, slen] - ), f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched" + ( + tf.debugging.assert_equal(shape_list(langs), [bs, slen]), + f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched", + ) # langs = langs.transpose(0, 1) # Prepare head mask if needed diff --git a/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py index 29c8678f2799..88ca83a73226 100644 --- a/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py @@ -100,6 +100,7 @@ class XLMProphetNetConfig(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
""" + model_type = "xlm-prophetnet" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { diff --git a/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py index f99cd4549a49..37bd32186af4 100644 --- a/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py @@ -1469,9 +1469,7 @@ def forward( >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") - >>> model = XLMProphetNetDecoder.from_pretrained( - ... "patrickvonplaten/xprophetnet-large-uncased-standalone", add_cross_attention=False - ... ) + >>> model = XLMProphetNetDecoder.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) @@ -1948,9 +1946,7 @@ def forward( >>> from transformers import AutoTokenizer, XLMProphetNetForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") - >>> model = XLMProphetNetForConditionalGeneration.from_pretrained( - ... "patrickvonplaten/xprophetnet-large-uncased-standalone" - ... ) + >>> model = XLMProphetNetForConditionalGeneration.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" diff --git a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py index 98e12d07826e..517b751f4220 100644 --- a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py @@ -110,6 +110,7 @@ class XLMRobertaConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "xlm-roberta" def __init__( diff --git a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py index acf30bf3878a..e2dee1cbe4e1 100644 --- a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py @@ -97,6 +97,7 @@ class XLMRobertaXLConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "xlm-roberta-xl" def __init__( diff --git a/src/transformers/models/xmod/configuration_xmod.py b/src/transformers/models/xmod/configuration_xmod.py index 012b7446c4c4..abf7a3275c54 100644 --- a/src/transformers/models/xmod/configuration_xmod.py +++ b/src/transformers/models/xmod/configuration_xmod.py @@ -119,6 +119,7 @@ class XmodConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "xmod" def __init__( diff --git a/src/transformers/models/yolos/configuration_yolos.py b/src/transformers/models/yolos/configuration_yolos.py index 77a036f5adb7..8b969bdd8b1a 100644 --- a/src/transformers/models/yolos/configuration_yolos.py +++ b/src/transformers/models/yolos/configuration_yolos.py @@ -103,6 +103,7 @@ class YolosConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "yolos" def __init__( diff --git 
a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py index 08e7f0777c63..b2f0ca0db532 100755 --- a/src/transformers/models/yolos/modeling_yolos.py +++ b/src/transformers/models/yolos/modeling_yolos.py @@ -904,9 +904,9 @@ def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: f # Copied from transformers.models.detr.modeling_detr.DetrLoss with Detr->Yolos class YolosLoss(nn.Module): """ - This class computes the losses for YolosForObjectDetection/YolosForSegmentation. The process happens in two steps: - 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each - pair of matched ground-truth / prediction (supervise class and box). + This class computes the losses for YolosForObjectDetection/YolosForSegmentation. The process happens in two steps: 1) + we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair + of matched ground-truth / prediction (supervise class and box). A note on the `num_classes` argument (copied from original repo in detr.py): "the naming of the `num_classes` parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is diff --git a/src/transformers/models/yoso/configuration_yoso.py b/src/transformers/models/yoso/configuration_yoso.py index c6d2b176ef94..85501ac9d08b 100644 --- a/src/transformers/models/yoso/configuration_yoso.py +++ b/src/transformers/models/yoso/configuration_yoso.py @@ -94,6 +94,7 @@ class YosoConfig(PretrainedConfig): >>> # Accessing the model configuration >>> configuration = model.config ```""" + model_type = "yoso" def __init__( diff --git a/src/transformers/models/yoso/modeling_yoso.py b/src/transformers/models/yoso/modeling_yoso.py index 6666c0a5aa8e..4e08b999ad30 100644 --- a/src/transformers/models/yoso/modeling_yoso.py +++ b/src/transformers/models/yoso/modeling_yoso.py @@ -88,7 +88,7 @@ def to_contiguous(input_tensors): def normalize(input_tensors): - if type(input_tensors) is list: + if isinstance(input_tensors, list): out = [] for tensor in input_tensors: out.append(nn.functional.normalize(tensor, p=2, dim=-1)) diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index ea730bc00d07..723de720a5f9 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -1593,9 +1593,7 @@ def __init__(self, **kwargs): # By default, do not split special tokens for both fast and slow tokenizers self.split_special_tokens = kwargs.pop("split_special_tokens", False) - self.deprecation_warnings = ( - {} - ) # Use to store when we have already noticed a deprecation warning (avoid overlogging). + self.deprecation_warnings = {} # Use to store when we have already noticed a deprecation warning (avoid overlogging). 
self._in_target_context_manager = False # Stores a Jinja template that formats chat histories into tokenizable strings diff --git a/src/transformers/trainer_pt_utils.py b/src/transformers/trainer_pt_utils.py index cb6249f19a93..b8dfb3124c5e 100644 --- a/src/transformers/trainer_pt_utils.py +++ b/src/transformers/trainer_pt_utils.py @@ -896,7 +896,7 @@ def metrics_format(self, metrics: Dict[str, float]) -> Dict[str, float]: metrics_copy[k] = _secs2timedelta(v) elif k == "total_flos": metrics_copy[k] = f"{ int(v) >> 30 }GF" - elif type(metrics_copy[k]) == float: + elif isinstance(metrics_copy[k], float): metrics_copy[k] = round(v, 4) return metrics_copy diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index 0eba32d25931..50320dabb704 100755 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -174,7 +174,7 @@ def _generate_supported_model_class_names( "Speech2Text2Decoder", "TrOCRDecoder", "PeftModelForCausalLM", - "PeftModelForSeq2SeqLM" + "PeftModelForSeq2SeqLM", # TODO: add support for them as it should be quite easy to do so (small blocking issues). # XLNetForQuestionAnswering, ] diff --git a/src/transformers/utils/sentencepiece_model_pb2.py b/src/transformers/utils/sentencepiece_model_pb2.py index 458fe913d63a..b4b2992a6308 100644 --- a/src/transformers/utils/sentencepiece_model_pb2.py +++ b/src/transformers/utils/sentencepiece_model_pb2.py @@ -1445,7 +1445,7 @@ (_message.Message,), { "DESCRIPTOR": _TRAINERSPEC, - "__module__": "sentencepiece_model_pb2" + "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec) }, ) @@ -1456,7 +1456,7 @@ (_message.Message,), { "DESCRIPTOR": _NORMALIZERSPEC, - "__module__": "sentencepiece_model_pb2" + "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec) }, ) @@ -1471,12 +1471,12 @@ (_message.Message,), { "DESCRIPTOR": _SELFTESTDATA_SAMPLE, - "__module__": "sentencepiece_model_pb2" + "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample) }, ), "DESCRIPTOR": _SELFTESTDATA, - "__module__": "sentencepiece_model_pb2" + "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData) }, ) @@ -1492,12 +1492,12 @@ (_message.Message,), { "DESCRIPTOR": _MODELPROTO_SENTENCEPIECE, - "__module__": "sentencepiece_model_pb2" + "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece) }, ), "DESCRIPTOR": _MODELPROTO, - "__module__": "sentencepiece_model_pb2" + "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto) }, ) diff --git a/templates/adding_a_missing_tokenization_test/cookiecutter-template-{{cookiecutter.modelname}}/test_tokenization_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_missing_tokenization_test/cookiecutter-template-{{cookiecutter.modelname}}/test_tokenization_{{cookiecutter.lowercase_modelname}}.py index 36e35c04ed33..6cbe8bd481ac 100644 --- a/templates/adding_a_missing_tokenization_test/cookiecutter-template-{{cookiecutter.modelname}}/test_tokenization_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_missing_tokenization_test/cookiecutter-template-{{cookiecutter.modelname}}/test_tokenization_{{cookiecutter.lowercase_modelname}}.py @@ -75,4 +75,4 @@ def setUp(self): "`self.tmpdirname`." 
) - # TODO: add tests with hard-coded target values \ No newline at end of file + # TODO: add tests with hard-coded target values diff --git a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py index e447246da2c5..acdfe49090e8 100755 --- a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py +++ b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py @@ -502,7 +502,7 @@ def tokenize_function(examples): trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) - + # write custom code for saving predictions according to task def _mp_fn(index): @@ -900,7 +900,7 @@ def tokenize_function(examples): model.eval() for step, batch in enumerate(eval_dataloader): - with torch.no_grad(): + with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) metric.add_batch( diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration_{{cookiecutter.lowercase_modelname}}.py index 2898b5cf6f8f..3f9b5d1fb67f 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration_{{cookiecutter.lowercase_modelname}}.py @@ -137,7 +137,7 @@ class {{cookiecutter.camelcase_modelname}}Config(PretrainedConfig): {% else -%} keys_to_ignore_at_inference = ["past_key_values"] {% endif -%} - + {% if cookiecutter.is_encoder_decoder_model == "False" %} {%- else %} attribute_map = { @@ -238,4 +238,3 @@ def __init__( **kwargs ) - \ No newline at end of file diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_flax_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_flax_{{cookiecutter.lowercase_modelname}}.py index 37b22a75c3e9..a01ab3e19adf 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_flax_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_flax_{{cookiecutter.lowercase_modelname}}.py @@ -541,7 +541,7 @@ def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict( class Flax{{cookiecutter.camelcase_modelname}}ModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( - Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, + Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, Flax{{cookiecutter.camelcase_modelname}}Model, diff --git a/tests/models/albert/test_tokenization_albert.py b/tests/models/albert/test_tokenization_albert.py index c25cfaec77b4..d9bb86bf2994 100644 --- a/tests/models/albert/test_tokenization_albert.py +++ b/tests/models/albert/test_tokenization_albert.py @@ -123,9 +123,7 @@ def test_sequence_builders(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = 
{'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/altclip/test_modeling_altclip.py b/tests/models/altclip/test_modeling_altclip.py index f4f7a8fd5237..610a66f8ae3a 100755 --- a/tests/models/altclip/test_modeling_altclip.py +++ b/tests/models/altclip/test_modeling_altclip.py @@ -574,7 +574,7 @@ def test_inference(self): processor = AltCLIPProcessor.from_pretrained(model_name) image = prepare_img() - inputs = processor(text=["一张猫的照片", "一张狗的照片"], images=image, padding=True, return_tensors="pt").to(torch_device) + inputs = processor(text=["一张猫的照片", "一张狗的照片"], images=image, padding=True, return_tensors="pt").to(torch_device) # fmt: skip # forward pass with torch.no_grad(): diff --git a/tests/models/bark/test_modeling_bark.py b/tests/models/bark/test_modeling_bark.py index 4a71f9f723ca..713fb6c3ee79 100644 --- 
a/tests/models/bark/test_modeling_bark.py +++ b/tests/models/bark/test_modeling_bark.py @@ -1029,10 +1029,8 @@ def fine_generation_config(self): def test_generate_semantic(self): input_ids = self.inputs - # fmt: off # check first ids - expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] - # fmt: on + expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip # greedy decoding with torch.no_grad(): @@ -1049,10 +1047,8 @@ def test_generate_semantic_early_stop(self): input_ids = self.inputs min_eos_p = 0.01 - # fmt: off # check first ids - expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] - # fmt: on + expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip # Should be able to read min_eos_p from kwargs with torch.no_grad(): @@ -1095,10 +1091,8 @@ def test_generate_coarse(self): history_prompt = input_ids["history_prompt"] - # fmt: off # check first ids - expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] - # fmt: on + expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] # fmt: skip with torch.no_grad(): output_ids = self.model.semantic.generate( diff --git a/tests/models/barthez/test_tokenization_barthez.py b/tests/models/barthez/test_tokenization_barthez.py index fa128f5091b9..7759d3560def 100644 --- a/tests/models/barthez/test_tokenization_barthez.py +++ b/tests/models/barthez/test_tokenization_barthez.py @@ -96,9 +96,7 @@ def test_rust_and_python_full_tokenizers(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip # moussaKam/mbarthez is a french model. So we also use french texts. sequences = [ diff --git a/tests/models/bert_generation/test_tokenization_bert_generation.py b/tests/models/bert_generation/test_tokenization_bert_generation.py index 12be95d53ebd..41d992883518 100644 --- a/tests/models/bert_generation/test_tokenization_bert_generation.py +++ b/tests/models/bert_generation/test_tokenization_bert_generation.py @@ -233,9 +233,7 @@ def test_torch_encode_plus_sent_to_model(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 
18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/bert_japanese/test_tokenization_bert_japanese.py b/tests/models/bert_japanese/test_tokenization_bert_japanese.py index 3e840018bdc1..bc7800697976 100644 --- a/tests/models/bert_japanese/test_tokenization_bert_japanese.py +++ b/tests/models/bert_japanese/test_tokenization_bert_japanese.py @@ -198,12 +198,12 @@ def test_pickle_sudachi_tokenizer(self): def test_sudachi_tokenizer_core(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core") + # fmt: off self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), - # fmt: off - [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], - # fmt: on + [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], ) + # fmt: on @require_sudachi def test_sudachi_tokenizer_split_mode_A(self): @@ -227,23 +227,13 @@ def test_sudachi_tokenizer_split_mode_C(self): def test_sudachi_tokenizer_lower(self): tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core") - self.assertListEqual( - tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), - # fmt: off - [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], - # fmt: on - ) + self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),[" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "]) # fmt: skip @require_sudachi def test_sudachi_tokenizer_no_normalize(self): tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core") - self.assertListEqual( - tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), - # fmt: off - [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "], - # fmt: on - ) + self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),[" ", "\t", "アップル", "ストア", 
"で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "]) # fmt: skip @require_sudachi def test_sudachi_tokenizer_trim_whitespace(self): @@ -280,33 +270,19 @@ def test_jumanpp_tokenizer(self): tokenizer = JumanppTokenizer() self.assertListEqual( - tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), - # fmt: off - ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], - # fmt: on - ) + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"]) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_lower(self): tokenizer = JumanppTokenizer(do_lower_case=True) - self.assertListEqual( - tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), - # fmt: off - ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], - # fmt: on - ) + self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_no_normalize(self): tokenizer = JumanppTokenizer(normalize_text=False) - self.assertListEqual( - tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), - # fmt: off - ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], - # fmt: on - ) + self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_trim_whitespace(self): @@ -327,7 +303,7 @@ def test_jumanpp_tokenizer_ext(self): ) def test_wordpiece_tokenizer(self): - vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] + vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] # fmt: skip vocab = {} for i, token in enumerate(vocab_tokens): @@ -340,14 +316,14 @@ def test_wordpiece_tokenizer(self): self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"]) - self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"]) + self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"]) # fmt: skip def test_sentencepiece_tokenizer(self): tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp") subword_tokenizer = tokenizer.subword_tokenizer tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。") - self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"]) + self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"]) # fmt: skip tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは") self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"]) @@ -401,9 +377,7 @@ def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character") tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。") - self.assertListEqual( - tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] - ) + self.assertListEqual(tokens, ["こ", "ん", 
"に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]) # fmt: skip self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) diff --git a/tests/models/big_bird/test_tokenization_big_bird.py b/tests/models/big_bird/test_tokenization_big_bird.py index fd4323cb0f57..23b25e402942 100644 --- a/tests/models/big_bird/test_tokenization_big_bird.py +++ b/tests/models/big_bird/test_tokenization_big_bird.py @@ -171,9 +171,7 @@ def test_tokenization_base_hard_symbols(self): 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' " add words that should not exsist and be tokenized to , such as saoneuhaoesuth" ) - # fmt: off - original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231 - # fmt: on + original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # fmt: skip self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @require_torch @@ -225,9 +223,7 @@ def test_special_tokens(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py index 5d090c5785fb..95160179c204 100644 --- a/tests/models/bloom/test_modeling_bloom.py +++ b/tests/models/bloom/test_modeling_bloom.py @@ -699,9 +699,7 @@ def test_embeddings(self): }, } - # fmt: off - EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] - # fmt: on + EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] # fmt: skip EMBEDDINGS_DS_AFTER_LN_MEAN = { 3478: -6.580352783203125e-05, @@ -782,9 +780,7 @@ def test_hidden_states_transformers(self): ) model.eval() - # fmt: off - EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] - # fmt: on + EXAMPLE_IDS = [3478, 368, 
109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] # fmt: skip MEAN_VALUE_LAST_LM = -4.3392181396484375e-05 MIN_MAX_DICT = {"min": -2.0625, "max": 2.75} @@ -812,9 +808,7 @@ def test_logits(self): ) # load in bf16 model.eval() - # fmt: off - EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] - # fmt: on + EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] # fmt: skip MEAN_LOGITS_GPU_1 = -1.823902130126953e-05 MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05 diff --git a/tests/models/byt5/test_tokenization_byt5.py b/tests/models/byt5/test_tokenization_byt5.py index 486f9d1747fc..dcda3e3bf7a2 100644 --- a/tests/models/byt5/test_tokenization_byt5.py +++ b/tests/models/byt5/test_tokenization_byt5.py @@ -119,9 +119,7 @@ def test_multibytes_char(self): def test_prepare_batch_integration(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] - # fmt: off - expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] - # fmt: on + expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) @@ -160,10 +158,8 @@ def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. "] tgt_text = ["Summary of the text. 
"] - # fmt: off - expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] - expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] - # fmt: on + expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] # fmt: skip + expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: skip batch = tokenizer(src_text, text_target=tgt_text) diff --git a/tests/models/camembert/test_tokenization_camembert.py b/tests/models/camembert/test_tokenization_camembert.py index 8ece3b04f494..7f72d304d5c0 100644 --- a/tests/models/camembert/test_tokenization_camembert.py +++ b/tests/models/camembert/test_tokenization_camembert.py @@ -115,9 +115,7 @@ def test_rust_and_python_full_tokenizers(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip # camembert is a french model. So we also use french texts. 
sequences = [ diff --git a/tests/models/canine/test_tokenization_canine.py b/tests/models/canine/test_tokenization_canine.py index bfa5ae28aaa4..2d9ffa797168 100644 --- a/tests/models/canine/test_tokenization_canine.py +++ b/tests/models/canine/test_tokenization_canine.py @@ -49,9 +49,7 @@ def get_tokenizer(self, **kwargs) -> CanineTokenizer: def test_prepare_batch_integration(self): tokenizer = self.canine_tokenizer src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."] - # fmt: off - expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0] - # fmt: on + expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) diff --git a/tests/models/chinese_clip/test_modeling_chinese_clip.py b/tests/models/chinese_clip/test_modeling_chinese_clip.py index c2030ebec126..8d0eb131e238 100644 --- a/tests/models/chinese_clip/test_modeling_chinese_clip.py +++ b/tests/models/chinese_clip/test_modeling_chinese_clip.py @@ -714,9 +714,9 @@ def test_inference(self): processor = ChineseCLIPProcessor.from_pretrained(model_name) image = prepare_img() - inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, padding=True, return_tensors="pt").to( - torch_device - ) + inputs = processor( + text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, padding=True, return_tensors="pt" + ).to(torch_device) # forward pass with torch.no_grad(): diff --git a/tests/models/clip/test_processor_clip.py b/tests/models/clip/test_processor_clip.py index 59e1f135462b..fb88ef270532 100644 --- a/tests/models/clip/test_processor_clip.py +++ b/tests/models/clip/test_processor_clip.py @@ -38,9 +38,7 @@ class CLIPProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() - # fmt: off - vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] - # fmt: on + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w", "e r", ""] self.special_tokens_map = {"unk_token": ""} diff --git a/tests/models/clip/test_tokenization_clip.py b/tests/models/clip/test_tokenization_clip.py index 27387be42bad..4f1d9a73ef0f 100644 --- a/tests/models/clip/test_tokenization_clip.py +++ b/tests/models/clip/test_tokenization_clip.py @@ -36,9 +36,7 @@ class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase): def setUp(self): super().setUp() - # fmt: off - vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] - # fmt: on + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w", "e r"] self.special_tokens_map = {"unk_token": ""} diff --git 
a/tests/models/clipseg/test_processor_clipseg.py b/tests/models/clipseg/test_processor_clipseg.py index 2bc82dd022cb..e33049b2768f 100644 --- a/tests/models/clipseg/test_processor_clipseg.py +++ b/tests/models/clipseg/test_processor_clipseg.py @@ -38,9 +38,7 @@ class CLIPSegProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() - # fmt: off - vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] - # fmt: on + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w", "e r", ""] self.special_tokens_map = {"unk_token": ""} diff --git a/tests/models/code_llama/test_tokenization_code_llama.py b/tests/models/code_llama/test_tokenization_code_llama.py index 7452990ba755..a72322802396 100644 --- a/tests/models/code_llama/test_tokenization_code_llama.py +++ b/tests/models/code_llama/test_tokenization_code_llama.py @@ -289,9 +289,7 @@ def test_special_tokens_initialization(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[1, 4103, 689, 414, 313, 24784, 368, 2998, 408, 282, 3637, 25350, 29899, 9067, 414, 322, 282, 3637, 25350, 29899, 1457, 3018, 1312, 29899, 2151, 29897, 8128, 2498, 29899, 15503, 4220, 6956, 1973, 313, 13635, 29911, 29892, 402, 7982, 29899, 29906, 29892, 1528, 13635, 29911, 29874, 29892, 1060, 26369, 29892, 6652, 309, 29933, 814, 29892, 1060, 29931, 6779, 11410, 363, 18385, 17088, 7634, 11235, 313, 25103, 29965, 29897, 322, 18385, 17088, 28203, 313, 25103, 29954, 29897, 411, 975, 29871, 29941, 29906, 29974, 758, 3018, 1312, 4733, 297, 29871, 29896, 29900, 29900, 29974, 10276, 322, 6483, 1006, 3372, 3097, 1546, 435, 1165, 29892, 10772, 29911, 25350, 322, 323, 6073, 17907, 29889], [1, 350, 20161, 338, 8688, 304, 758, 29899, 14968, 6483, 21000, 8684, 284, 22540, 515, 443, 29880, 24025, 1426, 491, 14002, 368, 4195, 292, 373, 1716, 2175, 322, 1492, 3030, 297, 599, 15359, 29889], [1, 450, 4996, 17354, 1701, 29916, 432, 17204, 975, 278, 17366, 11203, 29889]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} - # fmt: on + expected_encoding = {'input_ids': [[1, 4103, 689, 414, 313, 24784, 368, 2998, 408, 282, 3637, 25350, 29899, 9067, 414, 322, 282, 3637, 25350, 29899, 1457, 3018, 1312, 29899, 2151, 29897, 8128, 2498, 29899, 15503, 4220, 6956, 1973, 313, 13635, 29911, 29892, 402, 7982, 29899, 29906, 29892, 1528, 13635, 29911, 29874, 29892, 1060, 26369, 29892, 6652, 309, 29933, 814, 29892, 1060, 29931, 6779, 11410, 363, 18385, 17088, 7634, 11235, 313, 25103, 29965, 29897, 322, 18385, 17088, 28203, 313, 25103, 29954, 29897, 411, 975, 29871, 29941, 29906, 29974, 758, 3018, 1312, 4733, 297, 29871, 29896, 29900, 29900, 29974, 10276, 322, 6483, 1006, 3372, 3097, 1546, 435, 1165, 29892, 10772, 29911, 25350, 322, 323, 6073, 17907, 29889], [1, 350, 20161, 338, 
8688, 304, 758, 29899, 14968, 6483, 21000, 8684, 284, 22540, 515, 443, 29880, 24025, 1426, 491, 14002, 368, 4195, 292, 373, 1716, 2175, 322, 1492, 3030, 297, 599, 15359, 29889], [1, 450, 4996, 17354, 1701, 29916, 432, 17204, 975, 278, 17366, 11203, 29889]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, @@ -409,8 +407,8 @@ def test_simple_encode_decode(self): self.assertEqual(rust_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), "This is a test") # bytefallback showcase - self.assertEqual(pyth_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) - self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) + self.assertEqual(pyth_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) # fmt: skip + self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) # fmt: skip self.assertEqual( pyth_tokenizer.decode( [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True diff --git a/tests/models/deberta_v2/test_tokenization_deberta_v2.py b/tests/models/deberta_v2/test_tokenization_deberta_v2.py index 404aaa9e7e11..c75f45bfe8d4 100644 --- a/tests/models/deberta_v2/test_tokenization_deberta_v2.py +++ b/tests/models/deberta_v2/test_tokenization_deberta_v2.py @@ -252,9 +252,7 @@ def test_sequence_builders(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py b/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py index 1d8d4e985b6e..48d9a03e5789 100644 --- a/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py +++ b/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py @@ -474,7 +474,7 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, nam def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): - if type(key) == bool: + if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) diff --git a/tests/models/ernie_m/test_tokenization_ernie_m.py b/tests/models/ernie_m/test_tokenization_ernie_m.py index 2e06bb20c10c..19f144df4532 100644 --- a/tests/models/ernie_m/test_tokenization_ernie_m.py +++ b/tests/models/ernie_m/test_tokenization_ernie_m.py @@ -125,9 +125,7 @@ def test_sequence_builders(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 9, 304, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 5, 5, 5, 16, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 6460, 1328, 4589, 42, 122009, 115774, 23, 3559, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} - # fmt: on + expected_encoding = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 9, 304, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 5, 5, 5, 16, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 6460, 1328, 4589, 42, 122009, 115774, 23, 3559, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 
9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/esm/test_tokenization_esm.py b/tests/models/esm/test_tokenization_esm.py index 242f6d77081f..539baaf34150 100644 --- a/tests/models/esm/test_tokenization_esm.py +++ b/tests/models/esm/test_tokenization_esm.py @@ -32,9 +32,7 @@ class ESMTokenizationTest(unittest.TestCase): def setUp(self): super().setUp() self.tmpdirname = tempfile.mkdtemp() - # fmt: off - vocab_tokens: List[str] = ["", "", "", "", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "", ""] # noqa: E501 - # fmt: on + vocab_tokens: List[str] = ["", "", "", "", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "", ""] # fmt: skip self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) diff --git a/tests/models/flava/test_processor_flava.py b/tests/models/flava/test_processor_flava.py index f89d7edfaa9a..a83e459153d5 100644 --- a/tests/models/flava/test_processor_flava.py +++ b/tests/models/flava/test_processor_flava.py @@ -45,9 +45,7 @@ class FlavaProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() - # fmt: off - vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] - # fmt: on + vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: skip self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: diff --git a/tests/models/fnet/test_modeling_fnet.py b/tests/models/fnet/test_modeling_fnet.py index ba89542ee66f..83b84edddccd 100644 --- a/tests/models/fnet/test_modeling_fnet.py +++ b/tests/models/fnet/test_modeling_fnet.py @@ -560,9 +560,7 @@ def test_inference_long_sentence(self): max_length=512, ) - # fmt: off - torch.testing.assert_allclose(inputs["input_ids"], torch.tensor([[4, 13, 283, 2479, 106, 8, 6, 845, 5, 168, 65, 367, 6, 845, 5, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 
3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3]])) - # fmt: on + torch.testing.assert_allclose(inputs["input_ids"], torch.tensor([[4, 13, 283, 2479, 106, 8, 6, 845, 5, 168, 65, 367, 6, 845, 5, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3]])) # fmt: skip inputs = {k: v.to(torch_device) for k, v in inputs.items()} diff --git a/tests/models/fnet/test_tokenization_fnet.py b/tests/models/fnet/test_tokenization_fnet.py index 17fe3e0dd308..85080efc3e59 100644 --- a/tests/models/fnet/test_tokenization_fnet.py +++ b/tests/models/fnet/test_tokenization_fnet.py @@ -425,11 +425,15 @@ def assert_batch_padded_input_match( model_main_input_name: str = "input_ids", ): for i_r in input_r.values(): - self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual( - len(i_r[1]), max_length + ( + self.assertEqual(len(i_r), 2), + self.assertEqual(len(i_r[0]), max_length), + self.assertEqual(len(i_r[1]), max_length), ) - self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual( - len(i_r[1]), max_length + ( + self.assertEqual(len(i_r), 2), + self.assertEqual(len(i_r[0]), max_length), + self.assertEqual(len(i_r[1]), max_length), ) for i_r, i_p in 
zip(input_r[model_main_input_name], input_p[model_main_input_name]): @@ -437,9 +441,7 @@ def assert_batch_padded_input_match( @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[4, 4616, 107, 163, 328, 14, 63, 1726, 106, 11954, 16659, 23, 83, 16688, 11427, 328, 107, 36, 11954, 16659, 23, 83, 16688, 6153, 82, 961, 16688, 3474, 16710, 1696, 2306, 16688, 10854, 2524, 3827, 561, 163, 3474, 16680, 62, 226, 2092, 16680, 379, 3474, 16660, 16680, 2436, 16667, 16671, 16680, 999, 87, 3474, 16680, 2436, 16667, 5208, 800, 16710, 68, 2018, 2959, 3037, 163, 16663, 11617, 16710, 36, 2018, 2959, 4737, 163, 16663, 16667, 16674, 16710, 91, 372, 5087, 16745, 2205, 82, 961, 3608, 38, 1770, 16745, 7984, 36, 2565, 751, 9017, 1204, 864, 218, 1244, 16680, 11954, 16659, 23, 83, 36, 14686, 23, 7619, 16678, 5], [4, 28, 532, 65, 1929, 33, 391, 16688, 3979, 9, 2565, 7849, 299, 225, 34, 2040, 305, 167, 289, 16667, 16078, 32, 1966, 181, 4626, 63, 10575, 71, 851, 1491, 36, 624, 4757, 38, 208, 8038, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], [4, 13, 1467, 5187, 26, 2521, 4567, 16664, 372, 13, 16209, 3314, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[4, 4616, 107, 163, 328, 14, 63, 1726, 106, 11954, 16659, 23, 83, 16688, 11427, 328, 107, 36, 11954, 16659, 23, 83, 16688, 6153, 82, 961, 16688, 3474, 16710, 1696, 2306, 16688, 10854, 2524, 3827, 561, 163, 3474, 16680, 62, 226, 2092, 16680, 379, 3474, 16660, 16680, 2436, 16667, 16671, 16680, 999, 87, 3474, 16680, 2436, 16667, 5208, 800, 16710, 68, 2018, 2959, 3037, 163, 16663, 11617, 16710, 36, 2018, 2959, 4737, 163, 16663, 16667, 16674, 16710, 91, 372, 5087, 16745, 2205, 82, 961, 3608, 38, 1770, 16745, 7984, 36, 2565, 751, 9017, 1204, 864, 218, 1244, 16680, 11954, 16659, 23, 83, 36, 14686, 23, 7619, 16678, 5], [4, 28, 532, 65, 1929, 33, 391, 16688, 3979, 9, 2565, 7849, 299, 225, 34, 2040, 305, 167, 289, 16667, 16078, 32, 1966, 181, 4626, 63, 10575, 71, 851, 1491, 36, 624, 4757, 38, 208, 8038, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], [4, 13, 1467, 5187, 26, 2521, 4567, 16664, 372, 13, 16209, 3314, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/fuyu/test_modeling_fuyu.py b/tests/models/fuyu/test_modeling_fuyu.py index aaed47a2aea4..62d58127973d 100644 --- a/tests/models/fuyu/test_modeling_fuyu.py +++ b/tests/models/fuyu/test_modeling_fuyu.py @@ -343,9 +343,7 @@ def test_model_8b_chat_greedy_generation_bus_color(self): @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_chart_vqa(self): - # fmt: off - EXPECTED_TEXT_TOKENS = ["The","life expectancy","at","birth","of male","s in","","20","18","is","","80",".","7",".","\n","|ENDOFTEXT|",] - # fmt: on + EXPECTED_TEXT_TOKENS = ["The","life expectancy","at","birth","of male","s in","","20","18","is","","80",".","7",".","\n","|ENDOFTEXT|",] # fmt: skip expected_text_completion = " ".join(EXPECTED_TEXT_TOKENS) # TODO make sure the end string matches text_prompt_chart_vqa = "What is the highest life expectancy at birth of male?\n" diff --git a/tests/models/gpt2/test_modeling_gpt2.py b/tests/models/gpt2/test_modeling_gpt2.py index 4c49c0b746dc..245b29d56a6c 100644 --- a/tests/models/gpt2/test_modeling_gpt2.py +++ b/tests/models/gpt2/test_modeling_gpt2.py @@ -736,11 +736,7 @@ def _test_lm_generate_gpt2_helper( input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog - # fmt: off - expected_output_ids = [ - 464, 3290, 373, 1043, 287, 257, 2214, 1474, 262, 16246, 286, 2688, 290, 2688, 27262, 13, 198, 198, 464, 3290, - ] - # fmt: on + expected_output_ids = [464, 3290, 373, 1043, 287, 257, 2214, 1474, 262, 16246, 286, 2688, 290, 2688, 27262, 13, 198, 198, 464, 3290,] # fmt: skip output_ids = model.generate(input_ids, do_sample=False) if verify_outputs: self.assertListEqual(output_ids[0].tolist(), expected_output_ids) diff --git a/tests/models/gpt_neo/test_modeling_gpt_neo.py b/tests/models/gpt_neo/test_modeling_gpt_neo.py index 075b9a26633c..d4a9dd90eb80 100644 --- 
a/tests/models/gpt_neo/test_modeling_gpt_neo.py +++ b/tests/models/gpt_neo/test_modeling_gpt_neo.py @@ -537,10 +537,8 @@ def test_lm_generate_gpt_neo(self): else: model.gradient_checkpointing_disable() input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog - # fmt: off # The dog-eared copy of the book, which is a collection of essays by the late author, - expected_output_ids = [464, 3290, 12, 3380, 4866, 286, 262, 1492, 11, 543, 318, 257, 4947, 286, 27126, 416, 262, 2739, 1772, 11] - # fmt: on + expected_output_ids = [464, 3290, 12, 3380, 4866, 286, 262, 1492, 11, 543, 318, 257, 4947, 286, 27126, 416, 262, 2739, 1772, 11] # fmt: skip output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids) diff --git a/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py b/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py index fc78b8bdd420..752b7a0b8d55 100644 --- a/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py +++ b/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py @@ -238,7 +238,7 @@ def test_model_for_causal_lm(self): def test_generation(self): model_id = "abeja/gpt-neox-japanese-2.7b" - prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] + prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] # fmt: skip EXPECTED_OUTPUTS = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", diff --git a/tests/models/gpt_sw3/test_tokenization_gpt_sw3.py b/tests/models/gpt_sw3/test_tokenization_gpt_sw3.py index 040f6c771176..6f9fe0dfa95d 100644 --- a/tests/models/gpt_sw3/test_tokenization_gpt_sw3.py +++ b/tests/models/gpt_sw3/test_tokenization_gpt_sw3.py @@ -120,9 +120,7 @@ def test_tokenizer_integration(self): "Det är inget fel på Mr. 
Cool", ] - # fmt: off - expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} - # fmt: on + expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", diff --git a/tests/models/gptj/test_modeling_gptj.py b/tests/models/gptj/test_modeling_gptj.py index 4fc41ba6d310..42ded9c81ae0 100644 --- a/tests/models/gptj/test_modeling_gptj.py +++ b/tests/models/gptj/test_modeling_gptj.py @@ -534,10 +534,8 @@ def test_lm_generate_gptj(self): model.gradient_checkpointing_disable() model.to(torch_device) input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # 
The dog - # fmt: off # The dog is a man's best friend. It is a loyal companion, and it is a friend - expected_output_ids = [464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112, 15185, 11, 290, 340, 318, 257, 1545] - # fmt: on + expected_output_ids = [464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112, 15185, 11, 290, 340, 318, 257, 1545] # fmt: skip output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids) diff --git a/tests/models/gptj/test_modeling_tf_gptj.py b/tests/models/gptj/test_modeling_tf_gptj.py index 896df148058c..166bd3a45a9e 100644 --- a/tests/models/gptj/test_modeling_tf_gptj.py +++ b/tests/models/gptj/test_modeling_tf_gptj.py @@ -384,10 +384,8 @@ class TFGPTJModelLanguageGenerationTest(unittest.TestCase): def test_lm_generate_gptj(self): model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", from_pt=True) input_ids = tf.convert_to_tensor([[464, 3290]], dtype=tf.int32) # The dog - # fmt: off # The dog is a man's best friend. It is a loyal companion, and it is a friend - expected_output_ids = [464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112, 15185, 11, 290, 340, 318, 257, 1545] - # fmt: on + expected_output_ids = [464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112, 15185, 11, 290, 340, 318, 257, 1545] # fmt: skip output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) diff --git a/tests/models/gptsan_japanese/test_modeling_gptsan_japanese.py b/tests/models/gptsan_japanese/test_modeling_gptsan_japanese.py index 1a86e23fdccf..716b8b9fc6d3 100644 --- a/tests/models/gptsan_japanese/test_modeling_gptsan_japanese.py +++ b/tests/models/gptsan_japanese/test_modeling_gptsan_japanese.py @@ -220,8 +220,8 @@ def test_logits(self): outputs = model(input_ids) output_logits = outputs.logits.detach().cpu().numpy() # Output of original model created with mesh-tensoflow + # fmt: off target = [ - # fmt: off [-12.037839889526367, -12.433061599731445, -14.333840370178223, -12.450345993041992, -11.1661376953125, -11.930137634277344, -10.659740447998047, -12.909574508666992, -13.241043090820312, -13.398579597473145, -11.107524871826172, -12.3685941696167, -22.97943115234375, -10.481067657470703, -12.484030723571777, @@ -242,8 +242,8 @@ def test_logits(self): -10.113405227661133, -10.546867370605469, -10.04369068145752, -10.907809257507324, -10.504216194152832, -11.129199028015137, -10.151124000549316, -21.96586799621582, -9.086349487304688, -11.730339050292969, -10.460667610168457, -10.298049926757812, -10.784148216247559, -10.840693473815918, -22.03152847290039], - # fmt: on ] + # fmt: on target = np.array(target).flatten() predict = output_logits[0, :, :20].flatten() @@ -341,8 +341,8 @@ def test_spout_generation(self): input_ids_batch = tokenizer([input_text, input_text], return_tensors="pt").input_ids.to(torch_device) # spout from uniform and one-hot + spouts = [ - # fmt: off [0.87882208, 0.38426396, 0.33220248, 0.43890406, 0.16562252, 0.04803985, 0.211572 , 0.23188473, 0.37153068, 0.7836377 , 0.02160172, 0.38761719, 0.75290772, 0.90198857, 0.34365777, @@ -378,8 +378,7 @@ def test_spout_generation(self): 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - # fmt: on - ] + ] # fmt: skip output1 = model.generate( input_ids=input_ids, diff --git 
a/tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py b/tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py index 2c6fd962edbd..6d656b2d0ff0 100644 --- a/tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py +++ b/tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py @@ -36,9 +36,7 @@ class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): def setUp(self): super().setUp() - # fmt: off - vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"] - # fmt: on + vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"] # fmt: skip emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀 self.special_tokens_map = {"unk_token": "<unk>"}
diff --git a/tests/models/instructblip/test_modeling_instructblip.py b/tests/models/instructblip/test_modeling_instructblip.py index 3b7dc002aff7..0af427c3586d 100644 --- a/tests/models/instructblip/test_modeling_instructblip.py +++ b/tests/models/instructblip/test_modeling_instructblip.py @@ -567,9 +567,7 @@ def test_inference_vicuna_7b(self): outputs = model.generate(**inputs, max_new_tokens=30) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() - # fmt: off - expected_outputs = [2, 450, 22910, 9565, 310, 445, 1967, 338, 393, 263, 767, 338, 13977, 292, 22095, 373, 278, 1250, 310, 263, 13328, 20134, 29963, 1550, 19500, 1623, 263, 19587, 4272, 11952, 29889] - # fmt: on + expected_outputs = [2, 450, 22910, 9565, 310, 445, 1967, 338, 393, 263, 767, 338, 13977, 292, 22095, 373, 278, 1250, 310, 263, 13328, 20134, 29963, 1550, 19500, 1623, 263, 19587, 4272, 11952, 29889] # fmt: skip self.assertEqual(outputs[0].tolist(), expected_outputs) self.assertEqual( generated_text, @@ -606,9 +604,7 @@ def test_inference_flant5_xl(self): ) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0] - # fmt: off - expected_outputs = [0, 37, 1023, 9850, 7, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4459, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 5119, 3, 9, 4459, 8677, 28, 3, 9, 2756, 4459, 6177, 6, 11, 3, 88, 19, 338, 46, 3575, 53, 1476, 12, 743, 112, 2491, 5, 37, 1023, 19, 7225, 788, 12, 8, 685, 24, 34, 1267, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 94, 19, 487, 24, 8, 388, 19, 1119, 12, 1097, 540, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 6, 68, 34, 19, 92, 487, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 3, 13865, 13, 8, 1053, 21, 8, 388, 31, 7, 2874, 6, 34, 19, 964, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 1] - # fmt: on + expected_outputs = [0, 37, 1023, 9850, 7, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4459, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 5119, 3, 9, 4459, 8677, 28, 3, 9, 2756, 4459, 6177, 6, 11, 3, 88, 19, 338, 46, 3575, 53, 1476, 12, 743, 112, 2491, 5, 37, 1023, 19, 7225, 788, 12, 8, 685, 24, 34, 1267, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 94, 19, 487, 24, 8, 388, 19, 1119, 12, 1097, 540, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 6, 68, 34, 19, 92, 487, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 3, 13865, 13, 8, 1053, 21, 8, 388, 31, 7, 2874, 6, 34, 19, 964, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 1] # fmt: skip self.assertEqual(outputs[0].tolist(), expected_outputs) self.assertEqual( generated_text, diff --git a/tests/models/layoutlmv2/test_processor_layoutlmv2.py b/tests/models/layoutlmv2/test_processor_layoutlmv2.py index 91a8da9cafb0..61d8e2e195d8 100644 --- a/tests/models/layoutlmv2/test_processor_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_processor_layoutlmv2.py @@ -241,9
+241,7 @@ def test_processor_case_1(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231 - # fmt: on + expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -261,9 +259,7 @@ def test_processor_case_1(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. 
itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # noqa: E231 - # fmt: on + expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # fmt: skip decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -408,9 +404,7 @@ def test_processor_case_4(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. 
questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231 - # fmt: on + expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -432,9 +426,7 @@ def test_processor_case_4(self): self.assertSequenceEqual(decoding, expected_decoding) # verify bbox - # fmt: off - expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # noqa: E231 - # fmt: on + expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # fmt: skip self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) @slow diff --git a/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py b/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py index ca90eb9641ea..3360933be678 100644 --- a/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py @@ -2412,9 +2412,7 @@ def test_layoutlmv2_integration_test(self): # CASE 1: not batched words, boxes = self.get_words_and_boxes() - # fmt: off - expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # 
noqa: E231 - # fmt: on + expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) @@ -2424,9 +2422,7 @@ def test_layoutlmv2_integration_test(self): # CASE 1: batched words, boxes = self.get_words_and_boxes_batch() - # fmt: off - expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) @@ -2437,9 +2433,7 @@ def test_layoutlmv2_integration_test(self): words, boxes = self.get_words_and_boxes() 
word_labels = [1, 2, 3] - # fmt: off - expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) @@ -2450,9 +2444,7 @@ def test_layoutlmv2_integration_test(self): words, boxes = self.get_words_and_boxes_batch() word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]] - # fmt: off - expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 
237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) @@ -2462,9 +2454,7 @@ def test_layoutlmv2_integration_test(self): # CASE 3: not batched question, words, boxes = self.get_question_words_and_boxes() - # fmt: off - expected_results = {'input_ids': [101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20) @@ -2474,9 +2464,7 @@ def test_layoutlmv2_integration_test(self): # CASE 3: batched questions, words, boxes = self.get_question_words_and_boxes_batch() - # fmt: off - expected_results = {'input_ids': [[101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], [101, 2129, 2003, 2002, 2170, 1029, 102, 2054, 1037, 21110, 2546, 3806, 2102, 2078, 102, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 
1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], [101, 2129, 2003, 2002, 2170, 1029, 102, 2054, 1037, 21110, 2546, 3806, 2102, 2078, 102, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20) diff --git a/tests/models/layoutlmv3/test_processor_layoutlmv3.py b/tests/models/layoutlmv3/test_processor_layoutlmv3.py index f649e0c275a2..0c7e0d666d81 100644 --- a/tests/models/layoutlmv3/test_processor_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_processor_layoutlmv3.py @@ -223,9 +223,7 @@ def test_processor_case_1(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = " 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. 
Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # noqa: E231 - # fmt: on + expected_decoding = " 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # fmt: skip decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -245,9 +243,7 @@ def test_processor_case_1(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = " 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223" # noqa: E231 - # fmt: on + expected_decoding = " 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? 
aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223" # fmt: skip decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -392,9 +388,7 @@ def test_processor_case_4(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = " What's his name? 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # noqa: E231 - # fmt: on + expected_decoding = " What's his name? 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. 
Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # fmt: skip decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -416,9 +410,7 @@ def test_processor_case_4(self): self.assertSequenceEqual(decoding, expected_decoding) # verify bbox - # fmt: off - expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [0, 0, 0, 0]] # noqa: E231 - # fmt: on + expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [0, 0, 0, 0]] # fmt: skip self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) @slow diff --git a/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py b/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py index 1e2bb6610e30..db02dc65d65c 100644 --- a/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py @@ -2329,9 +2329,7 @@ def test_layoutlmv3_integration_test(self): # CASE 1: not batched words, boxes = self.get_words_and_boxes() - # fmt: off - expected_results = {'input_ids': [0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) @@ -2341,9 +2339,7 @@ def test_layoutlmv3_integration_test(self): # CASE 1: batched words, boxes = self.get_words_and_boxes_batch() - # fmt: off - expected_results = {'input_ids': [[0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 92, 614, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 
0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 92, 614, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) @@ -2354,9 +2350,7 @@ def test_layoutlmv3_integration_test(self): words, boxes = self.get_words_and_boxes() word_labels = [1, 2] - # fmt: off - expected_results = {'input_ids': [0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) @@ -2367,9 +2361,7 @@ def test_layoutlmv3_integration_test(self): words, boxes = self.get_words_and_boxes_batch() word_labels = [[1, 2], [2, 46]] - # fmt: off - expected_results = {'input_ids': [[0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 92, 614, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], 
[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 92, 614, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) @@ -2379,9 +2371,7 @@ def test_layoutlmv3_integration_test(self): # # CASE 3: not batched question, words, boxes = self.get_question_words_and_boxes() - # fmt: off - expected_results = {'input_ids': [0, 99, 18, 39, 766, 116, 2, 2, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [0, 99, 18, 39, 766, 116, 2, 2, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(question, 
words, boxes, padding="max_length", max_length=20) @@ -2391,9 +2381,7 @@ def test_layoutlmv3_integration_test(self): # # CASE 3: batched questions, words, boxes = self.get_question_words_and_boxes_batch() - # fmt: off - expected_results = {'input_ids': [[0, 99, 18, 39, 766, 116, 2, 2, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 141, 16, 37, 373, 116, 2, 2, 13964, 795, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [256, 38, 330, 58], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[0, 99, 18, 39, 766, 116, 2, 2, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 141, 16, 37, 373, 116, 2, 2, 13964, 795, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [256, 38, 330, 58], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20) diff --git a/tests/models/layoutxlm/test_processor_layoutxlm.py b/tests/models/layoutxlm/test_processor_layoutxlm.py index 57c349b26b7f..240c2ae05c2d 100644 --- a/tests/models/layoutxlm/test_processor_layoutxlm.py +++ b/tests/models/layoutxlm/test_processor_layoutxlm.py @@ -238,9 +238,7 @@ def test_processor_case_1(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = " 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. 
Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # noqa: E231 - # fmt: on + expected_decoding = " 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # fmt: skip decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -260,9 +258,7 @@ def test_processor_case_1(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = " 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223" # noqa: E231 - # fmt: on + expected_decoding = " 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? 
aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223" # fmt: skip decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -407,9 +403,7 @@ def test_processor_case_4(self): # verify input_ids # this was obtained with Tesseract 4.1.1 - # fmt: off - expected_decoding = " What's his name? 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # noqa: E231 - # fmt: on + expected_decoding = " What's his name? 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. 
Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer" # fmt: skip decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -431,9 +425,7 @@ def test_processor_case_4(self): self.assertSequenceEqual(decoding, expected_decoding) # verify bbox - # fmt: off - expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [1000, 1000, 1000, 1000]] # noqa: E231 - # fmt: on + expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [1000, 1000, 1000, 1000]] # fmt: skip self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) @slow diff --git a/tests/models/layoutxlm/test_tokenization_layoutxlm.py b/tests/models/layoutxlm/test_tokenization_layoutxlm.py index 0b502748d131..086bbc6ba0bc 100644 --- a/tests/models/layoutxlm/test_tokenization_layoutxlm.py +++ b/tests/models/layoutxlm/test_tokenization_layoutxlm.py @@ -1876,9 +1876,7 @@ def test_layoutxlm_integration_test(self): # CASE 1: not batched words, boxes = self.get_words_and_boxes() - # fmt: off - expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) @@ -1888,9 +1886,7 @@ def test_layoutxlm_integration_test(self): # CASE 1: batched words, boxes = self.get_words_and_boxes_batch() - # fmt: off - expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 
0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) @@ -1901,9 +1897,7 @@ def test_layoutxlm_integration_test(self): words, boxes = self.get_words_and_boxes() word_labels = [1, 2, 3] - # fmt: off - expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) @@ -1914,9 +1908,7 @@ def test_layoutxlm_integration_test(self): words, boxes = self.get_words_and_boxes_batch() word_labels = [[1, 2, 3], [2, 46, 17, 22, 
3]] - # fmt: off - expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) @@ -1926,9 +1918,7 @@ def test_layoutxlm_integration_test(self): # CASE 3: not batched question, words, boxes = self.get_question_words_and_boxes() - # fmt: off - expected_results = {'input_ids': [0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]} # noqa: E231 - # fmt: on + expected_results = {'input_ids': [0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 
10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20) @@ -1938,10 +1928,7 @@ def test_layoutxlm_integration_test(self): # CASE 3: batched questions, words, boxes = self.get_question_words_and_boxes_batch() - # fmt: off - expected_results = {'input_ids': [[0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], [0, 3642, 83, 764, 35839, 32, 2, 2, 2367, 10, 21, 3190, 53496, 19, 2, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]} # noqa: E231 - # fmt: on - + expected_results = {'input_ids': [[0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], [0, 3642, 83, 764, 35839, 32, 2, 2, 2367, 10, 21, 3190, 53496, 19, 2, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]} # fmt: skip encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index a646e5ab7a5c..21fb4f44d2b8 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -431,9 +431,7 @@ def test_model_7b_logits(self): EXPECTED_MEAN = torch.tensor([[-6.6550, 
-4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2) # slicing logits[0, 0, 0:30] - # fmt: off - EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,]) - # fmt: on + EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!") @@ -446,9 +444,7 @@ def test_model_13b_logits(self): EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2) # slicing logits[0, 0, 0:30] - # fmt: off - EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273]) - # fmt: on + EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!") @@ -461,9 +457,7 @@ def test_model_13bf_logits(self): EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2) # slicing logits[0, 0, 0:30] - # fmt: off - EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513]) - # fmt: on + EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513]) # fmt: skip torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2) @unittest.skip( @@ -479,9 +473,7 @@ def test_model_70b_logits(self): [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32 ) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2) - # fmt: off - EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, 
-5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312]) - # fmt: on + EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5) @unittest.skip("Model is curently gated") diff --git a/tests/models/llama/test_tokenization_llama.py b/tests/models/llama/test_tokenization_llama.py index e45cf253e68b..d77e56ed7d6d 100644 --- a/tests/models/llama/test_tokenization_llama.py +++ b/tests/models/llama/test_tokenization_llama.py @@ -282,9 +282,7 @@ def test_special_tokens_initialization(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[1, 4103, 689, 414, 313, 24784, 368, 2998, 408, 282, 3637, 25350, 29899, 9067, 414, 322, 282, 3637, 25350, 29899, 1457, 3018, 1312, 29899, 2151, 29897, 8128, 2498, 29899, 15503, 4220, 6956, 1973, 313, 13635, 29911, 29892, 402, 7982, 29899, 29906, 29892, 1528, 13635, 29911, 29874, 29892, 1060, 26369, 29892, 6652, 309, 29933, 814, 29892, 1060, 29931, 6779, 11410, 363, 18385, 17088, 7634, 11235, 313, 25103, 29965, 29897, 322, 18385, 17088, 28203, 313, 25103, 29954, 29897, 411, 975, 29871, 29941, 29906, 29974, 758, 3018, 1312, 4733, 297, 29871, 29896, 29900, 29900, 29974, 10276, 322, 6483, 1006, 3372, 3097, 1546, 435, 1165, 29892, 10772, 29911, 25350, 322, 323, 6073, 17907, 29889], [1, 350, 20161, 338, 8688, 304, 758, 29899, 14968, 6483, 21000, 8684, 284, 22540, 515, 443, 29880, 24025, 1426, 491, 14002, 368, 4195, 292, 373, 1716, 2175, 322, 1492, 3030, 297, 599, 15359, 29889], [1, 450, 4996, 17354, 1701, 29916, 432, 17204, 975, 278, 17366, 11203, 29889]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} - # fmt: on + expected_encoding = {'input_ids': [[1, 4103, 689, 414, 313, 24784, 368, 2998, 408, 282, 3637, 25350, 29899, 9067, 414, 322, 282, 3637, 25350, 29899, 1457, 3018, 1312, 29899, 2151, 29897, 8128, 2498, 29899, 15503, 4220, 6956, 1973, 313, 13635, 29911, 29892, 402, 7982, 29899, 29906, 29892, 1528, 13635, 29911, 29874, 29892, 1060, 26369, 29892, 6652, 309, 29933, 814, 29892, 1060, 29931, 6779, 11410, 363, 18385, 17088, 7634, 11235, 313, 25103, 29965, 29897, 322, 18385, 17088, 28203, 313, 25103, 29954, 29897, 411, 975, 29871, 29941, 29906, 29974, 758, 3018, 1312, 4733, 297, 29871, 29896, 29900, 29900, 29974, 10276, 322, 6483, 1006, 3372, 3097, 1546, 435, 1165, 29892, 10772, 29911, 25350, 322, 323, 6073, 17907, 29889], [1, 350, 20161, 338, 8688, 304, 758, 29899, 14968, 6483, 21000, 8684, 284, 22540, 515, 443, 29880, 24025, 1426, 491, 14002, 368, 4195, 292, 373, 1716, 2175, 322, 1492, 3030, 297, 599, 15359, 29889], [1, 450, 4996, 17354, 1701, 29916, 432, 17204, 975, 278, 17366, 11203, 29889]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, @@ -402,8 +400,8 @@ def test_simple_encode_decode(self): self.assertEqual(rust_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), "This is a test") # bytefallback showcase - self.assertEqual(pyth_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) - self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) + self.assertEqual(pyth_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) # fmt: skip + self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) # fmt: skip self.assertEqual( pyth_tokenizer.decode( [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True diff --git a/tests/models/lxmert/test_modeling_lxmert.py b/tests/models/lxmert/test_modeling_lxmert.py index 489d22d92efe..63d83de36b75 100644 --- a/tests/models/lxmert/test_modeling_lxmert.py +++ b/tests/models/lxmert/test_modeling_lxmert.py @@ -751,7 +751,7 @@ def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value) elif isinstance(value, (list, tuple)): tf_inputs_dict[key] = (self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value) - elif type(value) == bool: + elif isinstance(value, bool): tf_inputs_dict[key] = value elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) diff --git a/tests/models/lxmert/test_modeling_tf_lxmert.py b/tests/models/lxmert/test_modeling_tf_lxmert.py index 52f64d614423..d40ffbb3b968 100644 --- a/tests/models/lxmert/test_modeling_tf_lxmert.py +++ b/tests/models/lxmert/test_modeling_tf_lxmert.py @@ -499,7 +499,7 @@ def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value) elif isinstance(value, (list, tuple)): pt_inputs_dict[key] = (self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value) - elif type(key) == bool: + elif isinstance(key, bool): pt_inputs_dict[key] = value elif key == "input_values": pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32) diff --git a/tests/models/m2m_100/test_tokenization_m2m_100.py b/tests/models/m2m_100/test_tokenization_m2m_100.py index 13345a899f68..50087a7d9d27 100644 --- a/tests/models/m2m_100/test_tokenization_m2m_100.py +++ b/tests/models/m2m_100/test_tokenization_m2m_100.py @@ -116,9 +116,7 @@ def test_full_tokenizer(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 
5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, @@ -141,9 +139,7 @@ class M2M100TokenizerIntegrationTest(unittest.TestCase): "L'affaire NSA souligne l'absence totale de débat sur le renseignement", ] - # fmt: off - expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] - # fmt: on + expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] # fmt: skip @classmethod def setUpClass(cls): @@ -172,9 +168,7 @@ def test_tokenizer_batch_encode_plus(self): def test_tokenizer_decode_ignores_language_codes(self): self.assertIn(FR_CODE, self.tokenizer.all_special_ids) - # fmt: off - generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] - # fmt: on + generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_french) diff --git a/tests/models/marian/test_tokenization_marian.py b/tests/models/marian/test_tokenization_marian.py index f32026be1a0a..6fb3c9a85d03 100644 --- a/tests/models/marian/test_tokenization_marian.py +++ b/tests/models/marian/test_tokenization_marian.py @@ -121,10 +121,7 @@ def test_outputs_can_be_shorter(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 
58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on - + expected_encoding = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 
58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="Helsinki-NLP/opus-mt-en-de", diff --git a/tests/models/markuplm/test_processor_markuplm.py b/tests/models/markuplm/test_processor_markuplm.py index 3959b231ff74..aa5cb6253fb4 100644 --- a/tests/models/markuplm/test_processor_markuplm.py +++ b/tests/models/markuplm/test_processor_markuplm.py @@ -46,9 +46,7 @@ class MarkupLMProcessorTest(unittest.TestCase): def setUp(self): # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt - # fmt: off - vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "",] # noqa - # fmt: on + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "",] # fmt: skip self.tmpdirname = tempfile.mkdtemp() vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] @@ -339,15 +337,11 @@ def test_processor_case_3(self): self.assertSequenceEqual(inputs.input_ids[1].tolist(), expected_ids) # verify xpath_tags_seq - # fmt: off - expected_xpaths_tags_seq = [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]] # noqa: - # fmt: on + expected_xpaths_tags_seq = [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]] # fmt: skip self.assertSequenceEqual(inputs.xpath_tags_seq[1].tolist(), expected_xpaths_tags_seq) # verify labels - # fmt: off - expected_labels = [-100, 6, 3, 10, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100] - # fmt: on + expected_labels = [-100, 6, 3, 10, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100] # fmt: skip self.assertListEqual(inputs.labels[1].tolist(), expected_labels) @slow @@ -371,9 +365,7 @@ def test_processor_case_4(self): self.assertListEqual(actual_keys, expected_keys) # verify input_ids - # fmt: off - expected_decoding = "What's his name?Hello worldWelcomeHere is my website." # noqa: E231 - # fmt: on + expected_decoding = "What's his name?Hello worldWelcomeHere is my website." 
# fmt: skip decoding = processor.decode(inputs.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) @@ -401,9 +393,7 @@ def test_processor_case_4(self): self.assertSequenceEqual(decoding, expected_decoding) # verify xpath_subs_seq - # fmt: off - expected_xpath_subs_seq = [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]] - # fmt: on + expected_xpath_subs_seq = [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]] # fmt: skip self.assertListEqual(inputs.xpath_subs_seq[1].tolist(), expected_xpath_subs_seq) @slow @@ -457,7 +447,5 @@ def test_processor_case_5(self): self.assertSequenceEqual(decoding, expected_decoding) # verify xpath_subs_seq - # fmt: off - expected_xpath_subs_seq = [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]] - # fmt: on + expected_xpath_subs_seq = [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]] # fmt: skip self.assertListEqual(inputs.xpath_subs_seq[1].tolist()[-5:], expected_xpath_subs_seq) diff --git a/tests/models/markuplm/test_tokenization_markuplm.py b/tests/models/markuplm/test_tokenization_markuplm.py index d795aa2b2b9a..9d2af513e1a4 100644 --- a/tests/models/markuplm/test_tokenization_markuplm.py +++ b/tests/models/markuplm/test_tokenization_markuplm.py @@ -51,9 +51,7 @@ def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt - # fmt: off - vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "",] # noqa - # fmt: on + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "",] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.tags_dict = {"a": 0, "abbr": 1, "acronym": 2, "address": 3} @@ -2202,8 +2200,7 @@ def test_markuplm_integration_test(self): # CASE 1: not batched nodes, xpaths = self.get_nodes_and_xpaths() - # fmt: off - expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: on + expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding="max_length", max_length=20) encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding="max_length", max_length=20) @@ -2213,9 +2210,7 @@ def test_markuplm_integration_test(self): # CASE 1: batched nodes, xpaths = self.get_nodes_and_xpaths_batch() - # fmt: off - expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} - # fmt: on + expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding="max_length", max_length=20) encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding="max_length", max_length=20) @@ -2226,9 +2221,7 @@ def test_markuplm_integration_test(self): nodes, xpaths = self.get_nodes_and_xpaths() node_labels = [1, 2, 3] - # fmt: off - expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} - # fmt: on + expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) @@ -2239,8 +2232,7 @@ def test_markuplm_integration_test(self): nodes, xpaths = self.get_nodes_and_xpaths_batch() node_labels = [[1, 2, 3], [2, 46, 17, 22, 3]] - # fmt: off - expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, -100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on + expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, -100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) @@ -2250,9 +2242,7 @@ def test_markuplm_integration_test(self): # CASE 3: not batched question, nodes, xpaths = self.get_question_nodes_and_xpaths() - # fmt: off - expected_results = {'input_ids': [0, 12196, 18, 39, 766, 116, 2, 42891, 232, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} - # fmt: on + expected_results = {'input_ids': [0, 12196, 18, 39, 766, 116, 2, 42891, 232, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_p = tokenizer_p(question, nodes, xpaths, padding="max_length", max_length=20) encoding_r = tokenizer_r(question, nodes, xpaths, padding="max_length", max_length=20) @@ -2262,9 +2252,7 @@ def test_markuplm_integration_test(self): # CASE 3: batched questions, nodes, xpaths = self.get_question_nodes_and_xpaths_batch() - # fmt: off - expected_results = {'input_ids': [[0, 12196, 18, 39, 766, 116, 2, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], 
[0, 9178, 16, 37, 373, 116, 2, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]]} - # fmt: on + expected_results = {'input_ids': [[0, 12196, 18, 39, 766, 116, 2, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 9178, 16, 37, 373, 116, 2, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_p = tokenizer_p(questions, nodes, xpaths, padding="max_length", max_length=20) encoding_r = tokenizer_r(questions, nodes, xpaths, padding="max_length", max_length=20) diff --git a/tests/models/mask2former/test_image_processing_mask2former.py b/tests/models/mask2former/test_image_processing_mask2former.py index b3fe50164e5f..24d5b8cf89ec 100644 --- a/tests/models/mask2former/test_image_processing_mask2former.py +++ b/tests/models/mask2former/test_image_processing_mask2former.py @@ -391,13 +391,9 @@ def create_panoptic_map(annotation, segments_info): # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) - # fmt: off - expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor(expected_class_labels))) - # fmt: off - expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels)) # verify the mask labels diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index e7dc0077765a..e4779f896a7b 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ b/tests/models/maskformer/test_image_processing_maskformer.py @@ -391,13 +391,9 @@ def create_panoptic_map(annotation, segments_info): # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) - # fmt: off - expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor(expected_class_labels))) - # fmt: off - expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels)) # verify the mask labels diff --git a/tests/models/mbart50/test_tokenization_mbart50.py b/tests/models/mbart50/test_tokenization_mbart50.py index d10d51df907c..a5ba802b6c33 100644 --- a/tests/models/mbart50/test_tokenization_mbart50.py +++ b/tests/models/mbart50/test_tokenization_mbart50.py @@ -84,12 +84,7 @@ def test_full_tokenizer(self): ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") - self.assertListEqual( - tokens, - # fmt: off - [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", 
SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], - # fmt: on - ) + self.assertListEqual(tokens,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."]) # fmt: skip ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, @@ -100,18 +95,11 @@ def test_full_tokenizer(self): ) back_tokens = tokenizer.convert_ids_to_tokens(ids) - self.assertListEqual( - back_tokens, - # fmt: off - [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "", "."], - # fmt: on - ) + self.assertListEqual(back_tokens,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "", "."],) # fmt: skip @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/mgp_str/test_processor_mgp_str.py b/tests/models/mgp_str/test_processor_mgp_str.py index 7c373b447369..3fd7d8353486 100644 --- a/tests/models/mgp_str/test_processor_mgp_str.py +++ b/tests/models/mgp_str/test_processor_mgp_str.py @@ -52,9 +52,7 @@ def setUp(self): self.image_size = (3, 32, 128) self.tmpdirname = tempfile.mkdtemp() - # fmt: off - vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] - # fmt: on + vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) diff --git a/tests/models/mgp_str/test_tokenization_mgp_str.py b/tests/models/mgp_str/test_tokenization_mgp_str.py index a05d7f3cbf90..0d0e6bb0bf14 100644 --- a/tests/models/mgp_str/test_tokenization_mgp_str.py +++ b/tests/models/mgp_str/test_tokenization_mgp_str.py @@ -35,9 +35,7 @@ class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase): def setUp(self): super().setUp() - # fmt: off - vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] - # fmt: on + vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index 2989f4030465..b30e70ba71f9 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -450,9 +450,7 @@ def test_model_7b_logits(self): EXPECTED_MEAN = torch.tensor([[-2.5548, -2.5737, -3.0600, -2.5906, -2.8478, -2.8118, -2.9325, -2.7694]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2) # slicing logits[0, 0, 0:30] - # fmt: off - EXPECTED_SLICE = torch.tensor([-5.8781, -5.8616, -0.1052, -4.7200, -5.8781, -5.8774, -5.8773, -5.8777, -5.8781, -5.8780, -5.8781, -5.8779, -1.0787, 1.7583, -5.8779, -5.8780, -5.8783, -5.8778, -5.8776, -5.8781, -5.8784, -5.8778, -5.8778, -5.8777, -5.8779, -5.8778, -5.8776, -5.8780, -5.8779, -5.8781]) - # fmt: on + EXPECTED_SLICE = torch.tensor([-5.8781, -5.8616, -0.1052, -4.7200, -5.8781, -5.8774, -5.8773, -5.8777, -5.8781, -5.8780, -5.8781, -5.8779, -1.0787, 1.7583, -5.8779, -5.8780, -5.8783, -5.8778, -5.8776, -5.8781, -5.8784, -5.8778, -5.8778, -5.8777, -5.8779, -5.8778, -5.8776, -5.8780, -5.8779, -5.8781]) # fmt: skip print(out[0, 0, :30]) torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4) diff --git a/tests/models/mvp/test_modeling_mvp.py b/tests/models/mvp/test_modeling_mvp.py index ab9dbd95f751..3e0a48023718 100644 --- a/tests/models/mvp/test_modeling_mvp.py +++ b/tests/models/mvp/test_modeling_mvp.py @@ -572,9 +572,7 @@ def test_inference_no_head(self): def test_summarization_inference(self): model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp").to(torch_device) tok = self.default_tokenizer - # fmt: off - PGE_ARTICLE = """ Listen to local radio broadcasts for advertisements that reference casinos in your area.\nIf none are in your area, listen to national radio broadcasts for advertisements of casinos in other areas.\nNote the location that is mentioned in each advertisement that involves a casino.\nIf no locations are mentioned, note any additional contact information, such as a website or phone number. Use that information to find out where the casinos are.;\n,\n\nIf you learn about more than 1 casino on the radio, use the Internet to search the distance between your location and each casino. 
Sites such as maps.google.com or mapquest.com will help you in this search.'""" - # fmt: on + PGE_ARTICLE = """ Listen to local radio broadcasts for advertisements that reference casinos in your area.\nIf none are in your area, listen to national radio broadcasts for advertisements of casinos in other areas.\nNote the location that is mentioned in each advertisement that involves a casino.\nIf no locations are mentioned, note any additional contact information, such as a website or phone number. Use that information to find out where the casinos are.;\n,\n\nIf you learn about more than 1 casino on the radio, use the Internet to search the distance between your location and each casino. Sites such as maps.google.com or mapquest.com will help you in this search.'""" # fmt: skip EXPECTED_SUMMARY = "Listen to the radio.\nUse the Internet." dct = tok.batch_encode_plus( [PGE_ARTICLE], diff --git a/tests/models/nllb/test_tokenization_nllb.py b/tests/models/nllb/test_tokenization_nllb.py index 8b6605204cbe..10e2a47be8d9 100644 --- a/tests/models/nllb/test_tokenization_nllb.py +++ b/tests/models/nllb/test_tokenization_nllb.py @@ -345,9 +345,7 @@ def test_enro_tokenizer_batch_encode_plus(self): def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) - # fmt: off - generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] - # fmt: on + generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) diff --git a/tests/models/nllb_moe/test_modeling_nllb_moe.py b/tests/models/nllb_moe/test_modeling_nllb_moe.py index c3bf17ed8e69..1109948e0e70 100644 --- a/tests/models/nllb_moe/test_modeling_nllb_moe.py +++ b/tests/models/nllb_moe/test_modeling_nllb_moe.py @@ -408,9 +408,7 @@ def test_inference_logits(self): with torch.no_grad(): output = model(**self.model_inputs) - # fmt: off - EXPECTED_LOGTIS = torch.Tensor([-0.3059, 0.0000, 9.3029, 0.6456, -0.9148, 1.7836, 0.6478, 0.9438, -0.5272, -0.6617, -1.2717, 0.4564, 0.1345, -0.2301, -1.0140, 1.1427, -1.5535, 0.1337, 0.2082, -0.8112, -0.3842, -0.3377, 0.1256, 0.6450, -0.0452, 0.0219, 1.4274, -0.4991, -0.2063, -0.4409,]) - # fmt: on + EXPECTED_LOGTIS = torch.Tensor([-0.3059, 0.0000, 9.3029, 0.6456, -0.9148, 1.7836, 0.6478, 0.9438, -0.5272, -0.6617, -1.2717, 0.4564, 0.1345, -0.2301, -1.0140, 1.1427, -1.5535, 0.1337, 0.2082, -0.8112, -0.3842, -0.3377, 0.1256, 0.6450, -0.0452, 0.0219, 1.4274, -0.4991, -0.2063, -0.4409,]) # fmt: skip torch.testing.assert_allclose(output.logits[1, 0, :30], EXPECTED_LOGTIS, rtol=6e-3, atol=9e-3) @unittest.skip("This requires 300GB of RAM") @@ -474,6 +472,7 @@ class NllbMoeRouterTest(unittest.TestCase): Original implementation of the routers here: """ + config = NllbMoeConfig( num_experts=4, hidden_size=32, @@ -516,9 +515,7 @@ def test_top_2_routing(self): masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output) hidden_states = masked_hidden_states.sum(dim=0).reshape(self.batch_size, self.sequence_length, hidden_dim) - # fmt: off - EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES = torch.Tensor([[ 7.0340e-04, 2.7997e-03, -1.3351e-02, -7.6705e-03, -3.5089e-03,3.9773e-03, 7.4593e-03, 1.2566e-02, 3.5860e-03, -2.7448e-02,-1.3731e-02, -1.0534e-02, 
-1.3606e-02, -1.5048e-02, -2.8914e-03,-5.0371e-03, -1.3963e-03, 6.0076e-03, -1.1380e-02, -1.4620e-02, 5.2401e-03, 8.4660e-04, -1.5319e-03, -1.6735e-02, 1.1302e-02, 3.6119e-03, 4.6084e-03, -1.3458e-02, 7.7792e-05, 1.4312e-02, 4.9107e-03, -5.0936e-03], [-4.4538e-03, 3.1026e-03, 1.4121e-04, -4.8121e-03, -5.6279e-03, 7.2493e-03, 3.9769e-03, 1.1114e-02, -1.5666e-03, -2.3477e-02, 8.7268e-03, 1.3446e-02, -2.8845e-05, -1.7287e-02, 8.7619e-03, -4.5316e-03, -1.2164e-02, 5.7461e-03, -4.5861e-03, -9.3907e-03, 2.9808e-02, 8.9206e-04, -7.6232e-04, -1.4173e-02, 3.0208e-03, 1.5310e-02, 9.7717e-03, 3.1014e-03, 7.8042e-03, 8.0197e-03, 3.4784e-03, -7.1728e-03]]) - # fmt: on + EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES = torch.Tensor([[ 7.0340e-04, 2.7997e-03, -1.3351e-02, -7.6705e-03, -3.5089e-03,3.9773e-03, 7.4593e-03, 1.2566e-02, 3.5860e-03, -2.7448e-02,-1.3731e-02, -1.0534e-02, -1.3606e-02, -1.5048e-02, -2.8914e-03,-5.0371e-03, -1.3963e-03, 6.0076e-03, -1.1380e-02, -1.4620e-02, 5.2401e-03, 8.4660e-04, -1.5319e-03, -1.6735e-02, 1.1302e-02, 3.6119e-03, 4.6084e-03, -1.3458e-02, 7.7792e-05, 1.4312e-02, 4.9107e-03, -5.0936e-03], [-4.4538e-03, 3.1026e-03, 1.4121e-04, -4.8121e-03, -5.6279e-03, 7.2493e-03, 3.9769e-03, 1.1114e-02, -1.5666e-03, -2.3477e-02, 8.7268e-03, 1.3446e-02, -2.8845e-05, -1.7287e-02, 8.7619e-03, -4.5316e-03, -1.2164e-02, 5.7461e-03, -4.5861e-03, -9.3907e-03, 2.9808e-02, 8.9206e-04, -7.6232e-04, -1.4173e-02, 3.0208e-03, 1.5310e-02, 9.7717e-03, 3.1014e-03, 7.8042e-03, 8.0197e-03, 3.4784e-03, -7.1728e-03]]) # fmt: skip self.assertTrue(torch.allclose(hidden_states.mean(1), EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES, 1e-4)) def test_batch_prioritized_routing(self): diff --git a/tests/models/oneformer/test_processor_oneformer.py b/tests/models/oneformer/test_processor_oneformer.py index f6d976438106..6c30ee94d460 100644 --- a/tests/models/oneformer/test_processor_oneformer.py +++ b/tests/models/oneformer/test_processor_oneformer.py @@ -499,13 +499,9 @@ def create_panoptic_map(annotation, segments_info): # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) - # fmt: off - expected_class_labels = torch.tensor([4, 17, 32, 42, 12, 3, 5, 0, 43, 96, 104, 31, 125, 138, 87, 149]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([4, 17, 32, 42, 12, 3, 5, 0, 43, 96, 104, 31, 125, 138, 87, 149]) # noqa: E231 # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][0], expected_class_labels)) - # fmt: off - expected_class_labels = torch.tensor([19, 67, 82, 17, 12, 42, 3, 14, 5, 0, 115, 43, 8, 138, 125, 143]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([19, 67, 82, 17, 12, 42, 3, 14, 5, 0, 115, 43, 8, 138, 125, 143]) # noqa: E231 # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels)) # verify the task inputs @@ -591,13 +587,9 @@ def create_panoptic_map(annotation, segments_info): # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) - # fmt: off - expected_class_labels = torch.tensor([32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 43, 43, 43, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 43, 43, 43, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][0], expected_class_labels)) - # fmt: off - expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 12, 12, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 12, 12, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels)) # verify the task inputs @@ -683,13 +675,9 @@ def create_panoptic_map(annotation, segments_info): # verify the class labels self.assertEqual(len(inputs["class_labels"]), 2) - # fmt: off - expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][0], expected_class_labels)) - # fmt: off - expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # noqa: E231 - # fmt: on + expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels)) # verify the task inputs diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py index d9913596ac5c..51d9e5378409 100644 --- a/tests/models/owlv2/test_modeling_owlv2.py +++ b/tests/models/owlv2/test_modeling_owlv2.py @@ -141,8 +141,8 @@ def prepare_config_and_inputs_for_common(self): # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTVisionModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2VisionModelTest(ModelTesterMixin, unittest.TestCase): """ - Here we also overwrite some of the tests of test_modeling_common.py, as OWLV2 does not use input_ids, - inputs_embeds, attention_mask and seq_length. + Here we also overwrite some of the tests of test_modeling_common.py, as OWLV2 does not use input_ids, inputs_embeds, + attention_mask and seq_length. 
""" all_model_classes = (Owlv2VisionModel,) if is_torch_available() else () diff --git a/tests/models/owlvit/test_processor_owlvit.py b/tests/models/owlvit/test_processor_owlvit.py index b8bd17e027c6..b271c8880bfd 100644 --- a/tests/models/owlvit/test_processor_owlvit.py +++ b/tests/models/owlvit/test_processor_owlvit.py @@ -38,9 +38,7 @@ class OwlViTProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() - # fmt: off - vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] - # fmt: on + vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l", "w", "r", "t", "low", "er", "lowest", "newer", "wider", "", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w", "e r", ""] self.special_tokens_map = {"unk_token": ""} diff --git a/tests/models/pegasus/test_tokenization_pegasus.py b/tests/models/pegasus/test_tokenization_pegasus.py index 9a40854e86c5..3abe36d1183b 100644 --- a/tests/models/pegasus/test_tokenization_pegasus.py +++ b/tests/models/pegasus/test_tokenization_pegasus.py @@ -119,9 +119,7 @@ def test_large_seq2seq_truncation(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 
17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/perceiver/test_tokenization_perceiver.py b/tests/models/perceiver/test_tokenization_perceiver.py index e08f2e4c5c79..f2366120097a 100644 --- a/tests/models/perceiver/test_tokenization_perceiver.py +++ b/tests/models/perceiver/test_tokenization_perceiver.py @@ -113,9 +113,7 @@ def test_multibytes_char(self): def test_prepare_batch_integration(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] - # fmt: off - expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] - # fmt: on + expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) diff --git a/tests/models/phi/test_modeling_phi.py b/tests/models/phi/test_modeling_phi.py index 200fac25907a..93c5ca85e9a4 100644 --- a/tests/models/phi/test_modeling_phi.py +++ b/tests/models/phi/test_modeling_phi.py @@ -361,9 +361,7 @@ def test_model_phi_1_logits(self): output = model(**input_ids).logits - # fmt: off - EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 
0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) - # fmt: on + EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) # fmt: skip self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4)) @@ -379,8 +377,6 @@ def test_model_phi_1_5_logits(self): output = model(**input_ids).logits - # fmt: off - EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) - # fmt: on + EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) # fmt: skip self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4)) diff --git a/tests/models/reformer/test_tokenization_reformer.py b/tests/models/reformer/test_tokenization_reformer.py index a2a0db6c3705..0f72bf311a22 100644 --- a/tests/models/reformer/test_tokenization_reformer.py +++ b/tests/models/reformer/test_tokenization_reformer.py @@ -351,9 +351,7 @@ def test_torch_encode_plus_sent_to_model(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. 
diff --git a/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py b/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py index a2f56e31a091..c975718778da 100644 --- a/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py +++ b/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py @@ -493,8 +493,8 @@ def test_create_position_ids_respects_padding_index(self): """Ensure that the default position ids only assign a sequential . This is a regression test for https://github.com/huggingface/transformers/issues/1761 - The position ids should be masked with the embedding object's padding index. Therefore, the first available - non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1 + The position ids should be masked with the embedding object's padding index. Therefore, the + first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] model = RobertaPreLayerNormEmbeddings(config=config) @@ -513,8 +513,8 @@ def test_create_position_ids_from_inputs_embeds(self): """Ensure that the default position ids only assign a sequential . This is a regression test for https://github.com/huggingface/transformers/issues/1761 - The position ids should be masked with the embedding object's padding index. Therefore, the first available - non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1 + The position ids should be masked with the embedding object's padding index. Therefore, the + first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] embeddings = RobertaPreLayerNormEmbeddings(config=config) diff --git a/tests/models/sam/test_modeling_sam.py b/tests/models/sam/test_modeling_sam.py index eb8f265c2946..a84e3695c13f 100644 --- a/tests/models/sam/test_modeling_sam.py +++ b/tests/models/sam/test_modeling_sam.py @@ -728,9 +728,7 @@ def test_inference_mask_generation_two_points_point_batch(self): raw_image = prepare_image() - # fmt: off - input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu() - # fmt: on + input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu() # fmt: skip input_points = input_points.unsqueeze(0) diff --git a/tests/models/sam/test_modeling_tf_sam.py b/tests/models/sam/test_modeling_tf_sam.py index a14b99128671..4478815e7ce9 100644 --- a/tests/models/sam/test_modeling_tf_sam.py +++ b/tests/models/sam/test_modeling_tf_sam.py @@ -628,9 +628,7 @@ def test_inference_mask_generation_two_points_point_batch(self): raw_image = prepare_image() - # fmt: off - input_points = tf.convert_to_tensor([[[400, 650]], [[220, 470]]]) - # fmt: on + input_points = tf.convert_to_tensor([[[400, 650]], [[220, 470]]]) # fmt: skip input_points = tf.expand_dims(input_points, 0) diff --git a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py index 6963433e01b3..ab7a48694d2f 100644 --- a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py +++ b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py @@ -1001,9 +1001,7 @@ def processor(self): def input_text(self): # corresponds to "C'est un test." 
with seamlessM4T_medium checkpoint - # fmt: off - input_ids = torch.tensor([[256057, 152, 248116, 354, 159, 7356, 248075, 3]]) - # fmt: on + input_ids = torch.tensor([[256057, 152, 248116, 354, 159, 7356, 248075, 3]]) # fmt: skip input_ids = input_ids.to(torch_device) @@ -1049,9 +1047,7 @@ def test_to_eng_text(self): # test text - tgt lang: eng - # fmt: off - expected_text_tokens = [3, 256047, 3291, 248116, 248066, 9, 7356, 248075, 3] - # fmt: on + expected_text_tokens = [3, 256047, 3291, 248116, 248066, 9, 7356, 248075, 3] # fmt: skip # fmt: off expected_unit_tokens = [ @@ -1062,9 +1058,7 @@ def test_to_eng_text(self): ] # fmt: on - # fmt: off - expected_wav_slice = [-3e-05, -0.0004, -0.00037, -0.00013, -6e-05, 0.00012, -0.00016, 0.00025, 7e-05, -3e-05] - # fmt: on + expected_wav_slice = [-3e-05, -0.0004, -0.00037, -0.00013, -6e-05, 0.00012, -0.00016, 0.00025, 7e-05, -3e-05] # fmt: skip set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="eng", return_intermediate_token_ids=True) @@ -1081,9 +1075,7 @@ def test_to_swh_text(self): # test text - tgt lang: swh - # fmt: off - expected_text_tokens = [3, 256168, 1665, 188589, 7040, 248075, 3] - # fmt: on + expected_text_tokens = [3, 256168, 1665, 188589, 7040, 248075, 3] # fmt: skip # fmt: off expected_unit_tokens = [ @@ -1093,9 +1085,7 @@ def test_to_swh_text(self): ] # fmt: on - # fmt: off - expected_wav_slice = [1e-05, -7e-05, -4e-05, -4e-05, -6e-05, -9e-05, -0.0001, -2e-05, -7e-05, -2e-05] - # fmt: on + expected_wav_slice = [1e-05, -7e-05, -4e-05, -4e-05, -6e-05, -9e-05, -0.0001, -2e-05, -7e-05, -2e-05] # fmt: skip set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="swh", return_intermediate_token_ids=True) @@ -1111,9 +1101,7 @@ def test_to_rus_speech(self): # test audio - tgt lang: rus - # fmt: off - expected_text_tokens = [3, 256147, 1197, 73565, 3413, 537, 233331, 248075, 3] - # fmt: on + expected_text_tokens = [3, 256147, 1197, 73565, 3413, 537, 233331, 248075, 3] # fmt: skip # fmt: off expected_unit_tokens = [ @@ -1124,9 +1112,7 @@ def test_to_rus_speech(self): ] # fmt: on - # fmt: off - expected_wav_slice = [0.00013, 0.00012, 0.00014, 3e-05, 0.0, -6e-05, -0.00018, -0.00016, -0.00021, -0.00018] - # fmt: on + expected_wav_slice = [0.00013, 0.00012, 0.00014, 3e-05, 0.0, -6e-05, -0.00018, -0.00016, -0.00021, -0.00018] # fmt: skip set_seed(0) output = model.generate(**self.input_audio, num_beams=1, tgt_lang="rus", return_intermediate_token_ids=True) diff --git a/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py b/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py index 2cd9e8c56b52..7752156eab64 100644 --- a/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py +++ b/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py @@ -449,9 +449,7 @@ class SeamlessM4TDistilledIntegrationTest(unittest.TestCase): " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] - # fmt: off - expected_src_tokens = [256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 3] - # fmt: on + expected_src_tokens = [256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 3] # fmt: skip @classmethod def setUpClass(cls): @@ -483,9 +481,7 @@ def test_tokenizer_tgt_lang(self): # Copied from tests.models.nllb.test_tokenization_nllb.NllbDistilledIntegrationTest.test_enro_tokenizer_decode_ignores_language_codes def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, 
self.tokenizer.all_special_ids) - # fmt: off - generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] - # fmt: on + generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) diff --git a/tests/models/speech_to_text/test_tokenization_speech_to_text.py b/tests/models/speech_to_text/test_tokenization_speech_to_text.py index 46c242796727..b0cb1acc8563 100644 --- a/tests/models/speech_to_text/test_tokenization_speech_to_text.py +++ b/tests/models/speech_to_text/test_tokenization_speech_to_text.py @@ -90,28 +90,16 @@ def test_full_tokenizer(self): ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") - self.assertListEqual( - tokens, - # fmt: off - [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], - # fmt: on - ) + self.assertListEqual(tokens,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."]) # fmt: skip ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) back_tokens = tokenizer.convert_ids_to_tokens(ids) - self.assertListEqual( - back_tokens, - # fmt: off - [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "", "."], - # fmt: on - ) + self.assertListEqual(back_tokens,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "", "."]) # fmt: skip @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 
3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 
1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/speecht5/test_tokenization_speecht5.py b/tests/models/speecht5/test_tokenization_speecht5.py index 32468abe6ad6..f078402d505a 100644 --- a/tests/models/speecht5/test_tokenization_speecht5.py +++ b/tests/models/speecht5/test_tokenization_speecht5.py @@ -152,9 +152,7 @@ def test_full_tokenizer(self): tokenizer = self.get_tokenizer(normalize=True) tokens = tokenizer.tokenize("This 
is a test") - # fmt: off - self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']) - # fmt: on + self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']) # fmt: skip self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), @@ -162,25 +160,13 @@ def test_full_tokenizer(self): ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") - self.assertListEqual( - tokens, - # fmt: off - [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] - # fmt: on - ) + self.assertListEqual(tokens,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) # fmt: skip ids = tokenizer.convert_tokens_to_ids(tokens) - # fmt: off - self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 9, 10, 9, 5, 6, 22, 4, 6, 20, 8, 4, 6, 11, 8, 16, 12, 7, 9, 14, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) - # fmt: on + self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 9, 10, 9, 5, 6, 22, 4, 6, 20, 8, 4, 6, 11, 8, 16, 12, 7, 9, 14, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) # fmt: skip back_tokens = tokenizer.convert_ids_to_tokens(ids) - self.assertListEqual( - back_tokens, - # fmt: off - [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] - # fmt: on - ) + self.assertListEqual(back_tokens,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) # fmt: skip @slow def test_tokenizer_integration(self): diff --git a/tests/models/switch_transformers/test_modeling_switch_transformers.py b/tests/models/switch_transformers/test_modeling_switch_transformers.py index 6a2c3f5525e4..5458b5666679 100644 --- a/tests/models/switch_transformers/test_modeling_switch_transformers.py +++ b/tests/models/switch_transformers/test_modeling_switch_transformers.py @@ -905,6 +905,7 @@ class SwitchTransformerRouterTest(unittest.TestCase): Original implementation of the routers here: """ + config = 
SwitchTransformersConfig( num_experts=2, hidden_size=8, diff --git a/tests/models/t5/test_tokenization_t5.py b/tests/models/t5/test_tokenization_t5.py index 26cd20c74c15..a141dea86b71 100644 --- a/tests/models/t5/test_tokenization_t5.py +++ b/tests/models/t5/test_tokenization_t5.py @@ -369,9 +369,7 @@ def test_pretrained_model_lists(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[31220, 7, 41, 14034, 801, 38, 3, 102, 63, 17, 127, 524, 18, 7031, 2032, 277, 11, 3, 102, 63, 17, 127, 524, 18, 2026, 17, 10761, 18, 7041, 61, 795, 879, 18, 19681, 4648, 7, 41, 12920, 382, 6, 350, 6383, 4949, 6, 2158, 12920, 382, 9, 6, 3, 4, 11160, 6, 2043, 17153, 279, 49, 17, 6, 3, 4, 434, 9688, 11439, 21, 6869, 10509, 17725, 41, 567, 9138, 61, 11, 6869, 10509, 11946, 41, 18207, 517, 61, 28, 147, 3538, 1220, 7140, 10761, 2250, 16, 910, 1220, 8024, 11, 1659, 1413, 32, 883, 2020, 344, 2215, 226, 6, 12901, 382, 127, 524, 11, 4738, 7, 127, 15390, 5, 1], [272, 24203, 19, 876, 12, 554, 18, 9719, 1659, 2647, 26352, 6497, 7, 45, 73, 9339, 400, 26, 1499, 57, 22801, 10760, 30, 321, 646, 11, 269, 2625, 16, 66, 7500, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [37, 1704, 4216, 3, 20400, 4418, 7, 147, 8, 19743, 1782, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[31220, 7, 41, 14034, 801, 38, 3, 102, 63, 17, 127, 524, 18, 7031, 2032, 277, 11, 3, 102, 63, 17, 127, 524, 18, 2026, 17, 10761, 18, 7041, 61, 795, 879, 18, 19681, 4648, 7, 41, 12920, 382, 6, 350, 6383, 4949, 6, 2158, 12920, 382, 9, 6, 3, 4, 11160, 6, 2043, 17153, 279, 49, 17, 6, 3, 4, 434, 9688, 11439, 21, 6869, 10509, 17725, 41, 567, 9138, 61, 11, 6869, 10509, 11946, 41, 18207, 517, 61, 28, 147, 3538, 1220, 7140, 10761, 2250, 16, 910, 1220, 8024, 11, 1659, 1413, 32, 883, 2020, 344, 2215, 226, 6, 12901, 382, 127, 524, 11, 4738, 7, 127, 15390, 5, 1], [272, 24203, 19, 876, 12, 554, 18, 9719, 1659, 2647, 26352, 6497, 7, 45, 73, 9339, 400, 26, 1499, 57, 22801, 10760, 30, 321, 646, 11, 269, 
2625, 16, 66, 7500, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [37, 1704, 4216, 3, 20400, 4418, 7, 147, 8, 19743, 1782, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/tapas/test_tokenization_tapas.py b/tests/models/tapas/test_tokenization_tapas.py index 9d82c468aa30..692dc91b6d88 100644 --- a/tests/models/tapas/test_tokenization_tapas.py +++ b/tests/models/tapas/test_tokenization_tapas.py @@ -1214,9 +1214,7 @@ def test_tapas_integration_test(self): tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512) - # fmt: off - expected_results = {'input_ids':[101,2043,2001,8226,15091,2141,1029,102,5889,2287,2193,1997,5691,3058,1997,4182,8226,15091,5179,6584,2324,2285,3699,14720,4487,6178,9488,3429,5187,2340,2281,3326,2577,18856,7828,3240,5354,6353,1020,2089,3777],'attention_mask':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],'token_type_ids':[[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[1,1,0,0,0,0,0],[1,2,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,1,1,0,0,0,0],[1,1,1,0,0,0,0],[1,2,1,0,2,2,0],[1,3,1,0,3,1,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,2,2,0,1,3,0],[1,3,2,0,1,3,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,2,3,0,3,1,0],[1,3,3,0,2,2,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0]]} # noqa: E231 - # fmt: on + expected_results = 
{'input_ids':[101,2043,2001,8226,15091,2141,1029,102,5889,2287,2193,1997,5691,3058,1997,4182,8226,15091,5179,6584,2324,2285,3699,14720,4487,6178,9488,3429,5187,2340,2281,3326,2577,18856,7828,3240,5354,6353,1020,2089,3777],'attention_mask':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],'token_type_ids':[[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[1,1,0,0,0,0,0],[1,2,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,1,1,0,0,0,0],[1,1,1,0,0,0,0],[1,2,1,0,2,2,0],[1,3,1,0,3,1,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,2,2,0,1,3,0],[1,3,2,0,1,3,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,2,3,0,3,1,0],[1,3,3,0,2,2,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0]]} # fmt: skip new_encoded_inputs = tokenizer.encode_plus(table=table, query=queries[0]) @@ -1258,9 +1256,7 @@ def test_full_tokenizer(self): column_ids = token_type_ids[:, 1] row_ids = token_type_ids[:, 2] - # fmt: off - expected_results = {'input_ids':[101,2054,2020,1996,6853,3415,1029,102,13433,2015,2053,4062,2136,10876,2051,1013,3394,8370,2685,1015,3590,4754,29267,4765,3771,2136,2447,1005,1055,6584,1015,1024,4466,1024,2340,1012,6185,2509,1015,2570,1016,1015,10391,12022,4226,7895,10625,1013,22996,3868,6584,1009,1014,1012,1022,10819,2015,1016,2459,1017,1017,2703,10555,2136,2447,1005,1055,6584,1009,2654,1012,1020,10819,2015,1017,2403,1018,1023,8709,8183,3126,21351,2078,1010,3781,1012,2136,10958,8865,6584,1009,2871,1012,1022,10819,2015,2410,2260,1019,4090,7986,5292,5677,8151,2771,1011,2990,9187,3868,6584,1009,4413,1012,1015,10819,2015,1020,2184,1020,2322,2030,20282,14262,9035,4754,3868,6584,1009,1015,1024,4002,1012,1016,2184,1022,1021,4868,7918,12023,12023,3868,6584,1009,1015,1024,5890,1012,1018,1019,1020,1022,2260,5261,12436,18116,2137,4382,2136,26447,6584,1009,1015,1024,5890,1012,1022,1022,1019,1023,1021,27339,3995,10125,9711,4906,25101,24657,1011,22033,2386,3868,6564,1009,1015,5001,2321,1018,2184,4583,7986,14383,2075,29488,14906,9351,2971,6564,1009,1015,5001,2340,1017,2340,2676,8527,2014,2696,1052,2243,3868,6564,1009,1015,5001,2260,1016,2260,2861,4575,4477,1011,2128,4710,2137,4382,2136,26447,6564,1009,1015,5001,2459,1015,2410,2539,8963,11503,25457,3022,8512,2522,9654,3868,5594,1009,1016,10876,2324,1014,2403,3943,4074,6415,15204,2072,12496,25378,3868,5594,1009,1016,10876,2403,1014,2321,1018,10704,17921,14906,9351,2971,5594,1009,1016,10876,1023,1014,2385,2340,14915,5795,8512,2522,9654,3868,6640,6228,2539,1014,2459,1016,28328,8945,3126,21351,2015,10625,1013,22996,3868,6255,6228,1018,1014,2324,2321,12270,11956,5232,3868,2260,6228,1021,1014,2539,1019,8473,28027,2080,2474,6371,5232,3868,2184,6228,2385,1014,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'column_ids':[0,0,0,0,0,0,0,0,1,1,2,3,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,3,3,3,3,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,5,6,6,6,6,6,6,7,8,1
,2,3,3,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,7,8,1,2,3,3,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,5,6,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'row_ids':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'segment_ids':[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]} # noqa: E231 - # fmt: on + expected_results = 
{'input_ids':[101,2054,2020,1996,6853,3415,1029,102,13433,2015,2053,4062,2136,10876,2051,1013,3394,8370,2685,1015,3590,4754,29267,4765,3771,2136,2447,1005,1055,6584,1015,1024,4466,1024,2340,1012,6185,2509,1015,2570,1016,1015,10391,12022,4226,7895,10625,1013,22996,3868,6584,1009,1014,1012,1022,10819,2015,1016,2459,1017,1017,2703,10555,2136,2447,1005,1055,6584,1009,2654,1012,1020,10819,2015,1017,2403,1018,1023,8709,8183,3126,21351,2078,1010,3781,1012,2136,10958,8865,6584,1009,2871,1012,1022,10819,2015,2410,2260,1019,4090,7986,5292,5677,8151,2771,1011,2990,9187,3868,6584,1009,4413,1012,1015,10819,2015,1020,2184,1020,2322,2030,20282,14262,9035,4754,3868,6584,1009,1015,1024,4002,1012,1016,2184,1022,1021,4868,7918,12023,12023,3868,6584,1009,1015,1024,5890,1012,1018,1019,1020,1022,2260,5261,12436,18116,2137,4382,2136,26447,6584,1009,1015,1024,5890,1012,1022,1022,1019,1023,1021,27339,3995,10125,9711,4906,25101,24657,1011,22033,2386,3868,6564,1009,1015,5001,2321,1018,2184,4583,7986,14383,2075,29488,14906,9351,2971,6564,1009,1015,5001,2340,1017,2340,2676,8527,2014,2696,1052,2243,3868,6564,1009,1015,5001,2260,1016,2260,2861,4575,4477,1011,2128,4710,2137,4382,2136,26447,6564,1009,1015,5001,2459,1015,2410,2539,8963,11503,25457,3022,8512,2522,9654,3868,5594,1009,1016,10876,2324,1014,2403,3943,4074,6415,15204,2072,12496,25378,3868,5594,1009,1016,10876,2403,1014,2321,1018,10704,17921,14906,9351,2971,5594,1009,1016,10876,1023,1014,2385,2340,14915,5795,8512,2522,9654,3868,6640,6228,2539,1014,2459,1016,28328,8945,3126,21351,2015,10625,1013,22996,3868,6255,6228,1018,1014,2324,2321,12270,11956,5232,3868,2260,6228,1021,1014,2539,1019,8473,28027,2080,2474,6371,5232,3868,2184,6228,2385,1014,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'column_ids':[0,0,0,0,0,0,0,0,1,1,2,3,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,3,3,3,3,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,7,8,1,2,3,3,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,5,6,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'row_ids':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,1
1,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'segment_ids':[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]} # fmt: skip self.assertListEqual(input_ids, expected_results["input_ids"]) self.assertListEqual(segment_ids.tolist(), expected_results["segment_ids"]) diff --git a/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py b/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py index 88e759307ed6..fdbff90b24b0 100644 --- a/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py +++ b/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py @@ -251,9 +251,7 @@ class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_transfo_xl_wt103(self): model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") - # fmt: off - input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]],dtype=tf.int32) # noqa: E231 - # fmt: on + input_ids = tf.convert_to_tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 
3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]]) # fmt: skip # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the @@ -265,22 +263,20 @@ def test_lm_generate_transfo_xl_wt103(self): # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . - # fmt: off - expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 - # fmt: on - # In 1991, the remains of Russian Tsar Nicholas II and his family ( - # except for Alexei and Maria ) are discovered. The voice of young son, - # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. - # 1883 Western Siberia, a young Grigori Rasputin is asked by his father - # and a group of men to perform magic. Rasputin has a vision and - # denounces one of the men as a horse thief. Although his father initially - # slaps him for making such an accusation, Rasputin watches as the man - # is chased outside and beaten. Twenty years later, Rasputin sees a vision - # of the Virgin Mary, prompting him to become a priest. - # Rasputin quickly becomes famous, with people, even a bishop, begging for - # his blessing. In the 1990s, the remains of Russian Tsar - # Nicholas II and his family were discovered. The voice of young son, - # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. + expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10] # fmt: skip + # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for + # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei + # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young + # Grigori Rasputin is asked by his father and a group of men to perform magic. + # Rasputin has a vision and denounces one of the men as a horse thief. Although + # his father initially slaps him for making such an accusation, Rasputin watches + # as the man is chased outside and beaten. 
Twenty years later, Rasputin sees a + # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly + # becomes famous, with people, even a bishop, begging for his blessing. In the + # early 20th century, Rasputin became a symbol of the Russian Orthodox Church. + # The image of Rasputin was used in the Russian national anthem, " Nearer, My God, + # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit + # of Heaven " output_ids = model.generate(input_ids, max_length=200, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) diff --git a/tests/models/transfo_xl/test_modeling_transfo_xl.py b/tests/models/transfo_xl/test_modeling_transfo_xl.py index d100ea381d64..9534b13c8526 100644 --- a/tests/models/transfo_xl/test_modeling_transfo_xl.py +++ b/tests/models/transfo_xl/test_modeling_transfo_xl.py @@ -502,9 +502,7 @@ def test_lm_generate_transfo_xl_wt103(self): model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") model.to(torch_device) - # fmt: off - input_ids = torch.tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]],dtype=torch.long,device=torch_device) # noqa: E231 - # fmt: on + input_ids = torch.tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]], dtype=torch.long,device=torch_device) # fmt: skip # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the @@ -516,9 +514,7 @@ def test_lm_generate_transfo_xl_wt103(self): # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
- # fmt: off - expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10] # noqa: E231 - # fmt: on + expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10] # fmt: skip # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young diff --git a/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py index 2cb5e446724f..db38e4a98992 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py @@ -407,7 +407,7 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, nam def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): - if type(key) == bool: + if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py index 755d59e71cfa..8ac4449cf3c9 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py @@ -928,9 +928,7 @@ def test_inference_cordv2(self): sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token # verify generated sequence - # fmt: off - expected_sequence = " CINNAMON SUGAR 17,000 1 x 17,000 17,000 17,000 20,000 3,000" # noqa: E231 - # fmt: on + expected_sequence = " CINNAMON SUGAR 17,000 1 x 17,000 17,000 17,000 20,000 3,000" # noqa: E231 # fmt: skip self.assertEqual(sequence, expected_sequence) # verify scores diff --git a/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py 
b/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py index 7cd8b7645db9..aebe723bd5fd 100644 --- a/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py +++ b/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py @@ -38,9 +38,7 @@ class VisionTextDualEncoderProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() - # fmt: off - vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] - # fmt: on + vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: skip self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) diff --git a/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py b/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py index 393e056b84d4..7af3cd34e8a4 100644 --- a/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py @@ -850,8 +850,9 @@ def test_inference_keyword_spotting(self): input_values = inputs.input_values attention_mask = inputs.attention_mask outputs = model(input_values, attention_mask) - predicted_logits, predicted_ids = tf.math.reduce_max(outputs.logits, axis=-1), tf.argmax( - outputs.logits, axis=-1 + predicted_logits, predicted_ids = ( + tf.math.reduce_max(outputs.logits, axis=-1), + tf.argmax(outputs.logits, axis=-1), ) expected_labels = [7, 6, 10, 9] expected_logits = tf.convert_to_tensor([6.1186, 11.8961, 10.2931, 6.0898]) @@ -866,15 +867,18 @@ def test_inference_intent_classification(self): input_values = inputs.input_values attention_mask = inputs.attention_mask outputs = model(input_values, attention_mask=attention_mask) - predicted_logits_action, predicted_ids_action = tf.math.reduce_max(outputs.logits[:, :6], axis=-1), tf.argmax( - outputs.logits[:, :6], axis=-1 + predicted_logits_action, predicted_ids_action = ( + tf.math.reduce_max(outputs.logits[:, :6], axis=-1), + tf.argmax(outputs.logits[:, :6], axis=-1), + ) + predicted_logits_object, predicted_ids_object = ( + tf.math.reduce_max(outputs.logits[:, 6:20], axis=-1), + tf.argmax(outputs.logits[:, 6:20], axis=-1), + ) + predicted_logits_location, predicted_ids_location = ( + tf.math.reduce_max(outputs.logits[:, 20:24], axis=-1), + tf.argmax(outputs.logits[:, 20:24], axis=-1), ) - predicted_logits_object, predicted_ids_object = tf.math.reduce_max( - outputs.logits[:, 6:20], axis=-1 - ), tf.argmax(outputs.logits[:, 6:20], axis=-1) - predicted_logits_location, predicted_ids_location = tf.math.reduce_max( - outputs.logits[:, 20:24], axis=-1 - ), tf.argmax(outputs.logits[:, 20:24], axis=-1) expected_labels_action = [0, 0, 2, 3] expected_logits_action = tf.convert_to_tensor([0.4568, 11.0848, 1.6621, 9.3841]) expected_labels_object = [3, 10, 3, 4] @@ -915,8 +919,9 @@ def test_inference_emotion_recognition(self): input_values = inputs.input_values attention_mask = inputs.attention_mask outputs = model(input_values, attention_mask=attention_mask) - predicted_logits, predicted_ids = tf.math.reduce_max(outputs.logits, axis=-1), tf.argmax( - outputs.logits, axis=-1 + predicted_logits, predicted_ids = ( + tf.math.reduce_max(outputs.logits, axis=-1), + tf.argmax(outputs.logits, axis=-1), ) expected_labels = [1, 1, 2, 2] diff --git 
a/tests/models/whisper/test_modeling_flax_whisper.py b/tests/models/whisper/test_modeling_flax_whisper.py index 982dcb4827a0..d06166376929 100644 --- a/tests/models/whisper/test_modeling_flax_whisper.py +++ b/tests/models/whisper/test_modeling_flax_whisper.py @@ -670,9 +670,7 @@ def test_tiny_timestamp_generation(self): generated_ids = generate_fn(input_features) - # fmt: off - EXPECTED_OUTPUT = np.array([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) - # fmt: on + EXPECTED_OUTPUT = np.array([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) # fmt: skip self.assertTrue(np.allclose(generated_ids, EXPECTED_OUTPUT)) diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index c7d6fb692654..e26527267fcb 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -1566,9 +1566,7 @@ def test_tiny_timestamp_generation(self): generated_ids = model.generate(input_features, max_length=448, return_timestamps=True).to("cpu") - # fmt: off - EXPECTED_OUTPUT = torch.tensor([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) - # fmt: on + EXPECTED_OUTPUT = torch.tensor([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) # fmt: skip self.assertTrue(torch.allclose(generated_ids, EXPECTED_OUTPUT)) diff --git 
a/tests/models/whisper/test_tokenization_whisper.py b/tests/models/whisper/test_tokenization_whisper.py index fd1c135deb53..731abd3a283e 100644 --- a/tests/models/whisper/test_tokenization_whisper.py +++ b/tests/models/whisper/test_tokenization_whisper.py @@ -77,20 +77,16 @@ def test_full_tokenizer(self): tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, - # fmt: off - ['I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ9', '2000', ',', 'Ġand', 'Ġ', 'this', 'Ġis', 'Ġfals', 'é', '.'], - # fmt: on - ) + ["I", "Ġwas", "Ġborn", "Ġin", "Ġ9", "2000", ",", "Ġand", "Ġ", "this", "Ġis", "Ġfals", "é", "."], # fmt: skip + ) # fmt: skip ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [40, 390, 4232, 294, 1722, 25743, 11, 293, 220, 11176, 307, 16720, 526, 13]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, - # fmt: off - ['I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ9', '2000', ',', 'Ġand', 'Ġ', 'this', 'Ġis', 'Ġfals', 'é', '.'], - # fmt: on - ) + ["I", "Ġwas", "Ġborn", "Ġin", "Ġ9", "2000", ",", "Ġand", "Ġ", "this", "Ġis", "Ġfals", "é", "."], # fmt: skip + ) # fmt: skip def test_tokenizer_slow_store_full_signature(self): pass @@ -108,9 +104,7 @@ def test_special_tokens_initialization(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[50257, 50362, 41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13, 50256], [50257, 50362, 13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13, 50256], [50257, 50362, 464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13, 50256]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[50257, 50362, 41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13, 50256], [50257, 50362, 13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13, 50256], [50257, 50362, 464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13, 50256]], 'attention_mask': 
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="openai/whisper-tiny.en", padding=False @@ -128,9 +122,7 @@ def test_output_offsets(self): ) # Merge when the previous sequence is a suffix of the next sequence - # fmt: off - next_sequences_1 = [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] - # fmt: on + next_sequences_1 = [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] # fmt: skip self.assertEqual( tokenizer.decode(next_sequences_1, output_offsets=True), { diff --git a/tests/models/xglm/test_modeling_tf_xglm.py b/tests/models/xglm/test_modeling_tf_xglm.py index 54641693c771..3950ccf6524f 100644 --- a/tests/models/xglm/test_modeling_tf_xglm.py +++ b/tests/models/xglm/test_modeling_tf_xglm.py @@ -177,9 +177,7 @@ def test_lm_generate_xglm(self, verify_outputs=True): model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32) # The dog # The dog is a very friendly dog. He is very affectionate and loves to play with other - # fmt: off - expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] - # fmt: on + expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: skip output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) diff --git a/tests/models/xglm/test_modeling_xglm.py b/tests/models/xglm/test_modeling_xglm.py index 5fa92c8082c0..457317f07836 100644 --- a/tests/models/xglm/test_modeling_xglm.py +++ b/tests/models/xglm/test_modeling_xglm.py @@ -383,9 +383,7 @@ def _test_lm_generate_xglm_helper( model.to(torch_device) input_ids = torch.tensor([[2, 268, 9865]], dtype=torch.long, device=torch_device) # The dog # The dog is a very friendly dog. 
He is very affectionate and loves to play with other - # fmt: off - expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] - # fmt: on + expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: skip output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].tolist(), expected_output_ids) diff --git a/tests/models/xglm/test_tokenization_xglm.py b/tests/models/xglm/test_tokenization_xglm.py index 74dd4dab5e3e..61674976a382 100644 --- a/tests/models/xglm/test_tokenization_xglm.py +++ b/tests/models/xglm/test_tokenization_xglm.py @@ -182,9 +182,7 @@ def test_tokenization_base_hard_symbols(self): 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) - # fmt: off - original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] - # fmt: on + original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: skip self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) diff --git a/tests/models/xlm_prophetnet/test_modeling_xlm_prophetnet.py b/tests/models/xlm_prophetnet/test_modeling_xlm_prophetnet.py index 1af9ecef6c18..614ccd6ebc2b 100644 --- a/tests/models/xlm_prophetnet/test_modeling_xlm_prophetnet.py +++ b/tests/models/xlm_prophetnet/test_modeling_xlm_prophetnet.py @@ -114,9 +114,7 @@ def test_xprophetnet_ntg_inference(self): " этой системы не смогут получать обновления безопасности, из-за чего их компьютеры могут стать уязвимыми" " к кибератакам." 
) - ZH_SENTENCE = ( - "根据该组织的官方门户网站,微软公司打算在2020年1月14日之后正式终止对Windows 7操作系统的免费支持。从那时起,该系统的用户将无法接收安全更新,这可能会使他们的计算机容易受到网络攻击。" - ) + ZH_SENTENCE = "根据该组织的官方门户网站,微软公司打算在2020年1月14日之后正式终止对Windows 7操作系统的免费支持。从那时起,该系统的用户将无法接收安全更新,这可能会使他们的计算机容易受到网络攻击。" input_ids = tokenizer( [EN_SENTENCE, RU_SENTENCE, ZH_SENTENCE], padding=True, max_length=255, return_tensors="pt" diff --git a/tests/models/xlm_prophetnet/test_tokenization_xlm_prophetnet.py b/tests/models/xlm_prophetnet/test_tokenization_xlm_prophetnet.py index 13c02b5415f8..679e808dc975 100644 --- a/tests/models/xlm_prophetnet/test_tokenization_xlm_prophetnet.py +++ b/tests/models/xlm_prophetnet/test_tokenization_xlm_prophetnet.py @@ -144,9 +144,7 @@ def test_tokenization_base_easy_symbols(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 
12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/xlm_roberta/test_tokenization_xlm_roberta.py b/tests/models/xlm_roberta/test_tokenization_xlm_roberta.py index 0dde56481cc7..1cba1c01d580 100644 --- a/tests/models/xlm_roberta/test_tokenization_xlm_roberta.py +++ b/tests/models/xlm_roberta/test_tokenization_xlm_roberta.py @@ -334,9 +334,7 @@ def test_tokenization_base_hard_symbols(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/models/xlnet/test_tokenization_xlnet.py b/tests/models/xlnet/test_tokenization_xlnet.py index 216eb0f637a9..9fb28658aab4 100644 --- a/tests/models/xlnet/test_tokenization_xlnet.py +++ b/tests/models/xlnet/test_tokenization_xlnet.py @@ -199,9 +199,7 @@ def test_sequence_builders(self): @slow def test_tokenizer_integration(self): - # fmt: off - expected_encoding = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 - # fmt: on + expected_encoding = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip self.tokenizer_integration_test_util( expected_encoding=expected_encoding, diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py index df9b23b407f0..b5dee1e00fc9 100644 --- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py +++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py @@ -517,9 +517,7 @@ def test_find_longest_common_subsequence(self): ) # Merge when the previous sequence is not included in the current sequence - # fmt: off - next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] - # fmt: on + next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] # fmt: skip # {'text': ' His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)} merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 120_000, 0)]], @@ -527,12 +525,10 @@ def test_find_longest_common_subsequence(self): processor.feature_extractor, max_source_positions, ) - # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51832], - ) - # fmt: on + ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { @@ -550,23 +546,19 @@ def test_find_longest_common_subsequence(self): }, ) # last case is when the sequence is not in the first next predicted start and end of timestamp - # fmt: off next_sequences_3 = [ [50364, 2812, 9836, 14783, 390, 406, 3163, 1953, 466, 13, 50634, 50634, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50934] - ] - # fmt: on + ] # fmt: skip merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 167_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) - # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51912] - ) - # fmt: on + ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { diff --git a/tests/pipelines/test_pipelines_conversational.py b/tests/pipelines/test_pipelines_conversational.py index 6ba2d8379d2a..e80d6a646f99 100644 --- a/tests/pipelines/test_pipelines_conversational.py +++ b/tests/pipelines/test_pipelines_conversational.py @@ -242,9 +242,7 @@ def test_integration_torch_conversation_llama2_input_ids(self): ], ) inputs = tokenizer._build_conversation_input_ids(conversation) - # fmt: off - EXPECTED_INPUTS_IDS = [ 1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 29871, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 
29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29902, 626, 2675, 304, 3681, 29892, 825, 881, 306, 1074, 29973, 518, 29914, 25580, 29962, 3681, 29892, 278, 7483, 310, 3444, 29892, 338, 2998, 363, 967, 380, 27389, 11258, 29892, 1616, 19133, 29879, 29892, 15839, 2982, 22848, 29892, 322, 6017, 7716, 25005, 29889, 2266, 526, 777, 310, 278, 2246, 19650, 1953, 304, 1074, 297, 3681, 29901, 13, 13, 29896, 29889, 450, 382, 2593, 295, 23615, 29901, 450, 9849, 293, 382, 2593, 295, 23615, 338, 697, 310, 278, 1556, 5936, 13902, 2982, 22848, 297, 278, 3186, 322, 16688, 2078, 271, 400, 5086, 8386, 310, 278, 4272, 29889, 13, 29906, 29889, 450, 4562, 12675, 6838, 29901, 450, 4562, 12675, 338, 697, 310, 278, 3186, 29915, 29879, 10150, 322, 1556, 13834, 19133, 29879, 29892, 27261, 385, 21210, 573, 4333, 310, 1616, 322, 24238, 29879, 29892, 3704, 278, 2598, 29874, 29420, 29889, 13, 29941, 29889, 24337, 29899, 29928, 420, 315, 21471, 29901, 910, 9560, 274, 21471, 338, 697, 310, 278, 1556, 13834, 2982, 22848, 297, 3681, 322, 338, 2998, 363, 967, 22883, 293, 11258, 322, 380, 27389, 380, 7114, 12917, 5417, 29889, 13, 13, 1349, 968, 526, 925, 263, 2846, 310, 278, 1784, 19650, 1953, 393, 3681, 756, 304, 5957, 29889, 2973, 577, 1568, 304, 1074, 322, 437, 29892, 372, 29915, 29879, 694, 4997, 393, 3681, 338, 697, 310, 278, 1556, 5972, 6282, 391, 15422, 800, 297, 278, 3186, 29889, 29871, 2, 1, 518, 25580, 29962, 1724, 338, 577, 2107, 1048, 396, 29896, 29973, 518, 29914, 25580, 29962] - # fmt: on + EXPECTED_INPUTS_IDS = [ 1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 29871, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29902, 626, 2675, 304, 3681, 29892, 825, 881, 306, 1074, 29973, 518, 29914, 25580, 29962, 3681, 29892, 278, 7483, 310, 3444, 29892, 338, 2998, 363, 967, 380, 27389, 11258, 29892, 1616, 19133, 29879, 29892, 15839, 2982, 22848, 29892, 322, 6017, 7716, 25005, 29889, 2266, 526, 777, 310, 278, 2246, 19650, 1953, 304, 1074, 297, 3681, 29901, 13, 13, 29896, 29889, 450, 382, 2593, 295, 23615, 29901, 450, 9849, 293, 382, 2593, 295, 23615, 338, 697, 310, 278, 1556, 5936, 13902, 2982, 22848, 297, 278, 3186, 322, 16688, 2078, 271, 400, 5086, 8386, 310, 278, 4272, 29889, 13, 29906, 29889, 450, 4562, 12675, 6838, 29901, 450, 4562, 12675, 338, 697, 310, 278, 3186, 29915, 29879, 10150, 322, 1556, 13834, 19133, 29879, 29892, 27261, 385, 21210, 573, 4333, 310, 1616, 322, 24238, 29879, 29892, 3704, 278, 2598, 29874, 29420, 29889, 13, 
29941, 29889, 24337, 29899, 29928, 420, 315, 21471, 29901, 910, 9560, 274, 21471, 338, 697, 310, 278, 1556, 13834, 2982, 22848, 297, 3681, 322, 338, 2998, 363, 967, 22883, 293, 11258, 322, 380, 27389, 380, 7114, 12917, 5417, 29889, 13, 13, 1349, 968, 526, 925, 263, 2846, 310, 278, 1784, 19650, 1953, 393, 3681, 756, 304, 5957, 29889, 2973, 577, 1568, 304, 1074, 322, 437, 29892, 372, 29915, 29879, 694, 4997, 393, 3681, 338, 697, 310, 278, 1556, 5972, 6282, 391, 15422, 800, 297, 278, 3186, 29889, 29871, 2, 1, 518, 25580, 29962, 1724, 338, 577, 2107, 1048, 396, 29896, 29973, 518, 29914, 25580, 29962] # fmt: skip self.assertEqual(inputs, EXPECTED_INPUTS_IDS) model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf") diff --git a/tests/pipelines/test_pipelines_question_answering.py b/tests/pipelines/test_pipelines_question_answering.py index 0ab574f5f2c5..f7683aec15c3 100644 --- a/tests/pipelines/test_pipelines_question_answering.py +++ b/tests/pipelines/test_pipelines_question_answering.py @@ -198,17 +198,14 @@ def test_small_model_japanese(self): "question-answering", model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head", ) - output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") + output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") # fmt: skip # Wrong answer, the whole text is identified as one "word" since the tokenizer does not include # a pretokenizer - self.assertEqual( - nested_simplify(output), - {"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}, - ) + self.assertEqual(nested_simplify(output),{"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}) # fmt: skip # Disable word alignment - output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) + output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) # fmt: skip self.assertEqual( nested_simplify(output), {"score": 1.0, "start": 15, "end": 18, "answer": "教科書"}, diff --git a/tests/pipelines/test_pipelines_token_classification.py b/tests/pipelines/test_pipelines_token_classification.py index f6f47accc2bb..b139fbfd2f79 100644 --- a/tests/pipelines/test_pipelines_token_classification.py +++ b/tests/pipelines/test_pipelines_token_classification.py @@ -486,8 +486,7 @@ def test_aggregation_strategy_no_b_i_prefix(self): token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"} example = [ { - # fmt : off - "scores": np.array([0, 0, 0, 0, 0.9968166351318359]), + "scores": np.array([0, 0, 0, 0, 0.9968166351318359]), # fmt : skip "index": 1, "is_subword": False, "word": "En", @@ -495,8 +494,7 @@ def test_aggregation_strategy_no_b_i_prefix(self): "end": 2, }, { - # fmt : off - "scores": np.array([0, 0, 0, 0, 0.9957635998725891]), + "scores": np.array([0, 0, 0, 0, 0.9957635998725891]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", @@ -504,9 +502,7 @@ def test_aggregation_strategy_no_b_i_prefix(self): "end": 4, }, { - # fmt: off - "scores": np.array([0, 0, 0, 0.9986497163772583, 0]), - # fmt: on + "scores": np.array([0, 0, 0, 0.9986497163772583, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, @@ -542,8 +538,7 @@ def test_aggregation_strategy(self): ) example = [ { - # fmt : off - "scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]), + "scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]), # fmt : skip "index": 1, "is_subword": False, 
"word": "En", @@ -551,8 +546,7 @@ def test_aggregation_strategy(self): "end": 2, }, { - # fmt : off - "scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]), + "scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", @@ -560,9 +554,7 @@ def test_aggregation_strategy(self): "end": 4, }, { - # fmt: off - "scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0, ]), - # fmt: on + "scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 31c3c7af030d..0edc23c7af20 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2039,7 +2039,7 @@ def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict = {} for key, tensor in pt_inputs_dict.items(): # skip key that does not exist in tf - if type(tensor) == bool: + if isinstance(tensor, bool): tf_inputs_dict[key] = tensor elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 0d7bf373a4d4..d7cd62b41a02 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -576,7 +576,7 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, nam def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): - if type(key) == bool: + if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index 25d4ab873e84..9b60b2f18673 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -362,11 +362,15 @@ def assert_batch_padded_input_match( model_main_input_name: str = "input_ids", ): for i_r in input_r.values(): - self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual( - len(i_r[1]), max_length + ( + self.assertEqual(len(i_r), 2), + self.assertEqual(len(i_r[0]), max_length), + self.assertEqual(len(i_r[1]), max_length), ) - self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual( - len(i_r[1]), max_length + ( + self.assertEqual(len(i_r), 2), + self.assertEqual(len(i_r[0]), max_length), + self.assertEqual(len(i_r[1]), max_length), ) for i_r, i_p in zip(input_r[model_main_input_name], input_p[model_main_input_name]): @@ -1565,7 +1569,9 @@ def test_special_tokens_mask(self): # Testing single inputs encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( - sequence_0, add_special_tokens=True, return_special_tokens_mask=True # , add_prefix_space=False + sequence_0, + add_special_tokens=True, + return_special_tokens_mask=True, # , add_prefix_space=False ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] diff --git a/tests/tokenization/test_tokenization_fast.py b/tests/tokenization/test_tokenization_fast.py index ad3b2e81841c..48ac31b97c41 100644 --- a/tests/tokenization/test_tokenization_fast.py +++ b/tests/tokenization/test_tokenization_fast.py @@ -156,9 +156,7 @@ def test_init_from_tokenizers_model(self): self.assertEqual(tok.pad_token, "") 
self.assertEqual(tok.init_kwargs["max_length"], 512) self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8) - # fmt: off - self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) - # fmt: on + self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right") self.assertEqual( @@ -175,9 +173,7 @@ def test_init_from_tokenizers_model(self): self.assertEqual(tok.init_kwargs["stride"], 0) # NOTE even if the model has a default max_length, it is not used... # thus tok(sentences, truncation = True) does nothing and does not warn either - # fmt: off - self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) - # fmt: on + self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip @require_tokenizers diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 9df5ac84d747..b9f801fabd7f 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -2201,9 +2201,7 @@ def test_no_wd_param_group(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) trainer = Trainer(model=model) trainer.create_optimizer_and_scheduler(10) - # fmt: off - wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] - # fmt: on + wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] # fmt: skip wd_params = [p for n, p in model.named_parameters() if n in wd_names] no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names] self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params) diff --git a/utils/check_copies.py b/utils/check_copies.py index 667c11ec724b..c536eb800ff0 100644 --- a/utils/check_copies.py +++ b/utils/check_copies.py @@ -40,11 +40,9 @@ import glob import os import re +import subprocess from typing import List, Optional, Tuple -import black -from doc_builder.style_doc import style_docstrings_in_code - from transformers.utils import direct_transformers_import @@ -226,9 +224,17 @@ def get_indent(code: str) -> str: return "" -def blackify(code: str) -> str: +def run_ruff(code): + command = ["ruff", "format", "-", "--config", "pyproject.toml", 
"--silent"] + process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + stdout, _ = process.communicate(input=code.encode()) + return stdout.decode() + + +def stylify(code: str) -> str: """ - Applies the black part of our `make style` command to some code. + Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`. + As `ruff` does not provide a python api this cannot be done on the fly. Args: code (`str`): The code to format. @@ -239,10 +245,8 @@ def blackify(code: str) -> str: has_indent = len(get_indent(code)) > 0 if has_indent: code = f"class Bla:\n{code}" - mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119) - result = black.format_str(code, mode=mode) - result, _ = style_docstrings_in_code(result) - return result[len("class Bla:\n") :] if has_indent else result + formatted_code = run_ruff(code) + return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code def check_codes_match(observed_code: str, theoretical_code: str) -> Optional[int]: @@ -351,8 +355,7 @@ def is_copy_consistent(filename: str, overwrite: bool = False) -> Optional[List[ if option.strip() == "all-casing": theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code) theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code) - - theoretical_code = blackify(theoretical_code) + theoretical_code = stylify(theoretical_code) # Test for a diff and act accordingly. diff_index = check_codes_match(observed_code, theoretical_code) diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index 728a76bde008..c7638a129a0c 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -515,9 +515,7 @@ def extract_imports(module_fname: str, cache: Dict[str, List[str]] = None) -> Li # Filter out all docstrings to not get imports in code examples. As before we need to deactivate formatting to # keep this as escaped quotes and avoid this function failing on this file. - # fmt: off - splits = content.split('\"\"\"') - # fmt: on + splits = content.split('\"\"\"') # fmt: skip content = "".join(splits[::2]) module_parts = str(module_fname).split(os.path.sep) From 12b50c6130ccfbb2381a181dc4bbb2bf07a3c62a Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Thu, 16 Nov 2023 18:54:20 +0000 Subject: [PATCH 202/268] Generate: improve assisted generation tests (#27540) --- tests/generation/test_utils.py | 153 ++++++++++++++++++--------------- 1 file changed, 86 insertions(+), 67 deletions(-) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 1705142f8f1f..1e76c88c71f9 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -23,6 +23,7 @@ from transformers import is_torch_available, pipeline from transformers.testing_utils import ( + is_flaky, require_accelerate, require_torch, require_torch_multi_accelerator, @@ -1506,10 +1507,14 @@ def test_contrastive_generate_low_memory(self): ) self.assertListEqual(low_output.tolist(), high_output.tolist()) - @slow # TODO(Joao): remove this. Some models (e.g. data2vec, xcom, roberta) have an error rate between 1 and 10%. + @is_flaky() # Read NOTE (1) below. If there are API issues, all attempts will fail. def test_assisted_decoding_matches_greedy_search(self): # This test ensures that the assisted generation does not introduce output changes over greedy search. 
- # It breaks the pattern in the tests above, for multiple reasons: + # NOTE (1): The sentence above is true most of the time, there is a tiny difference in the logits due to matmul + # shape differences -- and it may result in a different output. The input shape difference happens in the + # main model, that runs the forward pass with several candidates at once (as opposed to generating one token at + # a time). See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 for more info. + # NOTE (2): It breaks the pattern in the tests above, for multiple reasons: # - assisted_decoding, contrarily to the other methods, can't be called on its own (e.g. needs to # prepare the assistant encoder outputs in the main generate body); # - assisted_decoding does not support `use_cache = False` @@ -1520,77 +1525,82 @@ def test_assisted_decoding_matches_greedy_search(self): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() - for model_name in ["bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet"] + for model_name in [ + "bigbirdpegasus", + "led", + "mega", + "speech2text", + "git", + "prophetnet", + "seamlessm4t", + "clvp", + ] ): self.skipTest("May fix in the future: need model-specific fixes") - # This for loop is a naive and temporary effort to make the test less flaky. - failed = 0 - for i in range(10): - # enable cache - config, input_ids, attention_mask, max_length = self._get_input_ids_and_config(batch_size=1) - - # NOTE: assisted generation only works with cache on at the moment. - if not hasattr(config, "use_cache"): - self.skipTest("This model doesn't support caching") + # enable cache + config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) - config.use_cache = True - config.is_decoder = True - model = model_class(config).to(torch_device).eval() - output_greedy = model.generate( - input_ids, - attention_mask=attention_mask, - max_length=max_length, - num_beams=1, - do_sample=False, - output_scores=True, - output_hidden_states=True, - output_attentions=True, - return_dict_in_generate=True, - ) - # Note: with assisted generate, if the same model is used as assistant, then all assistant tokens will - # be correct - output_assisted = model.generate( - input_ids, - attention_mask=attention_mask, - max_length=max_length, - num_beams=1, - do_sample=False, - assistant_model=model, - output_scores=True, - output_hidden_states=True, - output_attentions=True, - return_dict_in_generate=True, - ) + # NOTE: assisted generation only works with cache on at the moment. 
+ if not hasattr(config, "use_cache"): + self.skipTest("This model doesn't support caching") - try: - self.assertListEqual(output_greedy.sequences.tolist(), output_assisted.sequences.tolist()) + config.use_cache = True + config.is_decoder = True + model = model_class(config).to(torch_device).eval() + # Sets assisted generation arguments such that: + # a) no EOS is generated, to ensure generation doesn't break early + # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of + # the assistant model is correct + # c) there are at least two forward passes in the main model, to ensure the input preparation of + # the main model is correct + generation_kwargs = { + "eos_token_id": -1, # see a) + "max_new_tokens": 4, # see c) + "num_beams": 1, + "do_sample": False, + "output_scores": True, + "output_hidden_states": True, + "output_attentions": True, + "return_dict_in_generate": True, + } + output_greedy = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) - for output in (output_greedy, output_assisted): - self._check_outputs(output, input_ids, model.config, use_cache=True) - except AssertionError: - failed += 1 - if failed > 1: - self.assertListEqual(output_greedy.sequences.tolist(), output_assisted.sequences.tolist()) + assistant_model = model + assistant_model.generation_config.num_assistant_tokens = 2 # see b) + assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) + generation_kwargs.update({"assistant_model": assistant_model}) + output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) - for output in (output_greedy, output_assisted): - self._check_outputs(output, input_ids, model.config, use_cache=True) + # The two outputs must match and their shape must be as expected + self.assertListEqual(output_greedy.sequences.tolist(), output_assisted.sequences.tolist()) + for output in (output_greedy, output_assisted): + self._check_outputs(output, input_ids, model.config, use_cache=True) - @unittest.skip("Failing for a lot of models du to attention mask size missmatch. Works well when standalone.") def test_assisted_decoding_sample(self): - # Seeded assisted decoding will not match sample for the same seed, as the forward pass does not return the - # exact same logits (the forward pass of the main model, now with several tokens at once, has causal masking). + # In this test we don't check assisted vs non-assisted output -- seeded assisted decoding with sample will not + # match sample for the same seed, as the forward pass does not return the exact same logits (due to matmul with + # different shapes, see https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535). 
for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() - for model_name in ["bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t"] + for model_name in [ + "bigbirdpegasus", + "led", + "mega", + "speech2text", + "git", + "prophetnet", + "seamlessm4t", + "clvp", + ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache - config, input_ids, attention_mask, max_length = self._get_input_ids_and_config(batch_size=1) + config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. if not hasattr(config, "use_cache"): @@ -1599,18 +1609,27 @@ def test_assisted_decoding_sample(self): config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() - output_assisted = model.generate( - input_ids, - attention_mask=attention_mask, - max_length=max_length, - num_beams=1, - do_sample=True, - assistant_model=model, # triggers assisted decoding - output_scores=True, - output_hidden_states=True, - output_attentions=True, - return_dict_in_generate=True, - ) + # Sets assisted generation arguments such that: + # a) no EOS is generated, to ensure generation doesn't break early + # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of + # the assistant model is correct + # c) there are at least two forward passes in the main model, to ensure the input preparation of + # the main model is correct + assistant_model = model + assistant_model.generation_config.num_assistant_tokens = 2 # see b) + assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) + generation_kwargs = { + "eos_token_id": -1, # see a) + "max_new_tokens": 4, # see c) + "num_beams": 1, + "do_sample": True, + "assistant_model": assistant_model, + "output_scores": True, + "output_hidden_states": True, + "output_attentions": True, + "return_dict_in_generate": True, + } + output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) self._check_outputs(output_assisted, input_ids, model.config, use_cache=True) From 93f31e0e78e6d4bc7341ff3d34d60d78dafe1128 Mon Sep 17 00:00:00 2001 From: Nathaniel Egwu Date: Thu, 16 Nov 2023 20:44:36 +0100 Subject: [PATCH 203/268] Updated albert.md doc for ALBERT model (#27223) * Updated albert.md doc for ALBERT model * Update docs/source/en/model_doc/albert.md Fixed Resources heading Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update the ALBERT model doc resources Fixed resource example for fine-tuning the ALBERT sentence-pair classification. 
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/model_doc/albert.md Removed resource duplicate Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Updated albert.md doc with reviewed changes * Updated albert.md doc for ALBERT * Update docs/source/en/model_doc/albert.md Removed duplicates from updated docs/source/en/model_doc/albert.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/model_doc/albert.md --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- docs/source/en/model_doc/albert.md | 64 +++++++++++++++++++++++++++--- 1 file changed, 59 insertions(+), 5 deletions(-) diff --git a/docs/source/en/model_doc/albert.md b/docs/source/en/model_doc/albert.md index b7a819b2ed46..a75e67578048 100644 --- a/docs/source/en/model_doc/albert.md +++ b/docs/source/en/model_doc/albert.md @@ -59,13 +59,67 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). This - Layers are split in groups that share parameters (to save memory). Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. The model must predict if they have been swapped or not. + + +This model was contributed by [lysandre](https://huggingface.co/lysandre). This model jax version was contributed by +[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT). + + ## Resources -- [Text classification task guide](../tasks/sequence_classification) -- [Token classification task guide](../tasks/token_classification) -- [Question answering task guide](../tasks/question_answering) -- [Masked language modeling task guide](../tasks/masked_language_modeling) -- [Multiple choice task guide](../tasks/multiple_choice) + +The resources provided in the following sections consist of a list of official Hugging Face and community (indicated by 🌎) resources to help you get started with AlBERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. + + + + + +- [`AlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification). + + +- [`TFAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification). + +- [`FlaxAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb). +- Check the [Text classification task guide](../tasks/sequence_classification) on how to use the model. + + + + + +- [`AlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification). 
+ + +- [`TFAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb). + + + +- [`FlaxAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification). +- [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course. +- Check the [Token classification task guide](../tasks/token_classification) on how to use the model. + + + +- [`AlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). +- [`TFAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). +- [`FlaxAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb). +- [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course. +- Check the [Masked language modeling task guide](../tasks/masked_language_modeling) on how to use the model. + + + +- [`AlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb). +- [`TFAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb). +- [`FlaxAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering). +- [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course. +- Check the [Question answering task guide](../tasks/question_answering) on how to use the model. + +**Multiple choice** + +- [`AlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb). +- [`TFAlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb). + +- Check the [Multiple choice task guide](../tasks/multiple_choice) on how to use the model. 
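The linked scripts cover full fine-tuning runs end to end; as a quick orientation, the sketch below shows the bare inference loop those task heads share. It assumes the `albert-base-v2` checkpoint and a freshly initialized two-label sequence-classification head, both of which are illustrative choices rather than anything prescribed by the resources above.

```python
import torch
from transformers import AlbertForSequenceClassification, AutoTokenizer

# Illustrative checkpoint and label count; swap in your own fine-tuned model.
tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
model = AlbertForSequenceClassification.from_pretrained("albert-base-v2", num_labels=2)
model.eval()

# ALBERT accepts sentence pairs directly, e.g. for entailment-style tasks.
inputs = tokenizer(
    "ALBERT shares parameters across layers.",
    "The model reuses the same weights in every layer.",
    return_tensors="pt",
)

with torch.no_grad():
    logits = model(**inputs).logits
print(torch.softmax(logits, dim=-1))  # class probabilities (untrained head: roughly uniform)
```

For actual training, the example scripts listed above wire the same model into `Trainer` (or its TensorFlow/Flax equivalents) together with dataset loading and evaluation.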
+ ## AlbertConfig From b074461ef0f54ce37c5239d30ee960ece28d11ec Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Thu, 16 Nov 2023 14:07:15 -0600 Subject: [PATCH 204/268] translate Trainer.md to chinese (#27527) * translate * update * update --- docs/source/zh/_toctree.yml | 4 +- docs/source/zh/main_classes/trainer.md | 665 +++++++++++++++++++++++++ 2 files changed, 668 insertions(+), 1 deletion(-) create mode 100644 docs/source/zh/main_classes/trainer.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index 914ce68fd26d..dd3b6cf9982b 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -70,5 +70,7 @@ - sections: - local: main_classes/model title: 模型 + - local: main_classes/trainer + title: Trainer title: 主要类 - title: 应用程序接口 (API) \ No newline at end of file + title: 应用程序接口 (API) diff --git a/docs/source/zh/main_classes/trainer.md b/docs/source/zh/main_classes/trainer.md new file mode 100644 index 000000000000..049a3724114b --- /dev/null +++ b/docs/source/zh/main_classes/trainer.md @@ -0,0 +1,665 @@ + + +# Trainer + +[`Trainer`] 类提供了一个 PyTorch 的 API,用于处理大多数标准用例的全功能训练。它在大多数[示例脚本](https://github.com/huggingface/transformers/tree/main/examples)中被使用。 + + + +如果你想要使用自回归技术在文本数据集上微调像 Llama-2 或 Mistral 这样的语言模型,考虑使用 [`trl`](https://github.com/huggingface/trl) 的 [`~trl.SFTTrainer`]。[`~trl.SFTTrainer`] 封装了 [`Trainer`],专门针对这个特定任务进行了优化,并支持序列打包、LoRA、量化和 DeepSpeed,以有效扩展到任何模型大小。另一方面,[`Trainer`] 是一个更通用的选项,适用于更广泛的任务。 + + + +在实例化你的 [`Trainer`] 之前,创建一个 [`TrainingArguments`],以便在训练期间访问所有定制点。 + +这个 API 支持在多个 GPU/TPU 上进行分布式训练,支持 [NVIDIA Apex](https://github.com/NVIDIA/apex) 的混合精度和 PyTorch 的原生 AMP。 + +[`Trainer`] 包含基本的训练循环,支持上述功能。如果需要自定义训练,你可以继承 `Trainer` 并覆盖以下方法: + +- **get_train_dataloader** -- 创建训练 DataLoader。 +- **get_eval_dataloader** -- 创建评估 DataLoader。 +- **get_test_dataloader** -- 创建测试 DataLoader。 +- **log** -- 记录观察训练的各种对象的信息。 +- **create_optimizer_and_scheduler** -- 如果它们没有在初始化时传递,请设置优化器和学习率调度器。请注意,你还可以单独继承或覆盖 `create_optimizer` 和 `create_scheduler` 方法。 +- **create_optimizer** -- 如果在初始化时没有传递,则设置优化器。 +- **create_scheduler** -- 如果在初始化时没有传递,则设置学习率调度器。 +- **compute_loss** - 计算单批训练输入的损失。 +- **training_step** -- 执行一步训练。 +- **prediction_step** -- 执行一步评估/测试。 +- **evaluate** -- 运行评估循环并返回指标。 +- **predict** -- 返回在测试集上的预测(如果有标签,则包括指标)。 + + + +[`Trainer`] 类被优化用于 🤗 Transformers 模型,并在你在其他模型上使用时可能会有一些令人惊讶的结果。当在你自己的模型上使用时,请确保: + +- 你的模型始终返回元组或 [`~utils.ModelOutput`] 的子类。 +- 如果提供了 `labels` 参数,你的模型可以计算损失,并且损失作为元组的第一个元素返回(如果你的模型返回元组)。 +- 你的模型可以接受多个标签参数(在 [`TrainingArguments`] 中使用 `label_names` 将它们的名称指示给 [`Trainer`]),但它们中没有一个应该被命名为 `"label"`。 + + + +以下是如何自定义 [`Trainer`] 以使用加权损失的示例(在训练集不平衡时很有用): + +```python +from torch import nn +from transformers import Trainer + + +class CustomTrainer(Trainer): + def compute_loss(self, model, inputs, return_outputs=False): + labels = inputs.pop("labels") + # forward pass + outputs = model(**inputs) + logits = outputs.get("logits") + # compute custom loss (suppose one has 3 labels with different weights) + loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0], device=model.device)) + loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) + return (loss, outputs) if return_outputs else loss +``` + +在 PyTorch [`Trainer`] 中自定义训练循环行为的另一种方法是使用 [callbacks](callback),这些回调可以检查训练循环状态(用于进度报告、在 TensorBoard 或其他 ML 平台上记录日志等)并做出决策(比如提前停止)。 + + +## Trainer + +[[autodoc]] Trainer - all + +## Seq2SeqTrainer + +[[autodoc]] Seq2SeqTrainer - evaluate - predict + +## TrainingArguments + 
+[[autodoc]] TrainingArguments - all + +## Seq2SeqTrainingArguments + +[[autodoc]] Seq2SeqTrainingArguments - all + +## Checkpoints + +默认情况下,[`Trainer`] 会将所有checkpoints保存在你使用的 [`TrainingArguments`] 中设置的 `output_dir` 中。这些checkpoints将位于名为 `checkpoint-xxx` 的子文件夹中,xxx 是训练的步骤。 + +从checkpoints恢复训练可以通过调用 [`Trainer.train`] 时使用以下任一方式进行: + +- `resume_from_checkpoint=True`,这将从最新的checkpoint恢复训练。 +- `resume_from_checkpoint=checkpoint_dir`,这将从指定目录中的特定checkpoint恢复训练。 + +此外,当使用 `push_to_hub=True` 时,你可以轻松将checkpoints保存在 Model Hub 中。默认情况下,保存在训练中间过程的checkpoints中的所有模型都保存在不同的提交中,但不包括优化器状态。你可以根据需要调整 [`TrainingArguments`] 的 `hub-strategy` 值: + +- `"checkpoint"`: 最新的checkpoint也被推送到一个名为 last-checkpoint 的子文件夹中,让你可以通过 `trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")` 轻松恢复训练。 +- `"all_checkpoints"`: 所有checkpoints都像它们出现在输出文件夹中一样被推送(因此你将在最终存储库中的每个文件夹中获得一个checkpoint文件夹)。 + +## Logging + +默认情况下,[`Trainer`] 将对主进程使用 `logging.INFO`,对副本(如果有的话)使用 `logging.WARNING`。 + +可以通过 [`TrainingArguments`] 的参数覆盖这些默认设置,使用其中的 5 个 `logging` 级别: + +- `log_level` - 用于主进程 +- `log_level_replica` - 用于副本 + +此外,如果 [`TrainingArguments`] 的 `log_on_each_node` 设置为 `False`,则只有主节点将使用其主进程的日志级别设置,所有其他节点将使用副本的日志级别设置。 + +请注意,[`Trainer`] 将在其 [`Trainer.__init__`] 中分别为每个节点设置 `transformers` 的日志级别。因此,如果在创建 [`Trainer`] 对象之前要调用其他 `transformers` 功能,可能需要更早地设置这一点(请参见下面的示例)。 + +以下是如何在应用程序中使用的示例: + +```python +[...] +logger = logging.getLogger(__name__) + +# Setup logging +logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], +) + +# set the main code and the modules it uses to the same log-level according to the node +log_level = training_args.get_process_log_level() +logger.setLevel(log_level) +datasets.utils.logging.set_verbosity(log_level) +transformers.utils.logging.set_verbosity(log_level) + +trainer = Trainer(...) +``` + +然后,如果你只想在主节点上看到警告,并且所有其他节点不打印任何可能重复的警告,可以这样运行: + +```bash +my_app.py ... --log_level warning --log_level_replica error +``` + +在多节点环境中,如果你也不希望每个节点的主进程的日志重复输出,你需要将上面的代码更改为: + +```bash +my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0 +``` + +然后,只有第一个节点的主进程将以 "warning" 级别记录日志,主节点上的所有其他进程和其他节点上的所有进程将以 "error" 级别记录日志。 + +如果你希望应用程序尽可能”安静“,可以执行以下操作: + + +```bash +my_app.py ... --log_level error --log_level_replica error --log_on_each_node 0 +``` + +(如果在多节点环境,添加 `--log_on_each_node 0`) + + +## 随机性 + +当从 [`Trainer`] 生成的checkpoint恢复训练时,程序会尽一切努力将 _python_、_numpy_ 和 _pytorch_ 的 RNG(随机数生成器)状态恢复为保存检查点时的状态,这样可以使“停止和恢复”式训练尽可能接近“非停止式”训练。 + +然而,由于各种默认的非确定性 PyTorch 设置,这可能无法完全实现。如果你想要完全确定性,请参阅[控制随机源](https://pytorch.org/docs/stable/notes/randomness)。正如文档中所解释的那样,使事物变得确定的一些设置(例如 `torch.backends.cudnn.deterministic`)可能会减慢速度,因此不能默认执行,但如果需要,你可以自行启用这些设置。 + + +## 特定GPU选择 + +让我们讨论一下如何告诉你的程序应该使用哪些 GPU 以及使用的顺序。 + +当使用 [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) 且仅使用部分 GPU 时,你只需指定要使用的 GPU 数量。例如,如果你有 4 个 GPU,但只想使用前 2 个,可以执行以下操作: + + +```bash +python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ... +``` + +如果你安装了 [`accelerate`](https://github.com/huggingface/accelerate) 或 [`deepspeed`](https://github.com/microsoft/DeepSpeed),你还可以通过以下任一方法实现相同的效果: + + +```bash +accelerate launch --num_processes 2 trainer-program.py ... +``` + +```bash +deepspeed --num_gpus 2 trainer-program.py ... 
+``` + +你不需要使用 Accelerate 或 [Deepspeed 集成](Deepspeed) 功能来使用这些启动器。 + +到目前为止,你已经能够告诉程序要使用多少个 GPU。现在让我们讨论如何选择特定的 GPU 并控制它们的顺序。 + +以下环境变量可帮助你控制使用哪些 GPU 以及它们的顺序。 + + +**`CUDA_VISIBLE_DEVICES`** + +如果你有多个 GPU,想要仅使用其中的一个或几个 GPU,请将环境变量 `CUDA_VISIBLE_DEVICES` 设置为要使用的 GPU 列表。 + +例如,假设你有 4 个 GPU:0、1、2 和 3。要仅在物理 GPU 0 和 2 上运行,你可以执行以下操作: + + +```bash +CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ... +``` + +现在,PyTorch 将只看到 2 个 GPU,其中你的物理 GPU 0 和 2 分别映射到 `cuda:0` 和 `cuda:1`。 + +你甚至可以改变它们的顺序: + + +```bash +CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ... +``` + +这里,你的物理 GPU 0 和 2 分别映射到 `cuda:1` 和 `cuda:0`。 + +上面的例子都是针对 `DistributedDataParallel` 使用模式的,但同样的方法也适用于 [`DataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html): + + +```bash +CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ... +``` + +为了模拟没有 GPU 的环境,只需将此环境变量设置为空值,如下所示: + +```bash +CUDA_VISIBLE_DEVICES= python trainer-program.py ... +``` + +与任何环境变量一样,你当然可以将其export到环境变量而不是将其添加到命令行,如下所示: + + +```bash +export CUDA_VISIBLE_DEVICES=0,2 +python -m torch.distributed.launch trainer-program.py ... +``` + +这种方法可能会令人困惑,因为你可能会忘记之前设置了环境变量,进而不明白为什么会使用错误的 GPU。因此,在同一命令行中仅为特定运行设置环境变量是一种常见做法,正如本节大多数示例所示。 + + +**`CUDA_DEVICE_ORDER`** + +还有一个额外的环境变量 `CUDA_DEVICE_ORDER`,用于控制物理设备的排序方式。有两个选择: + +1. 按 PCIe 总线 ID 排序(与 nvidia-smi 的顺序相匹配)- 这是默认选项。 + + +```bash +export CUDA_DEVICE_ORDER=PCI_BUS_ID +``` + +2. 按 GPU 计算能力排序。 + +```bash +export CUDA_DEVICE_ORDER=FASTEST_FIRST +``` + +大多数情况下,你不需要关心这个环境变量,但如果你的设置不均匀,那么这将非常有用,例如,您的旧 GPU 和新 GPU 物理上安装在一起,但让速度较慢的旧卡排在运行的第一位。解决这个问题的一种方法是交换卡的位置。但如果不能交换卡(例如,如果设备的散热受到影响),那么设置 `CUDA_DEVICE_ORDER=FASTEST_FIRST` 将始终将较新、更快的卡放在第一位。但这可能会有点混乱,因为 `nvidia-smi` 仍然会按照 PCIe 顺序报告它们。 + +交换卡的顺序的另一种方法是使用: + + +```bash +export CUDA_VISIBLE_DEVICES=1,0 +``` + +在此示例中,我们只使用了 2 个 GPU,但是当然,对于计算机上有的任何数量的 GPU,都适用相同的方法。 + +此外,如果你设置了这个环境变量,最好将其设置在 `~/.bashrc` 文件或其他启动配置文件中,然后就可以忘记它了。 + + +## Trainer集成 + +[`Trainer`] 已经被扩展,以支持可能显著提高训练时间并适应更大模型的库。 + +目前,它支持第三方解决方案 [DeepSpeed](https://github.com/microsoft/DeepSpeed) 和 [PyTorch FSDP](https://pytorch.org/docs/stable/fsdp.html),它们实现了论文 [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models, by Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He](https://arxiv.org/abs/1910.02054) 的部分内容。 + +截至撰写本文,此提供的支持是新的且实验性的。尽管我们欢迎围绕 DeepSpeed 和 PyTorch FSDP 的issues,但我们不再支持 FairScale 集成,因为它已经集成到了 PyTorch 主线(参见 [PyTorch FSDP 集成](#pytorch-fully-sharded-data-parallel))。 + + + + +### CUDA拓展安装注意事项 + + +撰写时,Deepspeed 需要在使用之前编译 CUDA C++ 代码。 + +虽然所有安装问题都应通过 [Deepspeed](https://github.com/microsoft/DeepSpeed/issues) 的 GitHub Issues处理,但在构建依赖CUDA 扩展的任何 PyTorch 扩展时,可能会遇到一些常见问题。 + +因此,如果在执行以下操作时遇到与 CUDA 相关的构建问题: + + +```bash +pip install deepspeed +``` + +请首先阅读以下说明。 + +在这些说明中,我们提供了在 `pytorch` 使用 CUDA `10.2` 构建时应采取的操作示例。如果你的情况有所不同,请记得将版本号调整为您所需的版本。 + + +#### 可能的问题 #1 + +尽管 PyTorch 自带了其自己的 CUDA 工具包,但要构建这两个项目,你必须在整个系统上安装相同版本的 CUDA。 + +例如,如果你在 Python 环境中使用 `cudatoolkit==10.2` 安装了 `pytorch`,你还需要在整个系统上安装 CUDA `10.2`。 + +确切的位置可能因系统而异,但在许多 Unix 系统上,`/usr/local/cuda-10.2` 是最常见的位置。当 CUDA 正确设置并添加到 `PATH` 环境变量时,可以通过执行以下命令找到安装位置: + + +```bash +which nvcc +``` + +如果你尚未在整个系统上安装 CUDA,请首先安装。你可以使用你喜欢的搜索引擎查找说明。例如,如果你使用的是 Ubuntu,你可能想搜索:[ubuntu cuda 10.2 install](https://www.google.com/search?q=ubuntu+cuda+10.2+install)。 + + +#### 可能的问题 #2 + +另一个可能的常见问题是你可能在整个系统上安装了多个 CUDA 工具包。例如,你可能有: + + +```bash +/usr/local/cuda-10.2 +/usr/local/cuda-11.0 +``` + +在这种情况下,你需要确保 `PATH` 和 `LD_LIBRARY_PATH` 环境变量包含所需 CUDA 
版本的正确路径。通常,软件包安装程序将设置这些变量以包含最新安装的版本。如果遇到构建失败的问题,且是因为在整个系统安装但软件仍找不到正确的 CUDA 版本,这意味着你需要调整这两个环境变量。 + +首先,你以查看它们的内容: + + +```bash +echo $PATH +echo $LD_LIBRARY_PATH +``` + +因此,您可以了解其中的内容。 + +`LD_LIBRARY_PATH` 可能是空的。 + +`PATH` 列出了可以找到可执行文件的位置,而 `LD_LIBRARY_PATH` 用于查找共享库。在这两种情况下,较早的条目优先于较后的条目。 `:` 用于分隔多个条目。 + +现在,为了告诉构建程序在哪里找到特定的 CUDA 工具包,请插入所需的路径,让其首先列出: + + +```bash +export PATH=/usr/local/cuda-10.2/bin:$PATH +export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH +``` + +请注意,我们没有覆盖现有值,而是在前面添加新的值。 + +当然,根据需要调整版本号和完整路径。检查你分配的目录是否实际存在。`lib64` 子目录是各种 CUDA `.so` 对象(如 `libcudart.so`)的位置,这个名字可能在你的系统中是不同的,如果是,请调整以反映实际情况。 + + +#### 可能的问题 #3 + +一些较旧的 CUDA 版本可能会拒绝使用更新的编译器。例如,你可能有 `gcc-9`,但 CUDA 可能需要 `gcc-7`。 + +有各种方法可以解决这个问题。 + +如果你可以安装最新的 CUDA 工具包,通常它应该支持更新的编译器。 + +或者,你可以在已经拥有的编译器版本之外安装较低版本,或者你可能已经安装了它但它不是默认的编译器,因此构建系统无法找到它。如果你已经安装了 `gcc-7` 但构建系统找不到它,以下操作可能会解决问题: + + +```bash +sudo ln -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc +sudo ln -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++ +``` + +这里,我们正在从 `/usr/local/cuda-10.2/bin/gcc` 创建到 `gcc-7` 的软链接,由于 `/usr/local/cuda-10.2/bin/` 应该在 `PATH` 环境变量中(参见前一个问题的解决方案),它应该能够找到 `gcc-7`(和 `g++7`),然后构建将成功。 + +与往常一样,请确保编辑示例中的路径以匹配你的情况。 + + + +### PyTorch完全分片数据并行(FSDP) + +为了加速在更大批次大小上训练庞大模型,我们可以使用完全分片的数据并行模型。这种数据并行范例通过对优化器状态、梯度和参数进行分片,实现了在更多数据和更大模型上的训练。要了解更多信息以及其优势,请查看[完全分片的数据并行博客](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/)。我们已经集成了最新的PyTorch完全分片的数据并行(FSDP)训练功能。您只需通过配置启用它。 + +**FSDP支持所需的PyTorch版本**: PyTorch Nightly(或者如果你在发布后阅读这个,使用1.12.0版本,因为带有激活的FSDP的模型保存仅在最近的修复中可用。 + + +**用法**: + +- 如果你尚未使用过分布式启动器,确保你已经添加了它 `-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE`。 + +- **分片策略**: + - FULL_SHARD:在数据并行线程/GPU之间,对优化器状态、梯度和模型参数进行分片。 + 为此,请在命令行参数中添加 `--fsdp full_shard`。 + - SHARD_GRAD_OP:在数据并行线程/GPU之间对优化器状态和梯度进行分片。 + 为此,请在命令行参数中添加 `--fsdp shard_grad_op`。 + - NO_SHARD:不进行分片。为此,请在命令行参数中添加 `--fsdp no_shard`。 +- 要将参数和梯度卸载到CPU,添加 `--fsdp "full_shard offload"` 或 `--fsdp "shard_grad_op offload"` 到命令行参数中。 +- 要使用 `default_auto_wrap_policy` 自动递归地用FSDP包装层,请添加 `--fsdp "full_shard auto_wrap"` 或 `--fsdp "shard_grad_op auto_wrap"` 到命令行参数中。 +- 要同时启用CPU卸载和自动包装层工具,请添加 `--fsdp "full_shard offload auto_wrap"` 或 `--fsdp "shard_grad_op offload auto_wrap"` 到命令行参数中。 +- 其余的FSDP配置通过 `--fsdp_config ` 传递。它可以是FSDP json配置文件的位置(例如,`fsdp_config.json`)或已加载的json文件作为 `dict`。 + - 如果启用了自动包装,您可以使用基于transformer的自动包装策略或基于大小的自动包装策略。 + - 对于基于transformer的自动包装策略,建议在配置文件中指定 `fsdp_transformer_layer_cls_to_wrap`。如果未指定,则默认值为 `model._no_split_modules`(如果可用)。这将指定要包装的transformer层类名(区分大小写),例如 [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] 等。这很重要,因为共享权重的子模块(例如,embedding层)不应最终出现在不同的FSDP包装单元中。使用此策略,每个包装的块将包含多头注意力和后面的几个MLP层。剩余的层,包括共享的embedding层,都将被方便地包装在同一个最外层的FSDP单元中。因此,对于基于transformer的模型,请使用这个方法。 + - 对于基于大小的自动包装策略,请在配置文件中添加 `fsdp_min_num_params`。它指定了FSDP进行自动包装的最小参数数量。 + - 可以在配置文件中指定 `fsdp_backward_prefetch`。它控制何时预取下一组参数。`backward_pre` 和 `backward_pos` 是可用的选项。有关更多信息,请参阅 `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch` + - 可以在配置文件中指定 `fsdp_forward_prefetch`。它控制何时预取下一组参数。如果是`"True"`,在执行前向传递时,FSDP明确地预取下一次即将发生的全局聚集。 + - 可以在配置文件中指定 `limit_all_gathers`。如果是`"True"`,FSDP明确地同步CPU线程,以防止太多的进行中的全局聚集。 + - 可以在配置文件中指定 `activation_checkpointing`。如果是`"True"`,FSDP activation checkpoint是一种通过清除某些层的激活值并在反向传递期间重新计算它们来减少内存使用的技术。实际上,这以更多的计算时间为代价减少了内存使用。 + + +**需要注意几个注意事项** +- 它与 `generate` 不兼容,因此与所有seq2seq/clm脚本(翻译/摘要/clm等)中的 `--predict_with_generate` 不兼容。请参阅issue[#21667](https://github.com/huggingface/transformers/issues/21667)。 + + +### PyTorch/XLA 完全分片数据并行 + 
+对于所有TPU用户,有个好消息!PyTorch/XLA现在支持FSDP。所有最新的完全分片数据并行(FSDP)训练都受支持。有关更多信息,请参阅[在云端TPU上使用FSDP扩展PyTorch模型](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/)和[PyTorch/XLA FSDP的实现](https://github.com/pytorch/xla/tree/master/torch_xla/distributed/fsdp)。使用它只需通过配置启用。 + +**需要的 PyTorch/XLA 版本以支持 FSDP**:>=2.0 + +**用法**: + +传递 `--fsdp "full shard"`,同时对 `--fsdp_config ` 进行以下更改: +- `xla` 应设置为 `True` 以启用 PyTorch/XLA FSDP。 +- `xla_fsdp_settings` 的值是一个字典,存储 XLA FSDP 封装参数。完整的选项列表,请参见[此处](https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py)。 +- `xla_fsdp_grad_ckpt`。当 `True` 时,在每个嵌套的 XLA FSDP 封装层上使用梯度checkpoint。该设置只能在将 xla 标志设置为 true,并通过 `fsdp_min_num_params` 或 `fsdp_transformer_layer_cls_to_wrap` 指定自动包装策略时使用。 +- 您可以使用基于transformer的自动包装策略或基于大小的自动包装策略。 + - 对于基于transformer的自动包装策略,建议在配置文件中指定 `fsdp_transformer_layer_cls_to_wrap`。如果未指定,默认值为 `model._no_split_modules`(如果可用)。这指定了要包装的transformer层类名列表(区分大小写),例如 [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] 等。这很重要,因为共享权重的子模块(例如,embedding层)不应最终出现在不同的FSDP包装单元中。使用此策略,每个包装的块将包含多头注意力和后面的几个MLP层。剩余的层,包括共享的embedding层,都将被方便地包装在同一个最外层的FSDP单元中。因此,对于基于transformer的模型,请使用这个方法。 + - 对于基于大小的自动包装策略,请在配置文件中添加 `fsdp_min_num_params`。它指定了自动包装的 FSDP 的最小参数数量。 + + +### 在 Mac 上使用 Trainer 进行加速的 PyTorch 训练 + +随着 PyTorch v1.12 版本的发布,开发人员和研究人员可以利用 Apple Silicon GPU 进行显著更快的模型训练。这使得可以在 Mac 上本地执行原型设计和微调等机器学习工作流程。Apple 的 Metal Performance Shaders(MPS)作为 PyTorch 的后端实现了这一点,并且可以通过新的 `"mps"` 设备来使用。 +这将在 MPS 图形框架上映射计算图和神经图元,并使用 MPS 提供的优化内核。更多信息,请参阅官方文档 [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/) 和 [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html)。 + + + + +我们强烈建议在你的 MacOS 机器上安装 PyTorch >= 1.13(在撰写本文时为最新版本)。对于基于 transformer 的模型, 它提供与模型正确性和性能改进相关的重大修复。有关更多详细信息,请参阅[pytorch/pytorch#82707](https://github.com/pytorch/pytorch/issues/82707)。 + + + +**使用 Apple Silicon 芯片进行训练和推理的好处** + +1. 使用户能够在本地训练更大的网络或批量数据。 +2. 由于统一内存架构,减少数据检索延迟,并为 GPU 提供对完整内存存储的直接访问。从而提高端到端性能。 +3. 降低与基于云的开发或需要额外本地 GPU 的成本。 + +**先决条件**:要安装带有 mps 支持的 torch,请按照这篇精彩的 Medium 文章操作 [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1)。 + +**用法**: +如果可用,`mps` 设备将默认使用,类似于使用 `cuda` 设备的方式。因此,用户无需采取任何操作。例如,您可以使用以下命令在 Apple Silicon GPU 上运行官方的 Glue 文本分类任务(从根文件夹运行): + +```bash +export TASK_NAME=mrpc + +python examples/pytorch/text-classification/run_glue.py \ + --model_name_or_path bert-base-cased \ + --task_name $TASK_NAME \ + --do_train \ + --do_eval \ + --max_seq_length 128 \ + --per_device_train_batch_size 32 \ + --learning_rate 2e-5 \ + --num_train_epochs 3 \ + --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir +``` + +**需要注意的一些注意事项** + +1. 一些 PyTorch 操作尚未在 mps 中实现,将引发错误。解决此问题的一种方法是设置环境变量 `PYTORCH_ENABLE_MPS_FALLBACK=1`,它将把这些操作回退到 CPU 进行。然而,它仍然会抛出 UserWarning 信息。 +2. 分布式设置 `gloo` 和 `nccl` 在 `mps` 设备上不起作用。这意味着当前只能使用 `mps` 设备类型的单个 GPU。 + +最后,请记住,🤗 `Trainer` 仅集成了 MPS 后端,因此如果你在使用 MPS 后端时遇到任何问题或有疑问,请在 [PyTorch GitHub](https://github.com/pytorch/pytorch/issues) 上提交问题。 + + +## 通过 Accelerate Launcher 使用 Trainer + +Accelerate 现在支持 Trainer。用户可以期待以下内容: +- 他们可以继续使用 Trainer 的迭代,如 FSDP、DeepSpeed 等,而无需做任何更改。 +- 现在可以在 Trainer 中使用 Accelerate Launcher(建议使用)。 + +通过 Accelerate Launcher 使用 Trainer 的步骤: +1. 确保已安装 🤗 Accelerate,无论如何,如果没有它,你无法使用 `Trainer`。如果没有,请执行 `pip install accelerate`。你可能还需要更新 Accelerate 的版本:`pip install accelerate --upgrade`。 +2. 运行 `accelerate config` 并填写问题。以下是一些加速配置的示例: + + a. 
DDP 多节点多 GPU 配置: + + ```yaml + compute_environment: LOCAL_MACHINE + distributed_type: MULTI_GPU + downcast_bf16: 'no' + gpu_ids: all + machine_rank: 0 #change rank as per the node + main_process_ip: 192.168.20.1 + main_process_port: 9898 + main_training_function: main + mixed_precision: fp16 + num_machines: 2 + num_processes: 8 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + + b. FSDP 配置: + + ```yaml + compute_environment: LOCAL_MACHINE + distributed_type: FSDP + downcast_bf16: 'no' + fsdp_config: + fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP + fsdp_backward_prefetch_policy: BACKWARD_PRE + fsdp_forward_prefetch: true + fsdp_offload_params: false + fsdp_sharding_strategy: 1 + fsdp_state_dict_type: FULL_STATE_DICT + fsdp_sync_module_states: true + fsdp_transformer_layer_cls_to_wrap: BertLayer + fsdp_use_orig_params: true + machine_rank: 0 + main_training_function: main + mixed_precision: bf16 + num_machines: 1 + num_processes: 2 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + + c. 指向文件的 DeepSpeed 配置: + + ```yaml + compute_environment: LOCAL_MACHINE + deepspeed_config: + deepspeed_config_file: /home/user/configs/ds_zero3_config.json + zero3_init_flag: true + distributed_type: DEEPSPEED + downcast_bf16: 'no' + machine_rank: 0 + main_training_function: main + num_machines: 1 + num_processes: 4 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + + d. 使用 accelerate 插件的 DeepSpeed 配置: + + ```yaml + compute_environment: LOCAL_MACHINE + deepspeed_config: + gradient_accumulation_steps: 1 + gradient_clipping: 0.7 + offload_optimizer_device: cpu + offload_param_device: cpu + zero3_init_flag: true + zero_stage: 2 + distributed_type: DEEPSPEED + downcast_bf16: 'no' + machine_rank: 0 + main_training_function: main + mixed_precision: bf16 + num_machines: 1 + num_processes: 4 + rdzv_backend: static + same_network: true + tpu_env: [] + tpu_use_cluster: false + tpu_use_sudo: false + use_cpu: false + ``` + +3. 使用accelerate配置文件参数或启动器参数以外的参数运行Trainer脚本。以下是一个使用上述FSDP配置从accelerate启动器运行`run_glue.py`的示例。 + +```bash +cd transformers + +accelerate launch \ +./examples/pytorch/text-classification/run_glue.py \ +--model_name_or_path bert-base-cased \ +--task_name $TASK_NAME \ +--do_train \ +--do_eval \ +--max_seq_length 128 \ +--per_device_train_batch_size 16 \ +--learning_rate 5e-5 \ +--num_train_epochs 3 \ +--output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir +``` + +4. 
你也可以直接使用`accelerate launch`的cmd参数。上面的示例将映射到: + +```bash +cd transformers + +accelerate launch --num_processes=2 \ +--use_fsdp \ +--mixed_precision=bf16 \ +--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \ +--fsdp_transformer_layer_cls_to_wrap="BertLayer" \ +--fsdp_sharding_strategy=1 \ +--fsdp_state_dict_type=FULL_STATE_DICT \ +./examples/pytorch/text-classification/run_glue.py +--model_name_or_path bert-base-cased \ +--task_name $TASK_NAME \ +--do_train \ +--do_eval \ +--max_seq_length 128 \ +--per_device_train_batch_size 16 \ +--learning_rate 5e-5 \ +--num_train_epochs 3 \ +--output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir +``` + +有关更多信息,请参阅 🤗 Accelerate CLI 指南:[启动您的 🤗 Accelerate 脚本](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 + +已移动的部分: + +[ DeepSpeed | Installation | Deployment with multiple GPUs | Deployment with one GPU | Deployment in Notebooks | Configuration | Passing Configuration | Shared Configuration | ZeRO | ZeRO-2 Config | ZeRO-3 Config | NVMe Support | ZeRO-2 vs ZeRO-3 Performance | ZeRO-2 Example | ZeRO-3 Example | Optimizer | Scheduler | fp32 Precision | Automatic Mixed Precision | Batch Size | Gradient Accumulation | Gradient Clipping | Getting The Model Weights Out] + + +## 通过 NEFTune 提升微调性能 + +NEFTune 是一种提升聊天模型性能的技术,由 Jain 等人在论文“NEFTune: Noisy Embeddings Improve Instruction Finetuning” 中引入。该技术在训练过程中向embedding向量添加噪音。根据论文摘要: + +> 使用 Alpaca 对 LLaMA-2-7B 进行标准微调,可以在 AlpacaEval 上达到 29.79%,而使用带有噪音embedding的情况下,性能提高至 64.69%。NEFTune 还在modern instruction数据集上大大优于基线。Evol-Instruct 训练的模型表现提高了 10%,ShareGPT 提高了 8%,OpenPlatypus 提高了 8%。即使像 LLaMA-2-Chat 这样通过 RLHF 进一步细化的强大模型,通过 NEFTune 的额外训练也能受益。 + +
+
+<!-- figure: NEFTune vs. standard fine-tuning results (image not reproduced here) -->
+
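For intuition about what `neftune_noise_alpha` controls, the snippet below is a minimal sketch of the idea from the paper: during training only, uniform noise scaled by `alpha / sqrt(seq_len * hidden_dim)` is added to the embedding output. It is a standalone illustration written against a generic embedding tensor, not the exact hook `Trainer` registers internally.

```python
import torch

def neftune_style_noise(embeddings: torch.Tensor, neftune_noise_alpha: float = 0.1) -> torch.Tensor:
    """Add NEFTune-style noise to a (batch, seq_len, hidden_dim) embedding tensor."""
    seq_len, hidden_dim = embeddings.size(1), embeddings.size(2)
    # Noise is drawn from U(-1, 1) and scaled by alpha / sqrt(L * d), as described in the paper.
    magnitude = neftune_noise_alpha / (seq_len * hidden_dim) ** 0.5
    return embeddings + torch.empty_like(embeddings).uniform_(-magnitude, magnitude)

# Example: noisy embeddings for a toy batch during training.
noisy = neftune_style_noise(torch.randn(2, 16, 128), neftune_noise_alpha=0.1)
```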
+ +要在 `Trainer` 中使用它,只需在创建 `TrainingArguments` 实例时传递 `neftune_noise_alpha`。请注意,为了避免任何意外行为,NEFTune在训练后被禁止,以此恢复原始的embedding层。 + +```python +from transformers import Trainer, TrainingArguments + +args = TrainingArguments(..., neftune_noise_alpha=0.1) +trainer = Trainer(..., args=args) + +... + +trainer.train() +``` From fe3ce061c4a2ff0bbd37f6a6a1c4b533fcbcca58 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 17 Nov 2023 10:35:04 +0100 Subject: [PATCH 205/268] Skip some fuyu tests (#27553) * fix * fix --------- Co-authored-by: ydshieh --- tests/models/fuyu/test_modeling_fuyu.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/models/fuyu/test_modeling_fuyu.py b/tests/models/fuyu/test_modeling_fuyu.py index 62d58127973d..d475e1e0ca04 100644 --- a/tests/models/fuyu/test_modeling_fuyu.py +++ b/tests/models/fuyu/test_modeling_fuyu.py @@ -293,6 +293,21 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass + # TODO: Fix me (once this model gets more usage) + @unittest.skip("Does not work on the tiny model.") + def test_disk_offload_bin(self): + super().test_disk_offload() + + # TODO: Fix me (once this model gets more usage) + @unittest.skip("Does not work on the tiny model.") + def test_disk_offload_safetensors(self): + super().test_disk_offload() + + # TODO: Fix me (once this model gets more usage) + @unittest.skip("Does not work on the tiny model.") + def test_model_parallelism(self): + super().test_model_parallelism() + @slow @require_torch_gpu From d903abfccc0f0fc8e73364cf5418a26118bda99e Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 17 Nov 2023 10:44:37 +0100 Subject: [PATCH 206/268] Fix AMD CI not showing GPU (#27555) fix Co-authored-by: ydshieh --- .github/workflows/self-push-amd.yml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/self-push-amd.yml b/.github/workflows/self-push-amd.yml index c72f224a300c..19857981b12d 100644 --- a/.github/workflows/self-push-amd.yml +++ b/.github/workflows/self-push-amd.yml @@ -38,14 +38,16 @@ jobs: runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] container: image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now - options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: - name: ROCM-SMI + run: | + rocm-smi + - name: ROCM-INFO run: | rocminfo | grep "Agent" -A 14 - - name: Show HIP environment + - name: Show ROCR environment run: | - echo "HIP: $HIP_VISIBLE_DEVICES" echo "ROCR: $ROCR_VISIBLE_DEVICES" setup_gpu: @@ -57,7 +59,7 @@ jobs: runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] container: image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now - options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v 
/mnt/cache/.cache/huggingface:/mnt/cache/ outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} test_map: ${{ steps.set-matrix.outputs.test_map }} @@ -155,7 +157,7 @@ jobs: runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] container: image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now - options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: # Necessary to get the correct branch name and commit SHA for `workflow_run` event # We also take into account the `push` event (we might want to test some changes in a branch) @@ -206,11 +208,13 @@ jobs: echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV - name: ROCM-SMI + run: | + rocm-smi + - name: ROCM-INFO run: | rocminfo | grep "Agent" -A 14 - - name: Show HIP environment + - name: Show ROCR environment run: | - echo "HIP: $HIP_VISIBLE_DEVICES" echo "ROCR: $ROCR_VISIBLE_DEVICES" - name: Environment From 913d03dc5e78b82c24be7a52c9ad06dd1022f1e2 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Fri, 17 Nov 2023 10:15:00 +0000 Subject: [PATCH 207/268] Generate: fix flaky tests (#27543) --- src/transformers/generation/logits_process.py | 3 +- tests/generation/test_logits_process.py | 2 +- tests/generation/test_utils.py | 40 ++++++++----------- 3 files changed, 20 insertions(+), 25 deletions(-) diff --git a/src/transformers/generation/logits_process.py b/src/transformers/generation/logits_process.py index 3d1801b24804..d1704ed02074 100644 --- a/src/transformers/generation/logits_process.py +++ b/src/transformers/generation/logits_process.py @@ -1301,8 +1301,9 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> to # set all nan values to 0.0 scores[scores != scores] = 0.0 - # set all inf values to max possible value + # set all +/-inf values to max/min possible value scores[scores == float("inf")] = torch.finfo(scores.dtype).max + scores[scores == float("-inf")] = torch.finfo(scores.dtype).min return scores diff --git a/tests/generation/test_logits_process.py b/tests/generation/test_logits_process.py index 15f5cf1e4f46..9e5ccd16eb7d 100644 --- a/tests/generation/test_logits_process.py +++ b/tests/generation/test_logits_process.py @@ -692,7 +692,7 @@ def test_remove_nan_inf_logits_processor(self): torch.allclose( scores, torch.tensor( - [[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, float("-inf")]], + [[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, torch.finfo(scores.dtype).min]], device=torch_device, ), atol=1e-6, diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 1e76c88c71f9..729c7f873404 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -124,9 +124,14 @@ def _get_logits_processor_and_kwargs( process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, "bad_words_ids": [[1, 0]], - "no_repeat_ngram_size": 2, "repetition_penalty": 1.2, + "remove_invalid_values": True, } + # NoRepeatNGramLogitsProcessor + forced tokens may result in no valid continuations + if forced_bos_token_id is None and forced_eos_token_id is None: + process_kwargs["no_repeat_ngram_size"] = 2 + + # NOTE: the order of operations here should match `generate` for 
accurate testing logits_processor = LogitsProcessorList( ( [ @@ -154,12 +159,16 @@ def _get_logits_processor_and_kwargs( if forced_eos_token_id is not None else [] ) - + [ - NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id), - NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]), - RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]), - ] + + [NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id)] + + ( + [NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"])] + if forced_bos_token_id is None and forced_eos_token_id is None + else [] + ) + + [RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"])] + + [InfNanRemoveLogitsProcessor()] # prevent flaky generation test failures ) + return process_kwargs, logits_processor @staticmethod @@ -282,7 +291,6 @@ def _greedy_generate( output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, - remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, ) @@ -340,7 +348,6 @@ def _sample_generate( output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, - remove_invalid_values=True, **logits_warper_kwargs, **process_kwargs, **model_kwargs, @@ -361,9 +368,6 @@ def _sample_generate( elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) - # prevent flaky generation test failures - logits_processor.append(InfNanRemoveLogitsProcessor()) - with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( @@ -405,7 +409,6 @@ def _beam_search_generate( output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, - remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, @@ -467,7 +470,6 @@ def _beam_sample_generate( output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, - remove_invalid_values=True, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, @@ -534,7 +536,6 @@ def _group_beam_search_generate( output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, - remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, @@ -596,7 +597,6 @@ def _constrained_beam_search_generate( output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, - remove_invalid_values=True, constraints=constraints, **beam_kwargs, **logits_process_kwargs, @@ -671,7 +671,6 @@ def _contrastive_generate( output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, - remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, @@ -1284,13 +1283,8 @@ def test_constrained_beam_search_generate(self): # check `generate()` and `constrained_beam_search()` are equal # Sample constraints - if not input_ids.dtype == torch.float32: - min_id = torch.min(input_ids) + 3 - max_id = torch.max(input_ids) - else: - # otherwise this throws an error for Speech2TextModel since its inputs are floating points - min_id = 3 - max_id = 100 + min_id = 3 + max_id = config.vocab_size force_tokens = torch.randint(min_id, 
max_id, (1, 2)).tolist()[0] constraints = [ From 5330b83bc5637b8e7eafe095c22ef19e21baff2d Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Fri, 17 Nov 2023 11:23:09 +0000 Subject: [PATCH 208/268] Generate: update compute transition scores doctest (#27558) --- src/transformers/generation/utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 14e4b1012911..077bc16aff8b 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -1217,9 +1217,10 @@ def compute_transition_scores( ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False ... ) >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores. - >>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the + >>> # Tip 1: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the >>> # use case, you might want to recompute it with `normalize_logits=True`. - >>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1) + >>> # Tip 2: the output length does NOT include the input length + >>> output_length = np.sum(transition_scores.numpy() < 0, axis=1) >>> length_penalty = model.generation_config.length_penalty >>> reconstructed_scores = transition_scores.sum(axis=1) / (output_length**length_penalty) >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores)) From 638d49983f36af910934b38771b4e55c835c1774 Mon Sep 17 00:00:00 2001 From: "V.Prasanna kumar" Date: Fri, 17 Nov 2023 21:50:42 +0530 Subject: [PATCH 209/268] fixed broken link (#27560) --- docs/source/en/tasks/language_modeling.md | 2 +- docs/source/en/tasks/masked_language_modeling.md | 2 +- docs/source/es/tasks/language_modeling.md | 2 +- docs/source/ko/tasks/language_modeling.md | 2 +- docs/source/ko/tasks/masked_language_modeling.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/source/en/tasks/language_modeling.md b/docs/source/en/tasks/language_modeling.md index 0b9c24870219..2eac6ec12328 100644 --- a/docs/source/en/tasks/language_modeling.md +++ b/docs/source/en/tasks/language_modeling.md @@ -110,7 +110,7 @@ The next step is to load a DistilGPT2 tokenizer to process the `text` subfield: ``` You'll notice from the example above, the `text` field is actually nested inside `answers`. This means you'll need to -extract the `text` subfield from its nested structure with the [`flatten`](https://huggingface.co/docs/datasets/process.html#flatten) method: +extract the `text` subfield from its nested structure with the [`flatten`](https://huggingface.co/docs/datasets/process#flatten) method: ```py >>> eli5 = eli5.flatten() diff --git a/docs/source/en/tasks/masked_language_modeling.md b/docs/source/en/tasks/masked_language_modeling.md index ba1e9e50dbe8..e716447b83bb 100644 --- a/docs/source/en/tasks/masked_language_modeling.md +++ b/docs/source/en/tasks/masked_language_modeling.md @@ -105,7 +105,7 @@ For masked language modeling, the next step is to load a DistilRoBERTa tokenizer ``` You'll notice from the example above, the `text` field is actually nested inside `answers`. 
This means you'll need to e -xtract the `text` subfield from its nested structure with the [`flatten`](https://huggingface.co/docs/datasets/process.html#flatten) method: +xtract the `text` subfield from its nested structure with the [`flatten`](https://huggingface.co/docs/datasets/process#flatten) method: ```py >>> eli5 = eli5.flatten() diff --git a/docs/source/es/tasks/language_modeling.md b/docs/source/es/tasks/language_modeling.md index 66ac8fb0d4b5..34bd8a2f70e0 100644 --- a/docs/source/es/tasks/language_modeling.md +++ b/docs/source/es/tasks/language_modeling.md @@ -94,7 +94,7 @@ Para modelados de lenguaje por enmascaramiento carga el tokenizador DistilRoBERT >>> tokenizer = AutoTokenizer.from_pretrained("distilroberta-base") ``` -Extrae el subcampo `text` desde su estructura anidado con el método [`flatten`](https://huggingface.co/docs/datasets/process.html#flatten): +Extrae el subcampo `text` desde su estructura anidado con el método [`flatten`](https://huggingface.co/docs/datasets/process#flatten): ```py >>> eli5 = eli5.flatten() diff --git a/docs/source/ko/tasks/language_modeling.md b/docs/source/ko/tasks/language_modeling.md index ba540825c295..bf10660c61c1 100644 --- a/docs/source/ko/tasks/language_modeling.md +++ b/docs/source/ko/tasks/language_modeling.md @@ -107,7 +107,7 @@ pip install transformers datasets evaluate >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") ``` -위의 예제에서 알 수 있듯이, `text` 필드는 `answers` 아래에 중첩되어 있습니다. 따라서 [`flatten`](https://huggingface.co/docs/datasets/process.html#flatten) 메소드를 사용하여 중첩 구조에서 `text` 하위 필드를 추출해야 합니다. +위의 예제에서 알 수 있듯이, `text` 필드는 `answers` 아래에 중첩되어 있습니다. 따라서 [`flatten`](https://huggingface.co/docs/datasets/process#flatten) 메소드를 사용하여 중첩 구조에서 `text` 하위 필드를 추출해야 합니다. ```py >>> eli5 = eli5.flatten() diff --git a/docs/source/ko/tasks/masked_language_modeling.md b/docs/source/ko/tasks/masked_language_modeling.md index d22d439dbd51..ee835d13ebc0 100644 --- a/docs/source/ko/tasks/masked_language_modeling.md +++ b/docs/source/ko/tasks/masked_language_modeling.md @@ -107,7 +107,7 @@ Hugging Face 계정에 로그인하여 모델을 업로드하고 커뮤니티와 ``` 위의 예제에서와 마찬가지로, `text` 필드는 `answers` 안에 중첩되어 있습니다. 
-따라서 중첩된 구조에서 [`flatten`](https://huggingface.co/docs/datasets/process.html#flatten) 메소드를 사용하여 `text` 하위 필드를 추출합니다: +따라서 중첩된 구조에서 [`flatten`](https://huggingface.co/docs/datasets/process#flatten) 메소드를 사용하여 `text` 하위 필드를 추출합니다: ```py >>> eli5 = eli5.flatten() From ffbcfc0166a5413176dc9401dbe5d3892c36fff6 Mon Sep 17 00:00:00 2001 From: "V.Prasanna kumar" Date: Sat, 18 Nov 2023 03:14:09 +0530 Subject: [PATCH 210/268] Broken links fixed related to datasets docs (#27569) fixed the broken links belogs to dataset library of transformers --- docs/source/de/preprocessing.md | 6 +++--- docs/source/de/quicktour.md | 2 +- docs/source/de/training.md | 2 +- docs/source/en/preprocessing.md | 8 ++++---- docs/source/en/training.md | 2 +- docs/source/es/preprocessing.md | 8 ++++---- docs/source/es/tasks/image_classification.md | 2 +- docs/source/es/tasks/language_modeling.md | 4 ++-- docs/source/es/training.md | 6 +++--- docs/source/it/preprocessing.md | 6 +++--- docs/source/it/quicktour.md | 2 +- docs/source/it/training.md | 6 +++--- docs/source/ja/preprocessing.md | 10 +++++----- docs/source/ja/training.md | 2 +- docs/source/ko/preprocessing.md | 10 +++++----- docs/source/ko/training.md | 2 +- docs/source/pt/quicktour.md | 2 +- docs/source/pt/tasks/sequence_classification.md | 2 +- docs/source/pt/tasks/token_classification.md | 2 +- docs/source/pt/training.md | 8 ++++---- docs/source/zh/preprocessing.md | 10 +++++----- docs/source/zh/training.md | 2 +- examples/flax/image-captioning/README.md | 2 +- .../flax/image-captioning/run_image_captioning_flax.py | 2 +- examples/flax/language-modeling/run_bart_dlm_flax.py | 2 +- examples/flax/language-modeling/run_clm_flax.py | 2 +- examples/flax/language-modeling/run_mlm_flax.py | 2 +- examples/flax/language-modeling/run_t5_mlm_flax.py | 2 +- examples/flax/question-answering/run_qa.py | 2 +- examples/flax/summarization/README.md | 2 +- examples/flax/summarization/run_summarization_flax.py | 2 +- examples/flax/text-classification/run_flax_glue.py | 4 ++-- examples/flax/token-classification/run_flax_ner.py | 2 +- examples/pytorch/contrastive-image-text/run_clip.py | 2 +- examples/pytorch/language-modeling/run_clm.py | 2 +- .../pytorch/language-modeling/run_clm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- .../pytorch/language-modeling/run_mlm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- .../pytorch/multiple-choice/run_swag_no_trainer.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- .../pytorch/question-answering/run_qa_beam_search.py | 2 +- .../run_qa_beam_search_no_trainer.py | 2 +- .../pytorch/question-answering/run_qa_no_trainer.py | 2 +- examples/pytorch/question-answering/run_seq2seq_qa.py | 2 +- examples/pytorch/speech-recognition/README.md | 2 +- examples/pytorch/summarization/README.md | 2 +- examples/pytorch/summarization/run_summarization.py | 2 +- .../summarization/run_summarization_no_trainer.py | 2 +- .../pytorch/text-classification/run_classification.py | 2 +- examples/pytorch/text-classification/run_glue.py | 4 ++-- .../pytorch/text-classification/run_glue_no_trainer.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- .../pytorch/token-classification/run_ner_no_trainer.py | 2 +- examples/pytorch/translation/README.md | 2 +- .../pytorch/translation/run_translation_no_trainer.py | 2 +- examples/research_projects/jax-projects/README.md | 2 +- .../jax-projects/dataset-streaming/README.md | 2 +- 
.../jax-projects/model_parallel/run_clm_mp.py | 2 +- .../research_projects/jax-projects/wav2vec2/README.md | 2 +- .../research_projects/luke/run_luke_ner_no_trainer.py | 2 +- examples/research_projects/mlm_wwm/run_mlm_wwm.py | 2 +- .../research_projects/performer/run_mlm_performer.py | 2 +- .../quantization-qdqbert/evaluate-hf-trt-qa.py | 2 +- .../quantization-qdqbert/run_quant_qa.py | 2 +- .../rag-end2end-retriever/use_own_knowledge_dataset.py | 2 +- .../research_projects/rag/use_own_knowledge_dataset.py | 2 +- .../research_projects/robust-speech-event/README.md | 2 +- .../research_projects/tapex/run_tabfact_with_tapex.py | 2 +- .../research_projects/tapex/run_wikisql_with_tapex.py | 2 +- .../tapex/run_wikitablequestions_with_tapex.py | 2 +- examples/tensorflow/contrastive-image-text/run_clip.py | 2 +- .../image-classification/run_image_classification.py | 2 +- examples/tensorflow/language-modeling/run_clm.py | 2 +- examples/tensorflow/language-modeling/run_mlm.py | 2 +- examples/tensorflow/multiple-choice/run_swag.py | 2 +- examples/tensorflow/question-answering/run_qa.py | 2 +- examples/tensorflow/summarization/run_summarization.py | 2 +- examples/tensorflow/text-classification/run_glue.py | 2 +- .../text-classification/run_text_classification.py | 2 +- examples/tensorflow/token-classification/run_ner.py | 2 +- .../run_{{cookiecutter.example_shortcut}}.py | 4 ++-- .../scripts/pytorch/run_glue_model_parallelism.py | 4 ++-- 84 files changed, 118 insertions(+), 118 deletions(-) diff --git a/docs/source/de/preprocessing.md b/docs/source/de/preprocessing.md index 1e8f6ff4062a..9c977e10a538 100644 --- a/docs/source/de/preprocessing.md +++ b/docs/source/de/preprocessing.md @@ -209,7 +209,7 @@ Audioeingaben werden anders vorverarbeitet als Texteingaben, aber das Endziel bl pip install datasets ``` -Laden Sie den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz (weitere Informationen zum Laden eines Datensatzes finden Sie im 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html)): +Laden Sie den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz (weitere Informationen zum Laden eines Datensatzes finden Sie im 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub)): ```py >>> from datasets import load_dataset, Audio @@ -344,7 +344,7 @@ Laden wir den [food101](https://huggingface.co/datasets/food101) Datensatz für >>> dataset = load_dataset("food101", split="train[:100]") ``` -Als Nächstes sehen Sie sich das Bild mit dem Merkmal 🤗 Datensätze [Bild] (https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image) an: +Als Nächstes sehen Sie sich das Bild mit dem Merkmal 🤗 Datensätze [Bild] (https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) an: ```py >>> dataset[0]["image"] @@ -385,7 +385,7 @@ Bei Bildverarbeitungsaufgaben ist es üblich, den Bildern als Teil der Vorverarb ... return examples ``` -3. Dann verwenden Sie 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform), um die Transformationen im laufenden Betrieb anzuwenden: +3. 
Dann verwenden Sie 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform), um die Transformationen im laufenden Betrieb anzuwenden: ```py >>> dataset.set_transform(transforms) diff --git a/docs/source/de/quicktour.md b/docs/source/de/quicktour.md index 139869e5d1ee..2b66d2d6a917 100644 --- a/docs/source/de/quicktour.md +++ b/docs/source/de/quicktour.md @@ -121,7 +121,7 @@ Erstellen wir eine [`pipeline`] mit der Aufgabe die wir lösen und dem Modell we >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` -Als nächstes laden wir den Datensatz (siehe 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) für mehr Details) welches wir nutzen möchten. Zum Beispiel laden wir den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz: +Als nächstes laden wir den Datensatz (siehe 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) für mehr Details) welches wir nutzen möchten. Zum Beispiel laden wir den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz: ```py >>> from datasets import load_dataset, Audio diff --git a/docs/source/de/training.md b/docs/source/de/training.md index 493de3052bbf..b1b7c14f261a 100644 --- a/docs/source/de/training.md +++ b/docs/source/de/training.md @@ -43,7 +43,7 @@ Laden Sie zunächst den Datensatz [Yelp Reviews](https://huggingface.co/datasets 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} ``` -Wie Sie nun wissen, benötigen Sie einen Tokenizer, um den Text zu verarbeiten und eine Auffüll- und Abschneidungsstrategie einzubauen, um mit variablen Sequenzlängen umzugehen. Um Ihren Datensatz in einem Schritt zu verarbeiten, verwenden Sie die 🤗 Methode Datasets [`map`](https://huggingface.co/docs/datasets/process.html#map), um eine Vorverarbeitungsfunktion auf den gesamten Datensatz anzuwenden: +Wie Sie nun wissen, benötigen Sie einen Tokenizer, um den Text zu verarbeiten und eine Auffüll- und Abschneidungsstrategie einzubauen, um mit variablen Sequenzlängen umzugehen. 
Um Ihren Datensatz in einem Schritt zu verarbeiten, verwenden Sie die 🤗 Methode Datasets [`map`](https://huggingface.co/docs/datasets/process#map), um eine Vorverarbeitungsfunktion auf den gesamten Datensatz anzuwenden: ```py >>> from transformers import AutoTokenizer diff --git a/docs/source/en/preprocessing.md b/docs/source/en/preprocessing.md index f08808433c26..743904cc994c 100644 --- a/docs/source/en/preprocessing.md +++ b/docs/source/en/preprocessing.md @@ -220,7 +220,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], For audio tasks, you'll need a [feature extractor](main_classes/feature_extractor) to prepare your dataset for the model. The feature extractor is designed to extract features from raw audio data, and convert them into tensors. -Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) for more details on how to load a dataset) to see how you can use a feature extractor with audio datasets: +Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a feature extractor with audio datasets: ```py >>> from datasets import load_dataset, Audio @@ -340,7 +340,7 @@ You can use any library you like for image augmentation. For image preprocessing -Load the [food101](https://huggingface.co/datasets/food101) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) for more details on how to load a dataset) to see how you can use an image processor with computer vision datasets: +Load the [food101](https://huggingface.co/datasets/food101) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use an image processor with computer vision datasets: @@ -354,7 +354,7 @@ Use 🤗 Datasets `split` parameter to only load a small sample from the trainin >>> dataset = load_dataset("food101", split="train[:100]") ``` -Next, take a look at the image with 🤗 Datasets [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image) feature: +Next, take a look at the image with 🤗 Datasets [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) feature: ```py >>> dataset[0]["image"] @@ -467,7 +467,7 @@ from [`DetrImageProcessor`] and define a custom `collate_fn` to batch images tog For tasks involving multimodal inputs, you'll need a [processor](main_classes/processors) to prepare your dataset for the model. A processor couples together two processing objects such as as tokenizer and feature extractor. 
-Load the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) for more details on how to load a dataset) to see how you can use a processor for automatic speech recognition (ASR): +Load the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a processor for automatic speech recognition (ASR): ```py >>> from datasets import load_dataset diff --git a/docs/source/en/training.md b/docs/source/en/training.md index fb4a0b6a279e..1744a441535d 100644 --- a/docs/source/en/training.md +++ b/docs/source/en/training.md @@ -43,7 +43,7 @@ Begin by loading the [Yelp Reviews](https://huggingface.co/datasets/yelp_review_ 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} ``` -As you now know, you need a tokenizer to process the text and include a padding and truncation strategy to handle any variable sequence lengths. To process your dataset in one step, use 🤗 Datasets [`map`](https://huggingface.co/docs/datasets/process.html#map) method to apply a preprocessing function over the entire dataset: +As you now know, you need a tokenizer to process the text and include a padding and truncation strategy to handle any variable sequence lengths. 
To process your dataset in one step, use 🤗 Datasets [`map`](https://huggingface.co/docs/datasets/process#map) method to apply a preprocessing function over the entire dataset: ```py >>> from transformers import AutoTokenizer diff --git a/docs/source/es/preprocessing.md b/docs/source/es/preprocessing.md index f4eec4862be8..5ac4c018090b 100644 --- a/docs/source/es/preprocessing.md +++ b/docs/source/es/preprocessing.md @@ -195,7 +195,7 @@ Las entradas de audio se preprocesan de forma diferente a las entradas textuales pip install datasets ``` -Carga la tarea de detección de palabras clave del benchmark [SUPERB](https://huggingface.co/datasets/superb) (consulta el [tutorial 🤗 Dataset](https://huggingface.co/docs/datasets/load_hub.html) para que obtengas más detalles sobre cómo cargar un dataset): +Carga la tarea de detección de palabras clave del benchmark [SUPERB](https://huggingface.co/datasets/superb) (consulta el [tutorial 🤗 Dataset](https://huggingface.co/docs/datasets/load_hub) para que obtengas más detalles sobre cómo cargar un dataset): ```py >>> from datasets import load_dataset, Audio @@ -234,7 +234,7 @@ Por ejemplo, carga el dataset [LJ Speech](https://huggingface.co/datasets/lj_spe 'sampling_rate': 22050} ``` -1. Usa el método 🤗 Datasets' [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.cast_column) para reducir la tasa de muestreo a 16kHz: +1. Usa el método 🤗 Datasets' [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.cast_column) para reducir la tasa de muestreo a 16kHz: ```py >>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) @@ -329,7 +329,7 @@ Vamos a cargar el dataset [food101](https://huggingface.co/datasets/food101) par >>> dataset = load_dataset("food101", split="train[:100]") ``` -A continuación, observa la imagen con la función 🤗 Datasets [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image): +A continuación, observa la imagen con la función 🤗 Datasets [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image): ```py >>> dataset[0]["image"] @@ -370,7 +370,7 @@ Para las tareas de visión por computadora es común añadir algún tipo de aume ... return examples ``` -3. A continuación, utiliza 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform) para aplicar las transformaciones sobre la marcha: +3. A continuación, utiliza 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform) para aplicar las transformaciones sobre la marcha: ```py >>> dataset.set_transform(transforms) diff --git a/docs/source/es/tasks/image_classification.md b/docs/source/es/tasks/image_classification.md index 3a959aa934ff..f09730caf69f 100644 --- a/docs/source/es/tasks/image_classification.md +++ b/docs/source/es/tasks/image_classification.md @@ -99,7 +99,7 @@ Crea una función de preprocesamiento que aplique las transformaciones y devuelv ... return examples ``` -Utiliza el método [`with_transform`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?#datasets.Dataset.with_transform) de 🤗 Dataset para aplicar las transformaciones sobre todo el dataset. 
Las transformaciones se aplican sobre la marcha cuando se carga un elemento del dataset: +Utiliza el método [`with_transform`](https://huggingface.co/docs/datasets/package_reference/main_classes?#datasets.Dataset.with_transform) de 🤗 Dataset para aplicar las transformaciones sobre todo el dataset. Las transformaciones se aplican sobre la marcha cuando se carga un elemento del dataset: ```py >>> food = food.with_transform(transforms) diff --git a/docs/source/es/tasks/language_modeling.md b/docs/source/es/tasks/language_modeling.md index 34bd8a2f70e0..b3f22f084633 100644 --- a/docs/source/es/tasks/language_modeling.md +++ b/docs/source/es/tasks/language_modeling.md @@ -249,7 +249,7 @@ A este punto, solo faltan tres pasos: ```
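The `set_transform`/`with_transform` links fixed above both refer to the same on-the-fly transform pattern. A small sketch of it, where the torchvision pipeline and the 100-example `food101` slice are illustrative choices rather than anything these docs prescribe:

```python
from datasets import load_dataset
from torchvision.transforms import Compose, RandomResizedCrop, ToTensor

food = load_dataset("food101", split="train[:100]")

_transforms = Compose([RandomResizedCrop(224), ToTensor()])

def transforms(examples):
    # Runs lazily on each accessed batch; nothing is materialised up front.
    examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]]
    del examples["image"]
    return examples

food = food.with_transform(transforms)
print(food[0]["pixel_values"].shape)  # e.g. torch.Size([3, 224, 224])
```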
-Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator: +Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator: ```py >>> tf_train_set = lm_dataset["train"].to_tf_dataset( @@ -356,7 +356,7 @@ A este punto, solo faltan tres pasos: ```
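The `to_tf_dataset` links fixed here sit next to calls like `lm_dataset["train"].to_tf_dataset(...)`. A self-contained sketch of that conversion, using a toy in-memory classification set and `DataCollatorWithPadding` instead of the docs' language-modeling collator (which assumes fixed-length blocks); the checkpoint, column names and batch size are assumptions:

```python
from datasets import Dataset
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

# Tiny stand-in for a tokenized corpus such as lm_dataset / small_train_dataset.
data = {"text": ["great food", "terrible service", "pretty good overall", "never again"],
        "label": [1, 0, 1, 0]}
dataset = (
    Dataset.from_dict(data)
    .map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True, remove_columns=["text"])
    .rename_column("label", "labels")
)

data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")

tf_train_set = dataset.to_tf_dataset(
    columns=["attention_mask", "input_ids", "token_type_ids"],
    label_cols=["labels"],
    shuffle=True,
    batch_size=2,
    collate_fn=data_collator,
)

for features, labels in tf_train_set.take(1):
    print({k: v.shape for k, v in features.items()}, labels.shape)
```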
-Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator: +Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator: ```py >>> tf_train_set = lm_dataset["train"].to_tf_dataset( diff --git a/docs/source/es/training.md b/docs/source/es/training.md index 7b7b0657bd8f..4f224b0797a3 100644 --- a/docs/source/es/training.md +++ b/docs/source/es/training.md @@ -102,7 +102,7 @@ Especifica dónde vas a guardar los checkpoints de tu entrenamiento: ### Métricas -El [`Trainer`] no evalúa automáticamente el rendimiento del modelo durante el entrenamiento. Tendrás que pasarle a [`Trainer`] una función para calcular y hacer un reporte de las métricas. La biblioteca de 🤗 Datasets proporciona una función de [`accuracy`](https://huggingface.co/metrics/accuracy) simple que puedes cargar con la función `load_metric` (ver este [tutorial](https://huggingface.co/docs/datasets/metrics.html) para más información): +El [`Trainer`] no evalúa automáticamente el rendimiento del modelo durante el entrenamiento. Tendrás que pasarle a [`Trainer`] una función para calcular y hacer un reporte de las métricas. La biblioteca de 🤗 Datasets proporciona una función de [`accuracy`](https://huggingface.co/metrics/accuracy) simple que puedes cargar con la función `load_metric` (ver este [tutorial](https://huggingface.co/docs/datasets/metrics) para más información): ```py >>> import numpy as np @@ -172,7 +172,7 @@ El [`DefaultDataCollator`] junta los tensores en un batch para que el modelo se -A continuación, convierte los datasets tokenizados en datasets de TensorFlow con el método [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). Especifica tus entradas en `columns` y tu etiqueta en `label_cols`: +A continuación, convierte los datasets tokenizados en datasets de TensorFlow con el método [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifica tus entradas en `columns` y tu etiqueta en `label_cols`: ```py >>> tf_train_dataset = small_train_dataset.to_tf_dataset( @@ -342,7 +342,7 @@ Para hacer un seguimiento al progreso del entrenamiento, utiliza la biblioteca [ ### Métricas -De la misma manera que necesitas añadir una función de evaluación al [`Trainer`], necesitas hacer lo mismo cuando escribas tu propio ciclo de entrenamiento. Pero en lugar de calcular y reportar la métrica al final de cada época, esta vez acumularás todos los batches con [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch) y calcularás la métrica al final. +De la misma manera que necesitas añadir una función de evaluación al [`Trainer`], necesitas hacer lo mismo cuando escribas tu propio ciclo de entrenamiento. 
Pero en lugar de calcular y reportar la métrica al final de cada época, esta vez acumularás todos los batches con [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=add_batch#datasets.Metric.add_batch) y calcularás la métrica al final. ```py >>> metric = load_metric("accuracy") diff --git a/docs/source/it/preprocessing.md b/docs/source/it/preprocessing.md index 94578dfe166b..76addd2aa0ea 100644 --- a/docs/source/it/preprocessing.md +++ b/docs/source/it/preprocessing.md @@ -194,7 +194,7 @@ Gli input audio sono processati in modo differente rispetto al testo, ma l'obiet pip install datasets ``` -Carica il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) (vedi il 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) per avere maggiori dettagli su come caricare un dataset): +Carica il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) (vedi il 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) per avere maggiori dettagli su come caricare un dataset): ```py >>> from datasets import load_dataset, Audio @@ -233,7 +233,7 @@ Per esempio, il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds1 'sampling_rate': 8000} ``` -1. Usa il metodo di 🤗 Datasets' [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.cast_column) per alzare la frequenza di campionamento a 16kHz: +1. Usa il metodo di 🤗 Datasets' [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.cast_column) per alzare la frequenza di campionamento a 16kHz: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) @@ -370,7 +370,7 @@ Per le attività di visione, è usuale aggiungere alcuni tipi di data augmentati ... return examples ``` -3. Poi utilizza 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform)per applicare al volo la trasformazione: +3. Poi utilizza 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)per applicare al volo la trasformazione: ```py >>> dataset.set_transform(transforms) diff --git a/docs/source/it/quicktour.md b/docs/source/it/quicktour.md index f0e981d18eb7..07e7a2974a1f 100644 --- a/docs/source/it/quicktour.md +++ b/docs/source/it/quicktour.md @@ -125,7 +125,7 @@ Crea una [`pipeline`] con il compito che vuoi risolvere e con il modello che vuo ... ) ``` -Poi, carica un dataset (vedi 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) per maggiori dettagli) sul quale vuoi iterare. Per esempio, carichiamo il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): +Poi, carica un dataset (vedi 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) per maggiori dettagli) sul quale vuoi iterare. Per esempio, carichiamo il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio diff --git a/docs/source/it/training.md b/docs/source/it/training.md index be0883f07b77..503a43321799 100644 --- a/docs/source/it/training.md +++ b/docs/source/it/training.md @@ -43,7 +43,7 @@ Inizia caricando il dataset [Yelp Reviews](https://huggingface.co/datasets/yelp_ 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. 
I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} ``` -Come già sai, hai bisogno di un tokenizer per processare il testo e includere una strategia di padding e truncation per gestire sequenze di lunghezza variabile. Per processare il dataset in un unico passo, usa il metodo [`map`](https://huggingface.co/docs/datasets/process.html#map) di 🤗 Datasets che applica la funzione di preprocessing all'intero dataset: +Come già sai, hai bisogno di un tokenizer per processare il testo e includere una strategia di padding e truncation per gestire sequenze di lunghezza variabile. Per processare il dataset in un unico passo, usa il metodo [`map`](https://huggingface.co/docs/datasets/process#map) di 🤗 Datasets che applica la funzione di preprocessing all'intero dataset: ```py >>> from transformers import AutoTokenizer @@ -103,7 +103,7 @@ Specifica dove salvare i checkpoints del tuo addestramento: ### Metriche -[`Trainer`] non valuta automaticamente le performance del modello durante l'addestramento. Dovrai passare a [`Trainer`] una funzione che calcola e restituisce le metriche. La libreria 🤗 Datasets mette a disposizione una semplice funzione [`accuracy`](https://huggingface.co/metrics/accuracy) che puoi caricare con la funzione `load_metric` (guarda questa [esercitazione](https://huggingface.co/docs/datasets/metrics.html) per maggiori informazioni): +[`Trainer`] non valuta automaticamente le performance del modello durante l'addestramento. Dovrai passare a [`Trainer`] una funzione che calcola e restituisce le metriche. La libreria 🤗 Datasets mette a disposizione una semplice funzione [`accuracy`](https://huggingface.co/metrics/accuracy) che puoi caricare con la funzione `load_metric` (guarda questa [esercitazione](https://huggingface.co/docs/datasets/metrics) per maggiori informazioni): ```py >>> import numpy as np @@ -346,7 +346,7 @@ Per tenere traccia dei tuoi progressi durante l'addestramento, usa la libreria [ ### Metriche -Proprio come è necessario aggiungere una funzione di valutazione del [`Trainer`], è necessario fare lo stesso quando si scrive il proprio ciclo di addestramento. Ma invece di calcolare e riportare la metrica alla fine di ogni epoca, questa volta accumulerai tutti i batch con [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch) e calcolerai la metrica alla fine. 
+Proprio come è necessario aggiungere una funzione di valutazione del [`Trainer`], è necessario fare lo stesso quando si scrive il proprio ciclo di addestramento. Ma invece di calcolare e riportare la metrica alla fine di ogni epoca, questa volta accumulerai tutti i batch con [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=add_batch#datasets.Metric.add_batch) e calcolerai la metrica alla fine. ```py >>> metric = load_metric("accuracy") diff --git a/docs/source/ja/preprocessing.md b/docs/source/ja/preprocessing.md index c4e3566fd3ae..b8fad2a0d21b 100644 --- a/docs/source/ja/preprocessing.md +++ b/docs/source/ja/preprocessing.md @@ -227,7 +227,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], オーディオタスクの場合、データセットをモデル用に準備するために[特徴抽出器](main_classes/feature_extractor)が必要です。 特徴抽出器は生のオーディオデータから特徴を抽出し、それらをテンソルに変換するために設計されています。 -[PolyAI/minds14](https://huggingface.co/datasets/PolyAI/minds14)データセットをロードして(データセットのロード方法の詳細については🤗 [Datasetsチュートリアル](https://huggingface.co/docs/datasets/load_hub.html)を参照)、 +[PolyAI/minds14](https://huggingface.co/datasets/PolyAI/minds14)データセットをロードして(データセットのロード方法の詳細については🤗 [Datasetsチュートリアル](https://huggingface.co/docs/datasets/load_hub)を参照)、 オーディオデータセットで特徴抽出器をどのように使用できるかを確認してみましょう: ```python @@ -349,7 +349,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], -コンピュータビジョンのデータセットで画像プロセッサを使用する方法を示すために、[food101](https://huggingface.co/datasets/food101)データセットをロードします(データセットのロード方法の詳細については🤗[Datasetsチュートリアル](https://huggingface.co/docs/datasets/load_hub.html)を参照): +コンピュータビジョンのデータセットで画像プロセッサを使用する方法を示すために、[food101](https://huggingface.co/datasets/food101)データセットをロードします(データセットのロード方法の詳細については🤗[Datasetsチュートリアル](https://huggingface.co/docs/datasets/load_hub)を参照): @@ -363,7 +363,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], >>> dataset = load_dataset("food101", split="train[:100]") ``` -次に、🤗 Datasetsの [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image) 機能で画像を見てみましょう: +次に、🤗 Datasetsの [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) 機能で画像を見てみましょう: ```python >>> dataset[0]["image"] @@ -419,7 +419,7 @@ AutoImageProcessorを[`AutoImageProcessor.from_pretrained`]を使用してロー 画像を増強変換の一部として正規化したい場合は、`image_processor.image_mean` と `image_processor.image_std` の値を使用してください。 -3. 次に、🤗 Datasetsの[`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform)を使用して、変換をリアルタイムで適用します: +3. 
次に、🤗 Datasetsの[`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)を使用して、変換をリアルタイムで適用します: ```python >>> dataset.set_transform(transforms) @@ -474,7 +474,7 @@ AutoImageProcessorを[`AutoImageProcessor.from_pretrained`]を使用してロー マルチモーダル入力を使用するタスクの場合、モデル用にデータセットを準備するための[プロセッサ](main_classes/processors)が必要です。プロセッサは、トークナイザや特徴量抽出器などの2つの処理オブジェクトを結合します。 -自動音声認識(ASR)のためのプロセッサの使用方法を示すために、[LJ Speech](https://huggingface.co/datasets/lj_speech)データセットをロードします(データセットのロード方法の詳細については🤗 [Datasets チュートリアル](https://huggingface.co/docs/datasets/load_hub.html)を参照): +自動音声認識(ASR)のためのプロセッサの使用方法を示すために、[LJ Speech](https://huggingface.co/datasets/lj_speech)データセットをロードします(データセットのロード方法の詳細については🤗 [Datasets チュートリアル](https://huggingface.co/docs/datasets/load_hub)を参照): ```python >>> from datasets import load_dataset diff --git a/docs/source/ja/training.md b/docs/source/ja/training.md index 54b34274bf1c..4e5dbaa77aef 100644 --- a/docs/source/ja/training.md +++ b/docs/source/ja/training.md @@ -49,7 +49,7 @@ rendered properly in your Markdown viewer. ``` トークナイザがテキストを処理し、可変のシーケンス長を処理するためのパディングと切り捨て戦略を含める必要があることをご存知の通り、 -データセットを1つのステップで処理するには、🤗 Datasets の [`map`](https://huggingface.co/docs/datasets/process.html#map) メソッドを使用して、 +データセットを1つのステップで処理するには、🤗 Datasets の [`map`](https://huggingface.co/docs/datasets/process#map) メソッドを使用して、 データセット全体に前処理関数を適用します: ```py diff --git a/docs/source/ko/preprocessing.md b/docs/source/ko/preprocessing.md index 7a9d2987381c..e11f68d65666 100644 --- a/docs/source/ko/preprocessing.md +++ b/docs/source/ko/preprocessing.md @@ -220,7 +220,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 오디오 작업은 모델에 맞는 데이터 세트를 준비하기 위해 [특성 추출기](main_classes/feature_extractor)가 필요합니다. 특성 추출기는 원시 오디오 데이터에서 특성를 추출하고 이를 텐서로 변환하는 것이 목적입니다. -오디오 데이터 세트에 특성 추출기를 사용하는 방법을 보기 위해 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트를 가져오세요. (데이터 세트를 가져오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub.html)에서 자세히 설명하고 있습니다.) +오디오 데이터 세트에 특성 추출기를 사용하는 방법을 보기 위해 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트를 가져오세요. (데이터 세트를 가져오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)에서 자세히 설명하고 있습니다.) ```py >>> from datasets import load_dataset, Audio @@ -346,7 +346,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [food101](https://huggingface.co/datasets/food101) 데이터 세트를 가져와서 컴퓨터 비전 데이터 세트에서 이미지 프로세서를 어떻게 사용하는지 알아보세요. -데이터 세트를 불러오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub.html)을 참고하세요. +데이터 세트를 불러오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)을 참고하세요. @@ -360,7 +360,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], >>> dataset = load_dataset("food101", split="train[:100]") ``` -다음으로, 🤗 Datasets의 [`image`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image)로 이미지를 확인해보세요: +다음으로, 🤗 Datasets의 [`image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image)로 이미지를 확인해보세요: ```py >>> dataset[0]["image"] @@ -418,7 +418,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], -3. 🤗 Datasets의 [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform)를 사용하여 실시간으로 변환을 적용합니다: +3. 🤗 Datasets의 [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)를 사용하여 실시간으로 변환을 적용합니다: ```py >>> dataset.set_transform(transforms) @@ -476,7 +476,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 프로세서는 토크나이저와 특성 추출기와 같은 두 가지 처리 객체를 결합합니다. 
[LJ Speech](https://huggingface.co/datasets/lj_speech) 데이터 세트를 가져와서 자동 음성 인식(ASR)을 위한 프로세서를 사용하는 방법을 확인하세요. -(데이터 세트를 가져오는 방법에 대한 자세한 내용은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub.html)에서 볼 수 있습니다.) +(데이터 세트를 가져오는 방법에 대한 자세한 내용은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)에서 볼 수 있습니다.) ```py >>> from datasets import load_dataset diff --git a/docs/source/ko/training.md b/docs/source/ko/training.md index 4e375f0f7215..f4ab13322943 100644 --- a/docs/source/ko/training.md +++ b/docs/source/ko/training.md @@ -43,7 +43,7 @@ rendered properly in your Markdown viewer. 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} ``` -텍스트를 처리하고 서로 다른 길이의 시퀀스 패딩 및 잘라내기 전략을 포함하려면 토크나이저가 필요합니다. 데이터셋을 한 번에 처리하려면 🤗 Dataset [`map`](https://huggingface.co/docs/datasets/process.html#map) 메서드를 사용하여 전체 데이터셋에 전처리 함수를 적용하세요: +텍스트를 처리하고 서로 다른 길이의 시퀀스 패딩 및 잘라내기 전략을 포함하려면 토크나이저가 필요합니다. 데이터셋을 한 번에 처리하려면 🤗 Dataset [`map`](https://huggingface.co/docs/datasets/process#map) 메서드를 사용하여 전체 데이터셋에 전처리 함수를 적용하세요: ```py >>> from transformers import AutoTokenizer diff --git a/docs/source/pt/quicktour.md b/docs/source/pt/quicktour.md index fd89b2485599..9ecb760e6969 100644 --- a/docs/source/pt/quicktour.md +++ b/docs/source/pt/quicktour.md @@ -119,7 +119,7 @@ Crie uma [`pipeline`] com a tarefa que deseja resolver e o modelo que deseja usa >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` -A seguir, carregue uma base de dados (confira a 🤗 [Iniciação em Datasets](https://huggingface.co/docs/datasets/quickstart.html) para mais detalhes) que você gostaria de iterar sobre. Por exemplo, vamos carregar o dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): +A seguir, carregue uma base de dados (confira a 🤗 [Iniciação em Datasets](https://huggingface.co/docs/datasets/quickstart) para mais detalhes) que você gostaria de iterar sobre. 
Por exemplo, vamos carregar o dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio diff --git a/docs/source/pt/tasks/sequence_classification.md b/docs/source/pt/tasks/sequence_classification.md index 6469ac4d4553..02647f68f886 100644 --- a/docs/source/pt/tasks/sequence_classification.md +++ b/docs/source/pt/tasks/sequence_classification.md @@ -148,7 +148,7 @@ O [`Trainer`] aplicará o preenchimento dinâmico por padrão quando você defin
-Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato `tf.data.Dataset` com [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). Nessa execução você deverá especificar as entradas e rótulos (no parâmetro `columns`), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator: +Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato `tf.data.Dataset` com [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Nessa execução você deverá especificar as entradas e rótulos (no parâmetro `columns`), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator: ```py >>> tf_train_set = tokenized_imdb["train"].to_tf_dataset( diff --git a/docs/source/pt/tasks/token_classification.md b/docs/source/pt/tasks/token_classification.md index ba8298e9f581..316d6a810218 100644 --- a/docs/source/pt/tasks/token_classification.md +++ b/docs/source/pt/tasks/token_classification.md @@ -201,7 +201,7 @@ Nesse ponto, restam apenas três passos: ```
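The quicktour hunks above feed MInDS-14 through a speech-recognition `pipeline` after matching the sampling rate. A sketch of that flow, with the checkpoint and the four-example slice taken from the linked docs:

```python
from datasets import Audio, load_dataset
from transformers import pipeline

speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
# Resample so the audio matches the rate the checkpoint was trained with.
dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))

result = speech_recognizer(dataset[:4]["audio"])
print([item["text"] for item in result])
```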
-Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato `tf.data.Dataset` com [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). Nessa execução você deverá especificar as entradas e rótulos (no parâmetro `columns`), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator: +Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato `tf.data.Dataset` com [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Nessa execução você deverá especificar as entradas e rótulos (no parâmetro `columns`), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator: ```py >>> tf_train_set = tokenized_wnut["train"].to_tf_dataset( diff --git a/docs/source/pt/training.md b/docs/source/pt/training.md index aa529ac948b8..6e39a46b1643 100644 --- a/docs/source/pt/training.md +++ b/docs/source/pt/training.md @@ -52,7 +52,7 @@ Comece carregando o dataset [Yelp Reviews](https://huggingface.co/datasets/yelp_ Como já sabe, é necessário ter um tokenizador para processar o texto e incluir uma estratégia de padding e truncamento, para manejar qualquer tamanho varíavel de sequência. Para processar o seu dataset em apenas um passo, utilize o método de -🤗 Datasets [`map`](https://huggingface.co/docs/datasets/process.html#map) para aplicar uma função de preprocessamento sobre +🤗 Datasets [`map`](https://huggingface.co/docs/datasets/process#map) para aplicar uma função de preprocessamento sobre todo o dataset. ```py @@ -126,7 +126,7 @@ Especifique onde salvar os checkpoints do treinamento: O [`Trainer`] não avalia automaticamente o rendimento do modelo durante o treinamento. Será necessário passar ao [`Trainer`] uma função para calcular e fazer um diagnóstico sobre as métricas. A biblioteca 🤗 Datasets proporciona uma função de [`accuracy`](https://huggingface.co/metrics/accuracy) simples que pode ser carregada com a função -`load_metric` (ver este [tutorial](https://huggingface.co/docs/datasets/metrics.html) para mais informações): +`load_metric` (ver este [tutorial](https://huggingface.co/docs/datasets/metrics) para mais informações): ```py >>> import numpy as np @@ -203,7 +203,7 @@ Assegure-se de especificar os `return_tensors` para retornar os tensores do Tens Em seguida, converta os datasets tokenizados em datasets do TensorFlow com o método -[`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). +[`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifique suas entradas em `columns` e seu rótulo em `label_cols`: ```py @@ -385,7 +385,7 @@ uma barra de progresso sobre o número de passos percorridos no treinamento atua Da mesma forma que é necessário adicionar uma função de avaliação ao [`Trainer`], é necessário fazer o mesmo quando escrevendo o próprio ciclo de treinamento. 
Contudo, em vez de calcular e retornar a métrica final de cada época, -você deverá adicionar todos os batches com [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch) +você deverá adicionar todos os batches com [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=add_batch#datasets.Metric.add_batch) e calcular a métrica apenas no final. ```py diff --git a/docs/source/zh/preprocessing.md b/docs/source/zh/preprocessing.md index 95b799989c91..f2b3189dd4a6 100644 --- a/docs/source/zh/preprocessing.md +++ b/docs/source/zh/preprocessing.md @@ -227,7 +227,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 对于音频任务,您需要[feature extractor](main_classes/feature_extractor)来准备您的数据集以供模型使用。`feature extractor`旨在从原始音频数据中提取特征,并将它们转换为张量。 -加载[MInDS-14](https://huggingface.co/datasets/PolyAI/minds14)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets教程](https://huggingface.co/docs/datasets/load_hub.html))以了解如何在音频数据集中使用`feature extractor`: +加载[MInDS-14](https://huggingface.co/datasets/PolyAI/minds14)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets教程](https://huggingface.co/docs/datasets/load_hub))以了解如何在音频数据集中使用`feature extractor`: ```py @@ -352,7 +352,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], -加载[food101](https://huggingface.co/datasets/food101)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets教程](https://huggingface.co/docs/datasets/load_hub.html))以了解如何在计算机视觉数据集中使用图像处理器: +加载[food101](https://huggingface.co/datasets/food101)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets教程](https://huggingface.co/docs/datasets/load_hub))以了解如何在计算机视觉数据集中使用图像处理器: @@ -367,7 +367,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], >>> dataset = load_dataset("food101", split="train[:100]") ``` -接下来,使用🤗 Datasets的[`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image)功能查看图像: +接下来,使用🤗 Datasets的[`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image)功能查看图像: ```py @@ -421,7 +421,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], -3. 然后使用🤗 Datasets的[`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform)在运行时应用这些变换: +3. 然后使用🤗 Datasets的[`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)在运行时应用这些变换: ```py @@ -476,7 +476,7 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 对于涉及多模态输入的任务,您需要[processor](main_classes/processors)来为模型准备数据集。`processor`将两个处理对象-例如`tokenizer`和`feature extractor`-组合在一起。 -加载[LJ Speech](https://huggingface.co/datasets/lj_speech)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets 教程](https://huggingface.co/docs/datasets/load_hub.html))以了解如何使用`processor`进行自动语音识别(ASR): +加载[LJ Speech](https://huggingface.co/datasets/lj_speech)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets 教程](https://huggingface.co/docs/datasets/load_hub))以了解如何使用`processor`进行自动语音识别(ASR): ```py diff --git a/docs/source/zh/training.md b/docs/source/zh/training.md index 4ef49b459f95..89908130fe30 100644 --- a/docs/source/zh/training.md +++ b/docs/source/zh/training.md @@ -43,7 +43,7 @@ rendered properly in your Markdown viewer. 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. 
After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} ``` -正如您现在所知,您需要一个`tokenizer`来处理文本,包括填充和截断操作以处理可变的序列长度。如果要一次性处理您的数据集,可以使用 🤗 Datasets 的 [`map`](https://huggingface.co/docs/datasets/process.html#map) 方法,将预处理函数应用于整个数据集: +正如您现在所知,您需要一个`tokenizer`来处理文本,包括填充和截断操作以处理可变的序列长度。如果要一次性处理您的数据集,可以使用 🤗 Datasets 的 [`map`](https://huggingface.co/docs/datasets/process#map) 方法,将预处理函数应用于整个数据集: ```py >>> from transformers import AutoTokenizer diff --git a/examples/flax/image-captioning/README.md b/examples/flax/image-captioning/README.md index 0faf56124bc2..66b7bb58bab0 100644 --- a/examples/flax/image-captioning/README.md +++ b/examples/flax/image-captioning/README.md @@ -10,7 +10,7 @@ way which enables simple and efficient model parallelism. `run_image_captioning_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it. -For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets.html#json-files and you also will find examples of these below. +For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets#json-files and you also will find examples of these below. ### Download COCO dataset (2017) This example uses COCO dataset (2017) through a custom dataset script, which requires users to manually download the diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 8f5e09e315ea..859a006dbddc 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -494,7 +494,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. 
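Several of the example scripts above close their dataset-loading block with the `loading_datasets` pointer for custom files. A sketch of the jsonlines case that link covers; the file names are placeholders:

```python
from datasets import load_dataset

# Any local jsonlines files with a consistent set of fields will do here.
data_files = {"train": "train.jsonl", "validation": "validation.jsonl"}
raw_datasets = load_dataset("json", data_files=data_files)

print(raw_datasets)              # DatasetDict with the splits defined above
print(raw_datasets["train"][0])  # first record, one key per JSON field
```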
# Load pretrained model and tokenizer model = FlaxVisionEncoderDecoderModel.from_pretrained( diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 53af12fab684..8603482218b4 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -589,7 +589,7 @@ def main(): num_proc=data_args.preprocessing_num_workers, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer diff --git a/examples/flax/language-modeling/run_clm_flax.py b/examples/flax/language-modeling/run_clm_flax.py index 157c0b78f8e0..48d924f9bb39 100755 --- a/examples/flax/language-modeling/run_clm_flax.py +++ b/examples/flax/language-modeling/run_clm_flax.py @@ -484,7 +484,7 @@ def main(): num_proc=data_args.preprocessing_num_workers, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index d89b4c4dc93c..39fc5e783637 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -516,7 +516,7 @@ def main(): num_proc=data_args.preprocessing_num_workers, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer diff --git a/examples/flax/language-modeling/run_t5_mlm_flax.py b/examples/flax/language-modeling/run_t5_mlm_flax.py index bbf7b827c81c..45d3fe32bcf9 100755 --- a/examples/flax/language-modeling/run_t5_mlm_flax.py +++ b/examples/flax/language-modeling/run_t5_mlm_flax.py @@ -630,7 +630,7 @@ def main(): num_proc=data_args.preprocessing_num_workers, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer diff --git a/examples/flax/question-answering/run_qa.py b/examples/flax/question-answering/run_qa.py index c0bf8b63250f..51f57c1a04f4 100644 --- a/examples/flax/question-answering/run_qa.py +++ b/examples/flax/question-answering/run_qa.py @@ -536,7 +536,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # endregion # region Load pretrained model and tokenizer diff --git a/examples/flax/summarization/README.md b/examples/flax/summarization/README.md index bbe231f31a56..c94b048ec88b 100644 --- a/examples/flax/summarization/README.md +++ b/examples/flax/summarization/README.md @@ -9,7 +9,7 @@ way which enables simple and efficient model parallelism. 
`run_summarization_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it. -For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets.html#json-files and you also will find examples of these below. +For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets#json-files and you also will find examples of these below. ### Train the model Next we can run the example script to train the model: diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index a7d6633f64f8..f39882362e26 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -521,7 +521,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py index 632a66841b36..36b1ce58ec11 100755 --- a/examples/flax/text-classification/run_flax_glue.py +++ b/examples/flax/text-classification/run_flax_glue.py @@ -410,7 +410,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Labels if data_args.task_name is not None: @@ -427,7 +427,7 @@ def main(): num_labels = 1 else: # A useful fast method: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique + # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) diff --git a/examples/flax/token-classification/run_flax_ner.py b/examples/flax/token-classification/run_flax_ner.py index 2060508079df..ff5efd8a2d53 100644 --- a/examples/flax/token-classification/run_flax_ner.py +++ b/examples/flax/token-classification/run_flax_ner.py @@ -465,7 +465,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index e72db1f7f1d4..fe8cafa90a85 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -340,7 +340,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # 5. 
Load pretrained model, tokenizer, and image processor if model_args.tokenizer_name: diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 7c668919253b..df6e248bf70c 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -388,7 +388,7 @@ def main(): ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index b14649483d5b..c95ce9a08336 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -368,7 +368,7 @@ def main(): ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 3effeb16fc1e..b6b01ee29e83 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -382,7 +382,7 @@ def main(): ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index 6427af1f4089..58974ed45940 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -371,7 +371,7 @@ def main(): ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 34c75149caeb..f1d607c5fd4e 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -352,7 +352,7 @@ def main(): ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index 35a2ecd5e794..430497967130 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -329,7 +329,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. 
# Load pretrained model and tokenizer diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index 91c9337f4b8a..38e5eb02b121 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -366,7 +366,7 @@ def main(): for split in raw_datasets.keys(): raw_datasets[split] = raw_datasets[split].select(range(100)) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index ff007292bb19..bdc6cb444f14 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -337,7 +337,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 187afe569388..f6809c6186de 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -325,7 +325,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index ba813c321311..ca5589e8e9a6 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -369,7 +369,7 @@ def main(): extension = args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files, field="data") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index 97a72bf40cb5..2db77e1899c5 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -417,7 +417,7 @@ def main(): extension = args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files, field="data") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. 
# Load pretrained model and tokenizer # diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index cc5ccc97be55..0cbc88602449 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -382,7 +382,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/speech-recognition/README.md b/examples/pytorch/speech-recognition/README.md index 6ae2e1abef60..99eec85537b2 100644 --- a/examples/pytorch/speech-recognition/README.md +++ b/examples/pytorch/speech-recognition/README.md @@ -134,7 +134,7 @@ of **0.36**. ### Multi GPU CTC with Dataset Streaming -The following command shows how to use [Dataset Streaming mode](https://huggingface.co/docs/datasets/dataset_streaming.html) +The following command shows how to use [Dataset Streaming mode](https://huggingface.co/docs/datasets/dataset_streaming) to fine-tune [XLS-R](https://huggingface.co/transformers/main/model_doc/xls_r.html) on [Common Voice](https://huggingface.co/datasets/common_voice) using 4 GPUs in half-precision. diff --git a/examples/pytorch/summarization/README.md b/examples/pytorch/summarization/README.md index db7f8f4061a5..027119681de0 100644 --- a/examples/pytorch/summarization/README.md +++ b/examples/pytorch/summarization/README.md @@ -33,7 +33,7 @@ For the old `finetune_trainer.py` and related utils, see [`examples/legacy/seq2s `run_summarization.py` is a lightweight example of how to download and preprocess a dataset from the [🤗 Datasets](https://github.com/huggingface/datasets) library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it. -For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets.html#json-files +For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets#json-files and you also will find examples of these below. ## With Trainer diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index d7f543c24868..46e92a70c0a3 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -432,7 +432,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. 
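The speech-recognition README hunk above links to dataset streaming mode. A sketch of the flag itself, on a stand-in text corpus rather than Common Voice, so nothing has to be downloaded up front:

```python
from datasets import load_dataset

# streaming=True returns an IterableDataset: samples are fetched lazily.
streamed = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)

for sample in streamed.take(3):
    print(sample["text"][:80])
```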
# Load pretrained model and tokenizer # diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index 42232787dfa4..6bc5f8a42eed 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -409,7 +409,7 @@ def main(): extension = args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py index 7e14c3deb69e..f7194405a1a8 100755 --- a/examples/pytorch/text-classification/run_classification.py +++ b/examples/pytorch/text-classification/run_classification.py @@ -396,7 +396,7 @@ def main(): ) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. if data_args.remove_splits is not None: for split in data_args.remove_splits.split(","): diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index ff2644f86507..343ee94843f4 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -355,7 +355,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Labels if data_args.task_name is not None: @@ -372,7 +372,7 @@ def main(): num_labels = 1 else: # A useful fast method: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique + # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) diff --git a/examples/pytorch/text-classification/run_glue_no_trainer.py b/examples/pytorch/text-classification/run_glue_no_trainer.py index e4332966becd..ad6147d38694 100644 --- a/examples/pytorch/text-classification/run_glue_no_trainer.py +++ b/examples/pytorch/text-classification/run_glue_no_trainer.py @@ -293,7 +293,7 @@ def main(): extension = (args.train_file if args.train_file is not None else args.validation_file).split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. 
# Labels if args.task_name is not None: diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index ec77e8ea6a82..3901191d0690 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -318,7 +318,7 @@ def main(): extension = data_args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. if training_args.do_train: column_names = raw_datasets["train"].column_names diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index 1f83c65fcb54..42d1a70f37da 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -348,7 +348,7 @@ def main(): for split in raw_datasets.keys(): raw_datasets[split] = raw_datasets[split].select(range(100)) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names diff --git a/examples/pytorch/translation/README.md b/examples/pytorch/translation/README.md index 0593d577a01f..bd95e3a55215 100644 --- a/examples/pytorch/translation/README.md +++ b/examples/pytorch/translation/README.md @@ -33,7 +33,7 @@ For the old `finetune_trainer.py` and related utils, see [`examples/legacy/seq2s `run_translation.py` is a lightweight examples of how to download and preprocess a dataset from the [🤗 Datasets](https://github.com/huggingface/datasets) library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it. -For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets.html#json-files +For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets#json-files and you also will find examples of these below. diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index 35bc9a59da34..1b7a1417a682 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -389,7 +389,7 @@ def main(): extension = args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/research_projects/jax-projects/README.md b/examples/research_projects/jax-projects/README.md index fc5f09695522..420a97f7682a 100644 --- a/examples/research_projects/jax-projects/README.md +++ b/examples/research_projects/jax-projects/README.md @@ -227,7 +227,7 @@ the forum and making use of the [🤗 hub](http://huggingface.co/) to have a ver control for your models and training logs. 
- When debugging, it is important that the debugging cycle is kept as short as possible to be able to effectively debug. *E.g.* if there is a problem with your training script, -you should run it with just a couple of hundreds of examples and not the whole dataset script. This can be done by either making use of [datasets streaming](https://huggingface.co/docs/datasets/master/dataset_streaming.html?highlight=streaming) or by selecting just the first +you should run it with just a couple of hundreds of examples and not the whole dataset script. This can be done by either making use of [datasets streaming](https://huggingface.co/docs/datasets/master/dataset_streaming?highlight=streaming) or by selecting just the first X number of data samples after loading: ```python diff --git a/examples/research_projects/jax-projects/dataset-streaming/README.md b/examples/research_projects/jax-projects/dataset-streaming/README.md index 416eee06af33..35fc02acd29d 100644 --- a/examples/research_projects/jax-projects/dataset-streaming/README.md +++ b/examples/research_projects/jax-projects/dataset-streaming/README.md @@ -23,7 +23,7 @@ JAX/Flax allows you to trace pure functions and compile them into efficient, fus Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism. -All of the following examples make use of [dataset streaming](https://huggingface.co/docs/datasets/master/dataset_streaming.html), therefore allowing to train models on massive datasets\ +All of the following examples make use of [dataset streaming](https://huggingface.co/docs/datasets/master/dataset_streaming), therefore allowing to train models on massive datasets\ without ever having to download the full dataset. ## Masked language modeling diff --git a/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py b/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py index efe87c1b059f..4ff4bd559d8c 100644 --- a/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py +++ b/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py @@ -304,7 +304,7 @@ def main(): extension = "text" dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained config and tokenizer if model_args.config_name: diff --git a/examples/research_projects/jax-projects/wav2vec2/README.md b/examples/research_projects/jax-projects/wav2vec2/README.md index 3b1b74743085..200e7ad933ee 100644 --- a/examples/research_projects/jax-projects/wav2vec2/README.md +++ b/examples/research_projects/jax-projects/wav2vec2/README.md @@ -10,7 +10,7 @@ way which enables simple and efficient model parallelism. `run_wav2vec2_pretrain_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then pretrain the wav2vec2 architectures above on it. -For custom datasets in `jsonlines` format please see: [the Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.html#json-files) and you also will find examples of these below. 
+For custom datasets in `jsonlines` format please see: [the Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets#json-files) and you also will find examples of these below. Let's start by creating a model repository to save the trained model and logs. Here we call the model `"wav2vec2-base-robust"`, but you can change the model name as you like. diff --git a/examples/research_projects/luke/run_luke_ner_no_trainer.py b/examples/research_projects/luke/run_luke_ner_no_trainer.py index c1b573aee814..e03c665e4ec2 100644 --- a/examples/research_projects/luke/run_luke_ner_no_trainer.py +++ b/examples/research_projects/luke/run_luke_ner_no_trainer.py @@ -294,7 +294,7 @@ def main(): for split in raw_datasets.keys(): raw_datasets[split] = raw_datasets[split].select(range(100)) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names diff --git a/examples/research_projects/mlm_wwm/run_mlm_wwm.py b/examples/research_projects/mlm_wwm/run_mlm_wwm.py index d22b2db7dcad..3a7326d38219 100644 --- a/examples/research_projects/mlm_wwm/run_mlm_wwm.py +++ b/examples/research_projects/mlm_wwm/run_mlm_wwm.py @@ -278,7 +278,7 @@ def main(): extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/research_projects/performer/run_mlm_performer.py b/examples/research_projects/performer/run_mlm_performer.py index c1ff5aa388a5..7c1f418815be 100644 --- a/examples/research_projects/performer/run_mlm_performer.py +++ b/examples/research_projects/performer/run_mlm_performer.py @@ -524,7 +524,7 @@ def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarra extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer diff --git a/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py b/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py index 2055e6f4676b..f056e89206c6 100755 --- a/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py +++ b/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py @@ -272,7 +272,7 @@ def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_ou else: raise ValueError("Evaluation requires a dataset name") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at -# https://huggingface.co/docs/datasets/loading_datasets.html. +# https://huggingface.co/docs/datasets/loading_datasets. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. 
diff --git a/examples/research_projects/quantization-qdqbert/run_quant_qa.py b/examples/research_projects/quantization-qdqbert/run_quant_qa.py index fac834ef70f3..3294b70da7e3 100755 --- a/examples/research_projects/quantization-qdqbert/run_quant_qa.py +++ b/examples/research_projects/quantization-qdqbert/run_quant_qa.py @@ -308,7 +308,7 @@ def main(): extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # set default quantization parameters before building model quant_trainer.set_default_quantizers(quant_trainer_args) diff --git a/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py b/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py index e0aa86a3a65b..20e0ea2d3cc2 100644 --- a/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py +++ b/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py @@ -65,7 +65,7 @@ def main( "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] ) - # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files + # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets?highlight=csv#csv-files # Then split the documents into passages of 100 words dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc) diff --git a/examples/research_projects/rag/use_own_knowledge_dataset.py b/examples/research_projects/rag/use_own_knowledge_dataset.py index 84d7c854975f..d2ab6d07d5cc 100644 --- a/examples/research_projects/rag/use_own_knowledge_dataset.py +++ b/examples/research_projects/rag/use_own_knowledge_dataset.py @@ -73,7 +73,7 @@ def main( "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] ) - # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files + # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets?highlight=csv#csv-files # Then split the documents into passages of 100 words dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc) diff --git a/examples/research_projects/robust-speech-event/README.md b/examples/research_projects/robust-speech-event/README.md index fd1a42c7d4bb..4999950020b1 100644 --- a/examples/research_projects/robust-speech-event/README.md +++ b/examples/research_projects/robust-speech-event/README.md @@ -112,7 +112,7 @@ Hugging Face Hub for additional audio data, for example by selecting the categor ["speech-processing"](https://huggingface.co/datasets?task_categories=task_categories:speech-processing&sort=downloads). All datasets that are available on the Hub can be downloaded via the 🤗 Datasets library in the same way Common Voice is downloaded. 
If one wants to combine multiple datasets for training, it might make sense to take a look at -the [`interleave_datasets`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=interleave#datasets.interleave_datasets) function. +the [`interleave_datasets`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=interleave#datasets.interleave_datasets) function. In addition, participants can also make use of their audio data. Here, please make sure that you **are allowed to use the audio data**. E.g., if audio data is taken from media platforms, such as YouTube, it should be verified that the media platform and the owner of the data have given her/his approval to use the audio diff --git a/examples/research_projects/tapex/run_tabfact_with_tapex.py b/examples/research_projects/tapex/run_tabfact_with_tapex.py index 2bef4a371ef6..5dcec10a084c 100644 --- a/examples/research_projects/tapex/run_tabfact_with_tapex.py +++ b/examples/research_projects/tapex/run_tabfact_with_tapex.py @@ -277,7 +277,7 @@ def main(): # Loading a dataset from local json files raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Labels label_list = raw_datasets["train"].features["label"].names diff --git a/examples/research_projects/tapex/run_wikisql_with_tapex.py b/examples/research_projects/tapex/run_wikisql_with_tapex.py index 821b283d9ff6..81e940a77c88 100644 --- a/examples/research_projects/tapex/run_wikisql_with_tapex.py +++ b/examples/research_projects/tapex/run_wikisql_with_tapex.py @@ -317,7 +317,7 @@ def main(): datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py b/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py index f874eebb3418..55350025cb3b 100644 --- a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py +++ b/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py @@ -315,7 +315,7 @@ def main(): datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/examples/tensorflow/contrastive-image-text/run_clip.py b/examples/tensorflow/contrastive-image-text/run_clip.py index e56d66ce196c..7f625616012b 100644 --- a/examples/tensorflow/contrastive-image-text/run_clip.py +++ b/examples/tensorflow/contrastive-image-text/run_clip.py @@ -361,7 +361,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # 5. 
Load pretrained model, tokenizer, and image processor if model_args.tokenizer_name: diff --git a/examples/tensorflow/image-classification/run_image_classification.py b/examples/tensorflow/image-classification/run_image_classification.py index 53c95b972a75..3e4fe829682e 100644 --- a/examples/tensorflow/image-classification/run_image_classification.py +++ b/examples/tensorflow/image-classification/run_image_classification.py @@ -316,7 +316,7 @@ def main(): task="image-classification", ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. diff --git a/examples/tensorflow/language-modeling/run_clm.py b/examples/tensorflow/language-modeling/run_clm.py index d1555af174fe..52b76f8fa0e4 100755 --- a/examples/tensorflow/language-modeling/run_clm.py +++ b/examples/tensorflow/language-modeling/run_clm.py @@ -371,7 +371,7 @@ def main(): **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # endregion # region Load pretrained model and tokenizer diff --git a/examples/tensorflow/language-modeling/run_mlm.py b/examples/tensorflow/language-modeling/run_mlm.py index 73c5d292cde5..5be9e0219b71 100755 --- a/examples/tensorflow/language-modeling/run_mlm.py +++ b/examples/tensorflow/language-modeling/run_mlm.py @@ -353,7 +353,7 @@ def main(): ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # endregion # region Load pretrained model and tokenizer diff --git a/examples/tensorflow/multiple-choice/run_swag.py b/examples/tensorflow/multiple-choice/run_swag.py index db73e137b33d..7b0cc7af9da7 100644 --- a/examples/tensorflow/multiple-choice/run_swag.py +++ b/examples/tensorflow/multiple-choice/run_swag.py @@ -338,7 +338,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # When using your own dataset or a different dataset from swag, you will probably need to change this. ending_names = [f"ending{i}" for i in range(4)] diff --git a/examples/tensorflow/question-answering/run_qa.py b/examples/tensorflow/question-answering/run_qa.py index 4d458c1190dd..9f1fd997f1c4 100755 --- a/examples/tensorflow/question-answering/run_qa.py +++ b/examples/tensorflow/question-answering/run_qa.py @@ -352,7 +352,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. 
# endregion # region Load pretrained model and tokenizer diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py index c60893399340..6781801f64cd 100644 --- a/examples/tensorflow/summarization/run_summarization.py +++ b/examples/tensorflow/summarization/run_summarization.py @@ -401,7 +401,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # endregion # region Load model config and tokenizer diff --git a/examples/tensorflow/text-classification/run_glue.py b/examples/tensorflow/text-classification/run_glue.py index 618296aa71a8..642bf57d1ae1 100644 --- a/examples/tensorflow/text-classification/run_glue.py +++ b/examples/tensorflow/text-classification/run_glue.py @@ -271,7 +271,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. is_regression = data_args.task_name == "stsb" if not is_regression: diff --git a/examples/tensorflow/text-classification/run_text_classification.py b/examples/tensorflow/text-classification/run_text_classification.py index 169f109d2d92..0c0d989c4cb3 100644 --- a/examples/tensorflow/text-classification/run_text_classification.py +++ b/examples/tensorflow/text-classification/run_text_classification.py @@ -290,7 +290,7 @@ def main(): # Loading a dataset from local json files datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # endregion # region Label preprocessing diff --git a/examples/tensorflow/token-classification/run_ner.py b/examples/tensorflow/token-classification/run_ner.py index bca64df92479..31dff57862c7 100644 --- a/examples/tensorflow/token-classification/run_ner.py +++ b/examples/tensorflow/token-classification/run_ner.py @@ -269,7 +269,7 @@ def main(): token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names diff --git a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py index acdfe49090e8..f01283ae08fc 100755 --- a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py +++ b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py @@ -290,7 +290,7 @@ def main(): extension = "text" raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. 
# Load pretrained model and tokenizer # @@ -735,7 +735,7 @@ def main(): extension = args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # diff --git a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py index c38ee542e6ce..fd8b36fc9ab2 100644 --- a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py +++ b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py @@ -282,7 +282,7 @@ def main(): # Loading a dataset from local json files datasets = load_dataset("json", data_files=data_files) # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. + # https://huggingface.co/docs/datasets/loading_datasets. # Labels if data_args.task_name is not None: @@ -299,7 +299,7 @@ def main(): num_labels = 1 else: # A useful fast method: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique + # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) From d1a00f9dd0b851245e4a54cbd70816a80e781ec2 Mon Sep 17 00:00:00 2001 From: jiaqiw09 <60021713+jiaqiw09@users.noreply.github.com> Date: Sat, 18 Nov 2023 05:49:31 +0800 Subject: [PATCH 211/268] translate deepspeed.md to chinese (#27495) * translate deepspeed.md * update --- docs/source/zh/_toctree.yml | 2 + docs/source/zh/main_classes/deepspeed.md | 2101 ++++++++++++++++++++++ 2 files changed, 2103 insertions(+) create mode 100644 docs/source/zh/main_classes/deepspeed.md diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml index dd3b6cf9982b..aa44dbc9cdcd 100644 --- a/docs/source/zh/_toctree.yml +++ b/docs/source/zh/_toctree.yml @@ -72,5 +72,7 @@ title: 模型 - local: main_classes/trainer title: Trainer + - local: main_classes/deepspeed + title: DeepSpeed集成 title: 主要类 title: 应用程序接口 (API) diff --git a/docs/source/zh/main_classes/deepspeed.md b/docs/source/zh/main_classes/deepspeed.md new file mode 100644 index 000000000000..c9f9781b65f4 --- /dev/null +++ b/docs/source/zh/main_classes/deepspeed.md @@ -0,0 +1,2101 @@ + + +# DeepSpeed集成 + +[DeepSpeed](https://github.com/microsoft/DeepSpeed)实现了[ZeRO论文](https://arxiv.org/abs/1910.02054)中描述的所有内容。目前,它提供对以下功能的全面支持: + +1. 优化器状态分区(ZeRO stage 1) +2. 梯度分区(ZeRO stage 2) +3. 参数分区(ZeRO stage 3) +4. 自定义混合精度训练处理 +5. 一系列基于CUDA扩展的快速优化器 +6. ZeRO-Offload 到 CPU 和 NVMe + +ZeRO-Offload有其自己的专门论文:[ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)。而NVMe支持在论文[ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)中进行了描述。 + +DeepSpeed ZeRO-2主要用于训练,因为它的特性对推理没有用处。 + +DeepSpeed ZeRO-3也可以用于推理,因为它允许将单个GPU无法加载的大模型加载到多个GPU上。 + +🤗 Transformers通过以下两种方式集成了[DeepSpeed](https://github.com/microsoft/DeepSpeed): + +1. 通过[`Trainer`]集成核心的DeepSpeed功能。这是一种“为您完成一切”式的集成 - 您只需提供自定义配置文件或使用我们的模板配置文件。本文档的大部分内容都集中在这个功能上。 +2. 
如果您不使用[`Trainer`]并希望在自己的Trainer中集成DeepSpeed,那么像`from_pretrained`和`from_config`这样的核心功能函数将包括ZeRO stage 3及以上的DeepSpeed的基础部分,如`zero.Init`。要利用此功能,请阅读有关[非Trainer DeepSpeed集成](#nontrainer-deepspeed-integration)的文档。 + +集成的内容: + +训练: + +1. DeepSpeed ZeRO训练支持完整的ZeRO stages 1、2和3,以及ZeRO-Infinity(CPU和NVMe offload)。 + +推理: + +1. DeepSpeed ZeRO推理支持ZeRO stage 3和ZeRO-Infinity。它使用与训练相同的ZeRO协议,但不使用优化器和学习率调度器,只有stage 3与推理相关。更多详细信息请参阅:[zero-inference](#zero-inference)。 + +此外还有DeepSpeed推理 - 这是一种完全不同的技术,它使用张量并行而不是ZeRO(即将推出)。 + + + + + +## Trainer DeepSpeed 集成 + + + + +### 安装 + +通过pypi安装库: + + +```bash +pip install deepspeed +``` + +或通过 `transformers` 的 `extras`安装: + +```bash +pip install transformers[deepspeed] +``` + +或在 [DeepSpeed 的 GitHub 页面](https://github.com/microsoft/deepspeed#installation) 和 +[高级安装](https://www.deepspeed.ai/tutorials/advanced-install/) 中查找更多详细信息。 + +如果构建过程中仍然遇到问题,请首先确保阅读 [CUDA 扩展安装注意事项](trainer#cuda-extension-installation-notes)。 + +如果您没有预先构建扩展而是在运行时构建它们,而且您尝试了以上所有解决方案都无效,下一步可以尝试在安装之前预先构建扩展。 + +进行 DeepSpeed 的本地构建: + + +```bash +git clone https://github.com/microsoft/DeepSpeed/ +cd DeepSpeed +rm -rf build +TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install . \ +--global-option="build_ext" --global-option="-j8" --no-cache -v \ +--disable-pip-version-check 2>&1 | tee build.log +``` + +如果您打算使用 NVMe offload,您还需要在上述说明中添加 `DS_BUILD_AIO=1`(并且还需要在系统范围内安装 *libaio-dev*)。 + +编辑 `TORCH_CUDA_ARCH_LIST` 以插入您打算使用的 GPU 卡的架构代码。假设您的所有卡都是相同的,您可以通过以下方式获取架构: + +```bash +CUDA_VISIBLE_DEVICES=0 python -c "import torch; print(torch.cuda.get_device_capability())" +``` + +因此,如果您得到 `8, 6`,则使用 `TORCH_CUDA_ARCH_LIST="8.6"`。如果您有多个不同的卡,您可以像这样列出所有卡 `TORCH_CUDA_ARCH_LIST="6.1;8.6"`。 + +如果您需要在多台机器上使用相同的设置,请创建一个二进制 wheel: + + +```bash +git clone https://github.com/microsoft/DeepSpeed/ +cd DeepSpeed +rm -rf build +TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 \ +python setup.py build_ext -j8 bdist_wheel +``` + +它将生成类似于 `dist/deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl` 的文件,现在您可以在本地或任何其他机器上安装它,如 `pip install deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl`。 + +再次提醒确保调整 `TORCH_CUDA_ARCH_LIST` 以匹配目标架构。 + +您可以在[这里](https://developer.nvidia.com/cuda-gpus)找到完整的 NVIDIA GPU 列表及其对应的 **计算能力**(与此上下文中的架构相同)。 + +您可以使用以下命令检查 PyTorch 构建时使用的架构: + + +```bash +python -c "import torch; print(torch.cuda.get_arch_list())" +``` + +以下是如何查找已安装 GPU 中的一张卡的架构。例如,对于 GPU 0: + +```bash +CUDA_VISIBLE_DEVICES=0 python -c "import torch; \ +print(torch.cuda.get_device_properties(torch.device('cuda')))" +``` + +如果输出结果如下: + +```bash +_CudaDeviceProperties(name='GeForce RTX 3090', major=8, minor=6, total_memory=24268MB, multi_processor_count=82) +``` + +然后您就知道这张卡的架构是 `8.6`。 + +您也可以完全省略 `TORCH_CUDA_ARCH_LIST`,然后构建程序将自动查询构建所在的 GPU 的架构。这可能与目标机器上的 GPU 不匹配,因此最好明确指定所需的架构。 + +如果尝试了所有建议的方法仍然遇到构建问题,请继续在 [Deepspeed](https://github.com/microsoft/DeepSpeed/issues)的 GitHub Issue 上提交问题。 + + + + +### 多GPU启用 + +为了启用DeepSpeed 集成,调整 [`Trainer`] 的命令行参数,添加一个新的参数 `--deepspeed ds_config.json`,其中 `ds_config.json` 是 DeepSpeed 配置文件,如文档 [这里](https://www.deepspeed.ai/docs/config-json/) 所述。文件命名由您决定。 +建议使用 DeepSpeed 的 `add_config_arguments` 程序将必要的命令行参数添加到您的代码中。 +有关更多信息,请参阅 [DeepSpeed 的参数解析](https://deepspeed.readthedocs.io/en/latest/initialize.html#argument-parsing) 文档。 + +在这里,您可以使用您喜欢的启动器。您可以继续使用 PyTorch 启动器: + + +```bash +torch.distributed.run --nproc_per_node=2 your_program.py --deepspeed ds_config.json +``` + +或使用由 `deepspeed` 提供的启动器: + + +```bash +deepspeed --num_gpus=2 your_program.py --deepspeed ds_config.json +``` + + 
+正如您所见,这两个启动器的参数不同,但对于大多数需求,任何一个都可以满足工作需求。有关如何配置各个节点和 GPU 的完整详细信息,请查看 [此处](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node)。 + +当您使用 `deepspeed` 启动器并且希望使用所有可用的 GPU 时,您可以简单地省略 `--num_gpus` 标志。 + +以下是在 DeepSpeed 中启用使用所有可用 GPU情况下, 运行 `run_translation.py` 的示例: + + +```bash +deepspeed examples/pytorch/translation/run_translation.py \ +--deepspeed tests/deepspeed/ds_config_zero3.json \ +--model_name_or_path t5-small --per_device_train_batch_size 1 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ +--do_train --max_train_samples 500 --num_train_epochs 1 \ +--dataset_name wmt16 --dataset_config "ro-en" \ +--source_lang en --target_lang ro +``` + +请注意,在 DeepSpeed 文档中,您可能会看到 `--deepspeed --deepspeed_config ds_config.json` - 即两个与 DeepSpeed 相关的参数,但为简单起见,并且因为已经有很多参数要处理,我们将两者合并为一个单一参数。 + +有关一些实际使用示例,请参阅 [此帖](https://github.com/huggingface/transformers/issues/8771#issuecomment-759248400)。 + + + + + +### 单GPU启用 + +要使用一张 GPU 启用 DeepSpeed,调整 [`Trainer`] 的命令行参数如下: + + +```bash +deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ +--deepspeed tests/deepspeed/ds_config_zero2.json \ +--model_name_or_path t5-small --per_device_train_batch_size 1 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ +--do_train --max_train_samples 500 --num_train_epochs 1 \ +--dataset_name wmt16 --dataset_config "ro-en" \ +--source_lang en --target_lang ro +``` + +这与多 GPU 的情况几乎相同,但在这里我们通过 `--num_gpus=1` 明确告诉 DeepSpeed 仅使用一张 GPU。默认情况下,DeepSpeed 启用给定节点上可以看到的所有 GPU。如果您一开始只有一张 GPU,那么您不需要这个参数。以下 [文档](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) 讨论了启动器的选项。 + +为什么要在仅使用一张 GPU 的情况下使用 DeepSpeed 呢? + +1. 它具有 ZeRO-offload 功能,可以将一些计算和内存委托给主机的 CPU 和 内存,从而为模型的需求保留更多 GPU 资源 - 例如更大的批处理大小,或启用正常情况下无法容纳的非常大模型。 +2. 它提供了智能的 GPU 内存管理系统,最小化内存碎片,这再次允许您容纳更大的模型和数据批次。 + +虽然接下来我们将详细讨论配置,但在单个 GPU 上通过 DeepSpeed 实现巨大性能提升的关键是在配置文件中至少有以下配置: + + +```json +{ + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "overlap_comm": true, + "contiguous_gradients": true + } +} +``` + +这会启用`optimizer offload `和一些其他重要功能。您可以尝试不同的buffer大小,有关详细信息,请参见下面的讨论。 + +关于这种启用类型的实际使用示例,请参阅 [此帖](https://github.com/huggingface/transformers/issues/8771#issuecomment-759176685)。 + +您还可以尝试使用本文后面进一步解释的支持`CPU 和 NVMe offload`功能的ZeRO-3 。 + + + + +注意: + +- 如果您需要在特定的 GPU 上运行,而不是 GPU 0,则无法使用 `CUDA_VISIBLE_DEVICES` 来限制可用 GPU 的可见范围。相反,您必须使用以下语法: + + ```bash + deepspeed --include localhost:1 examples/pytorch/translation/run_translation.py ... 
+ ``` + + 在这个例子中,我们告诉 DeepSpeed 使用 GPU 1(第二个 GPU)。 + + + + + +### 多节点启用 + +这一部分的信息不仅适用于 DeepSpeed 集成,也适用于任何多节点程序。但 DeepSpeed 提供了一个比其他启动器更易于使用的 `deepspeed` 启动器,除非您在 SLURM 环境中。 + +在本节,让我们假设您有两个节点,每个节点有 8 张 GPU。您可以通过 `ssh hostname1` 访问第一个节点,通过 `ssh hostname2` 访问第二个节点,两者必须能够在本地通过 ssh 无密码方式相互访问。当然,您需要将这些主机(节点)名称重命名为您实际使用的主机名称。 + + +#### torch.distributed.run启动器 + + +例如,要使用 `torch.distributed.run`,您可以执行以下操作: + +```bash +python -m torch.distributed.run --nproc_per_node=8 --nnode=2 --node_rank=0 --master_addr=hostname1 \ +--master_port=9901 your_program.py --deepspeed ds_config.json +``` + +您必须 ssh 到每个节点,并在每个节点上运行相同的命令!不用担心,启动器会等待两个节点同步完成。 + +有关更多信息,请参阅 [torchrun](https://pytorch.org/docs/stable/elastic/run.html)。顺便说一下,这也是替代了几个 PyTorch 版本前的 `torch.distributed.launch` 的启动器。 + + +#### deepspeed启动器 + +要改用 `deepspeed` 启动器,首先需要创建一个 `hostfile` 文件: + +``` +hostname1 slots=8 +hostname2 slots=8 +``` +然后,您可以这样启动: + +```bash +deepspeed --num_gpus 8 --num_nodes 2 --hostfile hostfile --master_addr hostname1 --master_port=9901 \ +your_program.py --deepspeed ds_config.json +``` + +与 `torch.distributed.run` 启动器不同,`deepspeed` 将自动在两个节点上启动此命令! + +更多信息,请参阅[资源配置(多节点)](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node)。 + + +#### 在 SLURM 环境中启动 + +在 SLURM 环境中,可以采用以下方法。以下是一个 SLURM 脚本 `launch.slurm`,您需要根据您的具体 SLURM 环境进行调整。 + +```bash +#SBATCH --job-name=test-nodes # name +#SBATCH --nodes=2 # nodes +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=10 # number of cores per tasks +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name + +export GPUS_PER_NODE=8 +export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +export MASTER_PORT=9901 + +srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ + --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ +your_program.py --deepspeed ds_config.json' +``` + +剩下的就是运行它: + +```bash +sbatch launch.slurm +``` + +`srun` 将负责在所有节点上同时启动程序。 + + +#### 使用非共享文件系统 + +默认情况下,DeepSpeed 假定多节点环境使用共享存储。如果不是这种情况,每个节点只能看到本地文件系统,你需要调整配置文件,包含一个 [`checkpoint` 部分](https://www.deepspeed.ai/docs/config-json/#checkpoint-options)并设置如下选项: + +```json +{ + "checkpoint": { + "use_node_local_storage": true + } +} +``` + +或者,你还可以使用 [`Trainer`] 的 `--save_on_each_node` 参数,上述配置将自动添加。 + + + + +### 在Notebooks启用 + +在将`notebook cells`作为脚本运行的情况下,问题在于没有正常的 `deepspeed` 启动器可依赖,因此在某些设置下,我们必须仿真运行它。 + +如果您只使用一个 GPU,以下是如何调整notebook中的训练代码以使用 DeepSpeed。 + +```python +# DeepSpeed requires a distributed environment even when only one process is used. +# This emulates a launcher in the notebook +import os + +os.environ["MASTER_ADDR"] = "localhost" +os.environ["MASTER_PORT"] = "9994" # modify if RuntimeError: Address already in use +os.environ["RANK"] = "0" +os.environ["LOCAL_RANK"] = "0" +os.environ["WORLD_SIZE"] = "1" + +# Now proceed as normal, plus pass the deepspeed config file +training_args = TrainingArguments(..., deepspeed="ds_config_zero3.json") +trainer = Trainer(...) 
+trainer.train() +``` + +注意:`...` 代表您传递给函数的正常参数。 + +如果要使用多于一个 GPU,您必须在 DeepSpeed 中使用多进程环境。也就是说,您必须使用专门的启动器来实现这一目的,而不能通过仿真本节开头呈现的分布式环境来完成。 + +如果想要在notebook中动态创建配置文件并保存在当前目录,您可以在一个专用的cell中使用: + +```python no-style +%%bash +cat <<'EOT' > ds_config_zero3.json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +EOT +``` + +如果训练脚本在一个普通文件中而不是在notebook cells中,您可以通过笔记本中的 shell 正常启动 `deepspeed`。例如,要使用 `run_translation.py`,您可以这样启动: + +```python no-style +!git clone https://github.com/huggingface/transformers +!cd transformers; deepspeed examples/pytorch/translation/run_translation.py ... +``` + +或者使用 `%%bash` 魔术命令,您可以编写多行代码,用于运行 shell 程序: + +```python no-style +%%bash + +git clone https://github.com/huggingface/transformers +cd transformers +deepspeed examples/pytorch/translation/run_translation.py ... +``` + +在这种情况下,您不需要本节开头呈现的任何代码。 + +注意:虽然 `%%bash` 魔术命令很方便,但目前它会缓冲输出,因此在进程完成之前您看不到日志。 + + + + +### 配置 + +有关可以在 DeepSpeed 配置文件中使用的完整配置选项的详细指南,请参阅[以下文档](https://www.deepspeed.ai/docs/config-json/)。 + +您可以在 [DeepSpeedExamples 仓库](https://github.com/microsoft/DeepSpeedExamples)中找到解决各种实际需求的数十个 DeepSpeed 配置示例。 + +```bash +git clone https://github.com/microsoft/DeepSpeedExamples +cd DeepSpeedExamples +find . -name '*json' +``` + +延续上面的代码,假设您要配置 Lamb 优化器。那么您可以通过以下方式在示例的 `.json` 文件中进行搜索: + +```bash +grep -i Lamb $(find . 
-name '*json') +``` + +还可以在[主仓](https://github.com/microsoft/DeepSpeed)中找到更多示例。 + +在使用 DeepSpeed 时,您总是需要提供一个 DeepSpeed 配置文件,但是一些配置参数必须通过命令行进行配置。您将在本指南的剩余章节找到这些细微差别。 + +为了了解 DeepSpeed 配置文件,这里有一个激活 ZeRO stage 2 功能的示例,包括优化器状态的 CPU offload,使用 `AdamW` 优化器和 `WarmupLR` 调度器,并且如果传递了 `--fp16` 参数将启用混合精度训练: + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "contiguous_gradients": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", +} +``` + +当您执行程序时,DeepSpeed 将把它从 [`Trainer`] 收到的配置日志输出到console,因此您可以看到传递给它的最终配置。 + + + + + +### 传递配置 + +正如本文档讨论的那样,通常将 DeepSpeed 配置作为指向 JSON 文件的路径传递,但如果您没有使用命令行界面配置训练,而是通过 [`TrainingArguments`] 实例化 [`Trainer`],那么对于 `deepspeed` 参数,你可以传递一个嵌套的 `dict`。这使您能够即时创建配置,而无需在将其传递给 [`TrainingArguments`] 之前将其写入文件系统。 + +总结起来,您可以这样做: + +```python +TrainingArguments(..., deepspeed="/path/to/ds_config.json") +``` + +或者: + +```python +ds_config_dict = dict(scheduler=scheduler_params, optimizer=optimizer_params) +TrainingArguments(..., deepspeed=ds_config_dict) +``` + + + +### 共享配置 + + + + +这一部分是必读的。 + + + +一些配置值对于 [`Trainer`] 和 DeepSpeed 正常运行都是必需的,因此,为了防止定义冲突及导致的难以检测的错误,我们选择通过 [`Trainer`] 命令行参数配置这些值。 + +此外,一些配置值是基于模型的配置自动派生的,因此,与其记住手动调整多个值,最好让 [`Trainer`] 为您做大部分配置。 + +因此,在本指南的其余部分,您将找到一个特殊的配置值:`auto`,当设置时将自动将参数替换为正确或最有效的值。请随意选择忽略此建议或显式设置该值,在这种情况下,请务必确保 [`Trainer`] 参数和 DeepSpeed 配置保持一致。例如,您是否使用相同的学习率、批量大小或梯度累积设置?如果这些不匹配,训练可能以非常难以检测的方式失败。请重视该警告。 + +还有一些参数是仅适用于 DeepSpeed 的,并且这些参数必须手动设置以适应您的需求。 + +在您自己的程序中,如果您想要作为主动修改 DeepSpeed 配置并以此配置 [`TrainingArguments`],您还可以使用以下方法。步骤如下: + +1. 创建或加载要用作主配置的 DeepSpeed 配置 +2. 
根据这些参数值创建 [`TrainingArguments`] 对象 + +请注意,一些值,比如 `scheduler.params.total_num_steps`,是在 [`Trainer`] 的 `train` 过程中计算的,但当然您也可以自己计算这些值。 + + + + +### ZeRO + +[Zero Redundancy Optimizer (ZeRO)](https://www.deepspeed.ai/tutorials/zero/) 是 DeepSpeed 的工作核心。它支持3个不同级别(stages)的优化。Stage 1 对于扩展性来说不是很有趣,因此本文档重点关注Stage 2和Stage 3。Stage 3通过最新的 ZeRO-Infinity 进一步改进。你可以在 DeepSpeed 文档中找到更详细的信息。 + +配置文件的 `zero_optimization` 部分是最重要的部分([文档](https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training)),因为在这里您定义了要启用哪些 ZeRO stages 以及如何配置它们。您可以在 DeepSpeed 文档中找到每个参数的解释。 + +这一部分必须通过 DeepSpeed 配置文件单独配置 - [`Trainer`] 不提供相应的命令行参数。 + +注意:目前 DeepSpeed 不验证参数名称,因此如果您拼错了任何参数,它将使用拼写错误的参数的默认设置。您可以观察 DeepSpeed 引擎启动日志消息,看看它将使用哪些值。 + + + +#### ZeRO-2 配置 + +以下是 ZeRO stage 2 的配置示例: + +```json +{ + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients": true + } +} +``` + +**性能调优:** + +- 启用 `offload_optimizer` 应该减少 GPU 内存使用(需要 `"stage": 2`)。 +- `"overlap_comm": true` 通过增加 GPU 内存使用来降低all-reduce 的延迟。 `overlap_comm` 使用了 `allgather_bucket_size` 和 `reduce_bucket_size` 值的4.5倍。因此,如果它们设置为 `5e8`,这将需要一个9GB的内存占用(`5e8 x 2Bytes x 2 x 4.5`)。因此,如果您的 GPU 内存为8GB或更小,为了避免出现OOM错误,您需要将这些参数减小到约 `2e8`,这将需要3.6GB。如果您的 GPU 容量更大,当您开始遇到OOM时,你可能也需要这样做。 +- 当减小这些buffers时,您以更慢的通信速度来换取更多的 GPU 内存。buffers大小越小,通信速度越慢,GPU 可用于其他任务的内存就越多。因此,如果更大的批处理大小很重要,那么稍微减慢训练时间可能是一个很好的权衡。 + +此外,`deepspeed==0.4.4` 添加了一个新选项 `round_robin_gradients`,您可以通过以下方式启用: + +```json +{ + "zero_optimization": { + "round_robin_gradients": true + } +} +``` +这是一个用于 CPU offloading 的stage 2优化,通过细粒度梯度分区在 ranks 之间并行复制到 CPU 内存,从而实现了性能的提升。性能优势随着梯度累积步骤(在优化器步骤之间进行更多复制)或 GPU 数量(增加并行性)增加而增加。 + + + +#### ZeRO-3 配置 + +以下是 ZeRO stage 3的配置示例: + +```json +{ + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + } +} +``` + +如果您因为你的模型或激活值超过 GPU 内存而遇到OOM问题,并且您有未使用的 CPU 内存,可以通股票使用 `"device": "cpu"` 将优化器状态和参数卸载到 CPU 内存中,来解决这个限制。如果您不想卸载到 CPU 内存,可以在 `device` 条目中使用 `none` 代替 `cpu`。将优化器状态卸载到 NVMe 上会在后面进一步讨论。 + +通过将 `pin_memory` 设置为 `true` 启用固定内存。此功能会以减少可用于其他进程的内存为代价来提高吞吐量。固定内存被分配给特定请求它的进程,通常比普通 CPU 内存访问速度更快。 + +**性能调优:** + +- `stage3_max_live_parameters`: `1e9` +- `stage3_max_reuse_distance`: `1e9` + +如果遇到OOM问题,请减小 `stage3_max_live_parameters` 和 `stage3_max_reuse_distance`。它们对性能的影响应该很小,除非您正在进行激活值checkpointing。`1e9` 大约会消耗 ~2GB。内存由 `stage3_max_live_parameters` 和 `stage3_max_reuse_distance` 共享,所以它不是叠加的,而是总共2GB。 + +`stage3_max_live_parameters` 是在任何给定时间要在 GPU 上保留多少个完整参数的上限。"reuse distance" 是我们用来确定参数在将来何时会再次使用的度量标准,我们使用 `stage3_max_reuse_distance` 来决定是丢弃参数还是保留参数。如果一个参数在不久的将来(小于 `stage3_max_reuse_distance`)将被再次使用,那么我们将其保留以减少通信开销。这在启用激活值checkpoing时非常有用,其中我们以单层粒度进行前向重计算和反向传播,并希望在反向传播期间保留前向重计算中的参数。 + +以下配置值取决于模型的隐藏大小: + +- `reduce_bucket_size`: `hidden_size*hidden_size` +- `stage3_prefetch_bucket_size`: `0.9 * hidden_size * hidden_size` +- `stage3_param_persistence_threshold`: `10 * hidden_size` + +因此,将这些值设置为 `auto`,[`Trainer`] 将自动分配推荐的参数值。当然,如果您愿意,也可以显式设置这些值。 
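下面是一个示意性的小片段(并非本文档规定的官方做法,其中的模型名称仅为假设),演示如何按上面列出的公式、根据模型配置手动计算这些与隐藏大小相关的推荐值,以便在不使用 `auto` 时显式填入配置文件:

```python
# 示意代码:按上述公式从模型配置推导与 hidden_size 相关的 ZeRO-3 配置值(模型名称仅为示例)
from transformers import AutoConfig

config = AutoConfig.from_pretrained("t5-small")
hidden_size = getattr(config, "hidden_size", None) or config.d_model  # 某些模型使用 d_model

zero3_values = {
    "reduce_bucket_size": hidden_size * hidden_size,
    "stage3_prefetch_bucket_size": int(0.9 * hidden_size * hidden_size),
    "stage3_param_persistence_threshold": 10 * hidden_size,
}
print(zero3_values)
```

得到的数值即可替换配置文件中对应条目的 `auto`。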
+ +`stage3_gather_16bit_weights_on_model_save` 在模型保存时启用模型的 fp16 权重整合。对于大模型和多个 GPU,无论是在内存还是速度方面,这都是一项昂贵的操作。目前如果计划恢复训练,这是必需的。请注意未来的更新可能会删除此限制并让使用更加灵活。 + +如果您从 ZeRO-2 配置迁移,请注意 `allgather_partitions`、`allgather_bucket_size` 和 `reduce_scatter` 配置参数在 ZeRO-3 中不被使用。如果保留这些配置文件,它们将被忽略。 + +- `sub_group_size`: `1e9` + +`sub_group_size` 控制在优化器步骤期间更新参数的粒度。参数被分组到大小为 `sub_group_size` 的桶中,每个桶逐个更新。在 ZeRO-Infinity 中与 NVMe offload一起使用时,`sub_group_size` 控制了在优化器步骤期间在 NVMe 和 CPU 内存之间移动模型状态的粒度。这可以防止非常大的模型耗尽 CPU 内存。 + +当不使用 NVMe offload时,可以将 `sub_group_size` 保留为其默认值 *1e9*。在以下情况下,您可能需要更改其默认值: + +1. 在优化器步骤中遇到OOM:减小 `sub_group_size` 以减少临时buffers的内存利用 +2. 优化器步骤花费很长时间:增加 `sub_group_size` 以提高由于增加的数据buffers而导致的带宽利用率。 + + +#### ZeRO-0 配置 + +请注意,我们将 Stage 0 和 1 放在最后,因为它们很少使用。 + +Stage 0 禁用了所有类型的分片,只是将 DeepSpeed 作为 DDP 使用。您可以通过以下方式启用: + +```json +{ + "zero_optimization": { + "stage": 0 + } +} +``` + +这将实质上禁用 ZeRO,而无需更改其他任何内容。 + + +#### ZeRO-1 配置 + + +Stage 1 等同于 Stage 2 减去梯度分片。您可以尝试使用以下配置,仅对优化器状态进行分片,以稍微加速: + + +```json +{ + "zero_optimization": { + "stage": 1 + } +} +``` + + + + + +### NVMe 支持 + +ZeRO-Infinity 通过使用 NVMe 内存扩展 GPU 和 CPU 内存,从而允许训练非常大的模型。由于智能分区和平铺算法,在offload期间每个 GPU 需要发送和接收非常小量的数据,因此 NVMe 被证明适用于训练过程中提供更大的总内存池。ZeRO-Infinity 需要启用 ZeRO-3。 + +以下配置示例启用 NVMe 来offload优化器状态和参数: + +```json +{ + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "nvme", + "nvme_path": "/local_nvme", + "pin_memory": true, + "buffer_count": 4, + "fast_init": false + }, + "offload_param": { + "device": "nvme", + "nvme_path": "/local_nvme", + "pin_memory": true, + "buffer_count": 5, + "buffer_size": 1e8, + "max_in_cpu": 1e9 + }, + "aio": { + "block_size": 262144, + "queue_depth": 32, + "thread_count": 1, + "single_submit": false, + "overlap_events": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, +} +``` + +您可以选择将优化器状态和参数都卸载到 NVMe,也可以只选择其中一个,或者都不选择。例如,如果您有大量的 CPU 内存可用,只卸载到 CPU 内存训练速度会更快(提示:"device": "cpu")。 + +这是有关卸载 [优化器状态](https://www.deepspeed.ai/docs/config-json/#optimizer-offloading) 和 [参数](https://www.deepspeed.ai/docs/config-json/#parameter-offloading) 的完整文档。 + +确保您的 `nvme_path` 实际上是一个 NVMe,因为它与普通硬盘或 SSD 一起工作,但速度会慢得多。快速可扩展的训练是根据现代 NVMe 传输速度设计的(截至本文撰写时,可以达到 ~3.5GB/s 读取,~3GB/s 写入的峰值速度)。 + +为了找出最佳的 `aio` 配置块,您必须在目标设置上运行一个基准测试,具体操作请参见[说明](https://github.com/microsoft/DeepSpeed/issues/998)。 + + + + + +#### ZeRO-2 和 ZeRO-3 性能对比 + +如果其他一切都配置相同,ZeRO-3 可能比 ZeRO-2 慢,因为前者除了 ZeRO-2 的操作外,还必须收集模型权重。如果 ZeRO-2 满足您的需求,而且您不需要扩展到几个 GPU 以上,那么您可以选择继续使用它。重要的是要理解,ZeRO-3 以速度为代价实现了更高的可扩展性。 + +可以调整 ZeRO-3 配置使其性能接近 ZeRO-2: + +- 将 `stage3_param_persistence_threshold` 设置为一个非常大的数字 - 大于最大的参数,例如 `6 * hidden_size * hidden_size`。这将保留参数在 GPU 上。 +- 关闭 `offload_params`,因为 ZeRO-2 没有这个选项。 + +即使不更改 `stage3_param_persistence_threshold`,仅将 `offload_params` 关闭,性能可能会显著提高。当然,这些更改将影响您可以训练的模型的大小。因此,这些更改可根据需求帮助您在可扩展性和速度之间进行权衡。 + + + + + +#### ZeRO-2 示例 + +这是一个完整的 ZeRO-2 自动配置文件 `ds_config_zero2.json`: + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + 
"warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "contiguous_gradients": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +``` + +这是一个完整的手动设置的启用所有功能的 ZeRO-2 配置文件。主要是为了让您看到典型的参数值是什么样的,但我们强烈建议使用其中包含多个 `auto` 设置的配置文件。 + +```json +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": 3e-5, + "betas": [0.8, 0.999], + "eps": 1e-8, + "weight_decay": 3e-7 + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 3e-5, + "warmup_num_steps": 500 + } + }, + + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 2e8, + "contiguous_gradients": true + }, + + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +``` + + + +#### ZeRO-3 示例 + +这是一个完整的 ZeRO-3 自动配置文件 `ds_config_zero3.json`: + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +``` + +这是一个完整的 手动设置的启用所有功能的ZeRO-3 配置文件。主要是为了让您看到典型的参数值是什么样的,但我们强烈建议使用其中包含多个 `auto` 设置的配置文件。 + +```json +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": 3e-5, + "betas": [0.8, 0.999], + "eps": 1e-8, + "weight_decay": 3e-7 + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 3e-5, + "warmup_num_steps": 500 + } + }, + + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": 1e6, + 
"stage3_prefetch_bucket_size": 0.94e6, + "stage3_param_persistence_threshold": 1e4, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, + + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +``` + +#### 如何选择最佳性能的ZeRO Stage和 offloads + +了解了这些不同stages后,现在您需要决定使用哪个stage。本节将尝试回答这个问题。 + +通常,以下规则适用: + +- 速度方面(左边比右边快) + + stage 0(DDP) > stage 1 > stage 2 > stage 2 + offload > stage 3 > stage3 + offload + +- GPU内存使用方面(右边比左边更节省GPU内存) + + stage 0(DDP) < stage 1 < stage 2 < stage 2 + offload < stage 3 < stage 3 + offload + +所以,当您希望在尽量使用较少数量的GPU的同时获得最快的执行速度时,可以按照以下步骤进行。我们从最快的方法开始,如果遇到GPU内存溢出,然后切换到下一个速度较慢但使用的GPU内存更少的方法。以此类推。 + +首先,将批量大小设置为1(您始终可以使用梯度累积来获得任何所需的有效批量大小)。 + + +1. 启用 `--gradient_checkpointing 1`(HF Trainer)或直接 `model.gradient_checkpointing_enable()` - 如果发生OOM(Out of Memory),则执行以下步骤。 +2. 首先尝试 ZeRO stage 2。如果发生OOM,则执行以下步骤。 +3. 尝试 ZeRO stage 2 + `offload_optimizer` - 如果发生OOM,则执行以下步骤。 +4. 切换到 ZeRO stage 3 - 如果发生OOM,则执行以下步骤。 +5. 启用 `offload_param` 到 `cpu` - 如果发生OOM,则执行以下步骤。 +6. 启用 `offload_optimizer` 到 `cpu` - 如果发生OOM,则执行以下步骤。 +7. 如果仍然无法适应批量大小为1,请首先检查各种默认值并尽可能降低它们。例如,如果使用 `generate` 并且不使用宽搜索束,将其缩小,因为它会占用大量内存。 +8. 绝对要使用混合半精度而非fp32 - 在Ampere及更高的GPU上使用bf16,在旧的GPU体系结构上使用fp16。 +9. 如果仍然发生OOM,可以添加更多硬件或启用ZeRO-Infinity - 即切换 `offload_param` 和 `offload_optimizer` 到 `nvme`。您需要确保它是非常快的NVMe。作为趣闻,我曾经能够在一个小型GPU上使用BLOOM-176B进行推理,使用了ZeRO-Infinity,尽管速度非常慢。但它奏效了! + +当然,您也可以按相反的顺序进行这些步骤,从最节省GPU内存的配置开始,然后逐步反向进行,或者尝试进行二分法。 + +一旦您的批量大小为1不会导致OOM,就测量您的有效吞吐量。 + +接下来尝试将批量大小增加到尽可能大,因为批量大小越大,GPU的效率越高,特别是在它们乘法运算的矩阵很大时。 + +现在性能优化游戏开始了。您可以关闭一些offload特性,或者降低ZeRO stage,并增加/减少批量大小,再次测量有效吞吐量。反复尝试,直到满意为止。 + +不要花费太多时间,但如果您即将开始一个为期3个月的训练 - 请花几天时间找到吞吐量方面最有效的设置。这样您的训练成本将最低,而且您会更快地完成训练。在当前快节奏的机器学习世界中,如果您花费一个额外的月份来训练某样东西,你很可能会错过一个黄金机会。当然,这只是我分享的一种观察,我并不是在催促你。在开始训练BLOOM-176B之前,我花了2天时间进行这个过程,成功将吞吐量从90 TFLOPs提高到150 TFLOPs!这一努力为我们节省了一个多月的训练时间。 + +这些注释主要是为训练模式编写的,但它们在推理中也应该大部分适用。例如,在推理中,Gradient Checkpointing 是无用的,因为它只在训练过程中有用。此外,我们发现,如果你正在进行多GPU推理并且不使用 [DeepSpeed-Inference](https://www.deepspeed.ai/tutorials/inference-tutorial/),[Accelerate](https://huggingface.co/blog/bloom-inference-pytorch-scripts) 应该提供更优越的性能。 + +其他与性能相关的快速注释: +- 如果您从头开始训练某个模型,请尽量确保张量的形状可以被16整除(例如隐藏层大小)。对于批量大小,至少尝试可被2整除。如果您想从GPU中挤取更高性能,还有一些硬件特定的[wave和tile量化](https://developer.nvidia.com/blog/optimizing-gpu-performance-tensor-cores/)的可整除性。 + + + +### Activation Checkpointing 或 Gradient Checkpointing + +Activation Checkpointing和Gradient Checkpointing是指相同方法的两个不同术语。这确实让人感到困惑,但事实就是这样。 + +Gradient Checkpointing允许通过牺牲速度来换取GPU内存,这要么使您能够克服GPU内存溢出,要么增加批量大小来获得更好的性能。 + +HF Transformers 模型对DeepSpeed的Activation Checkpointing一无所知,因此如果尝试在DeepSpeed配置文件中启用该功能,什么都不会发生。 + +因此,您有两种方法可以利用这个非常有益的功能: + +1. 如果您想使用 HF Transformers 模型,你可以使用 `model.gradient_checkpointing_enable()` 或在 HF Trainer 中使用 `--gradient_checkpointing`,它会自动为您启用这个功能。在这里使用了 `torch.utils.checkpoint`。 +2. 
如果您编写自己的模型并希望使用DeepSpeed的Activation Checkpointing,可以使用[规定的API](https://deepspeed.readthedocs.io/en/latest/activation-checkpointing.html)。您还可以使用 HF Transformers 的模型代码,将 `torch.utils.checkpoint` 替换为 DeepSpeed 的API。后者更灵活,因为它允许您将前向激活值卸载到CPU内存,而不是重新计算它们。 + + +### Optimizer 和 Scheduler + +只要你不启用 `offload_optimizer`,您可以混合使用DeepSpeed和HuggingFace的调度器和优化器,但有一个例外,即不要使用HuggingFace调度器和DeepSpeed优化器的组合: + + +| Combos | HF Scheduler | DS Scheduler | +|:-------------|:-------------|:-------------| +| HF Optimizer | Yes | Yes | +| DS Optimizer | No | Yes | + +在启用 `offload_optimizer` 的情况下,可以使用非DeepSpeed优化器,只要该优化器具有CPU和GPU的实现(除了LAMB)。 + + + +#### Optimizer + +DeepSpeed的主要优化器包括Adam、AdamW、OneBitAdam和Lamb。这些优化器已经与ZeRO进行了彻底的测试,因此建议使用它们。然而,也可以导入`torch`中的其他优化器。完整的文档在[这里](https://www.deepspeed.ai/docs/config-json/#optimizer-parameters)。 + +如果在配置文件中不配置`optimizer`条目,[`Trainer`] 将自动将其设置为 `AdamW`,并使用提供的值或以下命令行参数的默认值:`--learning_rate`、`--adam_beta1`、`--adam_beta2`、`--adam_epsilon` 和 `--weight_decay`。 + +以下是`AdamW` 的自动配置示例: + +```json +{ + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + } +} +``` + +请注意,命令行参数将设置配置文件中的值。这是为了有一个明确的值来源,并避免在不同地方设置学习率等值时难以找到的错误。命令行参数配置高于其他。被覆盖的值包括: + +- `lr` 的值为 `--learning_rate` +- `betas` 的值为 `--adam_beta1 --adam_beta2` +- `eps` 的值为 `--adam_epsilon` +- `weight_decay` 的值为 `--weight_decay` + +因此,请记住在命令行上调整共享的超参数。 + +您也可以显式地设置这些值: + +```json +{ + "optimizer": { + "type": "AdamW", + "params": { + "lr": 0.001, + "betas": [0.8, 0.999], + "eps": 1e-8, + "weight_decay": 3e-7 + } + } +} +``` + +但在这种情况下,您需要自己同步[`Trainer`]命令行参数和DeepSpeed配置。 + +如果您想使用上面未列出的其他优化器,您将不得不将其添加到顶层配置中。 + +```json +{ + "zero_allow_untested_optimizer": true +} +``` + +类似于 `AdamW`,您可以配置其他官方支持的优化器。只是记住这些可能有不同的配置值。例如,对于Adam,您可能需要将 `weight_decay` 设置在 `0.01` 左右。 + +此外,当与DeepSpeed的CPU Adam优化器一起使用时,offload的效果最好。如果您想在offload时使用不同的优化器,自 `deepspeed==0.8.3` 起,您还需要添加: + + +```json +{ + "zero_force_ds_cpu_optimizer": false +} +``` +到顶层配置中。 + + + + + +#### Scheduler + +DeepSpeed支持`LRRangeTest`、`OneCycle`、`WarmupLR`和`WarmupDecayLR`学习率调度器。完整文档在[这里](https://www.deepspeed.ai/docs/config-json/#scheduler-parameters)。 + +以下是🤗 Transformers 和 DeepSpeed 之间的调度器重叠部分: + +- 通过 `--lr_scheduler_type constant_with_warmup` 实现 `WarmupLR` +- 通过 `--lr_scheduler_type linear` 实现 `WarmupDecayLR`。这也是 `--lr_scheduler_type` 的默认值,因此,如果不配置调度器,这将是默认配置的调度器。 + +如果在配置文件中不配置 `scheduler` 条目,[`Trainer`] 将使用 `--lr_scheduler_type`、`--learning_rate` 和 `--warmup_steps` 或 `--warmup_ratio` 的值来配置其🤗 Transformers 版本。 + +以下是 `WarmupLR` 的自动配置示例: + +```json +{ + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + } +} +``` + +由于使用了 *"auto"*,[`Trainer`] 的参数将在配置文件中设置正确的值。这是为了有一个明确的值来源,并避免在不同地方设置学习率等值时难以找到的错误。命令行配置高于其他。被设置的值包括: + +- `warmup_min_lr` 的值为 `0`。 +- `warmup_max_lr` 的值为 `--learning_rate`。 +- `warmup_num_steps` 的值为 `--warmup_steps`(如果提供)。否则,将使用 `--warmup_ratio` 乘以训练步骤的数量,并四舍五入。 +- `total_num_steps` 的值为 `--max_steps` 或者如果没有提供,将在运行时根据环境、数据集的大小和其他命令行参数(对于 `WarmupDecayLR` 来说需要)自动推导。 + +当然,您可以接管任何或所有的配置值,并自行设置这些值: + +```json +{ + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 0.001, + "warmup_num_steps": 1000 + } + } +} +``` + +但在这种情况下,您需要自己同步[`Trainer`]命令行参数和DeepSpeed配置。 + +例如,对于 `WarmupDecayLR`,您可以使用以下条目: + +```json +{ + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "last_batch_iteration": -1, + "total_num_steps": "auto", + "warmup_min_lr": "auto", + 
"warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + } +} +``` + +然后,`total_num_steps`、`warmup_max_lr`、`warmup_num_steps` 和 `total_num_steps` 将在加载时设置。 + + + + +### fp32精度 + +DeepSpeed支持完整的fp32和fp16混合精度。 + +由于fp16混合精度具有更小的内存需求和更快的速度,唯一不使用它的时候是当您使用的模型在这种训练模式下表现不佳时。通常,当模型没有在fp16混合精度下进行预训练时(例如,bf16预训练模型经常出现这种情况),会出现这种情况。这样的模型可能会发生溢出或下溢,导致 `NaN` 损失。如果是这种情况,那么您将希望使用完整的fp32模式,通过显式禁用默认启用的fp16混合精度模式: + +```json +{ + "fp16": { + "enabled": false, + } +} +``` + +如果您使用基于Ampere架构的GPU,PyTorch版本1.7及更高版本将自动切换到使用更高效的tf32格式进行一些操作,但结果仍将以fp32格式呈现。有关详细信息和基准测试,请参见[TensorFloat-32(TF32) on Ampere devices](https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)。如果出于某种原因您不希望使用它,该文档包括有关如何禁用此自动转换的说明。 + +在🤗 Trainer中,你可以使用 `--tf32` 来启用它,或使用 `--tf32 0` 或 `--no_tf32` 来禁用它。默认情况下,使用PyTorch的默认设置。 + + + + + +### 自动混合精度 + +您可以使用自动混合精度,可以选择使用类似 PyTorch AMP 的方式,也可以选择使用类似 Apex 的方式: + +### fp16 + +要配置PyTorch AMP-like 的 fp16(float16) 模式,请设置: + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + } +} +``` + +并且,[`Trainer`]将根据`args.fp16_backend`的值自动启用或禁用它。其余的配置值由您决定。 + +当传递`--fp16 --fp16_backend amp`或`--fp16_full_eval`命令行参数时,此模式将被启用。 + +您也可以显式地启用/禁用此模式: + +```json +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + } +} +``` + +但是之后您需要自己同步[`Trainer`]命令行参数和DeepSpeed配置。 + +以下是[相关文档](https://www.deepspeed.ai/docs/config-json/#fp16-training-options) + + +### bf16 + +如果需要使用bfloat16而不是fp16,那么可以使用以下配置部分: + +```json +{ + "bf16": { + "enabled": "auto" + } +} +``` + +bf16具有与fp32相同的动态范围,因此不需要损失缩放。 + +当传递`--bf16`或`--bf16_full_eval`命令行参数时,启用此模式。 + +您还可以显式地启用/禁用此模式: + +```json +{ + "bf16": { + "enabled": true + } +} +``` + + + +在`deepspeed==0.6.0`版本中,bf16支持是新的实验性功能。 + +如果您启用了bf16来进行[梯度累积](#gradient-accumulation),您需要意识到它会以bf16累积梯度,这可能不是您想要的,因为这种格式的低精度可能会导致lossy accumulation。 + +修复这个问题的工作正在努力进行,同时提供了使用更高精度的`dtype`(fp16或fp32)的选项。 + + + + +### NCCL集合 + +在训练过程中,有两种数据类型:`dtype`和用于通信收集操作的`dtype`,如各种归约和收集/分散操作。 + +所有的gather/scatter操作都是在数据相同的`dtype`中执行的,所以如果您正在使用bf16的训练模式,那么它将在bf16中进行gather操作 - gather操作是非损失性的。 + +各种reduce操作可能会是非常损失性的,例如当梯度在多个gpu上平均时,如果通信是在fp16或bf16中进行的,那么结果可能是有损失性的 - 因为当在一个低精度中添加多个数字时,结果可能不是精确的。更糟糕的是,bf16比fp16具有更低的精度。通常,当平均梯度时,损失最小,这些梯度通常非常小。因此,对于半精度训练,默认情况下,fp16被用作reduction操作的默认值。但是,您可以完全控制这个功能,如果你选择的话,您可以添加一个小的开销,并确保reductions将使用fp32作为累积数据类型,只有当结果准备好时,它才会降级到您在训练中使用的半精度`dtype`。 + +要覆盖默认设置,您只需添加一个新的配置条目: + +```json +{ + "communication_data_type": "fp32" +} +``` + +根据这个信息,有效的值包括"fp16"、"bfp16"和"fp32"。 + +注意:在stage zero 3中,bf16通信数据类型存在一个bug,该问题已在`deepspeed==0.8.1`版本中得到修复。 + + +### apex + +配置apex AMP-like模式: + +```json +"amp": { + "enabled": "auto", + "opt_level": "auto" +} +``` + +并且,[`Trainer`]将根据`args.fp16_backend`和`args.fp16_opt_level`的值自动配置它。 + +当传递`--fp16 --fp16_backend apex --fp16_opt_level 01`命令行参数时,此模式将被启用。 + +您还可以显式配置此模式: + +```json +{ + "amp": { + "enabled": true, + "opt_level": "O1" + } +} +``` + +但是,您需要自己同步[`Trainer`]命令行参数和DeepSpeed配置。 + +这里是[文档](https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options) + + + + +### Batch Size + +配置batch size可以使用如下参数: + +```json +{ + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto" +} +``` + +并且,[`Trainer`]将自动将`train_micro_batch_size_per_gpu`设置为`args.per_device_train_batch_size`的值,并将`train_batch_size`设置为`args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps`。 + 
+您也可以显式设置这些值: + +```json +{ + "train_batch_size": 12, + "train_micro_batch_size_per_gpu": 4 +} +``` + +但是,您需要自己同步[`Trainer`]命令行参数和DeepSpeed配置。 + + + + +### Gradient Accumulation + +配置gradient accumulation设置如下: + +```json +{ + "gradient_accumulation_steps": "auto" +} +``` + +并且,[`Trainer`]将自动将其设置为`args.gradient_accumulation_steps`的值。 + +您也可以显式设置这个值: + +```json +{ + "gradient_accumulation_steps": 3 +} +``` + +但是,您需要自己同步[`Trainer`]命令行参数和DeepSpeed配置。 + + + + +### Gradient Clipping + +配置gradient clipping如下: + +```json +{ + "gradient_clipping": "auto" +} +``` + +并且,[`Trainer`]将自动将其设置为`args.max_grad_norm`的值。 + +您也可以显式设置这个值: + +```json +{ + "gradient_clipping": 1.0 +} +``` + +但是,您需要自己同步[`Trainer`]命令行参数和DeepSpeed配置。 + + + + + +### 获取模型权重 + +只要您继续使用DeepSpeed进行训练和恢复,您就不需要担心任何事情。DeepSpeed在其自定义检查点优化器文件中存储fp32主权重,这些文件是`global_step*/*optim_states.pt`(这是glob模式),并保存在正常的checkpoint下。 + +**FP16权重:** + +当模型保存在ZeRO-2下时,您最终会得到一个包含模型权重的普通`pytorch_model.bin`文件,但它们只是权重的fp16版本。 + +在ZeRO-3下,事情要复杂得多,因为模型权重分布在多个GPU上,因此需要`"stage3_gather_16bit_weights_on_model_save": true`才能让`Trainer`保存fp16版本的权重。如果这个设置是`False`,`pytorch_model.bin`将不会被创建。这是因为默认情况下,DeepSpeed的`state_dict`包含一个占位符而不是实际的权重。如果我们保存这个`state_dict`,就无法再加载它了。 + + +```json +{ + "zero_optimization": { + "stage3_gather_16bit_weights_on_model_save": true + } +} +``` + +**FP32权重:** + +虽然fp16权重适合恢复训练,但如果您完成了模型的微调并希望将其上传到[models hub](https://huggingface.co/models)或传递给其他人,您很可能想要获取fp32权重。这最好不要在训练期间完成,因为这需要大量内存,因此最好在训练完成后离线进行。但是,如果需要并且有充足的空闲CPU内存,可以在相同的训练脚本中完成。以下部分将讨论这两种方法。 + +**实时FP32权重恢复:** + +如果您的模型很大,并且在训练结束时几乎没有剩余的空闲CPU内存,这种方法可能不起作用。 + +如果您至少保存了一个检查点,并且想要使用最新的一个,可以按照以下步骤操作: + +```python +from transformers.trainer_utils import get_last_checkpoint +from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + +checkpoint_dir = get_last_checkpoint(trainer.args.output_dir) +fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) +``` + +如果您在使用`--load_best_model_at_end`类:*~transformers.TrainingArguments*参数(用于跟踪最佳 +检查点),那么你可以首先显式地保存最终模型,然后再执行相同的操作: + +```python +from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + +checkpoint_dir = os.path.join(trainer.args.output_dir, "checkpoint-final") +trainer.deepspeed.save_checkpoint(checkpoint_dir) +fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) +``` + + + +注意,一旦运行了`load_state_dict_from_zero_checkpoint`,该模型将不再可以在相同的应用程序的DeepSpeed上下文中使用。也就是说,您需要重新初始化deepspeed引擎,因为`model.load_state_dict(state_dict)`会从其中移除所有的DeepSpeed相关点。所以您只能训练结束时这样做。 + + + +当然,您不必使用类:*~transformers.Trainer*,您可以根据你的需求调整上面的示例。 + +如果您出于某种原因想要更多的优化,您也可以提取权重的fp32 `state_dict`并按照以下示例进行操作: + +```python +from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + +state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu +model = model.cpu() +model.load_state_dict(state_dict) +``` + +**离线FP32权重恢复:** + +DeepSpeed会创建一个特殊的转换脚本`zero_to_fp32.py`,并将其放置在checkpoint文件夹的顶层。使用此脚本,您可以在任何时候提取权重。该脚本是独立的,您不再需要配置文件或`Trainer`来执行提取操作。 + +假设您的checkpoint文件夹如下所示: + +```bash +$ ls -l output_dir/checkpoint-1/ +-rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json +drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ +-rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest +-rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt +-rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin +-rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt +-rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json +-rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model 
+-rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json +-rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json +-rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin +-rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py* +``` + +在这个例子中,只有一个DeepSpeed检查点子文件夹*global_step1*。因此,要重构fp32权重,只需运行: + +```bash +python zero_to_fp32.py . pytorch_model.bin +``` + +这就是它。`pytorch_model.bin`现在将包含从多个GPUs合并的完整的fp32模型权重。 + +该脚本将自动能够处理ZeRO-2或ZeRO-3 checkpoint。 + +`python zero_to_fp32.py -h`将为您提供使用细节。 + +该脚本将通过文件`latest`的内容自动发现deepspeed子文件夹,在当前示例中,它将包含`global_step1`。 + +注意:目前该脚本需要2倍于最终fp32模型权重的通用内存。 + + +### ZeRO-3 和 Infinity Nuances + +ZeRO-3与ZeRO-2有很大的不同,主要是因为它的参数分片功能。 + +ZeRO-Infinity进一步扩展了ZeRO-3,以支持NVMe内存和其他速度和可扩展性改进。 + +尽管所有努力都是为了在不需要对模型进行任何特殊更改的情况下就能正常运行,但在某些情况下,您可能需要以下信息。 + + +#### 构建大模型 + +DeepSpeed/ZeRO-3可以处理参数量达到数万亿的模型,这些模型可能无法适应现有的内存。在这种情况下,如果您还是希望初始化更快地发生,可以使用*deepspeed.zero.Init()*上下文管理器(也是一个函数装饰器)来初始化模型,如下所示: + +```python +from transformers import T5ForConditionalGeneration, T5Config +import deepspeed + +with deepspeed.zero.Init(): + config = T5Config.from_pretrained("t5-small") + model = T5ForConditionalGeneration(config) +``` + +如您所见,这会为您随机初始化一个模型。 + +如果您想使用预训练模型,`model_class.from_pretrained`将在`is_deepspeed_zero3_enabled()`返回`True`的情况下激活此功能,目前这是通过传递的DeepSpeed配置文件中的ZeRO-3配置部分设置的。因此,在调用`from_pretrained`之前,您必须创建**TrainingArguments**对象。以下是可能的顺序示例: + +```python +from transformers import AutoModel, Trainer, TrainingArguments + +training_args = TrainingArguments(..., deepspeed=ds_config) +model = AutoModel.from_pretrained("t5-small") +trainer = Trainer(model=model, args=training_args, ...) +``` + +如果您使用的是官方示例脚本,并且命令行参数中包含`--deepspeed ds_config.json`且启用了ZeRO-3配置,那么一切都已经为您准备好了,因为这是示例脚本的编写方式。 + +注意:如果模型的fp16权重无法适应单个GPU的内存,则必须使用此功能。 + +有关此方法和其他相关功能的完整详细信息,请参阅[构建大模型](https://deepspeed.readthedocs.io/en/latest/zero3.html#constructing-massive-models)。 + +此外,在加载fp16预训练模型时,您希望`from_pretrained`使用`torch_dtype=torch.float16`。详情请参见[from_pretrained-torch-dtype](#from_pretrained-torch-dtype)。 + + +#### 参数收集 + +在多个GPU上使用ZeRO-3时,没有一个GPU拥有所有参数,除非它是当前执行层的参数。因此,如果您需要一次访问所有层的所有参数,有一个特定的方法可以实现。 +您可能不需要它,但如果您需要,请参考[参数收集](https://deepspeed.readthedocs.io/en/latest/zero3.html#manual-parameter-coordination)。 + +然而,我们在多个地方确实使用了它,其中一个例子是在`from_pretrained`中加载预训练模型权重。我们一次加载一层,然后立即将其分区到所有参与的GPU上,因为对于非常大的模型,无法在一个GPU上一次性加载并将其分布到多个GPU上,因为内存限制。 + +此外,在ZeRO-3下,如果您编写自己的代码并遇到看起来像这样的模型参数权重: + +```python +tensor([1.0], device="cuda:0", dtype=torch.float16, requires_grad=True) +``` + +强调`tensor([1.])`,或者如果您遇到一个错误,它说参数的大小是`1`,而不是某个更大的多维形状,这意味着参数被划分了,你看到的是一个ZeRO-3占位符。 + + + + + + +### ZeRO 推理 + +"ZeRO 推断" 使用与 "ZeRO-3 训练" 相同的配置。您只需要去掉优化器和调度器部分。实际上,如果您希望与训练共享相同的配置文件,您可以将它们保留在配置文件中,它们只会被忽略。 + +您只需要传递通常的[`TrainingArguments`]参数。例如: + +```bash +deepspeed --num_gpus=2 your_program.py --do_eval --deepspeed ds_config.json +``` + +唯一的重要事情是您需要使用ZeRO-3配置,因为ZeRO-2对于推理没有任何优势,因为只有ZeRO-3才对参数进行分片,而ZeRO-1则对梯度和优化器状态进行分片。 + +以下是在DeepSpeed下运行`run_translation.py`启用所有可用GPU的示例: + +```bash +deepspeed examples/pytorch/translation/run_translation.py \ +--deepspeed tests/deepspeed/ds_config_zero3.json \ +--model_name_or_path t5-small --output_dir output_dir \ +--do_eval --max_eval_samples 50 --warmup_steps 50 \ +--max_source_length 128 --val_max_target_length 128 \ +--overwrite_output_dir --per_device_eval_batch_size 4 \ +--predict_with_generate --dataset_config "ro-en" --fp16 \ +--source_lang en --target_lang ro --dataset_name wmt16 \ +--source_prefix "translate English to Romanian: " +``` + +由于在推理阶段,优化器状态和梯度不需要额外的大量内存,您应该能够将更大的批次和/或序列长度放到相同的硬件上。 + 
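
如果你不想使用命令行示例脚本,也可以在自己的代码中通过 [`Trainer`] 做纯评估。下面是一个最小的示意(其中的评估数据是临时构造的演示数据,配置文件路径也是假设的,请换成你自己的、已去掉 optimizer/scheduler 部分的 ZeRO-3 配置;该脚本仍需通过 `deepspeed` 启动器运行,而不是直接用 `python` 运行):

```python
import torch
from torch.utils.data import Dataset

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, Trainer, TrainingArguments

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# 构造一个极小的评估集,仅用于演示
sources = ["translate English to Romanian: The weather is nice today."]
targets = ["Vremea este frumoasă astăzi."]
inputs = tokenizer(sources, padding=True, return_tensors="pt")
labels = tokenizer(text_target=targets, padding=True, return_tensors="pt").input_ids


class TinyEvalDataset(Dataset):
    def __len__(self):
        return len(sources)

    def __getitem__(self, idx):
        return {
            "input_ids": inputs.input_ids[idx],
            "attention_mask": inputs.attention_mask[idx],
            "labels": labels[idx],
        }


training_args = TrainingArguments(
    output_dir="output_dir",
    do_eval=True,
    per_device_eval_batch_size=4,
    deepspeed="tests/deepspeed/ds_config_zero3.json",  # 假设的路径:去掉了 optimizer/scheduler 的 ZeRO-3 配置
)

trainer = Trainer(model=model, args=training_args, eval_dataset=TinyEvalDataset())
print(trainer.evaluate())
```

与上面的命令行示例一样,这只是在相同硬件上验证"推理阶段可以放下更大批量"的一个起点,你可以在此基础上逐步增大 `per_device_eval_batch_size`。
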
+此外,DeepSpeed目前正在开发一个名为Deepspeed-Inference的相关产品,它与ZeRO技术无关,而是使用张量并行来扩展无法适应单个GPU的模型。这是一个正在进行的工作,一旦该产品完成,我们将提供集成。 + + +### 内存要求 + +由于 DeepSpeed ZeRO 可以将内存卸载到 CPU(和 NVMe),该框架提供了一些工具,允许根据使用的 GPU 数量告知将需要多少 CPU 和 GPU 内存。 + +让我们估计在单个GPU上微调"bigscience/T0_3B"所需的内存: + +```bash +$ python -c 'from transformers import AutoModel; \ +from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ +model = AutoModel.from_pretrained("bigscience/T0_3B"); \ +estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)' +[...] +Estimated memory needed for params, optim states and gradients for a: +HW: Setup with 1 node, 1 GPU per node. +SW: Model with 2783M total params, 65M largest layer params. + per CPU | per GPU | Options + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 + 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 + 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 + 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 + 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0 +``` + +因此,您可以将模型拟合在单个80GB的GPU上,不进行CPU offload,或者使用微小的8GB GPU,但需要约60GB的CPU内存。(请注意,这仅是参数、优化器状态和梯度所需的内存 - 您还需要为CUDA内核、激活值和临时变量分配更多的内存。) + +然后,这是成本与速度的权衡。购买/租用较小的 GPU(或较少的 GPU,因为您可以使用多个 GPU 进行 Deepspeed ZeRO)。但这样会更慢,因此即使您不关心完成某项任务的速度,减速也直接影响 GPU 使用的持续时间,从而导致更大的成本。因此,请进行实验并比较哪种方法效果最好。 + +如果您有足够的GPU内存,请确保禁用CPU/NVMe卸载,因为这会使所有操作更快。 + +例如,让我们重复相同的操作,使用2个GPU: + +```bash +$ python -c 'from transformers import AutoModel; \ +from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ +model = AutoModel.from_pretrained("bigscience/T0_3B"); \ +estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)' +[...] +Estimated memory needed for params, optim states and gradients for a: +HW: Setup with 1 node, 2 GPUs per node. +SW: Model with 2783M total params, 65M largest layer params. + per CPU | per GPU | Options + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 + 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 + 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=1 + 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=0 + 0.74GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=1 + 31.11GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=0 + +``` + +所以,您需要2个32GB或更高的GPU,且不进行CPU卸载。 + +如需了解更多信息,请参阅[内存估算器](https://deepspeed.readthedocs.io/en/latest/memory.html)。 + + + +### 归档Issues + +请按照以下步骤提交问题,以便我们能够迅速找到问题并帮助您解除工作阻塞。 + +在您的报告中,请始终包括以下内容: + +1. 完整的Deepspeed配置文件 +2. 如果使用了[`Trainer`],则包括命令行参数;如果自己编写了Trainer设置,则包括[`TrainingArguments`]参数。请不要导出[`TrainingArguments`],因为它有几十个与问题无关的条目。 +3. 输出: + + ```bash + python -c 'import torch; print(f"torch: {torch.__version__}")' + python -c 'import transformers; print(f"transformers: {transformers.__version__}")' + python -c 'import deepspeed; print(f"deepspeed: {deepspeed.__version__}")' + ``` + +4. 如果可能,请包含一个Google Colab notebook链接,我们可以使用它来重现问题。您可以使用这个[notebook](https://github.com/stas00/porting/blob/master/transformers/deepspeed/DeepSpeed_on_colab_CLI.ipynb)作为起点。 +5. 除非不可能,否则请始终使用标准数据集,而不是自定义数据集。 +6. 
如果可能,尝试使用现有[示例](https://github.com/huggingface/transformers/tree/main/examples/pytorch)之一来重现问题。 + +需要考虑的因素: + +- Deepspeed通常不是问题的原因。 + + 一些已提交的问题被证明与Deepspeed无关。也就是说,一旦将Deepspeed从设置中移除,问题仍然存在。 + + 因此,如果问题明显与DeepSpeed相关,例如您可以看到有一个异常并且可以看到DeepSpeed模块涉及其中,请先重新测试没有DeepSpeed的设置。只有当问题仍然存在时,才向Deepspeed提供所有必需的细节。 + +- 如果您明确问题是在Deepspeed核心中而不是集成部分,请直接向[Deepspeed](https://github.com/microsoft/DeepSpeed/)提交问题。如果您不确定,请不要担心,无论使用哪个issue跟踪问题都可以,一旦您发布问题,我们会弄清楚并将其重定向到另一个issue跟踪(如果需要的话)。 + + + +### Troubleshooting + +#### 启动时`deepspeed`进程被终止,没有回溯 + +如果启动时`deepspeed`进程被终止,没有回溯,这通常意味着程序尝试分配的CPU内存超过了系统的限制或进程被允许分配的内存,操作系统内核杀死了该进程。这是因为您的配置文件很可能将`offload_optimizer`或`offload_param`或两者都配置为卸载到`cpu`。如果您有NVMe,可以尝试在ZeRO-3下卸载到NVMe。这里是如何[估计特定模型所需的内存](https://deepspeed.readthedocs.io/en/latest/memory.html)。 + +#### 训练和/或评估/预测loss为`NaN` + +这种情况通常发生在使用bf16混合精度模式预训练的模型试图在fp16(带或不带混合精度)下使用时。大多数在TPU上训练的模型以及由谷歌发布的模型都属于这个类别(例如,几乎所有基于t5的模型)。在这种情况下,解决方案是要么使用fp32,要么在支持的情况下使用bf16(如TPU、Ampere GPU或更新的版本)。 + +另一个问题可能与使用fp16有关。当您配置此部分时: + +```json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + } +} +``` + +并且您在日志中看到Deepspeed报告`OVERFLOW`如下 + +``` +0%| | 0/189 [00:00=4.28`开始,如果没有明确指定`synced_gpus`,检测到这些条件后它将自动设置为`True`。但如果您需要覆盖`synced_gpus`的值,仍然可以这样做。 + + + +## 测试 DeepSpeed 集成 + +如果您提交了一个涉及DeepSpeed集成的PR,请注意我们的CircleCI PR CI设置没有GPU,因此我们只在另一个CI夜间运行需要GPU的测试。因此,如果您在PR中获得绿色的CI报告,并不意味着DeepSpeed测试通过。 + +要运行DeepSpeed测试,请至少运行以下命令: + +``` +RUN_SLOW=1 pytest tests/deepspeed/test_deepspeed.py +``` + +如果你更改了任何模型或PyTorch示例代码,请同时运行多模型测试。以下将运行所有DeepSpeed测试: + +``` +RUN_SLOW=1 pytest tests/deepspeed +``` + +## 主要的DeepSpeed资源 + +- [项目GitHub](https://github.com/microsoft/deepspeed) +- [使用文档](https://www.deepspeed.ai/getting-started/) +- [API文档](https://deepspeed.readthedocs.io/en/latest/index.html) +- [博客文章](https://www.microsoft.com/en-us/research/search/?q=deepspeed) + +论文: + +- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054) +- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840) +- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857) + +最后,请记住,HuggingFace [`Trainer`]仅集成了DeepSpeed,因此如果您在使用DeepSpeed时遇到任何问题或疑问,请在[DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues)上提交一个issue。 From 25b0f2033ba23e354ef2f665764248fcbb3f49ba Mon Sep 17 00:00:00 2001 From: Omar Sanseviero Date: Sat, 18 Nov 2023 18:22:52 +0100 Subject: [PATCH 212/268] Fix broken distilbert url (#27579) --- docs/source/en/model_doc/distilbert.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/model_doc/distilbert.md b/docs/source/en/model_doc/distilbert.md index 5742380c517b..233a182a553f 100644 --- a/docs/source/en/model_doc/distilbert.md +++ b/docs/source/en/model_doc/distilbert.md @@ -32,7 +32,7 @@ rendered properly in your Markdown viewer. The DistilBERT model was proposed in the blog post [Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT](https://medium.com/huggingface/distilbert-8cf3380435b5), and the paper [DistilBERT, a -distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/papers/1910.01108). DistilBERT is a +distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108). DistilBERT is a small, fast, cheap and light Transformer model trained by distilling BERT base. 
It has 40% less parameters than *bert-base-uncased*, runs 60% faster while preserving over 95% of BERT's performances as measured on the GLUE language understanding benchmark. From dc68a39c8111217683bf49a4912d0c9018bab33d Mon Sep 17 00:00:00 2001 From: Rafael Padilla <31217453+rafaelpadilla@users.noreply.github.com> Date: Sun, 19 Nov 2023 12:42:01 -0300 Subject: [PATCH 213/268] Adding leaky relu in dict ACT2CLS (#27574) Co-authored-by: Rafael Padilla --- src/transformers/activations.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/transformers/activations.py b/src/transformers/activations.py index be26825f4bad..2355fb5fed67 100644 --- a/src/transformers/activations.py +++ b/src/transformers/activations.py @@ -206,6 +206,7 @@ def __getitem__(self, key): "gelu_pytorch_tanh": PytorchGELUTanh, "gelu_accurate": AccurateGELUActivation, "laplace": LaplaceActivation, + "leaky_relu": nn.LeakyReLU, "linear": LinearActivation, "mish": MishActivation, "quick_gelu": QuickGELUActivation, From dbf7bfafa7d9a0e5d7963c5d15350ea6b34060ab Mon Sep 17 00:00:00 2001 From: Joel Tang <44188317+jtang98@users.noreply.github.com> Date: Mon, 20 Nov 2023 07:56:18 +0100 Subject: [PATCH 214/268] Fix idx2sym not loaded from pretrained vocab file in Transformer XL (#27589) * Load idx2sym from pretrained vocab file in Transformer XL When loading vocab file from a pretrained tokenizer for Transformer XL, although the pickled vocabulary file contains a idx2sym key, it isn't loaded, because it is discarded as the empty list already exists as an attribute. Solution is to explicitly take it into account, just like for sym2idx. * ran make style --- .../transfo_xl/tokenization_transfo_xl.py | 2 +- .../test_tokenization_transfo_xl.py | 26 +++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py index 8a2aba92f7a8..eaa5ecee4ba3 100644 --- a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py +++ b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py @@ -223,7 +223,7 @@ def __init__( if vocab_dict is not None: for key, value in vocab_dict.items(): - if key not in self.__dict__ or key == "sym2idx": + if key not in self.__dict__ or key in ["sym2idx", "idx2sym"]: self.__dict__[key] = value elif vocab_file is not None: self.build_vocab() diff --git a/tests/models/transfo_xl/test_tokenization_transfo_xl.py b/tests/models/transfo_xl/test_tokenization_transfo_xl.py index 15b712ff3784..d8835a164c61 100644 --- a/tests/models/transfo_xl/test_tokenization_transfo_xl.py +++ b/tests/models/transfo_xl/test_tokenization_transfo_xl.py @@ -15,7 +15,9 @@ import os +import pickle import unittest +from collections import Counter, OrderedDict from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer @@ -47,6 +49,25 @@ def setUp(self): with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) + saved_dict = { + "eos_idx": 0, + "min_freq": 0, + "vocab_file": None, + "counter": Counter(["welcome home"]), + "sym2idx": OrderedDict([("", 0), ("welcome", 1), ("home", 2)]), + "delimiter": None, + "idx2sym": ["", "welcome", "home"], + "max_size": None, + "lower_case": False, + "special": [""], + } + self.pretrained_vocab_file = os.path.join( + self.tmpdirname, "mock_folder", VOCAB_FILES_NAMES["pretrained_vocab_file"] + ) + 
os.makedirs(os.path.dirname(self.pretrained_vocab_file), exist_ok=True) + with open(self.pretrained_vocab_file, "wb") as f: + pickle.dump(saved_dict, f) + def get_tokenizer(self, **kwargs): kwargs["lower_case"] = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs) @@ -128,3 +149,8 @@ def test_move_added_token(self): # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1"), [1]) self.assertEqual(tokenizer.decode([1]), "new1") + + def test_from_pretrained_vocab_file(self): + tokenizer = TransfoXLTokenizer.from_pretrained(os.path.join(self.tmpdirname, "mock_folder")) + sentence = "welcome home" + self.assertEqual(tokenizer.decode(tokenizer.encode(sentence)), sentence) From ee292615559834ae2ba5b3aae3abe3f54bc81ac2 Mon Sep 17 00:00:00 2001 From: Xabier de Zuazo Date: Mon, 20 Nov 2023 08:08:40 +0100 Subject: [PATCH 215/268] Add `convert_hf_to_openai.py` script to Whisper documentation resources (#27590) Add `convert_hf_to_openai.py` script to Whisper documentation resources. --- docs/source/en/model_doc/whisper.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/source/en/model_doc/whisper.md b/docs/source/en/model_doc/whisper.md index 8d73a5655fdf..4a5738cf469c 100644 --- a/docs/source/en/model_doc/whisper.md +++ b/docs/source/en/model_doc/whisper.md @@ -75,6 +75,19 @@ Here is a step-by-step guide to transcribing an audio sample using a pre-trained ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' ``` +## Resources + +A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Whisper. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. + +- A fork with a script to [convert a Whisper model in Hugging Face format to OpenAI format](https://github.com/zuazo-forks/transformers/blob/convert_hf_to_openai/src/transformers/models/whisper/convert_hf_to_openai.py). 🌎 +Usage example: +```bash +pip install -U openai-whisper +python convert_hf_to_openai.py \ + --checkpoint openai/whisper-tiny \ + --whisper_dump_path whisper-tiny-openai.pt +``` + ## WhisperConfig [[autodoc]] WhisperConfig From e4280d650c579a87f645d1f4a4535feb27c49804 Mon Sep 17 00:00:00 2001 From: Peter Pan Date: Mon, 20 Nov 2023 20:24:38 +0800 Subject: [PATCH 216/268] docs: fix 404 link (#27529) Signed-off-by: Peter Pan --- docs/source/en/main_classes/trainer.md | 2 +- docs/source/ja/main_classes/trainer.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/main_classes/trainer.md b/docs/source/en/main_classes/trainer.md index e9a93bbff751..7f85d6d72ad0 100644 --- a/docs/source/en/main_classes/trainer.md +++ b/docs/source/en/main_classes/trainer.md @@ -219,7 +219,7 @@ accelerate launch --num_processes 2 trainer-program.py ... deepspeed --num_gpus 2 trainer-program.py ... ``` -You don't need to use the Accelerate or [the Deepspeed integration](Deepspeed) features to use these launchers. +You don't need to use the Accelerate or [the Deepspeed integration](deepspeed) features to use these launchers. Until now you were able to tell the program how many GPUs to use. Now let's discuss how to select specific GPUs and control their order. 
diff --git a/docs/source/ja/main_classes/trainer.md b/docs/source/ja/main_classes/trainer.md index 4c1ce95ca38a..f05aa83eadc1 100644 --- a/docs/source/ja/main_classes/trainer.md +++ b/docs/source/ja/main_classes/trainer.md @@ -209,7 +209,7 @@ accelerate launch --num_processes 2 trainer-program.py ... deepspeed --num_gpus 2 trainer-program.py ... ``` -これらのランチャーを使用するために、Accelerate または [Deepspeed 統合](Deepspeed) 機能を使用する必要はありません。 +これらのランチャーを使用するために、Accelerate または [Deepspeed 統合](deepspeed) 機能を使用する必要はありません。 これまでは、プログラムに使用する GPU の数を指示できました。次に、特定の GPU を選択し、その順序を制御する方法について説明します。 From f31af3927f4091f5fb8126c77a0addebd4c1fe94 Mon Sep 17 00:00:00 2001 From: Mathias Nielsen Date: Mon, 20 Nov 2023 15:45:42 +0100 Subject: [PATCH 217/268] [ examples] fix loading jsonl with load dataset in run translation example (#26924) * Renamed variable extension to builder_name * If builder name is jsonl change to json to align with load_datasets * Apply suggestions from code review Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> --------- Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> --- examples/pytorch/translation/run_translation.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index 6edbe6a995c3..98780483a37e 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -374,8 +374,12 @@ def main(): if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] + if extension == "jsonl": + builder_name = "json" # the "json" builder reads both .json and .jsonl files + else: + builder_name = extension # e.g. "parquet" raw_datasets = load_dataset( - extension, + builder_name, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, From e66984f9952641e176521a7ef3faf2d5fa3c608b Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Mon, 20 Nov 2023 16:45:55 +0100 Subject: [PATCH 218/268] [`FA-2`] Add fa2 support for `from_config` (#26914) * add fa2 support for from_config * Update test_modeling_common.py --- src/transformers/modeling_utils.py | 6 ++++ tests/test_modeling_common.py | 48 ++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index d7e0580e4359..2d7db724fff5 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1173,14 +1173,20 @@ def _from_config(cls, config, **kwargs): Args: torch_dtype (`torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under this dtype. + use_flash_attention_2 (`bool`, *optional*): + Whether to load the model with Flash Attention 2 modules. 
""" torch_dtype = kwargs.pop("torch_dtype", None) + use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) # override default dtype if needed dtype_orig = None if torch_dtype is not None: dtype_orig = cls._set_default_torch_dtype(torch_dtype) + if use_flash_attention_2: + config = cls._check_and_enable_flash_attn_2(config, torch_dtype) + if is_deepspeed_zero3_enabled(): import deepspeed diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 0edc23c7af20..49d64dc207f1 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -33,6 +33,7 @@ import transformers from transformers import ( AutoModel, + AutoModelForCausalLM, AutoModelForSequenceClassification, PretrainedConfig, is_torch_available, @@ -3269,6 +3270,53 @@ def test_flax_from_pt_safetensors(self): # Check models are equal self.assertTrue(check_models_equal(flax_model_1, flax_model_2)) + @require_flash_attn + @require_torch_gpu + @mark.flash_attn_test + @slow + def test_flash_attn_2_from_config(self): + import torch + + for model_class in self.all_generative_model_classes: + if not model_class._supports_flash_attn_2: + return + + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + # TODO: to change it in the future with other relevant auto classes + fa2_model = AutoModelForCausalLM.from_config( + config, use_flash_attention_2=True, torch_dtype=torch.bfloat16 + ).to(torch_device) + + dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) + dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device) + + fa2_correctly_converted = False + + for _, module in fa2_model.named_modules(): + if "FlashAttention" in module.__class__.__name__: + fa2_correctly_converted = True + break + + self.assertTrue(fa2_correctly_converted) + + _ = fa2_model(input_ids=dummy_input, attention_mask=dummy_attention_mask) + + with tempfile.TemporaryDirectory() as tmpdirname: + fa2_model.save_pretrained(tmpdirname) + + model_from_pretrained = AutoModelForCausalLM.from_pretrained(tmpdirname) + + self.assertFalse(getattr(model_from_pretrained.config, "_flash_attn_2_enabled", False)) + + fa2_correctly_converted = False + + for _, module in model_from_pretrained.named_modules(): + if "FlashAttention" in module.__class__.__name__: + fa2_correctly_converted = True + break + + self.assertFalse(fa2_correctly_converted) + global_rng = random.Random() From 93f2de858b260ecf4c243e68756a324c9a96add5 Mon Sep 17 00:00:00 2001 From: Said Taghadouini <84044788+staghado@users.noreply.github.com> Date: Mon, 20 Nov 2023 17:00:30 +0100 Subject: [PATCH 219/268] timm to pytorch conversion for vit model fix (#26908) * timm to pytorch conversion for vit model fix * remove unecessary print statments * Detect non-supported ViTs in transformers & better handle id2label mapping * detect non supported hybrid resnet-vit models in conversion script * remove check for overlap between cls token and pos embed --- .../models/vit/convert_vit_timm_to_pytorch.py | 119 +++++++++--------- 1 file changed, 62 insertions(+), 57 deletions(-) diff --git a/src/transformers/models/vit/convert_vit_timm_to_pytorch.py b/src/transformers/models/vit/convert_vit_timm_to_pytorch.py index b73c5f346dba..0ccd9b9f6685 100644 --- a/src/transformers/models/vit/convert_vit_timm_to_pytorch.py +++ b/src/transformers/models/vit/convert_vit_timm_to_pytorch.py @@ -16,14 +16,13 @@ import argparse -import json from pathlib import Path import requests import timm import torch -from huggingface_hub import 
hf_hub_download from PIL import Image +from timm.data import ImageNetInfo, infer_imagenet_subset from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging @@ -60,13 +59,11 @@ def create_rename_keys(config, base_model=False): ) if base_model: - # layernorm + pooler + # layernorm rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), - ("pre_logits.fc.weight", "pooler.dense.weight"), - ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) @@ -140,60 +137,68 @@ def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path): # define default ViT configuration config = ViTConfig() base_model = False - # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size - if vit_name[-5:] == "in21k": - base_model = True - config.patch_size = int(vit_name[-12:-10]) - config.image_size = int(vit_name[-9:-6]) - else: - config.num_labels = 1000 - repo_id = "huggingface/label-files" - filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) - id2label = {int(k): v for k, v in id2label.items()} - config.id2label = id2label - config.label2id = {v: k for k, v in id2label.items()} - config.patch_size = int(vit_name[-6:-4]) - config.image_size = int(vit_name[-3:]) - # size of the architecture - if "deit" in vit_name: - if vit_name[9:].startswith("tiny"): - config.hidden_size = 192 - config.intermediate_size = 768 - config.num_hidden_layers = 12 - config.num_attention_heads = 3 - elif vit_name[9:].startswith("small"): - config.hidden_size = 384 - config.intermediate_size = 1536 - config.num_hidden_layers = 12 - config.num_attention_heads = 6 - else: - pass - else: - if vit_name[4:].startswith("small"): - config.hidden_size = 768 - config.intermediate_size = 2304 - config.num_hidden_layers = 8 - config.num_attention_heads = 8 - elif vit_name[4:].startswith("base"): - pass - elif vit_name[4:].startswith("large"): - config.hidden_size = 1024 - config.intermediate_size = 4096 - config.num_hidden_layers = 24 - config.num_attention_heads = 16 - elif vit_name[4:].startswith("huge"): - config.hidden_size = 1280 - config.intermediate_size = 5120 - config.num_hidden_layers = 32 - config.num_attention_heads = 16 # load original model from timm timm_model = timm.create_model(vit_name, pretrained=True) timm_model.eval() - # load state_dict of original model, remove and rename some keys + # detect unsupported ViT models in transformers + # fc_norm is present + if not isinstance(getattr(timm_model, "fc_norm", None), torch.nn.Identity): + raise ValueError(f"{vit_name} is not supported in transformers because of the presence of fc_norm.") + + # use of global average pooling in combination (or without) class token + if getattr(timm_model, "global_pool", None) == "avg": + raise ValueError(f"{vit_name} is not supported in transformers because of use of global average pooling.") + + # CLIP style vit with norm_pre layer present + if "clip" in vit_name and not isinstance(getattr(timm_model, "norm_pre", None), torch.nn.Identity): + raise ValueError( + f"{vit_name} is not supported in transformers because it's a CLIP style ViT with norm_pre layer." + ) + + # SigLIP style vit with attn_pool layer present + if "siglip" in vit_name and getattr(timm_model, "global_pool", None) == "map": + raise ValueError( + f"{vit_name} is not supported in transformers because it's a SigLIP style ViT with attn_pool." 
+ ) + + # use of layer scale in ViT model blocks + if not isinstance(getattr(timm_model.blocks[0], "ls1", None), torch.nn.Identity) or not isinstance( + getattr(timm_model.blocks[0], "ls2", None), torch.nn.Identity + ): + raise ValueError(f"{vit_name} is not supported in transformers because it uses a layer scale in its blocks.") + + # Hybrid ResNet-ViTs + if not isinstance(timm_model.patch_embed, timm.layers.PatchEmbed): + raise ValueError(f"{vit_name} is not supported in transformers because it is a hybrid ResNet-ViT.") + + # get patch size and image size from the patch embedding submodule + config.patch_size = timm_model.patch_embed.patch_size[0] + config.image_size = timm_model.patch_embed.img_size[0] + + # retrieve architecture-specific parameters from the timm model + config.hidden_size = timm_model.embed_dim + config.intermediate_size = timm_model.blocks[0].mlp.fc1.out_features + config.num_hidden_layers = len(timm_model.blocks) + config.num_attention_heads = timm_model.blocks[0].attn.num_heads + + # check whether the model has a classification head or not + if timm_model.num_classes != 0: + config.num_labels = timm_model.num_classes + # infer ImageNet subset from timm model + imagenet_subset = infer_imagenet_subset(timm_model) + dataset_info = ImageNetInfo(imagenet_subset) + config.id2label = {i: dataset_info.index_to_label_name(i) for i in range(dataset_info.num_classes())} + config.label2id = {v: k for k, v in config.id2label.items()} + else: + print(f"{vit_name} is going to be converted as a feature extractor only.") + base_model = True + + # load state_dict of original model state_dict = timm_model.state_dict() + + # remove and rename some keys in the state dict if base_model: remove_classification_head_(state_dict) rename_keys = create_rename_keys(config, base_model) @@ -202,8 +207,8 @@ def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path): read_in_q_k_v(state_dict, config, base_model) # load HuggingFace model - if vit_name[-5:] == "in21k": - model = ViTModel(config).eval() + if base_model: + model = ViTModel(config, add_pooling_layer=False).eval() else: model = ViTForImageClassification(config).eval() model.load_state_dict(state_dict) @@ -219,8 +224,8 @@ def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path): if base_model: timm_pooled_output = timm_model.forward_features(pixel_values) - assert timm_pooled_output.shape == outputs.pooler_output.shape - assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3) + assert timm_pooled_output.shape == outputs.last_hidden_state.shape + assert torch.allclose(timm_pooled_output, outputs.last_hidden_state, atol=1e-1) else: timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape From 87e217d06576a40103803a09bf7f588a62d7e3af Mon Sep 17 00:00:00 2001 From: Dmitrii Mukhutdinov Date: Tue, 21 Nov 2023 00:36:48 +0800 Subject: [PATCH 220/268] [Whisper] Add `large-v3` version support (#27336) * Enable large-v3 downloading and update language list * Fix type annotation * make fixup * Export Whisper feature extractor * Fix error after extractor loading * Do not use pre-computed mel filters * Save the full preprocessor properly * Update docs * Remove comment Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Add alignment heads consistent with each Whisper version * Remove alignment heads calculation * Save fast tokenizer format as well * Fix slow to fast conversion * Fix bos/eos/pad token IDs in the model config * Add decoder_start_token_id to config 
--------- Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- docs/source/en/model_doc/whisper.md | 8 +- docs/source/ko/model_doc/whisper.md | 8 ++ .../models/whisper/convert_openai_to_hf.py | 113 ++++++++++++++---- 3 files changed, 100 insertions(+), 29 deletions(-) diff --git a/docs/source/en/model_doc/whisper.md b/docs/source/en/model_doc/whisper.md index 4a5738cf469c..37411209bf91 100644 --- a/docs/source/en/model_doc/whisper.md +++ b/docs/source/en/model_doc/whisper.md @@ -34,13 +34,13 @@ The original code can be found [here](https://github.com/openai/whisper). - Inference is currently only implemented for short-form i.e. audio is pre-segmented into <=30s segments. Long-form (including timestamps) will be implemented in a future release. - One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted ID's back into text. -- To convert the tokenizer, we recommend using the following: +- To convert the model and the processor, we recommend using the following: ```bash -python src/transformers/models/whisper/convert_openai_to_hf.py --checkpoint_path "" --pytorch_dump_folder_path "Arthur/whisper-3" --convert_tokenizer True --whisper_version 3 --multilingual True +python src/transformers/models/whisper/convert_openai_to_hf.py --checkpoint_path "" --pytorch_dump_folder_path "Arthur/whisper-3" --convert_preprocessor True ``` -Here the `whisper_version` will set the number of languages to `100` to account for `cantonese` which was added in `whisper-large-v3`. - +The script will automatically determine all necessary parameters from the OpenAI checkpoint. A `tiktoken` library needs to be installed +to perform the conversion of the OpenAI tokenizer to the `tokenizers` version. ## Inference diff --git a/docs/source/ko/model_doc/whisper.md b/docs/source/ko/model_doc/whisper.md index 68fbe045caf6..f48bae1e60f5 100644 --- a/docs/source/ko/model_doc/whisper.md +++ b/docs/source/ko/model_doc/whisper.md @@ -33,6 +33,14 @@ Whisper 모델은 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine - 현재 추론은 짧은 형식에만 구현되어 있으며, 오디오는 30초 미만의 세그먼트로 미리 분할되어야 합니다. 타임스탬프를 포함한 긴 형식에 대한 추론은 향후 릴리스에서 구현될 예정입니다. - [`WhisperProcessor`]를 사용하여 모델에 사용할 오디오를 준비하고, 예측된 ID를 텍스트로 디코딩할 수 있습니다. +- 모델과 프로세서를 변환하려면 다음을 사용하는 것이 좋습니다: + +```bash +python src/transformers/models/whisper/convert_openai_to_hf.py --checkpoint_path "" --pytorch_dump_folder_path "Arthur/whisper-3" --convert_preprocessor True +``` +스크립트는 OpenAI 체크포인트에서 필요한 모든 매개변수를 자동으로 결정합니다. OpenAI 변환을 수행하려면 `tiktoken` 라이브러리를 설치해야 합니다. +라이브러리를 설치해야 OpenAI 토큰화기를 `tokenizers` 버전으로 변환할 수 있습니다. + 이 모델은 [Arthur Zucker](https://huggingface.co/ArthurZ)에 의해 제공되었습니다. 이 모델의 Tensorflow 버전은 [amyeroberts](https://huggingface.co/amyeroberts)에 의해 제공되었습니다. 원본 코드는 [여기](https://github.com/openai/whisper)에서 찾을 수 있습니다. 
diff --git a/src/transformers/models/whisper/convert_openai_to_hf.py b/src/transformers/models/whisper/convert_openai_to_hf.py index 0d6cdaa95882..763511291a9c 100755 --- a/src/transformers/models/whisper/convert_openai_to_hf.py +++ b/src/transformers/models/whisper/convert_openai_to_hf.py @@ -21,13 +21,22 @@ import tempfile import urllib import warnings +from typing import Any, Optional, Tuple import torch from huggingface_hub.utils import insecure_hashlib from torch import nn from tqdm import tqdm -from transformers import WhisperConfig, WhisperForConditionalGeneration, WhisperTokenizer +from transformers import ( + GenerationConfig, + WhisperConfig, + WhisperFeatureExtractor, + WhisperForConditionalGeneration, + WhisperProcessor, + WhisperTokenizer, + WhisperTokenizerFast, +) from transformers.models.whisper.tokenization_whisper import LANGUAGES, bytes_to_unicode from transformers.utils.import_utils import _is_package_available @@ -43,14 +52,47 @@ "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", + "large-v3": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", } + _TOKENIZERS = { "multilingual": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/multilingual.tiktoken", "english": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/gpt2.tiktoken", } +def _get_generation_config( + is_multilingual: bool, + num_languages: int = 100, + openai_version: Optional[str] = None, +) -> GenerationConfig: + """ + Loads the appropriate generation config from HF repo + """ + if openai_version is not None: + repo = f"openai/whisper-{openai_version}" + elif not is_multilingual: + repo = "openai/whisper-medium.en" + elif num_languages < 100: + repo = "openai/whisper-large-v2" + else: + repo = "openai/whisper-large-v3" + + gen_cfg = GenerationConfig.from_pretrained(repo) + if openai_version is None: + gen_cfg.alignment_heads = None + warnings.warn( + "Alignment heads have not been included in the generation config, since they are available " + "only for the original OpenAI checkpoints." + "If you want to use word-level timestamps with a custom version of Whisper," + "see https://github.com/openai/whisper/blob/main/notebooks/Multilingual_ASR.ipynb" + "for the example of how to produce word-level timestamps manually." + ) + + return gen_cfg + + def remove_ignore_keys_(state_dict): ignore_keys = ["layers", "blocks"] for k in ignore_keys: @@ -102,7 +144,7 @@ def make_linear_from_emb(emb): return lin_layer -def _download(url: str, root: str) -> io.BytesIO: +def _download(url: str, root: str) -> Any: os.makedirs(root, exist_ok=True) filename = os.path.basename(url) @@ -140,12 +182,17 @@ def _download(url: str, root: str) -> io.BytesIO: return torch.load(io.BytesIO(model_bytes)) -def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): +def convert_openai_whisper_to_tfms( + checkpoint_path, pytorch_dump_folder_path +) -> Tuple[WhisperForConditionalGeneration, bool, int]: if ".pt" not in checkpoint_path: root = os.path.dirname(pytorch_dump_folder_path) or "." 
original_checkpoint = _download(_MODELS[checkpoint_path], root) + openai_version = checkpoint_path else: original_checkpoint = torch.load(checkpoint_path, map_location="cpu") + openai_version = None + dimensions = original_checkpoint["dims"] state_dict = original_checkpoint["model_state_dict"] proj_out_weights = state_dict["decoder.token_embedding.weight"] @@ -154,6 +201,9 @@ def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): tie_embeds = True ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0] + # a hacky way to properly set up the bos/eos/pad token ids in the model + endoftext_id = 50257 if dimensions["n_vocab"] > 51865 else 50256 + config = WhisperConfig( vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, @@ -166,6 +216,10 @@ def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], + eos_token_id=endoftext_id, + bos_token_id=endoftext_id, + pad_token_id=endoftext_id, + decoder_start_token_id=endoftext_id + 1, ) model = WhisperForConditionalGeneration(config) @@ -184,7 +238,17 @@ def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): else: model.proj_out.weight.data = proj_out_weights - model.save_pretrained(pytorch_dump_folder_path) + # determine those parameters from a model checkpoint as Whisper repo does + is_multilingual = model.config.vocab_size >= 51865 + num_languages = model.config.vocab_size - 51765 - int(is_multilingual) + + model.generation_config = _get_generation_config( + is_multilingual, + num_languages, + openai_version, + ) + + return model, is_multilingual, num_languages # Adapted from https://github.com/openai/tiktoken/issues/60#issuecomment-1499977960 @@ -225,7 +289,7 @@ def token_bytes_to_string(b): def convert_tiktoken_to_hf( - pytorch_dump_folder_path: str, multilingual: bool = True, num_languages: int = 100, time_precision=0.02 + multilingual: bool = True, num_languages: int = 100, time_precision=0.02 ) -> WhisperTokenizer: # requires whisper, unless we use the path to the tiktoken file tiktoken_tokenizer_path = _TOKENIZERS["multilingual" if multilingual else "english"] @@ -260,7 +324,7 @@ def convert_tiktoken_to_hf( hf_tokenizer.add_tokens(start_of_transcript + language_tokens + control_tokens, special_tokens=True) hf_tokenizer.add_tokens(timestamp_tokens, special_tokens=False) - hf_tokenizer.save_pretrained(pytorch_dump_folder_path) + return hf_tokenizer if __name__ == "__main__": @@ -269,26 +333,18 @@ def convert_tiktoken_to_hf( parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( - "--convert_tokenizer", + "--convert_preprocessor", type=bool, default=False, - help="Whether or not the tokenizer should be converted along with the model.", - ) - parser.add_argument( - "--whisper_version", - type=int, - default=2, - help="Version of the whisper release", - ) - parser.add_argument( - "--multilingual", - type=bool, - default="store_true", - help="Whether or not the model is multilingual or english only", + help="Whether or not the preprocessor (tokenizer + feature extractor) should be converted along with the model.", ) args = parser.parse_args() - if args.convert_tokenizer: + model, is_multilingual, num_languages = convert_openai_whisper_to_tfms( + 
args.checkpoint_path, args.pytorch_dump_folder_path + ) + + if args.convert_preprocessor: try: if not _is_package_available("tiktoken"): raise """`tiktoken` is not installed, use `pip install tiktoken` to convert the tokenizer""" @@ -297,9 +353,16 @@ def convert_tiktoken_to_hf( else: from tiktoken.load import load_tiktoken_bpe - NUM_LANGUAGES_PER_RELEASE = {1: 99, 2: 99, 3: 100} - convert_tiktoken_to_hf( - args.pytorch_dump_folder_path, args.multilingual, NUM_LANGUAGES_PER_RELEASE[args.whisper_version] + tokenizer = convert_tiktoken_to_hf(is_multilingual, num_languages) + feature_extractor = WhisperFeatureExtractor( + feature_size=model.config.num_mel_bins, + # the rest of default parameters are the same as hardcoded in openai/whisper ) + processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + processor.save_pretrained(args.pytorch_dump_folder_path) + + # save fast tokenizer as well + fast_tokenizer = WhisperTokenizerFast.from_pretrained(args.pytorch_dump_folder_path) + fast_tokenizer.save_pretrained(args.pytorch_dump_folder_path, legacy_format=False) - convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path) + model.save_pretrained(args.pytorch_dump_folder_path) From f18c95b49cbb675764f0cd19b56fb70f39a62bb2 Mon Sep 17 00:00:00 2001 From: Yeonwoo Sung Date: Tue, 21 Nov 2023 02:14:23 +0900 Subject: [PATCH 221/268] Update Korean tutorial for using LLMs, and refactor the nested conditional statements in hr_argparser.py (#27489) docs: Update Korean LLM tutorial to use Mistral-7B, not Llama-v1 --- docs/source/ko/llm_tutorial.md | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/docs/source/ko/llm_tutorial.md b/docs/source/ko/llm_tutorial.md index 05f27dff4f50..d5e0bd356edd 100644 --- a/docs/source/ko/llm_tutorial.md +++ b/docs/source/ko/llm_tutorial.md @@ -74,14 +74,13 @@ LLM과 자기회귀 생성을 함께 사용할 때 핵심적인 부분은 이 - 먼저, 모델을 불러오세요. -```py +```python >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained( -... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True +... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True ... ) ``` @@ -94,18 +93,20 @@ LLM과 자기회귀 생성을 함께 사용할 때 핵심적인 부분은 이 이어서 텍스트 입력을 [토크나이저](tokenizer_summary)으로 전처리하세요. -```py +```python >>> from transformers import AutoTokenizer +>>> import torch ->>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b") ->>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda") +>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") +>>> device = "cuda" if torch.cuda.is_available() else "cpu" +>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to(device) ``` `model_inputs` 변수에는 토큰화된 텍스트 입력과 함께 어텐션 마스크가 들어 있습니다. [`~generation.GenerationMixin.generate`]는 어텐션 마스크가 제공되지 않았을 경우에도 이를 추론하려고 노력하지만, 최상의 성능을 위해서는 가능하면 어텐션 마스크를 전달하는 것을 권장합니다. 마지막으로 [`~generation.GenerationMixin.generate`] 메소드를 호출해 생성된 토큰을 얻은 후, 이를 출력하기 전에 텍스트 형태로 변환하세요. 
-```py +```python >>> generated_ids = model.generate(**model_inputs) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'A list of colors: red, blue, green, yellow, black, white, and brown' @@ -121,10 +122,10 @@ LLM과 자기회귀 생성을 함께 사용할 때 핵심적인 부분은 이 ```py >>> from transformers import AutoModelForCausalLM, AutoTokenizer ->>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b") ->>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default +>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") +>>> tokenizer.pad_token = tokenizer.eos_token # Mistral has no pad token by default >>> model = AutoModelForCausalLM.from_pretrained( -... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True +... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True ... ) ``` @@ -137,12 +138,12 @@ LLM과 자기회귀 생성을 함께 사용할 때 핵심적인 부분은 이 >>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda") >>> # By default, the output will contain up to 20 tokens ->>> generated_ids = model.generate(**model_inputs) +>>> generated_ids = model.generate(**model_inputs, pad_token_id=tokenizer.eos_token_id) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'A sequence of numbers: 1, 2, 3, 4, 5' >>> # Setting `max_new_tokens` allows you to control the maximum length ->>> generated_ids = model.generate(**model_inputs, max_new_tokens=50) +>>> generated_ids = model.generate(**model_inputs, pad_token_id=tokenizer.eos_token_id, max_new_tokens=50) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,' ``` @@ -151,7 +152,7 @@ LLM과 자기회귀 생성을 함께 사용할 때 핵심적인 부분은 이 기본적으로 [`~generation.GenerationConfig`] 파일에서 별도로 지정하지 않으면, `generate`는 각 반복에서 가장 확률이 높은 토큰을 선택합니다(그리디 디코딩). 하려는 작업에 따라 이 방법은 바람직하지 않을 수 있습니다. 예를 들어, 챗봇이나 에세이 작성과 같은 창의적인 작업은 샘플링이 적합할 수 있습니다. 반면, 오디오를 텍스트로 변환하거나 번역과 같은 입력 기반 작업은 그리디 디코딩이 더 적합할 수 있습니다. `do_sample=True`로 샘플링을 활성화할 수 있으며, 이 주제에 대한 자세한 내용은 이 [블로그 포스트](https://huggingface.co/blog/how-to-generate)에서 볼 수 있습니다. -```py +```python >>> # Set seed or reproducibility -- you don't need this unless you want full reproducibility >>> from transformers import set_seed >>> set_seed(0) @@ -173,7 +174,7 @@ LLM과 자기회귀 생성을 함께 사용할 때 핵심적인 부분은 이 LLM은 [디코더 전용](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt) 구조를 가지고 있어, 입력 프롬프트에 대해 지속적으로 반복 처리를 합니다. 입력 데이터의 길이가 다르면 패딩 작업이 필요합니다. LLM은 패딩 토큰에서 작동을 이어가도록 설계되지 않았기 때문에, 입력 왼쪽에 패딩이 추가 되어야 합니다. 그리고 어텐션 마스크도 꼭 `generate` 함수에 전달되어야 합니다! -```py +```python >>> # The tokenizer initialized above has right-padding active by default: the 1st sequence, >>> # which is shorter, has padding on the right side. Generation fails. 
>>> model_inputs = tokenizer( From 38e2633f80a4924bf613b0240622492beee4cfcc Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 20 Nov 2023 22:22:51 +0000 Subject: [PATCH 222/268] Fix torch.fx import issue for torch 1.12 (#27570) * Fix torch.fx import issue for torch 1.12 * Fix up * Python verion dependent import * Woops - fix * Fix --- src/transformers/models/gpt_neo/modeling_gpt_neo.py | 4 ++++ src/transformers/models/llama/modeling_llama.py | 5 ++++- src/transformers/pytorch_utils.py | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 368920f3769c..7cc3bef70fdc 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -36,6 +36,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import is_torch_greater_or_equal_than_1_13 from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -55,6 +56,9 @@ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. # It means that the function will not be traced through and simply appear as a node in the graph. if is_torch_fx_available(): + if not is_torch_greater_or_equal_than_1_13: + import torch.fx + _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index b9625dd92139..dd38abfcd01c 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -32,7 +32,7 @@ from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ALL_LAYERNORM_LAYERS +from ...pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13 from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -52,6 +52,9 @@ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. # It means that the function will not be traced through and simply appear as a node in the graph. 
if is_torch_fx_available(): + if not is_torch_greater_or_equal_than_1_13: + import torch.fx + _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) diff --git a/src/transformers/pytorch_utils.py b/src/transformers/pytorch_utils.py index d0bc55fe8383..1a464b62a665 100644 --- a/src/transformers/pytorch_utils.py +++ b/src/transformers/pytorch_utils.py @@ -30,6 +30,7 @@ is_torch_greater_or_equal_than_2_1 = parsed_torch_version_base >= version.parse("2.1") is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0") +is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13") is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") is_torch_greater_or_equal_than_1_11 = parsed_torch_version_base >= version.parse("1.11") is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11") From 8eb9e29d8dc8b8bd98b4dd48317d1d596ec548f3 Mon Sep 17 00:00:00 2001 From: Dave Berenbaum Date: Tue, 21 Nov 2023 03:29:51 -0500 Subject: [PATCH 223/268] dvclive callback: warn instead of fail when logging non-scalars (#27608) * dvclive callback: warn instead of fail when logging non-scalars * tests: log lr as scalar --- src/transformers/integrations/integration_utils.py | 11 ++++++++++- tests/trainer/test_trainer.py | 6 +++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/transformers/integrations/integration_utils.py b/src/transformers/integrations/integration_utils.py index 5bddb24ed326..5eef480ac93f 100644 --- a/src/transformers/integrations/integration_utils.py +++ b/src/transformers/integrations/integration_utils.py @@ -1680,10 +1680,19 @@ def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: + from dvclive.plots import Metric from dvclive.utils import standardize_metric_name for key, value in logs.items(): - self.live.log_metric(standardize_metric_name(key, "dvclive.huggingface"), value) + if Metric.could_log(value): + self.live.log_metric(standardize_metric_name(key, "dvclive.huggingface"), value) + else: + logger.warning( + "Trainer is attempting to log a value of " + f'"{value}" of type {type(value)} for key "{key}" as a scalar. ' + "This invocation of DVCLive's Live.log_metric() " + "is incorrect so we dropped this attribute." 
+ ) self.live.next_step() def on_save(self, args, state, control, **kwargs): diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index b9f801fabd7f..5d4ae3416196 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -672,7 +672,7 @@ class TrainerWithLRLogs(Trainer): def log(self, logs): # the LR is computed after metrics and does not exist for the first epoch if hasattr(self.lr_scheduler, "_last_lr"): - logs["learning_rate"] = self.lr_scheduler._last_lr + logs["learning_rate"] = self.lr_scheduler._last_lr[0] super().log(logs) train_dataset = RegressionDataset(length=64) @@ -702,14 +702,14 @@ def log(self, logs): if loss > best_loss: bad_epochs += 1 if bad_epochs > patience: - self.assertLess(logs[i + 1]["learning_rate"][0], log["learning_rate"][0]) + self.assertLess(logs[i + 1]["learning_rate"], log["learning_rate"]) just_decreased = True bad_epochs = 0 else: best_loss = loss bad_epochs = 0 if not just_decreased: - self.assertEqual(logs[i + 1]["learning_rate"][0], log["learning_rate"][0]) + self.assertEqual(logs[i + 1]["learning_rate"], log["learning_rate"]) def test_adafactor_lr_none(self): # test the special case where lr=None, since Trainer can't not have lr_scheduler From 0e6794ff1cb098e1388b0dff17985ba57cd80619 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 21 Nov 2023 11:03:30 +0100 Subject: [PATCH 224/268] [`core` / `gradient_checkpointing`] add support for old GC method (#27610) * add support for old GC method * add also disable * up * oops --- src/transformers/modeling_utils.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 2d7db724fff5..cee472036b27 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1876,7 +1876,18 @@ def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs) - self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) + # For old GC format (transformers < 4.35.0) for models that live on the Hub + # we will fall back to the overwritten `_set_gradient_checkpointing` methid + _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters + + if not _is_using_old_format: + self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) + else: + self.apply(partial(self._set_gradient_checkpointing, value=True)) + logger.warn( + "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)." + "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." + ) if getattr(self, "_hf_peft_config_loaded", False): # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True @@ -1915,7 +1926,17 @@ def gradient_checkpointing_disable(self): activations". 
""" if self.supports_gradient_checkpointing: - self._set_gradient_checkpointing(enable=False) + # For old GC format (transformers < 4.35.0) for models that live on the Hub + # we will fall back to the overwritten `_set_gradient_checkpointing` methid + _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters + if not _is_using_old_format: + self._set_gradient_checkpointing(enable=False) + else: + logger.warn( + "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)." + "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." + ) + self.apply(partial(self._set_gradient_checkpointing, value=False)) if getattr(self, "_hf_peft_config_loaded", False): self.disable_input_require_grads() From ade7af9361c4b2509054bf528319e11c811335e5 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Tue, 21 Nov 2023 11:14:42 +0100 Subject: [PATCH 225/268] [ConvNext] Improve backbone (#27621) * Improve convnext backbone * Fix convnext2 --- src/transformers/models/convnext/modeling_convnext.py | 11 +++++------ .../models/convnextv2/modeling_convnextv2.py | 11 +++++------ 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/src/transformers/models/convnext/modeling_convnext.py b/src/transformers/models/convnext/modeling_convnext.py index a0102b47ce8a..a952e5d8165e 100755 --- a/src/transformers/models/convnext/modeling_convnext.py +++ b/src/transformers/models/convnext/modeling_convnext.py @@ -529,14 +529,13 @@ def forward( outputs = self.encoder( embedding_output, output_hidden_states=True, - return_dict=True, + return_dict=return_dict, ) - hidden_states = outputs.hidden_states + hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () - # we skip the stem - for idx, (stage, hidden_state) in enumerate(zip(self.stage_names[1:], hidden_states[1:])): + for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: hidden_state = self.hidden_states_norms[stage](hidden_state) feature_maps += (hidden_state,) @@ -544,11 +543,11 @@ def forward( if not return_dict: output = (feature_maps,) if output_hidden_states: - output += (outputs.hidden_states,) + output += (hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, - hidden_states=outputs.hidden_states if output_hidden_states else None, + hidden_states=hidden_states if output_hidden_states else None, attentions=None, ) diff --git a/src/transformers/models/convnextv2/modeling_convnextv2.py b/src/transformers/models/convnextv2/modeling_convnextv2.py index 07580731ea1a..8d166200d122 100644 --- a/src/transformers/models/convnextv2/modeling_convnextv2.py +++ b/src/transformers/models/convnextv2/modeling_convnextv2.py @@ -552,14 +552,13 @@ def forward( outputs = self.encoder( embedding_output, output_hidden_states=True, - return_dict=True, + return_dict=return_dict, ) - hidden_states = outputs.hidden_states + hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () - # we skip the stem - for idx, (stage, hidden_state) in enumerate(zip(self.stage_names[1:], hidden_states[1:])): + for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: hidden_state = self.hidden_states_norms[stage](hidden_state) 
feature_maps += (hidden_state,) @@ -567,11 +566,11 @@ def forward( if not return_dict: output = (feature_maps,) if output_hidden_states: - output += (outputs.hidden_states,) + output += (hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, - hidden_states=outputs.hidden_states if output_hidden_states else None, + hidden_states=hidden_states if output_hidden_states else None, attentions=None, ) From 81b79818309031eca7c71f159d19dd4fb7df00ad Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 21 Nov 2023 10:48:14 +0000 Subject: [PATCH 226/268] Generate: Update docs regarding reusing `past_key_values` in `generate` (#27612) --- docs/source/en/llm_tutorial_optimization.md | 68 +++++++++++++++++---- 1 file changed, 55 insertions(+), 13 deletions(-) diff --git a/docs/source/en/llm_tutorial_optimization.md b/docs/source/en/llm_tutorial_optimization.md index 497e624820d4..a90fc045aff4 100644 --- a/docs/source/en/llm_tutorial_optimization.md +++ b/docs/source/en/llm_tutorial_optimization.md @@ -22,7 +22,7 @@ The crux of these challenges lies in augmenting the computational and memory cap In this guide, we will go over the effective techniques for efficient LLM deployment: -1. **Lower Precision**: Research has shown that operating at reduced numerical precision, namely [8-bit and 4-bit](./main_classes/quantization.md) can achieve computational advantages without a considerable decline in model performance. +1. **Lower Precision:** Research has shown that operating at reduced numerical precision, namely [8-bit and 4-bit](./main_classes/quantization.md) can achieve computational advantages without a considerable decline in model performance. 2. **Flash Attention:** Flash Attention is a variation of the attention algorithm that not only provides a more memory-efficient approach but also realizes increased efficiency due to optimized GPU memory utilization. @@ -58,7 +58,7 @@ As of writing this document, the largest GPU chip on the market is the A100 & H1 🤗 Transformers does not support tensor parallelism out of the box as it requires the model architecture to be written in a specific way. If you're interested in writing models in a tensor-parallelism-friendly way, feel free to have a look at [the text-generation-inference library](https://github.com/huggingface/text-generation-inference/tree/main/server/text_generation_server/models/custom_modeling). Naive pipeline parallelism is supported out of the box. For this, simply load the model with `device="auto"` which will automatically place the different layers on the available GPUs as explained [here](https://huggingface.co/docs/accelerate/v0.22.0/en/concept_guides/big_model_inference). -Note, however that while very effective, this naive pipeline parallelism does not tackle the issues of GPU idling. For this more advanced pipeline parallelism is required as explained [here](https://huggingface.co/docs/transformers/v4.34.0/en/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism). +Note, however that while very effective, this naive pipeline parallelism does not tackle the issues of GPU idling. For this more advanced pipeline parallelism is required as explained [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism). 
If you have access to an 8 x 80GB A100 node, you could load BLOOM as follows @@ -286,7 +286,7 @@ If GPU memory is not a constraint for your use case, there is often no need to l For more in-detail usage information, we strongly recommend taking a look at the [Transformers Quantization Docs](https://huggingface.co/docs/transformers/main_classes/quantization#general-usage). Next, let's look into how we can improve computational and memory efficiency by using better algorithms and an improved model architecture. -# 2. Flash Attention +## 2. Flash Attention Today's top-performing LLMs share more or less the same fundamental architecture that consists of feed-forward layers, activation layers, layer normalization layers, and most crucially, self-attention layers. @@ -484,7 +484,9 @@ We can observe that we only use roughly 100MB more GPU memory when passing a ver ```py flush() ``` -For more information on how to use Flash Attention, please have a look at [this doc page](https://huggingface.co/docs/transformers/v4.34.0/en/perf_infer_gpu_one#flash-attention-2). + +For more information on how to use Flash Attention, please have a look at [this doc page](https://huggingface.co/docs/transformers/en/perf_infer_gpu_one#flashattention-2). + ## 3. Architectural Innovations So far we have looked into improving computational and memory efficiency by: @@ -662,7 +664,15 @@ Using the key-value cache has two advantages: > One should *always* make use of the key-value cache as it leads to identical results and a significant speed-up for longer input sequences. Transformers has the key-value cache enabled by default when making use of the text pipeline or the [`generate` method](https://huggingface.co/docs/transformers/main_classes/text_generation). -Note that the key-value cache is especially useful for applications such as chat where multiple passes of auto-regressive decoding are required. Let's look at an example. + + +Note that, despite our advice to use key-value caches, your LLM output may be slightly different when you use them. This is a property of the matrix multiplication kernels themselves -- you can read more about it [here](https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535). + + + +#### 3.2.1 Multi-round conversation + +The key-value cache is especially useful for applications such as chat where multiple passes of auto-regressive decoding are required. Let's look at an example. ``` User: How many people live in France? @@ -672,14 +682,45 @@ Assistant: Germany has ca. 81 million inhabitants ``` In this chat, the LLM runs auto-regressive decoding twice: -- 1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step. -- 2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, it's computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 
81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`. + 1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step. + 2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, it's computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`. Two things should be noted here: 1. Keeping all the context is crucial for LLMs deployed in chat so that the LLM understands all the previous context of the conversation. E.g. for the example above the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`. 2. The key-value cache is extremely useful for chat as it allows us to continuously grow the encoded chat history instead of having to re-encode the chat history again from scratch (as e.g. would be the case when using an encoder-decoder architecture). -There is however one catch. While the required peak memory for the \\( \mathbf{QK}^T \\) matrix is significantly reduced, holding the key-value cache in memory can become very memory expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors for all previous input vectors \\( \mathbf{x}_i \text{, for } i \in \{1, \ldots, c - 1\} \\) for all self-attention layers and for all attention heads. +In `transformers`, a `generate` call will return `past_key_values` when `return_dict_in_generate=True` is passed, in addition to the default `use_cache=True`. Note that it is not yet available through the `pipeline` interface. 
+ +```python +# Generation as usual +prompt = system_prompt + "Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer: Here" +model_inputs = tokenizer(prompt, return_tensors='pt') +generation_output = model.generate(**model_inputs, max_new_tokens=60, return_dict_in_generate=True) +decoded_output = tokenizer.batch_decode(generation_output.sequences)[0] + +# Piping the returned `past_key_values` to speed up the next conversation round +prompt = decoded_output + "\nQuestion: How can I modify the function above to return Mega bytes instead?\n\nAnswer: Here" +model_inputs = tokenizer(prompt, return_tensors='pt') +generation_output = model.generate( + **model_inputs, + past_key_values=generation_output.past_key_values, + max_new_tokens=60, + return_dict_in_generate=True +) +tokenizer.batch_decode(generation_output.sequences)[0][len(prompt):] +``` + +**Output**: +``` + is a modified version of the function that returns Mega bytes instead. + +def bytes_to_megabytes(bytes): + return bytes / 1024 / 1024 + +Answer: The function takes a number of bytes as input and returns the number of +``` + +Great, no additional time is spent recomputing the same key and values for the attention layer! There is however one catch. While the required peak memory for the \\( \mathbf{QK}^T \\) matrix is significantly reduced, holding the key-value cache in memory can become very memory expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors for all previous input vectors \\( \mathbf{x}_i \text{, for } i \in \{1, \ldots, c - 1\} \\) for all self-attention layers and for all attention heads. Let's compute the number of float values that need to be stored in the key-value cache for the LLM `bigcode/octocoder` that we used before. The number of float values amounts to two times the sequence length times the number of attention heads times the attention head dimension and times the number of layers. @@ -696,11 +737,11 @@ config = model.config ``` Roughly 8 billion float values! Storing 8 billion float values in `float16` precision requires around 15 GB of RAM which is circa half as much as the model weights themselves! -Researchers have proposed two methods that allow to significantly reduce the memory cost of storing the key-value cache: +Researchers have proposed two methods that allow to significantly reduce the memory cost of storing the key-value cache, which are explored in the next subsections. - 1. [Multi-Query-Attention (MQA)](https://arxiv.org/abs/1911.02150) +#### 3.2.2 Multi-Query-Attention (MQA) -Multi-Query-Attention was proposed in Noam Shazeer's *Fast Transformer Decoding: One Write-Head is All You Need* paper. As the title says, Noam found out that instead of using `n_head` key-value projections weights, one can use a single head-value projection weight pair that is shared across all attention heads without that the model's performance significantly degrades. +[Multi-Query-Attention](https://arxiv.org/abs/1911.02150) was proposed in Noam Shazeer's *Fast Transformer Decoding: One Write-Head is All You Need* paper. As the title says, Noam found out that instead of using `n_head` key-value projections weights, one can use a single head-value projection weight pair that is shared across all attention heads without that the model's performance significantly degrades. 
> By using a single head-value projection weight pair, the key value vectors \\( \mathbf{k}_i, \mathbf{v}_i \\) have to be identical across all attention heads which in turn means that we only need to store 1 key-value projection pair in the cache instead of `n_head` ones. @@ -720,9 +761,9 @@ MQA has seen wide adoption by the community and is now used by many of the most Also, the checkpoint used in this notebook - `bigcode/octocoder` - makes use of MQA. - 2. [Grouped-Query-Attention (GQA)](https://arxiv.org/abs/2305.13245) +#### 3.2.3 Grouped-Query-Attention (GQA) -Grouped-Query-Attention, as proposed by Ainslie et al. from Google, found that using MQA can often lead to quality degradation compared to using vanilla multi-key-value head projections. The paper argues that more model performance can be kept by less drastically reducing the number of query head projection weights. Instead of using just a single key-value projection weight, `n < n_head` key-value projection weights should be used. By choosing `n` to a significantly smaller value than `n_head`, such as 2,4 or 8 almost all of the memory and speed gains from MQA can be kept while sacrificing less model capacity and thus arguably less performance. +[Grouped-Query-Attention](https://arxiv.org/abs/2305.13245), as proposed by Ainslie et al. from Google, found that using MQA can often lead to quality degradation compared to using vanilla multi-key-value head projections. The paper argues that more model performance can be kept by less drastically reducing the number of query head projection weights. Instead of using just a single key-value projection weight, `n < n_head` key-value projection weights should be used. By choosing `n` to a significantly smaller value than `n_head`, such as 2,4 or 8 almost all of the memory and speed gains from MQA can be kept while sacrificing less model capacity and thus arguably less performance. Moreover, the authors of GQA found out that existing model checkpoints can be *uptrained* to have a GQA architecture with as little as 5% of the original pre-training compute. While 5% of the original pre-training compute can still be a massive amount, GQA *uptraining* allows existing checkpoints to be useful for longer input sequences. @@ -731,6 +772,7 @@ The most notable application of GQA is [Llama-v2](https://huggingface.co/meta-ll > As a conclusion, it is strongly recommended to make use of either GQA or MQA if the LLM is deployed with auto-regressive decoding and is required to handle large input sequences as is the case for example for chat. + ## Conclusion The research community is constantly coming up with new, nifty ways to speed up inference time for ever-larger LLMs. As an example, one such promising research direction is [speculative decoding](https://arxiv.org/abs/2211.17192) where "easy tokens" are generated by smaller, faster language models and only "hard tokens" are generated by the LLM itself. Going into more detail is out of the scope of this notebook, but can be read upon in this [nice blog post](https://huggingface.co/blog/assisted-generation). 
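The memory argument behind MQA and GQA in the documentation patch above can be made concrete with a quick back-of-the-envelope calculation. The sketch below is purely illustrative and rests on assumptions: the helper `kv_cache_bytes` is not part of Transformers or of any patch in this series, the dimensions are a Llama-2-70B-like example (80 layers, 64 attention heads, head dimension 128, 8 key-value heads for GQA), and the 16,000-token sequence length mirrors the example used in the guide.

```python
# Back-of-the-envelope key-value cache size, in bytes, for half-precision values.
# Purely illustrative: the function and the dimensions below are assumptions for
# this sketch, not code or numbers taken from the patches in this series.

def kv_cache_bytes(seq_len, num_layers, num_kv_heads, head_dim, bytes_per_value=2):
    # Keys and values (factor 2) are stored for every layer and every key-value head.
    return 2 * seq_len * num_layers * num_kv_heads * head_dim * bytes_per_value


seq_len = 16_000  # long-context example, as in the guide above

# Llama-2-70B-like dimensions: 80 layers, 64 query heads, head_dim 128.
mha_cache = kv_cache_bytes(seq_len, num_layers=80, num_kv_heads=64, head_dim=128)  # vanilla MHA: one KV head per query head
gqa_cache = kv_cache_bytes(seq_len, num_layers=80, num_kv_heads=8, head_dim=128)   # GQA with 8 shared key-value heads

print(f"MHA cache: {mha_cache / 1024**3:.1f} GiB")
print(f"GQA cache: {gqa_cache / 1024**3:.1f} GiB")
```

With these example numbers, sharing each key-value head across a group of 8 query heads shrinks the float16 cache from roughly 39 GiB to under 5 GiB, which is the practical motivation the guide gives for adopting GQA in long-context, multi-turn chat deployments.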
From 851a4f7088281072bdcf95edaaf08c6984e28cfd Mon Sep 17 00:00:00 2001 From: Leo Tronchon Date: Tue, 21 Nov 2023 13:26:01 +0100 Subject: [PATCH 227/268] Idefics: Fix information leak with cross attention gate in modeling (#26839) * fix image_attention gate in idefics modeling * update comment * cleaner gating * fix gate condition * create attention gate once * update comment * update doc of cross-attention forward * improve comment * bring back no_images * pass cross_attention_gate similarly to no_images gate * add information on gate shape * fix no_images placement * make tests for gate * take off no_images logic * update test based on comments * raise value error if cross_attention_gate is None * send cross_attention_gate to device * Revert "send cross_attention_gate to device" This reverts commit 054f84228405bfa2e75fecc502f6a96dc83cdc0b. * send cross_attention_gate to device * fix device in test + nit * fill hidden_states with zeros instead of multiplying with the gate * style * Update src/transformers/models/idefics/modeling_idefics.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Update src/transformers/models/idefics/modeling_idefics.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --------- Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- .../models/idefics/modeling_idefics.py | 37 +++++++--- tests/models/idefics/test_modeling_idefics.py | 74 +++++++++++++++++++ 2 files changed, 100 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index f7881ddd39ed..46672f2e26b7 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -864,16 +864,20 @@ def forward( attention_mask: Optional[torch.Tensor] = None, image_hidden_states: Optional[torch.Tensor] = None, image_attention_mask: Optional[torch.Tensor] = None, + cross_attention_gate: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, past_key_value: Optional[Tuple[torch.Tensor]] = None, - no_images: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + cross_attention_gate (`torch.FloatTensor`, *optional*): + gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. @@ -881,7 +885,6 @@ def forward( If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - no_images (`bool`, *optional*, defaults to `False`): If `True` the vision part is ignored """ if image_hidden_states is None: raise ValueError( @@ -889,6 +892,11 @@ def forward( " conditioned on." ) + if cross_attention_gate is None: + raise ValueError( + "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images." + ) + if past_key_value is not None: raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.") @@ -904,9 +912,9 @@ def forward( output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) - # when there are no images the model is used in pure language mode - gate = 0 if no_images else 1 - hidden_states = residual + gate * self.act_cross_attn(self.alpha_cross_attn) * hidden_states + # Fill in zeros for cross_attention hidden_states of tokens attending to no images + hidden_states[cross_attention_gate == 0] = hidden_states[cross_attention_gate == 0].fill_(0) + hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states # Fully Connected residual = hidden_states @@ -1166,14 +1174,12 @@ def forward( ) position_ids = position_ids.unsqueeze(0) - no_images = False if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2: raise ValueError( "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None." ) elif pixel_values is not None: - no_images = len(torch.nonzero(pixel_values)) == 0 pixel_values = pixel_values.to(dtype=self.dtype, device=device) # fp16 compatibility batch_size, num_images = pixel_values.shape[:2] pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:]) @@ -1218,6 +1224,15 @@ def forward( else: image_attention_mask = None + # cross_attention_gate: + # For any tokens attending to no images, the hidden_states comming out of the cross-attention should be zeroed-out. + # `image_attention_mask` has shape [bsz, 1, num_images, hidden_size] with elements equal to either 0.0 or a very negative number. + # If any of the elements are 0.0, then the token is attending to at least one image and the gate value is 1. Otherwise the gate value is 0. + # `cross_attention_gate` has shape [bsz, seq_len] with elements equal to either 0.0 or 1.0. 
+ cross_attention_gate = ((((image_attention_mask == 0.0).any(dim=-1)).to(dtype=self.dtype)).squeeze(dim=1)).to( + device + ) + if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions @@ -1257,9 +1272,9 @@ def vblock( past_key_value, image_hidden_states, image_attention_mask, + cross_attention_gate, output_attentions, use_cache, - no_images, layer_idx, cross_layer_interval, gated_cross_attn_layers, @@ -1272,10 +1287,10 @@ def vblock( attention_mask=attention_mask, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, + cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, past_key_value=None, # not implemented - no_images=no_images, ) hidden_states = outputs[0] @@ -1307,9 +1322,9 @@ def vblock( past_key_value, image_hidden_states, image_attention_mask, + cross_attention_gate, output_attentions, use_cache, - no_images, idx, self.cross_layer_interval, self.gated_cross_attn_layers, @@ -1323,9 +1338,9 @@ def vblock( past_key_value=past_key_value, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, + cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, - no_images=no_images, layer_idx=idx, cross_layer_interval=self.cross_layer_interval, gated_cross_attn_layers=self.gated_cross_attn_layers, diff --git a/tests/models/idefics/test_modeling_idefics.py b/tests/models/idefics/test_modeling_idefics.py index ffd46dd197dc..1e2bb12ee460 100644 --- a/tests/models/idefics/test_modeling_idefics.py +++ b/tests/models/idefics/test_modeling_idefics.py @@ -71,6 +71,7 @@ def __init__( type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, + alpha_initializer="ones", num_labels=3, scope=None, modality_type_vocab_size=2, @@ -108,6 +109,7 @@ def __init__( self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range + self.alpha_initializer = alpha_initializer self.num_labels = num_labels self.scope = scope self.modality_type_vocab_size = modality_type_vocab_size @@ -167,6 +169,57 @@ def prepare_config_and_inputs(self, num_images=1, interpolate_pos_encoding=False config = self.get_config() return (config, input_ids, input_mask, pixel_values, image_attention_mask, interpolate_pos_encoding) + def prepare_config_and_inputs_gate_tests(self): + # Create a list of configs and inputs, to test 2 things: + # 1. For the same image, the output should be different when image_attention_mask is filled with 0s vs filled with 1s. + # 2. For 2 different images, the output should be the same when image_attention_mask is filled with 0s. 
+ + interpolate_pos_encoding = False + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + pixel_values = floats_tensor( + [ + self.batch_size, + 1, + self.num_channels, + self.image_size, + self.image_size, + ] + ) + pixel_values_list = [ + pixel_values.clone(), + pixel_values.clone(), + pixel_values.clone().fill_(0.6), + pixel_values.clone().fill_(0.3), + ] + attention_mask = None + if self.use_input_mask: + attention_mask = random_attention_mask([self.batch_size, self.seq_length]) + + image_attention_mask = random_attention_mask([self.batch_size, self.seq_length, 1]) + image_attention_mask_list = [ + image_attention_mask.clone().fill_(0), + image_attention_mask.clone().fill_(1), + image_attention_mask.clone().fill_(0), + image_attention_mask.clone().fill_(0), + ] + + config = self.get_config() + inputs_list = [] + for pixel_values, image_attention_mask in zip(pixel_values_list, image_attention_mask_list): + inputs_list.append( + { + "input_ids": input_ids, + "attention_mask": attention_mask, + "pixel_values": pixel_values, + "image_attention_mask": image_attention_mask, + "interpolate_pos_encoding": interpolate_pos_encoding, + } + ) + + inputs_w_same_img = inputs_list[:2] + inputs_w_0_img_attn = inputs_list[2:] + return config, inputs_w_same_img, inputs_w_0_img_attn + def get_config(self): return IdeficsConfig( image_size=self.image_size, @@ -184,6 +237,7 @@ def get_config(self): type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, + alpha_initializer=self.alpha_initializer, num_labels=self.num_labels, modality_type_vocab_size=self.modality_type_vocab_size, vision_config=self.vision_config, @@ -337,6 +391,26 @@ def test_generate_with_image_pos_embeddings_interpolation_multiple_images(self): ) self.model_tester.create_and_check_model_gen(*config_and_inputs) + def test_cross_attention_gates(self): + config, inputs_w_same_img, inputs_w_0_img_attn = self.model_tester.prepare_config_and_inputs_gate_tests() + + model = IdeficsModel(config=config).to(torch_device) + model.eval() + test_1_results = [] + for inputs in inputs_w_same_img: + with torch.no_grad(): + last_hidden_states = model(**inputs).last_hidden_state + last_hidden_states = model(**inputs).last_hidden_state + test_1_results.append(last_hidden_states) + self.assertNotEqual(test_1_results[0].sum().item(), test_1_results[1].sum().item()) + + test_2_results = [] + for inputs in inputs_w_0_img_attn: + with torch.no_grad(): + last_hidden_states = model(**inputs).last_hidden_state + test_2_results.append(last_hidden_states) + self.assertEqual(test_2_results[0].sum().item(), test_2_results[1].sum().item()) + def test_training(self): if not self.model_tester.is_training: return From f93c1e9eceafde40b1d33fbb03834de97556706c Mon Sep 17 00:00:00 2001 From: fxmarty <9808326+fxmarty@users.noreply.github.com> Date: Tue, 21 Nov 2023 14:55:13 +0100 Subject: [PATCH 228/268] Add RoCm scheduled CI & upgrade RoCm CI to PyTorch 2.1 (#26940) * add scheduled ci on amdgpu * fix likely typo * more tests, avoid parallelism * precise comment * fix report channel * trigger docker build on this branch * fix * fix * run rocm scheduled ci * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix --------- Co-authored-by: ydshieh --- .../workflows/self-push-amd-mi210-caller.yml | 2 +- .../workflows/self-push-amd-mi250-caller.yml | 2 +- .../workflows/self-scheduled-amd-caller.yml | 25 + .github/workflows/self-scheduled-amd.yml | 461 ++++++++++++++++++ 
.../transformers-pytorch-amd-gpu/Dockerfile | 21 +- utils/notification_service.py | 7 +- 6 files changed, 505 insertions(+), 13 deletions(-) create mode 100644 .github/workflows/self-scheduled-amd-caller.yml create mode 100644 .github/workflows/self-scheduled-amd.yml diff --git a/.github/workflows/self-push-amd-mi210-caller.yml b/.github/workflows/self-push-amd-mi210-caller.yml index 5dd010ef66d8..918cdbcdbceb 100644 --- a/.github/workflows/self-push-amd-mi210-caller.yml +++ b/.github/workflows/self-push-amd-mi210-caller.yml @@ -18,7 +18,7 @@ on: jobs: run_amd_ci: name: AMD mi210 - if: (cancelled() != true) && ((github.event_name != 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller'))) + if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller'))) uses: ./.github/workflows/self-push-amd.yml with: gpu_flavor: mi210 diff --git a/.github/workflows/self-push-amd-mi250-caller.yml b/.github/workflows/self-push-amd-mi250-caller.yml index a55378c4caa5..fb139b28a03c 100644 --- a/.github/workflows/self-push-amd-mi250-caller.yml +++ b/.github/workflows/self-push-amd-mi250-caller.yml @@ -18,7 +18,7 @@ on: jobs: run_amd_ci: name: AMD mi250 - if: (cancelled() != true) && ((github.event_name != 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller'))) + if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller'))) uses: ./.github/workflows/self-push-amd.yml with: gpu_flavor: mi250 diff --git a/.github/workflows/self-scheduled-amd-caller.yml b/.github/workflows/self-scheduled-amd-caller.yml new file mode 100644 index 000000000000..4755bd868249 --- /dev/null +++ b/.github/workflows/self-scheduled-amd-caller.yml @@ -0,0 +1,25 @@ +name: Self-hosted runner (AMD scheduled CI caller) + +on: + schedule: + - cron: "17 2 * * *" + push: + branches: + - run_amd_scheduled_ci_caller* + +jobs: + run_amd_ci_mi210: + name: AMD mi210 + if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_scheduled_ci_caller'))) + uses: ./.github/workflows/self-scheduled-amd.yml + with: + gpu_flavor: mi210 + secrets: inherit + + run_amd_ci_mi250: + name: AMD mi250 + if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_scheduled_ci_caller'))) + uses: ./.github/workflows/self-scheduled-amd.yml + with: + gpu_flavor: mi250 + secrets: inherit diff --git a/.github/workflows/self-scheduled-amd.yml b/.github/workflows/self-scheduled-amd.yml new file mode 100644 index 000000000000..17e907e40a57 --- /dev/null +++ b/.github/workflows/self-scheduled-amd.yml @@ -0,0 +1,461 @@ +name: Self-hosted runner (scheduled-amd) + +# Note: For the AMD CI, we rely on a caller workflow and on the workflow_call event to trigger the +# CI in order to run it on both MI210 and MI250, without having to use matrix here which pushes +# us towards the limit of allowed jobs on GitHub Actions. 
+on: + workflow_call: + inputs: + gpu_flavor: + required: true + type: string + +env: + HF_HOME: /mnt/cache + TRANSFORMERS_IS_CI: yes + OMP_NUM_THREADS: 8 + MKL_NUM_THREADS: 8 + RUN_SLOW: yes + SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} + + +# Important note: each job (run_tests_single_gpu, run_tests_multi_gpu, run_examples_gpu, run_pipelines_torch_gpu) requires all the previous jobs before running. +# This is done so that we avoid parallelizing the scheduled tests, to leave available +# runners for the push CI that is running on the same machine. +jobs: + check_runner_status: + name: Check Runner Status + runs-on: ubuntu-22.04 + steps: + - name: Checkout transformers + uses: actions/checkout@v3 + with: + fetch-depth: 2 + + - name: Check Runner Status + run: python utils/check_self_hosted_runner.py --target_runners hf-amd-mi210-ci-1gpu-1,hf-amd-mi250-ci-1gpu-1 --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + + check_runners: + name: Check Runners + needs: check_runner_status + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] + container: + image: huggingface/transformers-pytorch-amd-gpu + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + steps: + - name: ROCM-SMI + run: | + rocm-smi + - name: ROCM-INFO + run: | + rocminfo | grep "Agent" -A 14 + - name: Show ROCR environment + run: | + echo "ROCR: $ROCR_VISIBLE_DEVICES" + + setup: + name: Setup + needs: check_runners + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] + container: + image: huggingface/transformers-pytorch-amd-gpu + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - name: Update clone + working-directory: /transformers + run: | + git fetch && git checkout ${{ github.sha }} + + - name: Cleanup + working-directory: /transformers + run: | + rm -rf tests/__pycache__ + rm -rf tests/models/__pycache__ + rm -rf reports + + - name: Show installed libraries and their versions + working-directory: /transformers + run: pip freeze + + - id: set-matrix + name: Identify models to test + working-directory: /transformers/tests + run: | + echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT + + - name: ROCM-SMI + run: | + rocm-smi + + - name: ROCM-INFO + run: | + rocminfo | grep "Agent" -A 14 + - name: Show ROCR environment + run: | + echo "ROCR: $ROCR_VISIBLE_DEVICES" + + - name: Environment + working-directory: /transformers + run: | + python3 utils/print_env.py + + run_tests_single_gpu: + name: Single GPU tests + strategy: + max-parallel: 1 # For now, not to parallelize. Can change later if it works well. 
+ fail-fast: false + matrix: + folders: ${{ fromJson(needs.setup.outputs.matrix) }} + machine_type: [single-gpu] + runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] + container: + image: huggingface/transformers-pytorch-amd-gpu + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + needs: setup + steps: + - name: Echo folder ${{ matrix.folders }} + shell: bash + # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to + # set the artifact folder names (because the character `/` is not allowed). + run: | + echo "${{ matrix.folders }}" + matrix_folders=${{ matrix.folders }} + matrix_folders=${matrix_folders/'models/'/'models_'} + echo "$matrix_folders" + echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV + + - name: Update clone + working-directory: /transformers + run: git fetch && git checkout ${{ github.sha }} + + - name: Reinstall transformers in edit mode (remove the one installed during docker image build) + working-directory: /transformers + run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . + + - name: ROCM-SMI + run: | + rocm-smi + - name: ROCM-INFO + run: | + rocminfo | grep "Agent" -A 14 + - name: Show ROCR environment + run: | + echo "ROCR: $ROCR_VISIBLE_DEVICES" + + - name: Environment + working-directory: /transformers + run: | + python3 utils/print_env.py + + - name: Show installed libraries and their versions + working-directory: /transformers + run: pip freeze + + - name: Run all tests on GPU + working-directory: /transformers + run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }} + + - name: Failure short reports + if: ${{ failure() }} + continue-on-error: true + run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt + + - name: Test suite reports artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports + path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} + + run_tests_multi_gpu: + name: Multi GPU tests + strategy: + max-parallel: 1 + fail-fast: false + matrix: + folders: ${{ fromJson(needs.setup.outputs.matrix) }} + machine_type: [multi-gpu] + runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] + container: + image: huggingface/transformers-pytorch-amd-gpu + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + needs: setup + steps: + - name: Echo folder ${{ matrix.folders }} + shell: bash + # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to + # set the artifact folder names (because the character `/` is not allowed). 
+ run: | + echo "${{ matrix.folders }}" + matrix_folders=${{ matrix.folders }} + matrix_folders=${matrix_folders/'models/'/'models_'} + echo "$matrix_folders" + echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV + + - name: Update clone + working-directory: /transformers + run: git fetch && git checkout ${{ github.sha }} + + - name: Reinstall transformers in edit mode (remove the one installed during docker image build) + working-directory: /transformers + run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . + + - name: ROCM-SMI + run: | + rocm-smi + - name: ROCM-INFO + run: | + rocminfo | grep "Agent" -A 14 + - name: Show ROCR environment + run: | + echo "ROCR: $ROCR_VISIBLE_DEVICES" + + - name: Environment + working-directory: /transformers + run: | + python3 utils/print_env.py + + - name: Show installed libraries and their versions + working-directory: /transformers + run: pip freeze + + - name: Run all tests on GPU + working-directory: /transformers + run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }} + + - name: Failure short reports + if: ${{ failure() }} + continue-on-error: true + run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt + + - name: Test suite reports artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports + path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} + + run_examples_gpu: + name: Examples tests + strategy: + fail-fast: false + matrix: + machine_type: [single-gpu] + runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] + container: + image: huggingface/transformers-pytorch-amd-gpu + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + needs: setup + steps: + - name: Update clone + working-directory: /transformers + run: git fetch && git checkout ${{ github.sha }} + + - name: Reinstall transformers in edit mode (remove the one installed during docker image build) + working-directory: /transformers + run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . 
+ + - name: ROCM-SMI + run: | + rocm-smi + - name: ROCM-INFO + run: | + rocminfo | grep "Agent" -A 14 + - name: Show ROCR environment + run: | + echo "ROCR: $ROCR_VISIBLE_DEVICES" + + - name: Environment + working-directory: /transformers + run: | + python3 utils/print_env.py + + - name: Show installed libraries and their versions + working-directory: /transformers + run: pip freeze + + - name: Run examples tests on GPU + working-directory: /transformers + run: | + pip install -r examples/pytorch/_tests_requirements.txt + python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_examples_gpu examples/pytorch + + - name: Failure short reports + if: ${{ failure() }} + continue-on-error: true + run: cat /transformers/reports/${{ matrix.machine_type }}_examples_gpu/failures_short.txt + + - name: Test suite reports artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.machine_type }}_run_examples_gpu + path: /transformers/reports/${{ matrix.machine_type }}_examples_gpu + + run_pipelines_torch_gpu: + name: PyTorch pipelines tests + strategy: + fail-fast: false + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}'] + container: + image: huggingface/transformers-pytorch-amd-gpu + options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + needs: setup + steps: + - name: Update clone + working-directory: /transformers + run: git fetch && git checkout ${{ github.sha }} + + - name: Reinstall transformers in edit mode (remove the one installed during docker image build) + working-directory: /transformers + run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . 
+ + - name: ROCM-SMI + run: | + rocm-smi + - name: ROCM-INFO + run: | + rocminfo | grep "Agent" -A 14 + - name: Show ROCR environment + run: | + echo "ROCR: $ROCR_VISIBLE_DEVICES" + + - name: Environment + working-directory: /transformers + run: | + python3 utils/print_env.py + + - name: Show installed libraries and their versions + working-directory: /transformers + run: pip freeze + + - name: Run all pipeline tests on GPU + working-directory: /transformers + run: | + python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines + + - name: Failure short reports + if: ${{ failure() }} + continue-on-error: true + run: cat /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu/failures_short.txt + + - name: Test suite reports artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu + path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu + + run_extract_warnings: + name: Extract warnings in CI artifacts + runs-on: ubuntu-22.04 + if: always() + needs: [ + check_runner_status, + check_runners, + setup, + run_tests_single_gpu, + run_tests_multi_gpu, + run_examples_gpu, + run_pipelines_torch_gpu, + # run_all_tests_torch_cuda_extensions_gpu + ] + steps: + - name: Checkout transformers + uses: actions/checkout@v3 + with: + fetch-depth: 2 + + - name: Install transformers + run: pip install transformers + + - name: Show installed libraries and their versions + run: pip freeze + + - name: Create output directory + run: mkdir warnings_in_ci + + - uses: actions/download-artifact@v3 + with: + path: warnings_in_ci + + - name: Show artifacts + run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')" + working-directory: warnings_in_ci + + - name: Extract warnings in CI artifacts + run: | + python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh + echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')" + + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: warnings_in_ci + path: warnings_in_ci/selected_warnings.json + + send_results: + name: Send results to webhook + runs-on: ubuntu-22.04 + if: always() + needs: [ + check_runner_status, + check_runners, + setup, + run_tests_single_gpu, + run_tests_multi_gpu, + run_examples_gpu, + run_pipelines_torch_gpu, + # run_all_tests_torch_cuda_extensions_gpu, + run_extract_warnings + ] + steps: + - name: Preliminary job status + shell: bash + # For the meaning of these environment variables, see the job `Setup` + run: | + echo "Runner availability: ${{ needs.check_runner_status.result }}" + echo "Runner status: ${{ needs.check_runners.result }}" + echo "Setup status: ${{ needs.setup.result }}" + + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 + - name: Send message to Slack + env: + CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }} + CI_SLACK_CHANNEL_ID_DAILY_AMD: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_AMD }} + CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} + CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_AMD }} + ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + CI_EVENT: Scheduled CI (AMD) + CI_SHA: ${{ github.sha }} + CI_WORKFLOW_REF: ${{ 
github.workflow_ref }} + RUNNER_STATUS: ${{ needs.check_runner_status.result }} + RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} + SETUP_STATUS: ${{ needs.setup.result }} + # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change + # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. + run: | + sudo apt-get install -y curl + pip install slack_sdk + pip show slack_sdk + python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" + + # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack. + - name: Failure table artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: test_failure_tables + path: test_failure_tables diff --git a/docker/transformers-pytorch-amd-gpu/Dockerfile b/docker/transformers-pytorch-amd-gpu/Dockerfile index f19cd4edb0e4..216ff4c43855 100644 --- a/docker/transformers-pytorch-amd-gpu/Dockerfile +++ b/docker/transformers-pytorch-amd-gpu/Dockerfile @@ -1,23 +1,24 @@ -FROM rocm/pytorch:rocm5.6_ubuntu20.04_py3.8_pytorch_2.0.1 +FROM rocm/dev-ubuntu-20.04:5.6 +# rocm/pytorch has no version with 2.1.0 LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive +ARG PYTORCH='2.1.0' +ARG TORCH_VISION='0.16.0' +ARG TORCH_AUDIO='2.1.0' +ARG ROCM='5.6' + RUN apt update && \ - apt install -y --no-install-recommends git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg && \ + apt install -y --no-install-recommends git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-dev python3-pip ffmpeg && \ apt clean && \ rm -rf /var/lib/apt/lists/* -RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools ninja git+https://github.com/facebookresearch/detectron2.git pytesseract "itsdangerous<2.1.0" +RUN python3 -m pip install --no-cache-dir --upgrade pip -# If set to nothing, will install the latest version -ARG PYTORCH='2.0.1' -ARG TORCH_VISION='0.15.2' -ARG TORCH_AUDIO='2.0.2' -ARG ROCM='5.6' +RUN python3 -m pip install torch==$PYTORCH torchvision==$TORCH_VISION torchaudio==$TORCH_AUDIO --index-url https://download.pytorch.org/whl/rocm$ROCM -RUN git clone --depth 1 --branch v$TORCH_AUDIO https://github.com/pytorch/audio.git -RUN cd audio && USE_ROCM=1 USE_CUDA=0 python setup.py install +RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools ninja git+https://github.com/facebookresearch/detectron2.git pytesseract "itsdangerous<2.1.0" ARG REF=main WORKDIR / diff --git a/utils/notification_service.py b/utils/notification_service.py index 4df3b2994484..548d3a9b2d83 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -968,10 +968,15 @@ def prepare_reports(title, header, reports, to_truncate=True): "Torch CUDA extension tests": "run_tests_torch_cuda_extensions_gpu_test_reports", } - if ci_event in ["push", "Nightly CI"] or ci_event.startswith("Past CI") or ci_event.startswith("Push CI (AMD)"): + if ci_event in ["push", "Nightly CI"] or ci_event.startswith("Past CI"): del additional_files["Examples directory"] del additional_files["PyTorch pipelines"] del additional_files["TensorFlow pipelines"] + elif ci_event.startswith("Scheduled CI (AMD)"): + del additional_files["TensorFlow pipelines"] + del additional_files["Torch CUDA extension tests"] + elif ci_event.startswith("Push CI (AMD)"): + additional_files = {} additional_results = { key: { From 82cc0a79ac796184806a137a7000ad1b2036fe5b Mon Sep 17 00:00:00 2001 From: fxmarty 
<9808326+fxmarty@users.noreply.github.com> Date: Tue, 21 Nov 2023 15:20:44 +0100 Subject: [PATCH 229/268] Fix flash attention bugs with Mistral and Falcon (#27625) * fix various bugs with flash attention * bump * fix test * fix mistral * use skiptest instead of return that may be misleading * fix on review --- .../models/falcon/modeling_falcon.py | 6 +++ .../models/mistral/modeling_mistral.py | 2 +- tests/models/llama/test_modeling_llama.py | 2 + tests/models/mistral/test_modeling_mistral.py | 54 +++++++++++-------- tests/test_modeling_common.py | 18 +++---- 5 files changed, 50 insertions(+), 32 deletions(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index d4c647c846fa..e7538eb40bd4 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -564,6 +564,12 @@ def forward( past_key_value = (key_layer, value_layer) if use_cache else None + # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache + # to be able to avoid many of these transpose/reshape/view. + query_layer = query_layer.transpose(1, 2) + key_layer = key_layer.transpose(1, 2) + value_layer = value_layer.transpose(1, 2) + if alibi is not None: raise ValueError("`alibi` is not supported when `use_flash_attn` is True") diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 3aefb03d8c6d..e56ebc031066 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -838,7 +838,7 @@ def forward( attention_mask is not None and hasattr(self.config, "_flash_attn_2_enabled") and self.config._flash_attn_2_enabled - and past_key_values is not None + and use_cache ): is_padding_right = attention_mask[:, -1].sum().item() != batch_size if is_padding_right: diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index 21fb4f44d2b8..55b36c7102a6 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -22,6 +22,7 @@ from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import ( + require_bitsandbytes, require_flash_attn, require_torch, require_torch_accelerator, @@ -385,6 +386,7 @@ def test_model_rope_scaling(self, scaling_type): @require_flash_attn @require_torch_gpu + @require_bitsandbytes @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index b30e70ba71f9..31426435d09f 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -375,9 +375,6 @@ def test_flash_attn_2_generate_padding_right(self): import torch for model_class in self.all_generative_model_classes: - if not model_class._supports_flash_attn_2: - return - config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) @@ -405,36 +402,49 @@ def test_flash_attn_2_generate_padding_right(self): @require_torch_gpu @pytest.mark.flash_attn_test @slow - def test_flash_attn_2_inference_padding_right(self): + def test_flash_attn_2_generate_use_cache(self): import torch - for model_class in self.all_model_classes: - if not 
model_class._supports_flash_attn_2: - return + max_new_tokens = 30 + + for model_class in self.all_generative_model_classes: + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + dummy_input = inputs_dict[model_class.main_input_name] + if dummy_input.dtype in [torch.float32, torch.bfloat16]: + dummy_input = dummy_input.to(torch.float16) + + # make sure that all models have enough positions for generation + if hasattr(config, "max_position_embeddings"): + config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 - config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) - model_fa = model_class.from_pretrained( - tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True - ) - model_fa.to(torch_device) + + dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) + # NOTE: Mistral apparently does not support right padding + use_cache with FA2. + dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( - tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False - ) - model.to(torch_device) + tmpdirname, + torch_dtype=torch.float16, + use_flash_attention_2=True, + low_cpu_mem_usage=True, + ).to(torch_device) - dummy_input = torch.LongTensor([[1, 2, 3, 4, 5]]).to(torch_device) - dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1, 0]]).to(torch_device) + # Just test that a large cache works as expected + _ = model.generate( + dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False + ) - _ = model(dummy_input, output_hidden_states=True).hidden_states[-1] - with self.assertRaises(ValueError): - _ = model_fa( - dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True - ).hidden_states[-1] + @require_flash_attn + @require_torch_gpu + @pytest.mark.flash_attn_test + @slow + def test_flash_attn_2_inference_padding_right(self): + self.skipTest("Mistral flash attention does not support right padding") @require_torch diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 49d64dc207f1..9d9e96db4347 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2835,7 +2835,7 @@ def test_flash_attn_2_conversion(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") model = model_class(config) @@ -2860,7 +2860,7 @@ def test_flash_attn_2_inference(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) @@ -2957,7 +2957,7 @@ def test_flash_attn_2_inference_padding_right(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) @@ -3050,7 +3050,7 @@ def test_flash_attn_2_generate_left_padding(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) @@ -3093,7 +3093,7 @@ def test_flash_attn_2_generate_padding_right(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) @@ -3109,7 +3109,7 @@ def test_flash_attn_2_generate_padding_right(self): dummy_input = dummy_input.to(torch.float16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) - # make sure we do left padding + # make sure we do right padding dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 @@ -3138,7 +3138,7 @@ def test_flash_attn_2_generate_use_cache(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -3179,7 +3179,7 @@ def test_flash_attn_2_fp32_ln(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) @@ -3279,7 +3279,7 @@ def test_flash_attn_2_from_config(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: - return + self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, _ = self.model_tester.prepare_config_and_inputs_for_common() # TODO: to change it in the future with other relevant auto classes From 0145c6825e488b2bfa1bbf403a6b92f754043ed3 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Tue, 21 Nov 2023 14:28:38 +0000 Subject: [PATCH 230/268] Fix tracing dinov2 (#27561) * Enable tracing with DINOv2 model * ABC * Add note to model doc --- docs/source/en/model_doc/dinov2.md | 31 +++++++++++++++++++ .../models/dinov2/modeling_dinov2.py | 2 +- src/transformers/utils/fx.py | 1 + tests/models/dinov2/test_modeling_dinov2.py | 2 +- 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/docs/source/en/model_doc/dinov2.md b/docs/source/en/model_doc/dinov2.md index 49a5bd3e260f..72a0478924f4 100644 --- a/docs/source/en/model_doc/dinov2.md +++ b/docs/source/en/model_doc/dinov2.md @@ -25,6 +25,37 @@ The abstract from the paper is the following: This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/dinov2). +## Usage tips + +The model can be traced using `torch.jit.trace` which leverages JIT compilation to optimize the model making it faster to run. Note this still produces some mis-matched elements and the difference between the original model and the traced model is of the order of 1e-4. 
+ +```python +import torch +from transformers import AutoImageProcessor, AutoModel +from PIL import Image +import requests + +url = 'http://images.cocodataset.org/val2017/000000039769.jpg' +image = Image.open(requests.get(url, stream=True).raw) + +processor = AutoImageProcessor.from_pretrained('facebook/dinov2-base') +model = AutoModel.from_pretrained('facebook/dinov2-base') + +inputs = processor(images=image, return_tensors="pt") +outputs = model(**inputs) +last_hidden_states = outputs[0] + +# We have to force return_dict=False for tracing +model.config.return_dict = False + +with torch.no_grad(): + traced_model = torch.jit.trace(model, [inputs.pixel_values]) + traced_outputs = traced_model(inputs.pixel_values) + +print((last_hidden_states - traced_outputs[0]).abs().max()) +``` + + ## Dinov2Config [[autodoc]] Dinov2Config diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py index e6a17e570743..66bac639f673 100644 --- a/src/transformers/models/dinov2/modeling_dinov2.py +++ b/src/transformers/models/dinov2/modeling_dinov2.py @@ -105,7 +105,7 @@ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, - scale_factor=(height / math.sqrt(num_positions), width / math.sqrt(num_positions)), + scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))), mode="bicubic", align_corners=False, ) diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index 50320dabb704..1559da0e53c6 100755 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -122,6 +122,7 @@ def _generate_supported_model_class_names( "convnext", "deberta", "deberta-v2", + "dinov2", "distilbert", "donut-swin", "electra", diff --git a/tests/models/dinov2/test_modeling_dinov2.py b/tests/models/dinov2/test_modeling_dinov2.py index a040356fb798..4e3839749bdd 100644 --- a/tests/models/dinov2/test_modeling_dinov2.py +++ b/tests/models/dinov2/test_modeling_dinov2.py @@ -221,7 +221,7 @@ class Dinov2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else {} ) - fx_compatible = False + fx_compatible = True test_pruning = False test_resize_embeddings = False From f5c9738f61a4221e447bbf0c3a0496bd11583031 Mon Sep 17 00:00:00 2001 From: "Hz, Ji" Date: Wed, 22 Nov 2023 00:09:35 +0800 Subject: [PATCH 231/268] remove the deprecated method `init_git_repo` (#27617) * remove deprecated method `init_git_repo` * make style --- src/transformers/trainer.py | 59 +------------------------------------ 1 file changed, 1 insertion(+), 58 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 25100dad69bc..0bb123d0e7c2 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -48,7 +48,7 @@ import numpy as np import torch import torch.distributed as dist -from huggingface_hub import Repository, create_repo, upload_folder +from huggingface_hub import create_repo, upload_folder from packaging import version from torch import nn from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler @@ -3496,63 +3496,6 @@ def init_hf_repo(self): self.hub_model_id = repo_url.repo_id self.push_in_progress = None - def init_git_repo(self, at_init: bool = False): - """ - Initializes a git repo in `self.args.hub_model_id`. 
- - - - This function is deprecated and will be removed in v4.34.0 of Transformers. - - - - Args: - at_init (`bool`, *optional*, defaults to `False`): - Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is - `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped - out. - """ - warnings.warn( - "`Trainer.init_git_repo` is deprecated and will be removed in v4.34.0 of Transformers. Use " - "`Trainer.init_hf_repo` instead." - ) - if not self.is_world_process_zero(): - return - - # Make sure the repo exists + retrieve "real" repo_id - repo_name = self.args.hub_model_id - if repo_name is None: - repo_name = Path(self.args.output_dir).absolute().name - repo_id = create_repo( - repo_id=repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True - ).repo_id - - try: - self.repo = Repository(self.args.output_dir, clone_from=repo_id, token=self.args.hub_token) - except EnvironmentError: - if self.args.overwrite_output_dir and at_init: - # Try again after wiping output_dir - shutil.rmtree(self.args.output_dir) - self.repo = Repository(self.args.output_dir, clone_from=repo_id, token=self.args.hub_token) - else: - raise - - self.repo.git_pull() - - # By default, ignore the checkpoint folders - if ( - not os.path.exists(os.path.join(self.args.output_dir, ".gitignore")) - and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS - ): - with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer: - writer.writelines(["checkpoint-*/"]) - - # Add "*.sagemaker" to .gitignore if using SageMaker - if os.environ.get("SM_TRAINING_ENV"): - self._add_sm_patterns_to_gitignore() - - self.push_in_progress = None - def create_model_card( self, language: Optional[str] = None, From c770600fde5be461d17e741827e082d0128f35c8 Mon Sep 17 00:00:00 2001 From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com> Date: Wed, 22 Nov 2023 00:41:55 +0800 Subject: [PATCH 232/268] TVP model (#25856) * tvp model for video grounding add tokenizer auto fix param in TVPProcessor add docs clear comments and enable different torch dtype add image processor test and model test and fix code style * fix conflict * fix model doc * fix image processing tests * fix tvp tests * remove torch in processor * fix grammar error * add more details on tvp.md * fix model arch for loss, grammar, and processor * add docstring and do not regard TvpTransformer, TvpVisionModel as individual model * use pad_image * update copyright * control first downsample stride * reduce first only works for ResNetBottleNeckLayer * fix param name * fix style * add testing * fix style * rm init_weight * fix style * add post init * fix comments * do not test TvpTransformer * fix warning * fix style * fix example * fix config map * add link in config * fix comments * fix style * rm useless param * change attention * change test * add notes * fix comments * fix tvp * import checkpointing * fix gradient checkpointing * Use a more accurate example in readme * update * fix copy * fix style * update readme * delete print * remove tvp test_forward_signature * remove TvpTransformer * fix test init model * merge main and make style * fix tests and others * fix image processor * fix style and model_input_names * fix tests --- README.md | 1 + README_es.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + 
docs/source/en/model_doc/tvp.md | 186 ++++ docs/source/ms/index.md | 2 + src/transformers/__init__.py | 26 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/image_processing_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 1 + .../models/auto/processing_auto.py | 1 + .../models/auto/tokenization_auto.py | 1 + src/transformers/models/tvp/__init__.py | 80 ++ .../models/tvp/configuration_tvp.py | 175 ++++ .../models/tvp/image_processing_tvp.py | 476 ++++++++++ src/transformers/models/tvp/modeling_tvp.py | 890 ++++++++++++++++++ src/transformers/models/tvp/processing_tvp.py | 154 +++ src/transformers/utils/dummy_pt_objects.py | 24 + .../utils/dummy_vision_objects.py | 7 + tests/models/tvp/__init__.py | 0 tests/models/tvp/test_image_processing_tvp.py | 306 ++++++ tests/models/tvp/test_modeling_tvp.py | 261 +++++ utils/check_repo.py | 3 +- 29 files changed, 2607 insertions(+), 1 deletion(-) create mode 100644 docs/source/en/model_doc/tvp.md create mode 100644 src/transformers/models/tvp/__init__.py create mode 100644 src/transformers/models/tvp/configuration_tvp.py create mode 100644 src/transformers/models/tvp/image_processing_tvp.py create mode 100644 src/transformers/models/tvp/modeling_tvp.py create mode 100644 src/transformers/models/tvp/processing_tvp.py create mode 100644 tests/models/tvp/__init__.py create mode 100644 tests/models/tvp/test_image_processing_tvp.py create mode 100644 tests/models/tvp/test_modeling_tvp.py diff --git a/README.md b/README.md index 12724e60a188..80444a56a312 100644 --- a/README.md +++ b/README.md @@ -489,6 +489,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. 1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal. +1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding. 1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. 
**[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. diff --git a/README_es.md b/README_es.md index 5cdbc27ec791..f7f4f14fb002 100644 --- a/README_es.md +++ b/README_es.md @@ -464,6 +464,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. 1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal. +1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding. 1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. diff --git a/README_hd.md b/README_hd.md index 01937532f967..d2e1ce6e9e7a 100644 --- a/README_hd.md +++ b/README_hd.md @@ -438,6 +438,7 @@ conda install -c huggingface transformers 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (Google/CMU की ओर से) कागज के साथ [संस्करण-एक्स: एक ब्लॉग मॉडल चौकस चौक मॉडल मॉडल] (https://arxivorg/abs/1901.02860) क्वोकोक वी. 
ले, रुस्लैन सलाखुतदी 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. 1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal. +1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding. 1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research से) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. द्वाराअनुसंधान पत्र [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) के साथ जारी किया गया 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (माइक्रोसॉफ्ट रिसर्च से) साथ में दिया गया पेपर [UniSpeech: यूनिफाइड स्पीच रिप्रेजेंटेशन लर्निंग विद लेबलेड एंड अनलेबल्ड डेटा](https:/ /arxiv.org/abs/2101.07597) चेंगई वांग, यू वू, याओ कियान, केनिची कुमातानी, शुजी लियू, फुरु वेई, माइकल ज़ेंग, ज़ुएदोंग हुआंग द्वारा। diff --git a/README_ja.md b/README_ja.md index 5935da396bf1..0cdd96c306a6 100644 --- a/README_ja.md +++ b/README_ja.md @@ -498,6 +498,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (Google/CMU から) Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov から公開された研究論文: [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (Microsoft から), Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei から公開された研究論文: [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill から), Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal から公開された研究論文: [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) +1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (Intel から), Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding から公開された研究論文: [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) 1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (Google Research から) Yi Tay, Mostafa Dehghani, Vinh Q から公開された研究論文: [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. 
**[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research から) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. から公開された研究論文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research から) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang から公開された研究論文: [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) diff --git a/README_ko.md b/README_ko.md index e0c38472cc46..dc72f85b5482 100644 --- a/README_ko.md +++ b/README_ko.md @@ -413,6 +413,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (Google/CMU 에서) Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 의 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 논문과 함께 발표했습니다. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (Microsoft 에서) Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 의 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 논문과 함께 발표했습니다. 1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill 에서) Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal 의 [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) 논문과 함께 발표했습니다. +1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (Intel 에서) Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding 의 [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) 논문과 함께 발표했습니다. 1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (Google Research 에서) Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzle 의 [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) 논문과 함께 발표했습니다. 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research 에서 제공)은 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.의 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi)논문과 함께 발표했습니다. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research 에서) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 의 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 논문과 함께 발표했습니다. diff --git a/README_zh-hans.md b/README_zh-hans.md index 3d84374d5561..d74abb2a00e1 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -437,6 +437,7 @@ conda install -c huggingface transformers 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。 1. 
**[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。 1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (来自 UNC Chapel Hill) 伴随论文 [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) 由 Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal 发布。 +1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (来自 Intel) 伴随论文 [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) 由 Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding 发布. 1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (来自 Google Research) 伴随论文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) 由 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant 发布。 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index c095423cce15..eca50e97c7b1 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -449,6 +449,7 @@ conda install -c huggingface transformers 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. 1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal. +1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding. 1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. 
**[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 4e0ce88c10af..fd64afdf486f 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -723,6 +723,8 @@ title: TrOCR - local: model_doc/tvlt title: TVLT + - local: model_doc/tvp + title: TVP - local: model_doc/vilt title: ViLT - local: model_doc/vision-encoder-decoder diff --git a/docs/source/en/index.md b/docs/source/en/index.md index ae01569e970c..b19d567f8e47 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -264,6 +264,7 @@ Flax), PyTorch, and/or TensorFlow. | [Transformer-XL](model_doc/transfo-xl) | ✅ | ✅ | ❌ | | [TrOCR](model_doc/trocr) | ✅ | ❌ | ❌ | | [TVLT](model_doc/tvlt) | ✅ | ❌ | ❌ | +| [TVP](model_doc/tvp) | ✅ | ❌ | ❌ | | [UL2](model_doc/ul2) | ✅ | ✅ | ✅ | | [UMT5](model_doc/umt5) | ✅ | ❌ | ❌ | | [UniSpeech](model_doc/unispeech) | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/tvp.md b/docs/source/en/model_doc/tvp.md new file mode 100644 index 000000000000..1e733db6c8cb --- /dev/null +++ b/docs/source/en/model_doc/tvp.md @@ -0,0 +1,186 @@ + + +# TVP + +## Overview + +The text-visual prompting (TVP) framework was proposed in the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding. + +The abstract from the paper is the following: + +*In this paper, we study the problem of temporal video grounding (TVG), which aims to predict the starting/ending time points of moments described by a text sentence within a long untrimmed video. Benefiting from fine-grained 3D visual features, the TVG techniques have achieved remarkable progress in recent years. However, the high complexity of 3D convolutional neural networks (CNNs) makes extracting dense 3D visual features time-consuming, which calls for intensive memory and computing resources. Towards efficient TVG, we propose a novel text-visual prompting (TVP) framework, which incorporates optimized perturbation patterns (that we call ‘prompts’) into both visual inputs and textual features of a TVG model. In sharp contrast to 3D CNNs, we show that TVP allows us to effectively co-train vision encoder and language encoder in a 2D TVG model and improves the performance of cross-modal feature fusion using only low-complexity sparse 2D visual features. Further, we propose a Temporal-Distance IoU (TDIoU) loss for efficient learning of TVG. 
Experiments on two benchmark datasets, Charades-STA and ActivityNet Captions datasets, empirically show that the proposed TVP significantly boosts the performance of 2D TVG (e.g., 9.79% improvement on Charades-STA and 30.77% improvement on ActivityNet Captions) and achieves 5× inference acceleration over TVG using 3D visual features.* + +This research addresses temporal video grounding (TVG), which is the process of pinpointing the start and end times of specific events in a long video, as described by a text sentence. Text-visual prompting (TVP), is proposed to enhance TVG. TVP involves integrating specially designed patterns, known as 'prompts', into both the visual (image-based) and textual (word-based) input components of a TVG model. These prompts provide additional spatial-temporal context, improving the model's ability to accurately determine event timings in the video. The approach employs 2D visual inputs in place of 3D ones. Although 3D inputs offer more spatial-temporal detail, they are also more time-consuming to process. The use of 2D inputs with the prompting method aims to provide similar levels of context and accuracy more efficiently. + + + + TVP architecture. Taken from the original paper. + +This model was contributed by [Jiqing Feng](https://huggingface.co/Jiqing). The original code can be found [here](https://github.com/intel/TVP). + +## Usage tips and examples + +Prompts are optimized perturbation patterns, which would be added to input video frames or text features. Universal set refers to using the same exact set of prompts for any input, this means that these prompts are added consistently to all video frames and text features, regardless of the input's content. + +TVP consists of a visual encoder and cross-modal encoder. A universal set of visual prompts and text prompts to be integrated into sampled video frames and textual features, respectively. Specially, a set of different visual prompts are applied to uniformly-sampled frames of one untrimmed video in order. + +The goal of this model is to incorporate trainable prompts into both visual inputs and textual features to temporal video grounding(TVG) problems. +In principle, one can apply any visual, cross-modal encoder in the proposed architecture. + +The [`TvpProcessor`] wraps [`BertTokenizer`] and [`TvpImageProcessor`] into a single instance to both +encode the text and prepare the images respectively. + +The following example shows how to run temporal video grounding using [`TvpProcessor`] and [`TvpForVideoGrounding`]. +```python +import av +import cv2 +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from transformers import AutoProcessor, TvpForVideoGrounding + + +def pyav_decode(container, sampling_rate, num_frames, clip_idx, num_clips, target_fps): + ''' + Convert the video from its original fps to the target_fps and decode the video with PyAV decoder. + Args: + container (container): pyav container. + sampling_rate (int): frame sampling rate (interval between two sampled frames). + num_frames (int): number of frames to sample. + clip_idx (int): if clip_idx is -1, perform random temporal sampling. + If clip_idx is larger than -1, uniformly split the video to num_clips + clips, and select the clip_idx-th video clip. + num_clips (int): overall number of clips to uniformly sample from the given video. + target_fps (int): the input video may have different fps, convert it to + the target video fps before frame sampling. + Returns: + frames (tensor): decoded frames from the video. 
Return None if the no + video stream was found. + fps (float): the number of frames per second of the video. + ''' + video = container.streams.video[0] + fps = float(video.average_rate) + clip_size = sampling_rate * num_frames / target_fps * fps + delta = max(num_frames - clip_size, 0) + start_idx = delta * clip_idx / num_clips + end_idx = start_idx + clip_size - 1 + timebase = video.duration / num_frames + video_start_pts = int(start_idx * timebase) + video_end_pts = int(end_idx * timebase) + seek_offset = max(video_start_pts - 1024, 0) + container.seek(seek_offset, any_frame=False, backward=True, stream=video) + frames = {} + for frame in container.decode(video=0): + if frame.pts < video_start_pts: + continue + frames[frame.pts] = frame + if frame.pts > video_end_pts: + break + frames = [frames[pts] for pts in sorted(frames)] + return frames, fps + + +def decode(container, sampling_rate, num_frames, clip_idx, num_clips, target_fps): + ''' + Decode the video and perform temporal sampling. + Args: + container (container): pyav container. + sampling_rate (int): frame sampling rate (interval between two sampled frames). + num_frames (int): number of frames to sample. + clip_idx (int): if clip_idx is -1, perform random temporal sampling. + If clip_idx is larger than -1, uniformly split the video to num_clips + clips, and select the clip_idx-th video clip. + num_clips (int): overall number of clips to uniformly sample from the given video. + target_fps (int): the input video may have different fps, convert it to + the target video fps before frame sampling. + Returns: + frames (tensor): decoded frames from the video. + ''' + assert clip_idx >= -2, "Not a valied clip_idx {}".format(clip_idx) + frames, fps = pyav_decode(container, sampling_rate, num_frames, clip_idx, num_clips, target_fps) + clip_size = sampling_rate * num_frames / target_fps * fps + index = np.linspace(0, clip_size - 1, num_frames) + index = np.clip(index, 0, len(frames) - 1).astype(np.int64) + frames = np.array([frames[idx].to_rgb().to_ndarray() for idx in index]) + frames = frames.transpose(0, 3, 1, 2) + return frames + + +file = hf_hub_download(repo_id="Intel/tvp_demo", filename="AK2KG.mp4", repo_type="dataset") +model = TvpForVideoGrounding.from_pretrained("Intel/tvp-base") + +decoder_kwargs = dict( + container=av.open(file, metadata_errors="ignore"), + sampling_rate=1, + num_frames=model.config.num_frames, + clip_idx=0, + num_clips=1, + target_fps=3, +) +raw_sampled_frms = decode(**decoder_kwargs) + +text = "a person is sitting on a bed." +processor = AutoProcessor.from_pretrained("Intel/tvp-base") +model_inputs = processor( + text=[text], videos=list(raw_sampled_frms), return_tensors="pt", max_text_length=100#, size=size +) + +model_inputs["pixel_values"] = model_inputs["pixel_values"].to(model.dtype) +output = model(**model_inputs) + +def get_video_duration(filename): + cap = cv2.VideoCapture(filename) + if cap.isOpened(): + rate = cap.get(5) + frame_num = cap.get(7) + duration = frame_num/rate + return duration + return -1 + +duration = get_video_duration(file) +start, end = processor.post_process_video_grounding(output.logits, duration) + +print(f"The time slot of the video corresponding to the text \"{text}\" is from {start}s to {end}s") +``` + +Tips: + +- This implementation of TVP uses [`BertTokenizer`] to generate text embeddings and Resnet-50 model to compute visual embeddings. +- Checkpoints for pre-trained [tvp-base](https://huggingface.co/Intel/tvp-base) is released. 
+- Please refer to [Table 2](https://arxiv.org/pdf/2303.04995.pdf) for TVP's performance on Temporal Video Grounding task. + + +## TvpConfig + +[[autodoc]] TvpConfig + +## TvpImageProcessor + +[[autodoc]] TvpImageProcessor + - preprocess + +## TvpProcessor + +[[autodoc]] TvpProcessor + - __call__ + +## TvpModel + +[[autodoc]] TvpModel + - forward + +## TvpForVideoGrounding + +[[autodoc]] TvpForVideoGrounding + - forward \ No newline at end of file diff --git a/docs/source/ms/index.md b/docs/source/ms/index.md index e57b65fc40c6..28ec0aec7540 100644 --- a/docs/source/ms/index.md +++ b/docs/source/ms/index.md @@ -228,6 +228,7 @@ Dokumentasi disusun kepada lima bahagian: 1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. 1. **[TVLT](model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal. +1. **[TVP](model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding. 1. **[UL2](model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. @@ -431,6 +432,7 @@ Flax), PyTorch, dan/atau TensorFlow. 
| Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ | | TrOCR | ❌ | ❌ | ✅ | ❌ | ❌ | | TVLT | ❌ | ❌ | ✅ | ❌ | ❌ | +| TVP | ❌ | ❌ | ✅ | ❌ | ❌ | | UniSpeech | ❌ | ❌ | ✅ | ❌ | ❌ | | UniSpeechSat | ❌ | ❌ | ✅ | ❌ | ❌ | | UPerNet | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index cf89602b6597..672386c7938b 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -597,6 +597,11 @@ "TvltFeatureExtractor", "TvltProcessor", ], + "models.tvp": [ + "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TvpConfig", + "TvpProcessor", + ], "models.umt5": ["UMT5Config"], "models.unispeech": [ "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", @@ -1010,6 +1015,7 @@ _import_structure["models.segformer"].extend(["SegformerFeatureExtractor", "SegformerImageProcessor"]) _import_structure["models.swin2sr"].append("Swin2SRImageProcessor") _import_structure["models.tvlt"].append("TvltImageProcessor") + _import_structure["models.tvp"].append("TvpImageProcessor") _import_structure["models.videomae"].extend(["VideoMAEFeatureExtractor", "VideoMAEImageProcessor"]) _import_structure["models.vilt"].extend(["ViltFeatureExtractor", "ViltImageProcessor", "ViltProcessor"]) _import_structure["models.vit"].extend(["ViTFeatureExtractor", "ViTImageProcessor"]) @@ -2931,6 +2937,14 @@ "TvltPreTrainedModel", ] ) + _import_structure["models.tvp"].extend( + [ + "TVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "TvpForVideoGrounding", + "TvpModel", + "TvpPreTrainedModel", + ] + ) _import_structure["models.umt5"].extend( [ "UMT5EncoderModel", @@ -4795,6 +4809,11 @@ ) from .models.trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig, TrOCRProcessor from .models.tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig, TvltFeatureExtractor, TvltProcessor + from .models.tvp import ( + TVP_PRETRAINED_CONFIG_ARCHIVE_MAP, + TvpConfig, + TvpProcessor, + ) from .models.umt5 import UMT5Config from .models.unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig from .models.unispeech_sat import UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechSatConfig @@ -5165,6 +5184,7 @@ from .models.segformer import SegformerFeatureExtractor, SegformerImageProcessor from .models.swin2sr import Swin2SRImageProcessor from .models.tvlt import TvltImageProcessor + from .models.tvp import TvpImageProcessor from .models.videomae import VideoMAEFeatureExtractor, VideoMAEImageProcessor from .models.vilt import ViltFeatureExtractor, ViltImageProcessor, ViltProcessor from .models.vit import ViTFeatureExtractor, ViTImageProcessor @@ -6755,6 +6775,12 @@ TvltModel, TvltPreTrainedModel, ) + from .models.tvp import ( + TVP_PRETRAINED_MODEL_ARCHIVE_LIST, + TvpForVideoGrounding, + TvpModel, + TvpPreTrainedModel, + ) from .models.umt5 import ( UMT5EncoderModel, UMT5ForConditionalGeneration, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 6132512688e6..976f3f551886 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -207,6 +207,7 @@ transfo_xl, trocr, tvlt, + tvp, umt5, unispeech, unispeech_sat, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index c1c2387373b8..5b19b842acb6 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -214,6 +214,7 @@ ("transfo-xl", "TransfoXLConfig"), ("trocr", "TrOCRConfig"), ("tvlt", "TvltConfig"), + ("tvp", "TvpConfig"), ("umt5", "UMT5Config"), ("unispeech", 
"UniSpeechConfig"), ("unispeech-sat", "UniSpeechSatConfig"), @@ -420,6 +421,7 @@ ("timesformer", "TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("transfo-xl", "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("tvlt", "TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("tvp", "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("van", "VAN_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -660,6 +662,7 @@ ("transfo-xl", "Transformer-XL"), ("trocr", "TrOCR"), ("tvlt", "TVLT"), + ("tvp", "TVP"), ("ul2", "UL2"), ("umt5", "UMT5"), ("unispeech", "UniSpeech"), diff --git a/src/transformers/models/auto/image_processing_auto.py b/src/transformers/models/auto/image_processing_auto.py index f52d18a70767..168b7a5dff3a 100644 --- a/src/transformers/models/auto/image_processing_auto.py +++ b/src/transformers/models/auto/image_processing_auto.py @@ -102,6 +102,7 @@ ("table-transformer", "DetrImageProcessor"), ("timesformer", "VideoMAEImageProcessor"), ("tvlt", "TvltImageProcessor"), + ("tvp", "TvpImageProcessor"), ("upernet", "SegformerImageProcessor"), ("van", "ConvNextImageProcessor"), ("videomae", "VideoMAEImageProcessor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index ffcae9a23494..d435a8770c9e 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -200,6 +200,7 @@ ("trajectory_transformer", "TrajectoryTransformerModel"), ("transfo-xl", "TransfoXLModel"), ("tvlt", "TvltModel"), + ("tvp", "TvpModel"), ("umt5", "UMT5Model"), ("unispeech", "UniSpeechModel"), ("unispeech-sat", "UniSpeechSatModel"), diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index 84f7ba3be5bf..e1b3bac2de05 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -81,6 +81,7 @@ ("speecht5", "SpeechT5Processor"), ("trocr", "TrOCRProcessor"), ("tvlt", "TvltProcessor"), + ("tvp", "TvpProcessor"), ("unispeech", "Wav2Vec2Processor"), ("unispeech-sat", "Wav2Vec2Processor"), ("vilt", "ViltProcessor"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index f30bc9ad6195..04a1bc77e655 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -371,6 +371,7 @@ ("tapas", ("TapasTokenizer", None)), ("tapex", ("TapexTokenizer", None)), ("transfo-xl", ("TransfoXLTokenizer", None)), + ("tvp", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), ( "umt5", ( diff --git a/src/transformers/models/tvp/__init__.py b/src/transformers/models/tvp/__init__.py new file mode 100644 index 000000000000..63c0bd271744 --- /dev/null +++ b/src/transformers/models/tvp/__init__.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_tvp": [ + "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TvpConfig", + ], + "processing_tvp": ["TvpProcessor"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_tvp"] = ["TvpImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tvp"] = [ + "TVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "TvpModel", + "TvpPreTrainedModel", + "TvpForVideoGrounding", + ] + +if TYPE_CHECKING: + from .configuration_tvp import ( + TVP_PRETRAINED_CONFIG_ARCHIVE_MAP, + TvpConfig, + ) + from .processing_tvp import TvpProcessor + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_tvp import TvpImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tvp import ( + TVP_PRETRAINED_MODEL_ARCHIVE_LIST, + TvpForVideoGrounding, + TvpModel, + TvpPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/tvp/configuration_tvp.py b/src/transformers/models/tvp/configuration_tvp.py new file mode 100644 index 000000000000..dfb0a5f9985a --- /dev/null +++ b/src/transformers/models/tvp/configuration_tvp.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TVP model configuration""" + +import copy + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..auto import CONFIG_MAPPING + + +logger = logging.get_logger(__name__) + + +TVP_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "Intel/tvp-base": "https://huggingface.co/Intel/tvp-base/resolve/main/config.json", +} + + +class TvpConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`TvpModel`]. It is used to instantiate an Tvp + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Tvp + [Intel/tvp-base](https://huggingface.co/Intel/tvp-base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. 
+ + + Args: + backbone_config (`PretrainedConfig` or `dict`, *optional*): + The configuration of the backbone model. + distance_loss_weight (`float`, *optional*, defaults to 1.0): + The weight of distance loss. + duration_loss_weight (`float`, *optional*, defaults to 0.1): + The weight of duration loss. + visual_prompter_type (`str`, *optional*, defaults to `"framepad"`): + Visual prompt type. The type of padding. Framepad means padding on each frame. Should be one of "framepad" + or "framedownpad" + visual_prompter_apply (`str`, *optional*, defaults to `"replace"`): + The way of applying visual prompt. Replace means use the value of prompt to change the original value in + visual inputs. Should be one of "replace", or "add", or "remove". + visual_prompt_size (`int`, *optional*, defaults to 96): + The size of visual prompt. + max_img_size (`int`, *optional*, defaults to 448): + The maximum size of frame. + num_frames (`int`, *optional*, defaults to 48): + The number of frames extracted from a video. + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the Tvp text model. Defines the number of different tokens that can be represented by + the `inputs_ids` passed when calling [`TvpModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + max_grid_col_position_embeddings (`int`, *optional*, defaults to 100): + The largest number of horizontal patches from a video frame. + max_grid_row_position_embeddings (`int`, *optional*, defaults to 100): + The largest number of vertical patches from a video frame. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability of hidden layers. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability of attention layers. 
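+
+    Example (a minimal sketch of initializing [`TvpModel`] from this configuration, using only the classes and the `Intel/tvp-base` checkpoint defined in this patch):
+
+    ```python
+    >>> from transformers import TvpConfig, TvpModel
+
+    >>> # Initializing a TVP Intel/tvp-base style configuration
+    >>> configuration = TvpConfig()
+
+    >>> # Initializing a model (with random weights) from the Intel/tvp-base style configuration
+    >>> model = TvpModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```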
+ """ + + model_type = "tvp" + + def __init__( + self, + backbone_config=None, + distance_loss_weight=1.0, + duration_loss_weight=0.1, + visual_prompter_type="framepad", + visual_prompter_apply="replace", + visual_prompt_size=96, + max_img_size=448, + num_frames=48, + vocab_size=30522, + hidden_size=768, + intermediate_size=3072, + num_hidden_layers=12, + num_attention_heads=12, + max_position_embeddings=512, + max_grid_col_position_embeddings=100, + max_grid_row_position_embeddings=100, + hidden_dropout_prob=0.1, + hidden_act="gelu", + layer_norm_eps=1e-12, + initializer_range=0.02, + attention_probs_dropout_prob=0.1, + **kwargs, + ): + super().__init__(**kwargs) + + if backbone_config is None: + logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") + backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) + elif isinstance(backbone_config, dict): + backbone_model_type = backbone_config.get("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + + self.backbone_config = backbone_config + self.distance_loss_weight = distance_loss_weight + self.duration_loss_weight = duration_loss_weight + self.visual_prompter_type = visual_prompter_type + self.visual_prompter_apply = visual_prompter_apply + self.visual_prompt_size = visual_prompt_size + self.max_img_size = max_img_size + self.num_frames = num_frames + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.max_position_embeddings = max_position_embeddings + self.max_grid_col_position_embeddings = max_grid_col_position_embeddings + self.max_grid_row_position_embeddings = max_grid_row_position_embeddings + self.layer_norm_eps = layer_norm_eps + self.hidden_dropout_prob = hidden_dropout_prob + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.attention_probs_dropout_prob = attention_probs_dropout_prob + + @classmethod + def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): + """Instantiate a [`TvpConfig`] (or a derived class) from a pre-trained backbone model configuration. + + Args: + backbone_config ([`PretrainedConfig`]): + The backbone configuration. + Returns: + [`TvpConfig`]: An instance of a configuration object + """ + return cls(backbone_config=backbone_config, **kwargs) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. + + Returns: + `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + if output["backbone_config"] is not None: + output["backbone_config"] = self.backbone_config.to_dict() + output["model_type"] = self.__class__.model_type + return output diff --git a/src/transformers/models/tvp/image_processing_tvp.py b/src/transformers/models/tvp/image_processing_tvp.py new file mode 100644 index 000000000000..5363d5043195 --- /dev/null +++ b/src/transformers/models/tvp/image_processing_tvp.py @@ -0,0 +1,476 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for TVP.""" + +from typing import Dict, Iterable, List, Optional, Tuple, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import ( + PaddingMode, + flip_channel_order, + pad, + resize, + to_channel_dimension_format, +) +from ...image_utils import ( + IMAGENET_STANDARD_MEAN, + IMAGENET_STANDARD_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + is_valid_image, + to_numpy_array, + valid_images, +) +from ...utils import TensorType, is_vision_available, logging + + +if is_vision_available(): + import PIL + + +logger = logging.get_logger(__name__) + + +# Copied from transformers.models.vivit.image_processing_vivit.make_batched +def make_batched(videos) -> List[List[ImageInput]]: + if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]): + return videos + + elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]): + return [videos] + + elif is_valid_image(videos): + return [[videos]] + + raise ValueError(f"Could not make batched video from {videos}") + + +def get_resize_output_image_size( + input_image: np.ndarray, + max_size: int = 448, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> Tuple[int, int]: + height, width = get_image_size(input_image, input_data_format) + if height >= width: + ratio = width * 1.0 / height + new_height = max_size + new_width = new_height * ratio + else: + ratio = height * 1.0 / width + new_width = max_size + new_height = new_width * ratio + size = (int(new_height), int(new_width)) + + return size + + +class TvpImageProcessor(BaseImageProcessor): + r""" + Constructs a Tvp image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the + `do_resize` parameter in the `preprocess` method. + size (`Dict[str, int]` *optional*, defaults to `{"longest_edge": 448}`): + Size of the output image after resizing. The longest edge of the image will be resized to + `size["longest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by + `size` in the `preprocess` method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): + Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the + `preprocess` method. + do_center_crop (`bool`, *optional*, defaults to `True`): + Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` + parameter in the `preprocess` method. + crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`): + Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the + `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. 
Can be overridden by the `do_rescale`
+            parameter in the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
+            in the `preprocess` method.
+        do_pad (`bool`, *optional*, defaults to `True`):
+            Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method.
+        pad_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
+            Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the
+            `preprocess` method.
+        constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0):
+            The fill value to use when padding the image.
+        pad_mode (`PaddingMode`, *optional*, defaults to `PaddingMode.CONSTANT`):
+            The padding mode to use when padding the image.
+        do_normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+            method.
+        do_flip_channel_order (`bool`, *optional*, defaults to `True`):
+            Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
+            parameter in the `preprocess` method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
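+
+    Example (a minimal usage sketch; the random frames below only stand in for real decoded video frames):
+
+    ```python
+    >>> import numpy as np
+    >>> from transformers import TvpImageProcessor
+
+    >>> image_processor = TvpImageProcessor()
+    >>> video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(2)]  # 2 RGB frames
+    >>> inputs = image_processor(video, return_tensors="pt")
+    >>> # frames are resized, padded to 448x448 and stacked into a (batch, frames, channels, height, width) tensor,
+    >>> # i.e. inputs["pixel_values"].shape == torch.Size([1, 2, 3, 448, 448]) with the default settings
+    ```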
+ """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_center_crop: bool = True, + crop_size: Dict[str, int] = None, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_pad: bool = True, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = 0, + pad_mode: PaddingMode = PaddingMode.CONSTANT, + do_normalize: bool = True, + do_flip_channel_order: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"longest_edge": 448} + crop_size = crop_size if crop_size is not None else {"height": 448, "width": 448} + pad_size = pad_size if pad_size is not None else {"height": 448, "width": 448} + + self.do_resize = do_resize + self.size = size + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_pad = do_pad + self.pad_size = pad_size + self.constant_values = constant_values + self.pad_mode = pad_mode + self.do_normalize = do_normalize + self.do_flip_channel_order = do_flip_channel_order + self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will + have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its + longest edge of length `s` while keeping the aspect ratio of the original image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): + Resampling filter to use when resiizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + size = get_size_dict(size, default_to_square=False) + if "height" in size and "width" in size: + output_size = (size["height"], size["width"]) + elif "longest_edge" in size: + output_size = get_resize_output_image_size(image, size["longest_edge"], input_data_format) + else: + raise ValueError(f"Size must have 'height' and 'width' or 'longest_edge' as keys. 
Got {size.keys()}") + + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def pad_image( + self, + image: np.ndarray, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = 0, + pad_mode: PaddingMode = PaddingMode.CONSTANT, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Pad an image with zeros to the given size. + + Args: + image (`np.ndarray`): + Image to pad. + pad_size (`Dict[str, int]`) + Size of the output image with pad. + constant_values (`Union[float, Iterable[float]]`) + The fill value to use when padding the image. + pad_mode (`PaddingMode`) + The pad mode, default to PaddingMode.CONSTANT + data_format (`ChannelDimension` or `str`, *optional*) + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + height, width = get_image_size(image, channel_dim=input_data_format) + max_height = pad_size.get("height", height) + max_width = pad_size.get("width", width) + + pad_right, pad_bottom = max_width - width, max_height - height + if pad_right < 0 or pad_bottom < 0: + raise ValueError("The padding size must be greater than image size") + + padding = ((0, pad_bottom), (0, pad_right)) + padded_image = pad( + image, + padding, + mode=pad_mode, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + + return padded_image + + def _preprocess_image( + self, + image: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: Dict[str, int] = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_pad: bool = True, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = None, + pad_mode: PaddingMode = None, + do_normalize: bool = None, + do_flip_channel_order: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """Preprocesses a single image.""" + if do_resize and size is None or resample is None: + raise ValueError("Size and resample must be specified if do_resize is True.") + + if do_center_crop and crop_size is None: + raise ValueError("Crop size must be specified if do_center_crop is True.") + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_pad and pad_size is None: + raise ValueError("Padding size must be specified if do_pad is True.") + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("Image mean and std must be specified if do_normalize is True.") + + # All transformations expect numpy arrays. 
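+        # Processing order below: resize -> center crop -> rescale -> normalize -> pad -> RGB->BGR flip, each gated
+        # by its `do_*` flag, followed by an unconditional channel-dimension reordering.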
+ image = to_numpy_array(image) + + if do_resize: + image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + + if do_center_crop: + image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) + + if do_rescale: + image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + + if do_normalize: + image = self.normalize( + image=image.astype(np.float32), mean=image_mean, std=image_std, input_data_format=input_data_format + ) + + if do_pad: + image = self.pad_image( + image=image, + pad_size=pad_size, + constant_values=constant_values, + pad_mode=pad_mode, + input_data_format=input_data_format, + ) + + # the pretrained checkpoints assume images are BGR, not RGB + if do_flip_channel_order: + image = flip_channel_order(image=image, input_data_format=input_data_format) + + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + + return image + + def preprocess( + self, + videos: Union[ImageInput, List[ImageInput], List[List[ImageInput]]], + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: Dict[str, int] = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_pad: bool = None, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = None, + pad_mode: PaddingMode = None, + do_normalize: bool = None, + do_flip_channel_order: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: ChannelDimension = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + videos (`ImageInput` or `List[ImageInput]` or `List[List[ImageInput]]`): + Frames to preprocess. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after applying resize. + resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only + has an effect if `do_resize` is set to `True`. + do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`): + Whether to centre crop the image. + crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): + Size of the image after applying the centre crop. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_pad (`bool`, *optional*, defaults to `True`): + Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. + pad_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`): + Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the + `preprocess` method. + constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0): + The fill value to use when padding the image. 
+ pad_mode (`PaddingMode`, *optional*, defaults to "PaddingMode.CONSTANT"): + Use what kind of mode in padding. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`): + Whether to flip the channel order of the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the inferred channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + resample = resample if resample is not None else self.resample + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_pad = do_pad if do_pad is not None else self.do_pad + pad_size = pad_size if pad_size is not None else self.pad_size + constant_values = constant_values if constant_values is not None else self.constant_values + pad_mode = pad_mode if pad_mode else self.pad_mode + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + do_flip_channel_order = ( + do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order + ) + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False) + crop_size = crop_size if crop_size is not None else self.crop_size + crop_size = get_size_dict(crop_size, param_name="crop_size") + + if not valid_images(videos): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." 
+ ) + + videos = make_batched(videos) + + videos = [ + np.array( + [ + self._preprocess_image( + image=img, + do_resize=do_resize, + size=size, + resample=resample, + do_center_crop=do_center_crop, + crop_size=crop_size, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_pad=do_pad, + pad_size=pad_size, + constant_values=constant_values, + pad_mode=pad_mode, + do_normalize=do_normalize, + do_flip_channel_order=do_flip_channel_order, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + ) + for img in video + ] + ) + for video in videos + ] + + data = {"pixel_values": videos} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/src/transformers/models/tvp/modeling_tvp.py b/src/transformers/models/tvp/modeling_tvp.py new file mode 100644 index 000000000000..bf1fc5afbaac --- /dev/null +++ b/src/transformers/models/tvp/modeling_tvp.py @@ -0,0 +1,890 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch TVP Model""" + +import math +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import prune_linear_layer +from ...utils import logging +from ..auto import AutoBackbone +from .configuration_tvp import TvpConfig + + +logger = logging.get_logger(__name__) + +TVP_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "Intel/tvp-base", + "Intel/tvp-base-ANet", + # See all Tvp models at https://huggingface.co/models?filter=tvp +] + + +@dataclass +class TvpVideoGroundingOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Temporal-Distance IoU loss for video grounding. + logits (`torch.FloatTensor` of shape `(batch_size, 2)`): + Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the + input texts. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of + the model at the output of each layer plus the optional initial embedding outputs. 
+        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+    """
+
+    loss: Optional[torch.FloatTensor] = None
+    logits: torch.FloatTensor = None
+    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class TvpLoss(nn.Module):
+    """
+    This class computes the losses for `TvpForVideoGrounding`. It compares the predicted start/end times (expressed
+    as fractions of the video duration) against the ground-truth ones and returns the requested loss terms: a
+    temporal IoU loss, a mid-point distance loss and a duration loss.
+
+    Args:
+        losses (`List[str]`):
+            List of all the losses to be applied.
+    """
+
+    def __init__(self, losses):
+        super().__init__()
+        self.loss_map = {
+            "iou": self.loss_iou,
+            "distance": self.loss_distance,
+            "duration": self.loss_duration,
+        }
+        for loss in losses:
+            if loss not in self.loss_map:
+                raise ValueError(f"Loss {loss} not supported")
+
+        self.losses = losses
+
+    def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
+        """
+        Measure the intersection over union loss (one minus the temporal IoU).
+        """
+        inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time)
+        union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time)
+        iou = 1 - inter.clamp(min=0) / union
+
+        return iou
+
+    def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
+        """
+        Measure the distance of mid points.
+        """
+        mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0)
+        mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0)
+        distance_diff = torch.div(
+            torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration
+        ).clamp(min=0.2)
+
+        return distance_diff
+
+    def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
+        """
+        Measure the difference of duration.
+        """
+        duration_candidates = torch.sub(candidates_end_time, candidates_start_time)
+        duration_groundtruth = torch.sub(end_time, start_time)
+        duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration))
+        duration_diff = duration_diff.clamp(min=0.4)
+
+        return duration_diff
+
+    def forward(self, logits, labels):
+        """
+        This performs the loss computation.
+
+        Args:
+            logits (`torch.FloatTensor`):
+                The output logits of the head module.
+            labels (`List[torch.FloatTensor]`):
+                List of tensors `[duration, start_time, end_time]`, which contains the duration of the video and the
+                start and end time of the video segment corresponding to the text.
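+
+        Example (an illustrative sketch with made-up numbers, combining the terms with the default config weights, as
+        `TvpForVideoGrounding` does):
+
+        ```python
+        >>> import torch
+        >>> from transformers.models.tvp.modeling_tvp import TvpLoss
+
+        >>> criterion = TvpLoss(["iou", "distance", "duration"])
+        >>> logits = torch.tensor([[0.1, 0.6]])  # predicted (start, end) as fractions of the video duration
+        >>> labels = [torch.tensor([30.0]), torch.tensor([3.0]), torch.tensor([18.0])]  # duration, start, end
+        >>> loss_dict = criterion(logits, labels)
+        >>> loss = loss_dict["iou"] + 1.0 * loss_dict["distance"] + 0.1 * loss_dict["duration"]
+        ```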
+ """ + duration, start_time, end_time = labels + candidates = torch.mul(logits, duration) + candidates_start_time, candidates_end_time = candidates[:, 0].float(), candidates[:, 1].float() + + losses_dict = {} + for loss in self.losses: + losses_dict.update( + {loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)} + ) + + return losses_dict + + +class TvpVisionModel(nn.Module): + def __init__(self, config): + super().__init__() + self.backbone = AutoBackbone.from_config(config.backbone_config) + self.grid_encoder_conv = nn.Conv2d( + config.backbone_config.hidden_sizes[-1], + config.hidden_size, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=False, + ) + + def forward(self, pixel_values): + batch_size, num_frames, num_channels, height, width = pixel_values.shape + # (batch_size * num_frames, num_channels, height, width) + pixel_values = pixel_values.view(batch_size * num_frames, num_channels, height, width) + grid_feat_outputs = self.backbone(pixel_values)["feature_maps"][0] + grid = self.grid_encoder_conv(grid_feat_outputs) + grid = nn.functional.max_pool2d(grid, kernel_size=2, stride=2) + grid = nn.functional.relu(grid, inplace=True) + new_channel, new_height, new_width = grid.shape[-3:] + # (batch_size, num_frames, num_channels, height, width) + grid = grid.view(batch_size, num_frames, new_channel, new_height, new_width) + # (batch_size, num_frames, height, width, num_channels) + grid = grid.permute(0, 1, 3, 4, 2) + return grid + + +class TvpVisualInputEmbedding(nn.Module): + """ + Takes input of both image and video (multi-frame) + """ + + def __init__(self, config): + super().__init__() + # sequence embedding + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.row_position_embeddings = nn.Embedding(config.max_grid_row_position_embeddings, config.hidden_size) + self.col_position_embeddings = nn.Embedding(config.max_grid_col_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(1, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def add_2d_positional_embeddings(self, grid): + """ + Args: + grid: (batch_size, height, width, hidden_dim) + Returns: + grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim) + """ + batch_size, height, width, hidden_dim = grid.shape + + # add row-wise position embeddings + row_position_ids = torch.arange(height, dtype=torch.long, device=grid.device) # (height, ) + row_position_embeddings = self.row_position_embeddings(row_position_ids) # (height, hidden_dim) + row_shape = (1,) * (len(grid.shape) - 3) + (height, 1, hidden_dim) # (1, height, 1, hidden_dim) + grid = grid + row_position_embeddings.view(*row_shape) # broadcast automatically + + # add column-wise position embeddings + col_position_ids = torch.arange(width, dtype=torch.long, device=grid.device) # (width, ) + col_position_embeddings = self.col_position_embeddings(col_position_ids) # (width, hidden_dim) + col_shape = (batch_size, 1, width, hidden_dim) # (1, 1, width, hidden_dim) + return grid + col_position_embeddings.view(*col_shape) # broadcast automatically + + def forward(self, grid): + """ + Args: + grid: Array of shape (batch_size, num_frames, height, width, num_channels). + It contains processed frames extracted from videos, and is generated by Tvp image preprocessor. 
Note, + num_frames can be 1 + + Returns: + embeddings: The embedding of grid with size (batch_size, height*width, num_channels) + + """ + batch_size, num_frames, height, width, num_channels = grid.shape + # temporal mean pooling, (batch_size, height, width, hidden_size) + grid = grid.mean(1) + grid = self.add_2d_positional_embeddings(grid) + # image token sequence, (batch_size, height*width, num_channels) + visual_tokens = grid.view(batch_size, -1, num_channels) + visual_tokens_shape = visual_tokens.shape[:-1] + device = visual_tokens.device + + # image token type embeddings. + token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = visual_tokens + token_type_embeddings + embeddings = self.layer_norm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class TvpTextInputEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + device = input_ids.device if input_ids is not None else inputs_embeds.device + if position_ids is None: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).expand(input_shape) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + position_embeddings + token_type_embeddings + embeddings = self.layer_norm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class TvpAttention(nn.Module): + def __init__(self, config): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + self.attn_dropout = nn.Dropout(config.attention_probs_dropout_prob) + + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.pruned_heads = set() + + def 
prune_heads(self, heads): + if len(heads) == 0: + return + mask = torch.ones(self.num_attention_heads, self.attention_head_size) + heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads + for head in heads: + # Compute how many pruned heads are before the head and move the index accordingly + head = head - sum(1 if h < head else 0 for h in self.pruned_heads) + mask[head] = 0 + mask = mask.view(-1).contiguous().eq(1) + index = torch.arange(len(mask))[mask].long() + + # Prune linear layers + self.query = prune_linear_layer(self.query, index) + self.key = prune_linear_layer(self.key, index) + self.value = prune_linear_layer(self.value, index) + self.dense = prune_linear_layer(self.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.num_attention_heads = self.num_attention_heads - len(heads) + self.all_head_size = self.attention_head_size * self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def _reshape(self, tensor: torch.Tensor, sequence_length: int, batch_size: int): + return ( + tensor.view(batch_size, sequence_length, self.num_attention_heads, self.attention_head_size) + .transpose(1, 2) + .contiguous() + ) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + output_attentions: Optional[bool] = None, + ): + batch_size, sequence_length = hidden_states.shape[:2] + mixed_query_layer = self.query(hidden_states) + + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self._reshape(mixed_query_layer, sequence_length, batch_size) + key_layer = self._reshape(mixed_key_layer, sequence_length, batch_size) + value_layer = self._reshape(mixed_value_layer, sequence_length, batch_size) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.attn_dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + attn_output = torch.matmul(attention_probs, value_layer) + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(batch_size, sequence_length, self.all_head_size) + + attn_output = self.dense(attn_output) + attn_output = self.dropout(attn_output) + attn_output = self.layer_norm(attn_output + hidden_states) + # add attentions if we output them + outputs = (attn_output, attention_probs) if output_attentions else (attn_output,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Tvp +class TvpIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class TvpOutputLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.layer_norm(hidden_states + input_tensor) + return hidden_states + + +class TvpEncodeLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.attention = TvpAttention(config) + self.intermediate = TvpIntermediate(config) + self.output = TvpOutputLayer(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + output_attentions: Optional[bool] = None, + ): + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + outputs + return outputs + + +class TvpEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([TvpEncodeLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + return_dict = return_dict if return_dict is not None else self.config.return_dict + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + all_hidden_states = () + all_attentions = () + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + 
all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + (head_mask[i] if head_mask is not None else None), + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], output_attentions) + + hidden_states = layer_outputs[0] + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + outputs = (hidden_states,) + if output_hidden_states: + outputs = outputs + (all_hidden_states,) + if output_attentions: + outputs = outputs + (all_attentions,) + return outputs # last-layer hidden state, (all hidden states), (all attentions) + + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states if output_hidden_states else None, + attentions=all_attentions if output_attentions else None, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Tvp +class TvpPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class TvpPreTrainedModel(PreTrainedModel): + """An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. + """ + + config_class = TvpConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + +TVP_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`TvpConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +TVP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. 
See
+            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
+            IDs?](../glossary#input-ids)
+
+        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+            Pixel values. Pixel values can be obtained using [`TvpImageProcessor`]. See [`TvpImageProcessor.__call__`]
+            for details.
+
+        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+            [What are attention masks?](../glossary#attention-mask)
+
+        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class TvpFrameDownPadPrompter(nn.Module):
+    """
+    Pad frames extracted from videos only at the bottom.
+    """
+
+    def __init__(self, config):
+        if config.visual_prompter_apply not in ("add", "replace", "remove"):
+            raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)")
+
+        super().__init__()
+        self.visual_prompt_size = config.visual_prompt_size
+        self.num_frames = config.num_frames
+        self.max_img_size = config.max_img_size
+        self.visual_prompter_apply = config.visual_prompter_apply
+
+        self.pad_down = nn.Parameter(
+            torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size])
+        )
+
+    def forward(self, pixel_values):
+        if self.visual_prompter_apply != "add":
+            visual_prompt_mask = torch.ones([self.max_img_size, self.max_img_size], dtype=pixel_values.dtype)
+            visual_prompt_mask[self.max_img_size - self.visual_prompt_size : self.max_img_size, :] = 0.0
+            pixel_values *= visual_prompt_mask
+        if self.visual_prompter_apply != "remove":
+            prompt = torch.zeros(
+                [pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size]
+            )
+            start_point = self.max_img_size - self.visual_prompt_size
+            prompt[:, :, :, start_point : self.max_img_size, :] = self.pad_down
+            pixel_values += prompt.to(pixel_values.dtype)
+        return pixel_values
+
+
+class TvpFramePadPrompter(nn.Module):
+    """
+    Pad frames extracted from videos in the surroundings.
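+    For the default configuration (`max_img_size=448`, `visual_prompt_size=96`), the four learnable pads surround a
+    zero-filled 256 x 256 base (448 - 2 * 96 = 256), so the assembled prompt again covers the full 448 x 448 frame.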
+ """ + + def __init__(self, config): + if config.visual_prompter_apply not in ("add", "replace", "remove"): + raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)") + + super().__init__() + self.num_frames = config.num_frames + self.max_img_size = config.max_img_size + self.visual_prompter_apply = config.visual_prompter_apply + + self.base_size = config.max_img_size - config.visual_prompt_size * 2 + self.pad_up = nn.Parameter( + torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) + ) + self.pad_down = nn.Parameter( + torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) + ) + self.pad_left = nn.Parameter( + torch.randn( + [ + 1, + config.num_frames, + 3, + config.max_img_size - config.visual_prompt_size * 2, + config.visual_prompt_size, + ] + ) + ) + self.pad_right = nn.Parameter( + torch.randn( + [ + 1, + config.num_frames, + 3, + config.max_img_size - config.visual_prompt_size * 2, + config.visual_prompt_size, + ] + ) + ) + + def forward(self, pixel_values): + if self.visual_prompter_apply not in ("add", "remove", "replace"): + raise ValueError(f"Invalid visual_prompter_apply value {self.visual_prompter_apply}") + if self.visual_prompter_apply in ("replace", "remove"): + visual_prompt_mask = torch.ones([self.max_img_size, self.max_img_size], dtype=pixel_values.dtype) + pixel_values *= visual_prompt_mask + if self.visual_prompter_apply in ("replace", "add"): + base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size) + prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4) + prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3) + prompt = torch.cat(pixel_values.size(0) * [prompt]) + pixel_values += prompt.to(pixel_values.dtype) + return pixel_values + + +TVP_PROMPTER_CLASSES_MAPPING = { + "framedownpad": TvpFrameDownPadPrompter, + "framepad": TvpFramePadPrompter, +} + + +@add_start_docstrings( + "The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on" " top.", + TVP_START_DOCSTRING, +) +class TvpModel(TvpPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + self.vision_model = TvpVisionModel(config) + self.embeddings = TvpTextInputEmbeddings(config) + self.visual_embeddings = TvpVisualInputEmbedding(config) + self.encoder = TvpEncoder(config) + self.pooler = TvpPooler(config) + self.text_prompt = nn.Parameter(torch.randn([1, 10, config.hidden_size])) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + if config.visual_prompter_type not in TVP_PROMPTER_CLASSES_MAPPING: + raise ValueError("`visual_prompter_type` must be in (framedownpad, framepad)") + self.visual_prompter = TVP_PROMPTER_CLASSES_MAPPING[config.visual_prompter_type](config) + + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. 
+ heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=TvpConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns: + + Examples: + ```python + >>> import torch + >>> from transformers import AutoConfig, AutoTokenizer, TvpModel + + >>> model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp") + + >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") + + >>> pixel_values = torch.rand(1, 1, 3, 448, 448) + >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") + >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) + ```""" + return_dict = return_dict if return_dict is not None else self.config.return_dict + + # Add visual prompt, it compensates for the spatiotemporal information loss in 2D visual features. + pixel_values = self.vision_model(self.visual_prompter(pixel_values)) + # (batch_size, sequence_length, hidden_size) + text_embedding_output = self.embeddings(input_ids=input_ids) + # (batch_size, visual_sequence_length, hidden_size) + visual_embedding_output = self.visual_embeddings(pixel_values) + if attention_mask is not None: + # (batch_size, visual_sequence_length) + visual_attention_mask = attention_mask.new_ones(visual_embedding_output.shape[:2]) + pt_mask = torch.ones(attention_mask.shape[0], 10).to( + device=attention_mask.device, dtype=attention_mask.dtype + ) + attention_mask = torch.cat([pt_mask, attention_mask, visual_attention_mask], dim=-1) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
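+            # The combined sequence fed to the encoder is [10 learnable text-prompt tokens, text tokens, visual
+            # tokens], which is why the mask is extended with a width-10 block of ones before the visual part.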
+ attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size()).to(input_ids.device) + text_prompt = self.text_prompt.expand(text_embedding_output.shape[0], -1, -1) + # (batch_size, sequence_length + visual_sequence_length, hidden_size) + embedding_output = torch.cat([text_prompt, text_embedding_output, visual_embedding_output], dim=1) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=attention_mask, + head_mask=self.get_head_mask(head_mask, self.config.num_hidden_layers), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0] + pooled_output = self.pooler(last_hidden_state) + last_hidden_state = self.dropout(last_hidden_state) + pooled_output = self.dropout(pooled_output) + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class TvpVideoGroundingHead(nn.Module): + def __init__(self, config): + super().__init__() + self.layer_0 = nn.Linear(config.hidden_size, config.hidden_size * 2) + self.layer_1 = nn.Linear(config.hidden_size * 2, 2) + self.activation_0 = nn.ReLU() + self.activation_1 = nn.Sigmoid() + + def forward(self, pooler_output): + logits = self.activation_0(self.layer_0(pooler_output)) + logits = self.activation_1(self.layer_1(logits)) + return logits + + +@add_start_docstrings( + """ + Tvp Model with a video grounding head on top computing IoU, distance, and duration loss. + """, + TVP_START_DOCSTRING, +) +class TvpForVideoGrounding(TvpPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + self.model = TvpModel(config) + self.video_grounding_head = TvpVideoGroundingHead(config) + + self.post_init() + + @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TvpVideoGroundingOutput, config_class=TvpConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + labels: Tuple[torch.Tensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*): + The labels contains duration, start time, and end time of the video corresponding to the text. 
+ Returns: + + Examples: + ```python + >>> import torch + >>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding + + >>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp") + + >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") + + >>> pixel_values = torch.rand(1, 1, 3, 448, 448) + >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") + >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) + ```""" + return_dict = return_dict if return_dict is not None else self.config.return_dict + outputs = self.model( + input_ids, + pixel_values, + attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pooler_output = outputs[1] + + logits = self.video_grounding_head(pooler_output) + + loss = None + if labels is not None: + criterion = TvpLoss(["iou", "distance", "duration"]) + criterion.to(self.device) + loss_dict = criterion(logits, labels) + loss = ( + loss_dict["iou"] + + self.config.distance_loss_weight * loss_dict["distance"] + + self.config.duration_loss_weight * loss_dict["duration"] + ) + + if not return_dict: + outputs = (logits,) + outputs[2:] + if loss is not None: + outputs = (loss,) + outputs + return outputs + + return TvpVideoGroundingOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/models/tvp/processing_tvp.py b/src/transformers/models/tvp/processing_tvp.py new file mode 100644 index 000000000000..4e27399ab805 --- /dev/null +++ b/src/transformers/models/tvp/processing_tvp.py @@ -0,0 +1,154 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Processor class for TVP. +""" + + +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding + + +class TvpProcessor(ProcessorMixin): + r""" + Constructs an TVP processor which wraps a TVP image processor and a Bert tokenizer into a single processor. + + [`TvpProcessor`] offers all the functionalities of [`TvpImageProcessor`] and [`BertTokenizerFast`]. See the + [`~TvpProcessor.__call__`] and [`~TvpProcessor.decode`] for more information. + + Args: + image_processor ([`TvpImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`BertTokenizerFast`], *optional*): + The tokenizer is a required input. 
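+
+    Example (a minimal sketch; it assumes a checkpoint that ships both tokenizer and image processor files, and uses
+    random frames in place of a real video):
+
+    ```python
+    >>> import numpy as np
+    >>> from transformers import TvpProcessor
+
+    >>> processor = TvpProcessor.from_pretrained("Jiqing/tiny-random-tvp")
+    >>> video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(2)]
+    >>> inputs = processor(text=["a person opens the door"], videos=video, return_tensors="pt")
+    >>> # `inputs` now holds `input_ids`, `attention_mask` and `pixel_values`
+    ```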
+ """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "TvpImageProcessor" + tokenizer_class = ("BertTokenizer", "BertTokenizerFast") + + def __init__(self, image_processor=None, tokenizer=None, **kwargs): + if image_processor is None: + raise ValueError("You need to specify an `image_processor`.") + if tokenizer is None: + raise ValueError("You need to specify a `tokenizer`.") + + super().__init__(image_processor, tokenizer) + + def __call__(self, text=None, videos=None, return_tensors=None, **kwargs): + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` + and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the image(s), this method forwards the `videos` and `kwargs` arguments to + TvpImageProcessor's [`~TvpImageProcessor.__call__`] if `videos` is not `None`. Please refer to the doctsring of + the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + videos (`List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[PIL.Image.Image]]`, `List[List[np.ndarrray]]`,: + `List[List[torch.Tensor]]`): The video or batch of videos to be prepared. Each video should be a list + of frames, which can be either PIL images or NumPy arrays. In case of NumPy arrays/PyTorch tensors, + each frame should be of shape (H, W, C), where H and W are frame height and width, and C is a number of + channels. + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `videos` is not `None`. + """ + + max_text_length = kwargs.pop("max_text_length", None) + + if text is None and videos is None: + raise ValueError("You have to specify either text or videos. Both cannot be none.") + + encoding = {} + if text is not None: + textual_input = self.tokenizer.batch_encode_plus( + text, + truncation=True, + padding="max_length", + max_length=max_text_length, + pad_to_max_length=True, + return_tensors=return_tensors, + return_token_type_ids=False, + **kwargs, + ) + encoding.update(textual_input) + + if videos is not None: + image_features = self.image_processor(videos, return_tensors=return_tensors, **kwargs) + encoding.update(image_features) + + return BatchEncoding(data=encoding, tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. 
Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + def post_process_video_grounding(self, logits, video_durations): + """ + Compute the time of the video. + + Args: + logits (`torch.Tensor`): + The logits output of TvpForVideoGrounding. + video_durations (`float`): + The video's duration. + + Returns: + start (`float`): + The start time of the video. + end (`float`): + The end time of the video. + """ + start, end = ( + round(logits.tolist()[0][0] * video_durations, 1), + round(logits.tolist()[0][1] * video_durations, 1), + ) + + return start, end + + @property + # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index c6b20c7e3674..2de58f74595d 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -7829,6 +7829,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +TVP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TvpForVideoGrounding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvpModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvpPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class UMT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index c4a3c9312ac8..f1a10ff5710a 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -485,6 +485,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class TvpImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class VideoMAEFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] diff --git a/tests/models/tvp/__init__.py b/tests/models/tvp/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/tvp/test_image_processing_tvp.py b/tests/models/tvp/test_image_processing_tvp.py new file mode 100644 index 000000000000..1c9a84beb842 --- /dev/null +++ b/tests/models/tvp/test_image_processing_tvp.py @@ -0,0 +1,306 @@ +# coding=utf-8 +# Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest +from typing import Dict, List, Optional, Union + +import numpy as np + +from transformers.image_transforms import PaddingMode +from transformers.testing_utils import require_torch, require_vision +from transformers.utils import is_torch_available, is_vision_available + +from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import TvpImageProcessor + + +class TvpImageProcessingTester(unittest.TestCase): + def __init__( + self, + parent, + do_resize: bool = True, + size: Dict[str, int] = {"longest_edge": 40}, + do_center_crop: bool = False, + crop_size: Dict[str, int] = None, + do_rescale: bool = False, + rescale_factor: Union[int, float] = 1 / 255, + do_pad: bool = True, + pad_size: Dict[str, int] = {"height": 80, "width": 80}, + fill: int = None, + pad_mode: PaddingMode = None, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], + image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], + batch_size=2, + min_resolution=40, + max_resolution=80, + num_channels=3, + num_frames=2, + ): + self.do_resize = do_resize + self.size = size + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_pad = do_pad + self.pad_size = pad_size + self.fill = fill + self.pad_mode = pad_mode + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + self.batch_size = batch_size + self.num_channels = num_channels + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.num_frames = num_frames + + def prepare_image_processor_dict(self): + return { + "image_mean": self.image_mean, + "image_std": self.image_std, + "do_normalize": self.do_normalize, + "do_resize": self.do_resize, + "size": self.size, + "do_rescale": self.do_rescale, + "do_center_crop": self.do_center_crop, + "do_pad": self.do_pad, + "pad_size": self.pad_size, + } + + def get_expected_values(self, image_inputs, batched=False): + """ + This function computes the expected height and width when providing images to TvpImageProcessor, + assuming do_resize is set to True with a scalar size. 
+ """ + if not batched: + return (int(self.pad_size["height"]), int(self.pad_size["width"])) + + else: + expected_values = [] + for image in image_inputs: + expected_height, expected_width = self.get_expected_values([image]) + expected_values.append((expected_height, expected_width)) + expected_height = max(expected_values, key=lambda item: item[0])[0] + expected_width = max(expected_values, key=lambda item: item[1])[1] + + return expected_height, expected_width + + def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): + return prepare_video_inputs( + batch_size=self.batch_size, + num_frames=self.num_frames, + num_channels=self.num_channels, + min_resolution=self.min_resolution, + max_resolution=self.max_resolution, + equal_resolution=equal_resolution, + numpify=numpify, + torchify=torchify, + ) + + +@require_torch +@require_vision +class TvpImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): + image_processing_class = TvpImageProcessor if is_vision_available() else None + + def setUp(self): + self.image_processor_tester = TvpImageProcessingTester(self) + + @property + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_rescale")) + self.assertTrue(hasattr(image_processing, "do_pad")) + self.assertTrue(hasattr(image_processing, "pad_size")) + + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) + self.assertEqual(image_processor.size, {"longest_edge": 40}) + + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 12}) + self.assertEqual(image_processor.size, {"longest_edge": 12}) + + def test_call_pil(self): + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) + # create random PIL videos + video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) + for video in video_inputs: + self.assertIsInstance(video, list) + self.assertIsInstance(video[0], Image.Image) + + # Test not batched input + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) + encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + 1, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) + + # Test batched + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) + encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + self.image_processor_tester.batch_size, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_call_numpy(self): + # Initialize image_processing + image_processing = 
self.image_processing_class(**self.image_processor_dict) + # create random numpy tensors + video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) + for video in video_inputs: + self.assertIsInstance(video, list) + self.assertIsInstance(video[0], np.ndarray) + + # Test not batched input + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) + encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + 1, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) + + # Test batched + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) + encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + self.image_processor_tester.batch_size, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_call_numpy_4_channels(self): + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) + # create random numpy tensors + video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) + for video in video_inputs: + self.assertIsInstance(video, list) + self.assertIsInstance(video[0], np.ndarray) + + # Test not batched input + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) + encoded_videos = image_processing( + video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + ).pixel_values + self.assertEqual( + encoded_videos.shape, + ( + 1, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) + + # Test batched + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) + encoded_videos = image_processing( + video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + ).pixel_values + self.assertEqual( + encoded_videos.shape, + ( + self.image_processor_tester.batch_size, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) + self.image_processor_tester.num_channels = 3 + + def test_call_pytorch(self): + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) + # create random PyTorch tensors + video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) + for video in video_inputs: + self.assertIsInstance(video, list) + self.assertIsInstance(video[0], torch.Tensor) + + # Test not batched input + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) + encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + 1, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) + + # Test batched + expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) + encoded_videos = image_processing(video_inputs, 
return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + self.image_processor_tester.batch_size, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + expected_height, + expected_width, + ), + ) diff --git a/tests/models/tvp/test_modeling_tvp.py b/tests/models/tvp/test_modeling_tvp.py new file mode 100644 index 000000000000..b81635888c74 --- /dev/null +++ b/tests/models/tvp/test_modeling_tvp.py @@ -0,0 +1,261 @@ +# coding=utf-8 +# Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch TVP model. """ + + +import unittest + +from transformers import ResNetConfig, TvpConfig +from transformers.testing_utils import require_torch, require_vision, torch_device +from transformers.utils import cached_property, is_torch_available, is_vision_available + +from ...test_modeling_common import ( + ModelTesterMixin, + _config_zero_init, + floats_tensor, + ids_tensor, + random_attention_mask, +) +from ...test_pipeline_mixin import PipelineTesterMixin + + +if is_torch_available(): + import torch + + from transformers import TvpForVideoGrounding, TvpModel + +if is_vision_available(): + from PIL import Image + + from transformers import TvpImageProcessor + + +# Copied from test.models.videomae.test_modeling_videomae.VideoMAEModelTester with VideoMAE->TVP +class TVPModelTester: + def __init__( + self, + parent, + batch_size=1, + seq_length=2, + alpha=1.0, + beta=0.1, + visual_prompter_type="framepad", + visual_prompter_apply="replace", + num_frames=2, + max_img_size=448, + visual_prompt_size=96, + vocab_size=100, + hidden_size=32, + intermediate_size=32, + num_hidden_layers=2, + num_attention_heads=4, + max_position_embeddings=30, + max_grid_col_position_embeddings=30, + max_grid_row_position_embeddings=30, + hidden_dropout_prob=0.1, + hidden_act="gelu", + layer_norm_eps=1e-12, + initializer_range=0.02, + pad_token_id=0, + type_vocab_size=2, + attention_probs_dropout_prob=0.1, + ): + self.parent = parent + self.batch_size = batch_size + self.input_id_length = seq_length + self.seq_length = seq_length + 10 + 784 # include text prompt length and visual input length + self.alpha = alpha + self.beta = beta + self.visual_prompter_type = visual_prompter_type + self.visual_prompter_apply = visual_prompter_apply + self.num_frames = num_frames + self.max_img_size = max_img_size + self.visual_prompt_size = visual_prompt_size + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.max_grid_col_position_embeddings = max_grid_col_position_embeddings + self.max_grid_row_position_embeddings = 
max_grid_row_position_embeddings + self.layer_norm_eps = layer_norm_eps + self.initializer_range = initializer_range + self.pad_token_id = pad_token_id + self.type_vocab_size = type_vocab_size + self.is_training = False + self.num_channels = 3 + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.input_id_length], self.vocab_size) + attention_mask = random_attention_mask([self.batch_size, self.input_id_length]) + pixel_values = floats_tensor( + [self.batch_size, self.num_frames, self.num_channels, self.max_img_size, self.max_img_size] + ) + + config = self.get_config() + + return (config, input_ids, pixel_values, attention_mask) + + def get_config(self): + resnet_config = ResNetConfig( + num_channels=3, + embeddings_size=64, + hidden_sizes=[64, 128], + depths=[2, 2], + hidden_act="relu", + out_features=["stage2"], + out_indices=[2], + ) + return TvpConfig( + backbone_config=resnet_config, + alpha=self.alpha, + beta=self.beta, + visual_prompter_type=self.visual_prompter_type, + visual_prompter_apply=self.visual_prompter_apply, + num_frames=self.num_frames, + max_img_size=self.max_img_size, + visual_prompt_size=self.visual_prompt_size, + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + max_grid_col_position_embeddings=self.max_grid_col_position_embeddings, + max_grid_row_position_embeddings=self.max_grid_row_position_embeddings, + layer_norm_eps=self.layer_norm_eps, + initializer_range=self.initializer_range, + pad_token_id=self.pad_token_id, + type_vocab_size=self.type_vocab_size, + ) + + def create_and_check_model(self, config, input_ids, pixel_values, attention_mask): + model = TvpModel(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, pixel_values, attention_mask) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, pixel_values, attention_mask = config_and_inputs + inputs_dict = {"input_ids": input_ids, "pixel_values": pixel_values, "attention_mask": attention_mask} + return config, inputs_dict + + +@require_torch +class TVPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as TVP does not use, inputs_embeds. + The seq_length in TVP contain textual and visual inputs, and prompt. 
+ """ + + all_model_classes = (TvpModel, TvpForVideoGrounding) if is_torch_available() else () + pipeline_model_mapping = ( + {"feature-extraction": TvpModel, "temporal-video-grounding": TvpForVideoGrounding} + if is_torch_available() + else {} + ) + + def setUp(self): + self.model_tester = TVPModelTester(self) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + @unittest.skip(reason="TVP does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="TVPModel does not have input/output embeddings") + def test_model_common_attributes(self): + pass + + # override as the `logit_scale` parameter initilization is different for TVP + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if param.requires_grad: + # params are randomly initialized. + self.assertAlmostEqual( + param.data.mean().item(), + 0.0, + delta=1.0, + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_vision +@require_torch +class TvpModelIntegrationTests(unittest.TestCase): + @cached_property + def default_image_processor(self): + return TvpImageProcessor.from_pretrained("Jiqing/tiny-random-tvp") if is_vision_available() else None + + def test_inference_no_head(self): + model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) + + image_processor = self.default_image_processor + image = prepare_img() + encoding = image_processor(images=image, return_tensors="pt").to(torch_device) + input_ids = torch.tensor([[1, 2]]) + attention_mask = torch.tensor([[1, 1]]) + encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) + + with torch.no_grad(): + outputs = model(**encoding) + + expected_shape = torch.Size((1, 796, 128)) + assert outputs.last_hidden_state.shape == expected_shape + expected_slice = torch.tensor( + [[-0.4902, -0.4121, -1.7872], [-0.2184, 2.1211, -0.9371], [0.1180, 0.5003, -0.1727]] + ).to(torch_device) + self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) + + def test_inference_with_head(self): + model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) + + image_processor = self.default_image_processor + image = prepare_img() + encoding = image_processor(images=image, return_tensors="pt").to(torch_device) + input_ids = torch.tensor([[1, 2]]) + attention_mask = torch.tensor([[1, 1]]) + encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) + + with torch.no_grad(): + outputs = model(**encoding) + + expected_shape = torch.Size((1, 2)) + assert outputs.logits.shape == expected_shape + expected_slice = torch.tensor([[0.5061, 0.4988]]).to(torch_device) + self.assertTrue(torch.allclose(outputs.logits, expected_slice, atol=1e-4)) diff --git a/utils/check_repo.py b/utils/check_repo.py index d740eefed019..cac78bfe80c6 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -114,7 +114,7 @@ "BridgeTowerTextModel", # No need to test it as it is tested by BridgeTowerModel model. 
"BridgeTowerVisionModel", # No need to test it as it is tested by BridgeTowerModel model. "BarkCausalModel", # Building part of bigger (tested) model. - "BarkModel", # Does not have a forward signature - generation tested with integration tests + "BarkModel", # Does not have a forward signature - generation tested with integration tests. "SeamlessM4TTextToUnitModel", # Building part of bigger (tested) model. "SeamlessM4TCodeHifiGan", # Building part of bigger (tested) model. "SeamlessM4TTextToUnitForConditionalGeneration", # Building part of bigger (tested) model. @@ -293,6 +293,7 @@ "SeamlessM4TTextToUnitForConditionalGeneration", "SeamlessM4TCodeHifiGan", "SeamlessM4TForSpeechToSpeech", # no auto class for speech-to-speech + "TvpForVideoGrounding", ] # DO NOT edit this list! From 7f04373865393f625fb8f20bdabdab188120f9b8 Mon Sep 17 00:00:00 2001 From: fxmarty <9808326+fxmarty@users.noreply.github.com> Date: Tue, 21 Nov 2023 17:53:10 +0100 Subject: [PATCH 233/268] Explicitely specify `use_cache=True` in Flash Attention tests (#27635) explicit use_cache=True --- tests/models/mistral/test_modeling_mistral.py | 6 +++++- tests/test_modeling_common.py | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index 31426435d09f..0c28f46d5ec2 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -436,7 +436,11 @@ def test_flash_attn_2_generate_use_cache(self): # Just test that a large cache works as expected _ = model.generate( - dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False + dummy_input, + attention_mask=dummy_attention_mask, + max_new_tokens=max_new_tokens, + do_sample=False, + use_cache=True, ) @require_flash_attn diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 9d9e96db4347..c69b5ed77fe5 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -3166,7 +3166,11 @@ def test_flash_attn_2_generate_use_cache(self): # Just test that a large cache works as expected _ = model.generate( - dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False + dummy_input, + attention_mask=dummy_attention_mask, + max_new_tokens=max_new_tokens, + do_sample=False, + use_cache=True, ) @require_flash_attn From d2a980ec74db8bc3ea0104e126cbf5d4b1f0e73b Mon Sep 17 00:00:00 2001 From: Lucain Date: Tue, 21 Nov 2023 18:36:26 +0100 Subject: [PATCH 234/268] Harmonize HF environment variables + other cleaning (#27564) * Harmonize HF environment variables + other cleaning * backward compat * switch from HUGGINGFACE_HUB_CACHE to HF_HUB_CACHE * revert --- src/transformers/dynamic_module_utils.py | 3 +- src/transformers/utils/hub.py | 157 +++++++++-------------- 2 files changed, 61 insertions(+), 99 deletions(-) diff --git a/src/transformers/dynamic_module_utils.py b/src/transformers/dynamic_module_utils.py index ebf772a959e9..7cdc0ad93d52 100644 --- a/src/transformers/dynamic_module_utils.py +++ b/src/transformers/dynamic_module_utils.py @@ -25,6 +25,8 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Union +from huggingface_hub import try_to_load_from_cache + from .utils import ( HF_MODULES_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, @@ -32,7 +34,6 @@ extract_commit_hash, is_offline_mode, logging, - try_to_load_from_cache, ) diff --git a/src/transformers/utils/hub.py 
b/src/transformers/utils/hub.py index f6cf0a852ed7..8d2f77da6845 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -31,13 +31,16 @@ import huggingface_hub import requests from huggingface_hub import ( + _CACHED_NO_EXIST, CommitOperationAdd, + constants, create_branch, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, + try_to_load_from_cache, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( @@ -49,7 +52,9 @@ RevisionNotFoundError, build_hf_headers, hf_raise_for_status, + send_telemetry, ) +from huggingface_hub.utils._deprecation import _deprecate_method from requests.exceptions import HTTPError from . import __version__, logging @@ -75,17 +80,25 @@ def is_offline_mode(): torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) +default_cache_path = constants.default_cache_path old_default_cache_path = os.path.join(torch_cache_home, "transformers") -# New default cache, shared with the Datasets library -hf_cache_home = os.path.expanduser( - os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) -) -default_cache_path = os.path.join(hf_cache_home, "hub") + +# Determine default cache directory. Lots of legacy environment variables to ensure backward compatibility. +# The best way to set the cache path is with the environment variable HF_HOME. For more details, checkout this +# documentation page: https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables. +# +# In code, use `HF_HUB_CACHE` as the default cache path. This variable is set by the library and is guaranteed +# to be set to the right value. +# +# TODO: clean this for v5? +PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", constants.HF_HUB_CACHE) +PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) +TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) # Onetime move from the old location to the new one if no ENV variable has been set. if ( os.path.isdir(old_default_cache_path) - and not os.path.isdir(default_cache_path) + and not os.path.isdir(constants.HF_HUB_CACHE) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ @@ -97,16 +110,26 @@ def is_offline_mode(): " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" " only see this message once." 
) - shutil.move(old_default_cache_path, default_cache_path) + shutil.move(old_default_cache_path, constants.HF_HUB_CACHE) -PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) -PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) -HUGGINGFACE_HUB_CACHE = os.getenv("HUGGINGFACE_HUB_CACHE", PYTORCH_TRANSFORMERS_CACHE) -TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", HUGGINGFACE_HUB_CACHE) -HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) +HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(constants.HF_HOME, "modules")) TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules" SESSION_ID = uuid4().hex -DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", False) in ENV_VARS_TRUE_VALUES +DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", constants.HF_HUB_DISABLE_TELEMETRY) in ENV_VARS_TRUE_VALUES + +# Add deprecation warning for old environment variables. +for key in ("PYTORCH_PRETRAINED_BERT_CACHE", "PYTORCH_TRANSFORMERS_CACHE", "TRANSFORMERS_CACHE"): + if os.getenv(key) is not None: + warnings.warn( + f"Using `{key}` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.", + FutureWarning, + ) +if os.getenv("DISABLE_TELEMETRY") is not None: + warnings.warn( + "Using `DISABLE_TELEMETRY` is deprecated and will be removed in v5 of Transformers. Use `HF_HUB_DISABLE_TELEMETRY` instead.", + FutureWarning, + ) + S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" @@ -126,15 +149,16 @@ def is_offline_mode(): HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}" HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples" -# Return value when trying to load a file from cache but the file does not exist in the distant repo. -_CACHED_NO_EXIST = object() - def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") +# TODO: remove this once fully deprecated +# TODO? remove from './examples/research_projects/lxmert/utils.py' as well +# TODO? remove from './examples/research_projects/visual_bert/utils.py' as well +@_deprecate_method(version="4.39.0", message="This method is outdated and does not support the new cache system.") def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: """ Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, @@ -219,7 +243,7 @@ def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: return ua -def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]): +def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]) -> Optional[str]: """ Extracts the commit hash from a resolved filename toward a cache file. """ @@ -233,73 +257,6 @@ def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None -def try_to_load_from_cache( - repo_id: str, - filename: str, - cache_dir: Union[str, Path, None] = None, - revision: Optional[str] = None, - repo_type: Optional[str] = None, -) -> Optional[str]: - """ - Explores the cache to return the latest cached file for a given revision if found. - - This function will not raise any exception if the file in not cached. 
- - Args: - cache_dir (`str` or `os.PathLike`): - The folder where the cached files lie. - repo_id (`str`): - The ID of the repo on huggingface.co. - filename (`str`): - The filename to look for inside `repo_id`. - revision (`str`, *optional*): - The specific model version to use. Will default to `"main"` if it's not provided and no `commit_hash` is - provided either. - repo_type (`str`, *optional*): - The type of the repo. - - Returns: - `Optional[str]` or `_CACHED_NO_EXIST`: - Will return `None` if the file was not cached. Otherwise: - - The exact path to the cached file if it's found in the cache - - A special value `_CACHED_NO_EXIST` if the file does not exist at the given commit hash and this fact was - cached. - """ - if revision is None: - revision = "main" - - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - - object_id = repo_id.replace("/", "--") - if repo_type is None: - repo_type = "model" - repo_cache = os.path.join(cache_dir, f"{repo_type}s--{object_id}") - if not os.path.isdir(repo_cache): - # No cache for this model - return None - for subfolder in ["refs", "snapshots"]: - if not os.path.isdir(os.path.join(repo_cache, subfolder)): - return None - - # Resolve refs (for instance to convert main to the associated commit sha) - cached_refs = os.listdir(os.path.join(repo_cache, "refs")) - if revision in cached_refs: - with open(os.path.join(repo_cache, "refs", revision)) as f: - revision = f.read() - - if os.path.isfile(os.path.join(repo_cache, ".no_exist", revision, filename)): - return _CACHED_NO_EXIST - - cached_shas = os.listdir(os.path.join(repo_cache, "snapshots")) - if revision not in cached_shas: - # No cache for this revision and we won't try to return a random revision - return None - - cached_file = os.path.join(repo_cache, "snapshots", revision, filename) - return cached_file if os.path.isfile(cached_file) else None - - def cached_file( path_or_repo_id: Union[str, os.PathLike], filename: str, @@ -317,7 +274,7 @@ def cached_file( _raise_exceptions_for_connection_errors: bool = True, _commit_hash: Optional[str] = None, **deprecated_kwargs, -): +) -> Optional[str]: """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. @@ -369,7 +326,8 @@ def cached_file( ```python # Download a model weight from the Hub and cache it. model_weights_file = cached_file("bert-base-uncased", "pytorch_model.bin") - ```""" + ``` + """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( @@ -499,6 +457,10 @@ def cached_file( return resolved_file +# TODO: deprecate `get_file_from_repo` or document it differently? +# Docstring is exactly the same as `cached_repo` but behavior is slightly different. If file is missing or if +# there is a connection error, `cached_repo` will return None while `get_file_from_repo` will raise an error. +# IMO we should keep only 1 method and have a single `raise_error` argument (to be discussed). def get_file_from_repo( path_or_repo: Union[str, os.PathLike], filename: str, @@ -564,7 +526,8 @@ def get_file_from_repo( tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") # This model does not have a tokenizer config so the result will be None. 
 tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json")
-    ```"""
+    ```
+    """
     use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
     if use_auth_token is not None:
         warnings.warn(
@@ -609,10 +572,11 @@ def download_url(url, proxies=None):
         f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in"
         " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note"
         " that this is not compatible with the caching system (your file will be downloaded at each execution) or"
-        " multiple processes (each process will download the file in a different temporary file)."
+        " multiple processes (each process will download the file in a different temporary file).",
+        FutureWarning,
     )
-    tmp_file = tempfile.mkstemp()[1]
-    with open(tmp_file, "wb") as f:
+    tmp_fd, tmp_file = tempfile.mkstemp()
+    with os.fdopen(tmp_fd, "wb") as f:
         http_get(url, f, proxies=proxies)
     return tmp_file

@@ -947,13 +911,10 @@ def send_example_telemetry(example_name, *example_args, framework="pytorch"):
         script_name = script_name.replace("_no_trainer", "")
         data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}"

-    headers = {"user-agent": http_user_agent(data)}
-    try:
-        r = requests.head(HUGGINGFACE_CO_EXAMPLES_TELEMETRY, headers=headers)
-        r.raise_for_status()
-    except Exception:
-        # We don't want to error in case of connection errors of any kind.
-        pass
+    # Send telemetry in the background
+    send_telemetry(
+        topic="examples", library_name="transformers", library_version=__version__, user_agent=http_user_agent(data)
+    )


 def convert_file_size_to_int(size: Union[int, str]):
@@ -1258,7 +1219,7 @@ def cancel(self) -> None:
             "`transformers.utils.move_cache()`."
         )
         try:
-            if TRANSFORMERS_CACHE != default_cache_path:
+            if TRANSFORMERS_CACHE != constants.HF_HUB_CACHE:
                 # Users set some env variable to customize cache storage
                 move_cache(TRANSFORMERS_CACHE, TRANSFORMERS_CACHE)
             else:

From c5be38cd27bee92be73c73ba09aec8bedf841423 Mon Sep 17 00:00:00 2001
From: Ziyu Chen <55686423+czy-orange@users.noreply.github.com>
Date: Wed, 22 Nov 2023 01:51:48 +0800
Subject: [PATCH 235/268] Fix `resize_token_embeddings` (#26861) (#26865)

* Fix `resize_token_embeddings` about `requires_grad`

The method `resize_token_embeddings` should keep `requires_grad` unchanged for all
parameters in embeddings. Previously, `resize_token_embeddings` always set
`requires_grad` to `True`. After this fix, `resize_token_embeddings` copies the
`requires_grad` attribute from the old embeddings.
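For illustration, the snippet below is a minimal sketch (not part of the patch) of the behaviour this commit describes, assuming a standard checkpoint such as `bert-base-uncased` is available: an embedding matrix that was frozen before resizing stays frozen afterwards.

```python
# Minimal sketch of the behaviour described above (assumes `bert-base-uncased` can be downloaded).
from transformers import AutoModel

model = AutoModel.from_pretrained("bert-base-uncased")

# Freeze the input embeddings before resizing.
model.get_input_embeddings().weight.requires_grad = False

# Grow the vocabulary; with this fix the new embedding keeps the old `requires_grad` value.
model.resize_token_embeddings(model.config.vocab_size + 8)

assert model.get_input_embeddings().weight.requires_grad is False
```

Before this change, the final assertion would fail because the resized embeddings were always created with `requires_grad=True`.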
--- src/transformers/modeling_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index cee472036b27..e2b27de7d1e5 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1586,6 +1586,8 @@ def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): if hasattr(old_embeddings, "_hf_hook"): hook = old_embeddings._hf_hook add_hook_to_module(new_embeddings, hook) + old_embeddings_requires_grad = old_embeddings.weight.requires_grad + new_embeddings.requires_grad_(old_embeddings_requires_grad) self.set_input_embeddings(new_embeddings) # Update new_num_tokens with the actual size of new_embeddings @@ -1605,6 +1607,8 @@ def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): if hasattr(old_lm_head, "_hf_hook"): hook = old_lm_head._hf_hook add_hook_to_module(new_lm_head, hook) + old_lm_head_requires_grad = old_lm_head.weight.requires_grad + new_lm_head.requires_grad_(old_lm_head_requires_grad) self.set_output_embeddings(new_lm_head) return self.get_input_embeddings() From b54993aa94f744598129db32d7e1ec4e7c299099 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Wed, 22 Nov 2023 09:40:30 +0100 Subject: [PATCH 236/268] [`dependency`] update pillow pins (#27409) * update pillow pins * Apply suggestions from code review * more freedomin pins --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index deccac468a8a..a51f2a7a5266 100644 --- a/setup.py +++ b/setup.py @@ -95,7 +95,7 @@ # 1. all dependencies should be listed here with their version requirements if any # 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py _deps = [ - "Pillow<10.0.0", + "Pillow>=10.0.1,<=15.0", "accelerate>=0.20.3", "av==9.2.0", # Latest version of PyAV (10.0.0) has issues with audio stream. "beautifulsoup4", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 93e21ab2d3e5..08fddd2e1ecc 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -2,7 +2,7 @@ # 1. modify the `_deps` dict in setup.py # 2. 
run `make deps_table_update`` deps = { - "Pillow": "Pillow<10.0.0", + "Pillow": "Pillow>=10.0.1,<=15.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", From c651eb23c37c0acda2d9c4e888c93b0c852143f6 Mon Sep 17 00:00:00 2001 From: Wangyi Jiang Date: Wed, 22 Nov 2023 18:49:40 +0800 Subject: [PATCH 237/268] Simplify the implementation of jitter noise in moe models (#27643) --- .../gptsan_japanese/modeling_gptsan_japanese.py | 11 +---------- .../modeling_switch_transformers.py | 11 +---------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py index 2ac76be8d259..cb0d85722262 100644 --- a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py @@ -188,17 +188,8 @@ def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[to hidden_states = hidden_states.to(self.dtype) if self.jitter_noise > 0: - # Get the lower and upper bound of the uniform distribution - # Adapted from: https://stackoverflow.com/questions/44328530/how-to-get-a-uniform-distribution-in-a-range-r1-r2-in-pytorch - distrib_lower_bound = 1.0 - self.jitter_noise - distrib_upper_bound = 1.0 + self.jitter_noise - - uniform_distrib = torch.rand(hidden_states.shape, device=hidden_states.device, dtype=self.dtype) - uniform_distrib = uniform_distrib * (distrib_lower_bound - distrib_upper_bound) - - uniform_distrib = uniform_distrib + distrib_upper_bound # Multiply the token inputs by the uniform distribution - adding some noise - hidden_states *= uniform_distrib + hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) # Shape: [num_groups, tokens_per_group, num_experts] self._cast_classifier() diff --git a/src/transformers/models/switch_transformers/modeling_switch_transformers.py b/src/transformers/models/switch_transformers/modeling_switch_transformers.py index 60c100d37c48..3a6d19c3478d 100644 --- a/src/transformers/models/switch_transformers/modeling_switch_transformers.py +++ b/src/transformers/models/switch_transformers/modeling_switch_transformers.py @@ -169,17 +169,8 @@ def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[to hidden_states = hidden_states.to(self.dtype) if self.jitter_noise > 0: - # Get the lower and upper bound of the uniform distribution - # Adapted from: https://stackoverflow.com/questions/44328530/how-to-get-a-uniform-distribution-in-a-range-r1-r2-in-pytorch - distrib_lower_bound = 1.0 - self.jitter_noise - distrib_upper_bound = 1.0 + self.jitter_noise - - uniform_distrib = torch.rand(hidden_states.shape, device=hidden_states.device, dtype=self.dtype) - uniform_distrib = uniform_distrib * (distrib_lower_bound - distrib_upper_bound) - - uniform_distrib = uniform_distrib + distrib_upper_bound # Multiply the token inputs by the uniform distribution - adding some noise - hidden_states *= uniform_distrib + hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) # Shape: [num_groups, tokens_per_group, num_experts] self._cast_classifier() From b2c63c79c3c7658c5783340dba9d049cf3824e5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Quentin=20Gallou=C3=A9dec?= <45557362+qgallouedec@users.noreply.github.com> Date: Wed, 22 Nov 2023 12:10:11 +0100 Subject: [PATCH 238/268] Fix `max_steps` documentation 
regarding the end-of-training condition (#27624) * fix max_steps doc * Update src/transformers/training_args.py [ci skip] Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * propagate suggested change --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- src/transformers/training_args.py | 16 ++++++++-------- src/transformers/training_args_tf.py | 2 ++ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index b368d86e0ed8..430ec257426d 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -234,8 +234,8 @@ class TrainingArguments: the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. - In case of using a finite iterable dataset the training may stop before reaching the set number of steps - when all data is exhausted + For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until + `max_steps` is reached. lr_scheduler_type (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. lr_scheduler_kwargs ('dict', *optional*, defaults to {}): @@ -2181,9 +2181,9 @@ def set_training( Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): - If set to a positive number, the total number of training steps to perform. Overrides - `num_train_epochs`. In case of using a finite iterable dataset the training may stop before reaching - the set number of steps when all data is exhausted. + If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. + For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until + `max_steps` is reached. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. @@ -2588,9 +2588,9 @@ def set_lr_scheduler( Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): - If set to a positive number, the total number of training steps to perform. Overrides - `num_train_epochs`. In case of using a finite iterable dataset the training may stop before reaching - the set number of steps when all data is exhausted. + If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. + For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until + `max_steps` is reached. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): diff --git a/src/transformers/training_args_tf.py b/src/transformers/training_args_tf.py index 461c4086acc3..5a13cc551b69 100644 --- a/src/transformers/training_args_tf.py +++ b/src/transformers/training_args_tf.py @@ -92,6 +92,8 @@ class TFTrainingArguments(TrainingArguments): Total number of training epochs to perform. 
max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. + For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until + `max_steps` is reached. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): From 4151fbb49c42bd22f8bf18b1773e09aa84846bdd Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 22 Nov 2023 13:27:34 +0100 Subject: [PATCH 239/268] [Whisper] Add sequential longform decoding (#27492) * [Whisper] Add seq gen * [Whisper] Add seq gen * more debug * Fix whisper logit processor * Improve whisper code further * Fix more * more debug * more debug * Improve further * Add tests * Prep for batch size > 1 * Get batch_size>1 working * Correct more * Add extensive tests * more debug * more debug * more debug * add more tests * more debug * Apply suggestions from code review * more debug * add comments to explain the code better * add comments to explain the code better * add comments to explain the code better * Add more examples * add comments to explain the code better * fix more * add comments to explain the code better * add comments to explain the code better * correct * correct * finalize * Apply suggestions from code review * Apply suggestions from code review --- src/transformers/generation/logits_process.py | 50 +- .../models/whisper/modeling_whisper.py | 443 ++++++++++++++++-- .../pipelines/automatic_speech_recognition.py | 22 +- tests/models/whisper/test_modeling_whisper.py | 322 ++++++++++++- ..._pipelines_automatic_speech_recognition.py | 82 ++-- 5 files changed, 836 insertions(+), 83 deletions(-) diff --git a/src/transformers/generation/logits_process.py b/src/transformers/generation/logits_process.py index d1704ed02074..6c7b84f6ae67 100644 --- a/src/transformers/generation/logits_process.py +++ b/src/transformers/generation/logits_process.py @@ -1487,6 +1487,7 @@ class WhisperTimeStampLogitsProcessor(LogitsProcessor): max_initial_timestamp_index (`int`, *optional*, defaults to 1): Used to set the maximum value of the initial timestamp. This is used to prevent the model from predicting timestamps that are too far in the future. + _detect_timestamp_from_logprob (`bool`, *optional*): Whether timestamps can be predicted from logprobs over all timestamps. 
Examples: ``` python @@ -1517,29 +1518,35 @@ class WhisperTimeStampLogitsProcessor(LogitsProcessor): ``` """ - def __init__(self, generate_config): # support for the kwargs + def __init__( + self, generate_config, _detect_timestamp_from_logprob: Optional[bool] = None + ): # support for the kwargs self.eos_token_id = generate_config.eos_token_id self.no_timestamps_token_id = generate_config.no_timestamps_token_id self.timestamp_begin = generate_config.no_timestamps_token_id + 1 - self.begin_index = len(generate_config.forced_decoder_ids) + 2 - if generate_config.forced_decoder_ids[-1][1] == self.no_timestamps_token_id: - self.begin_index -= 1 - self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index + # this variable is mostly just used for testing + self._detect_timestamp_from_logprob = ( + _detect_timestamp_from_logprob + if _detect_timestamp_from_logprob is not None + else getattr(generate_config, "_detect_timestamp_from_logprob", True) + ) + + self.begin_index = ( + len(generate_config.forced_decoder_ids) + 1 if generate_config.forced_decoder_ids is not None else 1 + ) + self.max_initial_timestamp_index = getattr(generate_config, "max_initial_timestamp_index", None) @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # suppress <|notimestamps|> which is handled by without_timestamps scores[:, self.no_timestamps_token_id] = -float("inf") - if input_ids.shape[1] == self.begin_index - 1: - scores[:, :] = -float("inf") - scores[:, self.timestamp_begin] = 0 - return scores - # timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly for k in range(input_ids.shape[0]): - seq = list(input_ids[k, self.begin_index :].tolist()) + sampled_tokens = input_ids[k, self.begin_index :] + seq = list(sampled_tokens.tolist()) + last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin @@ -1549,8 +1556,23 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> to else: # cannot be normal text tokens scores[k, : self.eos_token_id] = -float("inf") - # apply the `max_initial_timestamp` option - if input_ids.shape[1] == self.begin_index and self.max_initial_timestamp_index is not None: + timestamps = sampled_tokens[sampled_tokens.ge(self.timestamp_begin)] + if timestamps.numel() > 0: + # `timestamps` shouldn't decrease; forbid timestamp tokens smaller than the last + # The following lines of code are copied from: https://github.com/openai/whisper/pull/914/files#r1137085090 + if last_was_timestamp and not penultimate_was_timestamp: + timestamp_last = timestamps[-1] + else: + # Avoid to emit <|0.00|> again + timestamp_last = timestamps[-1] + 1 + + scores[k, self.timestamp_begin : timestamp_last] = -float("inf") + + # apply the `max_initial_timestamp` option + if input_ids.shape[1] == self.begin_index: + scores[:, : self.timestamp_begin] = -float("inf") + + if self.max_initial_timestamp_index is not None: last_allowed = self.timestamp_begin + self.max_initial_timestamp_index scores[:, last_allowed + 1 :] = -float("inf") @@ -1559,7 +1581,7 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> to for k in range(input_ids.shape[0]): timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1) max_text_token_logprob = logprobs[k, : self.timestamp_begin].max() - if timestamp_logprob > max_text_token_logprob: 
+ if timestamp_logprob > max_text_token_logprob and self._detect_timestamp_from_logprob: scores[k, : self.timestamp_begin] = -float("inf") return scores diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index ad54d51b73f3..e88fe3a6aacd 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -15,6 +15,7 @@ """ PyTorch Whisper model.""" import math +import warnings from typing import Optional, Tuple, Union import numpy as np @@ -1111,6 +1112,13 @@ def forward( return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ + + expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0] + if input_features.shape[-1] != expected_seq_length: + raise ValueError( + f"Whisper expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}." + ) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states @@ -1723,7 +1731,7 @@ def forward( def generate( self, - inputs: Optional[torch.Tensor] = None, + input_features: Optional[torch.Tensor] = None, generation_config=None, logits_processor=None, stopping_criteria=None, @@ -1734,12 +1742,16 @@ def generate( language=None, is_multilingual=None, prompt_ids: Optional[torch.Tensor] = None, - return_token_timestamps=None, + num_segment_frames: Optional[int] = None, + return_token_timestamps: Optional[bool] = None, + return_segments: bool = False, + attention_mask: Optional[torch.Tensor] = None, + time_precision: int = 0.02, + return_dict_in_generate: Optional[bool] = None, **kwargs, ): """ - - Generates sequences of token ids for models with a language modeling head. + Transcribes or translates passed mel input features to a sequence of token ids. @@ -1801,46 +1813,162 @@ def generate( Whether to return token-level timestamps with the text. This can be used with or without the `return_timestamps` option. To get word-level timestamps, use the tokenizer to group the tokens into words. + return_segments (`bool`, *optional*, defaults to `False`): + Whether to additionally return a list of all segments. Note that this option can only be enabled + when doing long-form transcription. + attention_mask (`torch.Tensor`, *optional*): + `attention_mask` needs to be passed when doing long-form transcription using a batch size > 1. + time_precision (`int`, *optional*, defaults to 0.02): + The duration of output token in seconds. *E.g.* 0.02 means that a generated token on average accounts + for 20 ms. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of just returning the generated tokens. + Note that when doing long-form transcription, `return_dict_in_generate` can only be enabled when + `return_segments` is set True. In this case the generation outputs of each segment is added to each + segment. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. 
If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: - [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` - or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. - - If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible - [`~utils.ModelOutput`] types are: + [`~utils.ModelOutput`] or `torch.LongTensor` or `Dict[str, Any]`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` + or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor` or a dict of segments when `return_segments=True`. - - [`~generation.GreedySearchDecoderOnlyOutput`], - - [`~generation.SampleDecoderOnlyOutput`], - - [`~generation.BeamSearchDecoderOnlyOutput`], - - [`~generation.BeamSampleDecoderOnlyOutput`] + If the passed input is > 30 seconds / > 3000 mel input features and `return_segments=True`, then a dictionary is returned that contains the generated sequence ids under `sequences` and a list of each generated segment under `segments`. - If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible - [`~utils.ModelOutput`] types are: + else if the passed input is <= 30 seconds / <= 3000 mel input features, the possible [`~utils.ModelOutput`] types are: - [`~generation.GreedySearchEncoderDecoderOutput`], - [`~generation.SampleEncoderDecoderOutput`], - [`~generation.BeamSearchEncoderDecoderOutput`], - [`~generation.BeamSampleEncoderDecoderOutput`] + + else only the generated output sequence ids are returned. + + Example: + + - *Longform transcription*: To transcribe or translate audios longer than 30 seconds, process the audio files without truncation and pass all mel features at once to generate. + + ```python + >>> import torch + >>> from transformers import AutoProcessor, WhisperForConditionalGeneration + >>> from datasets import load_dataset, Audio + + >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> model.cuda() + + >>> # load audios > 30 seconds + >>> ds = load_dataset("distil-whisper/meanwhile", "default")["test"] + >>> # resample to 16kHz + >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) + >>> # take first 8 audios and retrieve array + >>> audio = ds[:8]["audio"] + >>> audio = [x["array"] for x in audio] + + >>> # make sure to NOT truncate the input audio, to return the `attention_mask` and to pad to the longest audio + >>> inputs = processor(audio, return_tensors="pt", truncation=False, padding="longest", return_attention_mask=True, sampling_rate=16_000) + >>> inputs = inputs.to("cuda", torch.float32) + + >>> # transcribe audio to ids + >>> generated_ids = model.generate(**inputs) + + >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True) + >>> transcription[0] + ' Folks, if you watch the show, you know, I spent a lot of time right over there.
Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct dennies. set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile!' + ``` + + - *Shortform transcription*: If passed mel input features are < 30 seconds, the whole audio will be transcribed with a single call to generate. + + ```python + >>> import torch + >>> from transformers import AutoProcessor, WhisperForConditionalGeneration + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + + >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") + >>> input_features = inputs.input_features + + >>> generated_ids = model.generate(inputs=input_features) + + >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + >>> transcription + ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' + ``` + """ + + if "inputs" in kwargs: + input_features = kwargs.pop("inputs") + warnings.warn( + "The input name `inputs` is deprecated. Please make sure to use `input_features` instead.", + FutureWarning, + ) + + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + if generation_config is None: generation_config = self.generation_config - if return_timestamps is not None: + input_stride = self.model.encoder.conv1.stride[0] * self.model.encoder.conv2.stride[0] + if num_segment_frames is None: + num_segment_frames = input_stride * self.config.max_source_positions + + # 1. Check whether we're in shortform or longform mode + if input_features is not None: + total_input_frames = input_features.shape[-1] + elif "encoder_outputs" in kwargs: + encoder_outputs_shape = ( + kwargs["encoder_outputs"][0].shape + if isinstance(kwargs["encoder_outputs"], BaseModelOutput) + else kwargs["encoder_outputs"].shape + ) + total_input_frames = encoder_outputs_shape[1] * input_stride + else: + raise ValueError("Make sure to provide either `input_features` or `encoder_outputs` to `generate`.") + + is_shortform = total_input_frames <= num_segment_frames + + # 2. Make sure the generation config is correctly set depending on whether timestamps are to be returned or not + if return_timestamps is True: if not hasattr(generation_config, "no_timestamps_token_id"): raise ValueError( "You are trying to return timestamps, but the generation config is not properly set. 
" "Make sure to initialize the generation config with the correct attributes that are needed such as `no_timestamps_token_id`. " "For more details on how to generate the approtiate config, refer to https://github.com/huggingface/transformers/issues/21878#issuecomment-1451902363" ) - generation_config.return_timestamps = return_timestamps + elif not is_shortform: + if return_timestamps is False: + raise ValueError( + "You have passed more than 3000 mel input features (> 30 seconds) which automatically enables long-form generation which " + "requires the model to predict timestamp tokens. Please either pass `return_timestamps=True` or make sure to pass no more than 3000 mel input features." + ) + + if not hasattr(generation_config, "no_timestamps_token_id"): + raise ValueError( + "You have passed more than 3000 mel input features (> 30 seconds) which automatically enables long-form generation which " + "requires the generation config to have `no_timestamps_token_id` correctly. " + "Make sure to initialize the generation config with the correct attributes that are needed such as `no_timestamps_token_id`. " + "For more details on how to generate the approtiate config, refer to https://github.com/huggingface/transformers/issues/21878#issuecomment-1451902363" + "or make sure to pass no more than 3000 mel input features." + ) + + logger.info("Setting `return_timestamps=True` for long-form generation.") + generation_config.return_timestamps = True else: generation_config.return_timestamps = False + # 3. Make sure to correctly set language-related parameters if is_multilingual is not None: if not hasattr(generation_config, "is_multilingual"): raise ValueError( @@ -1875,8 +2003,8 @@ def generate( ) generation_config.task = task + # 4. Add forced decoder ids depending on passed `language`, `task`,`prompt_ids`, `return_token_timestamps` and `return_timestamps` forced_decoder_ids = None - # Legacy code for backward compatibility if hasattr(self.config, "forced_decoder_ids") and self.config.forced_decoder_ids is not None: forced_decoder_ids = self.config.forced_decoder_ids @@ -1961,12 +2089,9 @@ def generate( forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_decoder_ids)] generation_config.forced_decoder_ids = forced_decoder_ids - if generation_config.return_timestamps: - logits_processor = [WhisperTimeStampLogitsProcessor(generation_config)] - if return_token_timestamps: kwargs["output_attentions"] = True - kwargs["return_dict_in_generate"] = True + return_dict_in_generate = True if getattr(generation_config, "task", None) == "translate": logger.warning("Token-level timestamps may not be reliable for task 'translate'.") @@ -1979,23 +2104,267 @@ def generate( if kwargs.get("num_frames") is not None: generation_config.num_frames = kwargs.pop("num_frames") - outputs = super().generate( - inputs, - generation_config, - logits_processor, - stopping_criteria, - prefix_allowed_tokens_fn, - synced_gpus, - **kwargs, - ) + if generation_config.return_timestamps is True: + last_forced_decoder_ids = ( + generation_config.forced_decoder_ids[-1][-1] + if hasattr(self.config, "forced_decoder_ids") and self.config.forced_decoder_ids + else None + ) + if last_forced_decoder_ids == self.generation_config.no_timestamps_token_id: + # remove no_timestamp to be forcefully generated if we want to return timestamps + # this is also important to make sure `WhisperTimeStampLogitsProcessor` functions correctly + forced_decoder_ids = generation_config.forced_decoder_ids[:-1] + # Make sure that if list is 
empty we set it to None + generation_config.forced_decoder_ids = None if len(forced_decoder_ids) == 0 else forced_decoder_ids + + timestamp_processor = [WhisperTimeStampLogitsProcessor(generation_config)] + logits_processor = ( + timestamp_processor if logits_processor is None else timestamp_processor + logits_processor + ) - if return_token_timestamps and hasattr(generation_config, "alignment_heads"): - num_frames = getattr(generation_config, "num_frames", None) - outputs["token_timestamps"] = self._extract_token_timestamps( - outputs, generation_config.alignment_heads, num_frames=num_frames + # 5. If we're in shortform mode, simply generate the whole input at once and return the output + if is_shortform: + outputs = super().generate( + input_features, + generation_config, + logits_processor, + stopping_criteria, + prefix_allowed_tokens_fn, + synced_gpus, + return_dict_in_generate=return_dict_in_generate, + **kwargs, ) - return outputs + if return_token_timestamps and hasattr(generation_config, "alignment_heads"): + num_frames = getattr(generation_config, "num_frames", None) + outputs["token_timestamps"] = self._extract_token_timestamps( + outputs, generation_config.alignment_heads, num_frames=num_frames + ) + + return outputs + + # 6. Else we're in longform mode which is more complex. We need to chunk the audio input depending on when the model generated + # timestamp tokens + # 6.1 Set running parameters for while loop + if not return_segments and return_dict_in_generate: + raise ValueError( + "Make sure to set `return_segments=True` to return generation outputs as part of the `segments` key." + ) + + # if input is longer than 30 seconds we default to long-form generation + timestamp_begin = self.generation_config.no_timestamps_token_id + 1 + # input stride is mel frames per encoder output vector which is the product of all conv strides + batch_size = input_features.shape[0] + + if batch_size > 1 and attention_mask is None: + raise ValueError( + "When doing long-form audio transcription, make sure to pass an `attention_mask`. You can retrieve the `attention_mask` by doing `processor(audio, ..., return_attention_mask=True)` " + ) + elif batch_size > 1: + max_frames = attention_mask.sum(-1).cpu().to(torch.long) + seek = torch.zeros((batch_size,), dtype=torch.long) + else: + max_frames = torch.ones((1,), dtype=torch.long) * total_input_frames + seek = torch.zeros((1,), dtype=torch.long) + + current_segments = [[] for _ in range(batch_size)] + cur_to_prev_index_map = list(range(batch_size)) + + # batch size can decrease during the run + cur_bsz = prev_bsz = batch_size + + # 6.2 Transcribe audio until we reach the end of all input audios + while (seek < max_frames).any(): + prev_bsz = cur_bsz + + # 6.3 NOTE: When in longform transcription mode and batch size > 1 we need to dynamically reduce the batch size during the loop + # in case one audio finished earlier than another one.
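(For example, with a batch of two audios where the first one runs out of frames after a few chunks, its row is cut out of `input_features` and decoding continues with a batch size of 1.)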
Thus, we need to keep a table of "previous-index-2-current-index" in order + # to know which original audio is being decoded + new_cur_to_prev_index_map = [] + for i in range(prev_bsz): + prev_i = cur_to_prev_index_map[i] + if seek[prev_i] >= max_frames[prev_i]: + cut_index = i + (cur_bsz - prev_bsz) + cur_bsz -= 1 + input_features = torch.cat([input_features[:cut_index], input_features[cut_index + 1 :]], dim=0) + else: + # cut out index that goes away + new_cur_to_prev_index_map.append(prev_i) + + # 6.4 Set updated index map, duration of previously decoded chunks and number of max frames of current decoding chunk + cur_to_prev_index_map = new_cur_to_prev_index_map + time_offset = seek * time_precision / input_stride + seek_num_frames = (max_frames - seek).clamp(max=num_segment_frames) + + # 6.5 Make sure that all inputs are padded to the same input length + segment_input = [] + for i in range(cur_bsz): + prev_i = cur_to_prev_index_map[i] + segment_input_slice = input_features[ + i : i + 1, :, seek[prev_i] : seek[prev_i] + seek_num_frames[prev_i] + ] + + if segment_input_slice.shape[-1] < num_segment_frames: + # pad to 3000 if necessary + segment_input_slice = F.pad( + segment_input_slice, pad=(0, num_segment_frames - segment_input_slice.shape[-1]) + ) + + segment_input.append(segment_input_slice) + + segment_input = torch.cat(segment_input, dim=0) + + # 6.6 Batch generate current chunk + seek_outputs = super().generate( + segment_input, + generation_config, + logits_processor, + stopping_criteria, + prefix_allowed_tokens_fn, + synced_gpus, + return_dict_in_generate=return_dict_in_generate, + **kwargs, + ) + + if return_token_timestamps and hasattr(generation_config, "alignment_heads"): + num_frames = getattr(generation_config, "num_frames", None) + seek_outputs["token_timestamps"] = self._extract_token_timestamps( + seek_outputs, generation_config.alignment_heads, num_frames=num_frames + ) + + if return_dict_in_generate: + seek_sequences = seek_outputs["sequences"] + seek_outputs = [ + {k: v[i] for k, v in seek_outputs.items()} + for i in range(next(iter(seek_outputs.values())).size(0)) + ] + else: + seek_sequences = seek_outputs + + # 6.7 Loop over each decoded audio individually as each decoding can be of a different length + for i, seek_sequence in enumerate(seek_sequences): + prev_i = cur_to_prev_index_map[i] + + # make sure we cut a predicted EOS token if we are not finished with the generation yet + is_not_final = (seek[prev_i] + num_segment_frames) < max_frames[prev_i] + if is_not_final and seek_sequence[-1] == self.generation_config.eos_token_id: + seek_sequence = seek_sequence[:-1] + + # remove all padding tokens + if seek_sequence[-1] == self.generation_config.pad_token_id: + num_paddings = (seek_sequence == self.generation_config.pad_token_id).sum() + seek_sequence = seek_sequence[:-num_paddings] + + segments, segment_offset = self._retrieve_segment( + seek_sequence=seek_sequence, + seek_outputs=seek_outputs, + time_offset=time_offset, + timestamp_begin=timestamp_begin, + seek_num_frames=seek_num_frames, + cur_bsz=cur_bsz, + time_precision=time_precision, + input_stride=input_stride, + prev_idx=prev_i, + idx=i, + ) + + current_segments[prev_i] += segments + seek[prev_i] += segment_offset + + # 7. Once all segments are added to the list of all segments, called `current_segments`, we extract the predicted + # output tokens from the list of dicts. 
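For every audio, the `tokens` tensors of its segment dicts are concatenated along the time dimension to form that audio's final output sequence.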
If we use batch size > 1, we make sure to pad the output sequences to the same length. + sequences = [] + max_total_length = 0 + for current_segment_list in current_segments: + sequences.append(torch.cat([d["tokens"] for d in current_segment_list], dim=-1)) + max_total_length = max(max_total_length, len(sequences[-1])) + + for i in range(batch_size): + sequences[i] = F.pad( + sequences[i], pad=(0, max_total_length - len(sequences[i])), value=self.generation_config.pad_token_id + ) + + sequences = torch.stack(sequences, dim=0) + + # 8. If we return all segments, the predicted output sequences are put under `"sequences"`. + if return_segments: + return {"sequences": sequences, "segments": current_segments} + + return sequences + + @staticmethod + def _retrieve_segment( + seek_sequence, + seek_outputs, + time_offset, + timestamp_begin, + seek_num_frames, + cur_bsz, + time_precision, + input_stride, + prev_idx, + idx, + ): + # find the predicted "end of segment" predictions of Whisper + # "end of segment" predictions occur whenever Whisper predicts a timestamp token + timestamp_tokens: torch.Tensor = seek_sequence.ge(timestamp_begin) + single_timestamp_ending = timestamp_tokens[-2:].tolist() == cur_bsz * [[False, True]] + timestamp_segment_indices = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + + # If whisper predicted an "end of segment" via a timestamp token, let's go over each + # "end of segment" prediction and slice the decoding into segments accordingly + if len(timestamp_segment_indices) > 0: + # if the output contains two consecutive timestamp tokens + slices = timestamp_segment_indices.tolist() + segments = [] + if single_timestamp_ending: + slices.append(len(seek_sequence)) + + last_slice = 0 + # Add each segment to list of all segments + for current_slice in slices: + sliced_tokens = seek_sequence[last_slice + 1 : current_slice + 1] + start_timestamp_pos = sliced_tokens[0].item() - timestamp_begin + end_timestamp_pos = sliced_tokens[-1].item() - timestamp_begin + segments.append( + { + "start": time_offset[prev_idx] + start_timestamp_pos * time_precision, + "end": time_offset[prev_idx] + end_timestamp_pos * time_precision, + "tokens": sliced_tokens, + "result": seek_outputs[idx], + } + ) + last_slice = current_slice + + if single_timestamp_ending: + # single timestamp at the end means no speech after the last timestamp. + segment_offset = seek_num_frames[prev_idx] + else: + # otherwise, ignore the unfinished segment and seek to the last timestamp + # here we throw away all predictions after the last predicted "end of segment" + # since we are cutting right in the middle of an audio + last_timestamp_pos = seek_sequence[last_slice].item() - timestamp_begin + segment_offset = last_timestamp_pos * input_stride + else: + # If whisper does not predict any "end of segment" token, then + # the whole decoding is considered a segment and we add it to the list of segments + timestamps = seek_sequence[timestamp_tokens.nonzero().flatten()] + last_timestamp_pos = seek_num_frames[prev_idx] + if timestamps.numel() > 0 and timestamps[-1].item() != timestamp_begin: + # no consecutive timestamps but it has a timestamp; use the last one.
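The chunk is then emitted as one single segment ending at that last timestamp, and `seek` is still advanced by the full `seek_num_frames` of the chunk.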
+ last_timestamp_pos = timestamps[-1].item() - timestamp_begin + + segments = [ + { + "start": time_offset[prev_idx], + "end": time_offset[prev_idx] + last_timestamp_pos * time_precision, + "tokens": seek_sequence, + "result": seek_outputs[idx], + } + ] + segment_offset = seek_num_frames[prev_idx] + + return segments, segment_offset def prepare_inputs_for_generation( self, @@ -2229,7 +2598,7 @@ def forward( >>> predicted_ids = model.generate(input_features, assistant_model=assistant_model) >>> # decode token ids to text - >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) + >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0] >>> transcription ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.' ```""" diff --git a/src/transformers/pipelines/automatic_speech_recognition.py b/src/transformers/pipelines/automatic_speech_recognition.py index b464bfb40925..8fd1701d3ca5 100644 --- a/src/transformers/pipelines/automatic_speech_recognition.py +++ b/src/transformers/pipelines/automatic_speech_recognition.py @@ -508,9 +508,19 @@ def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None): ): yield item else: - processed = self.feature_extractor( - inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" - ) + if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples: + processed = self.feature_extractor( + inputs, + sampling_rate=self.feature_extractor.sampling_rate, + truncation=False, + padding="longest", + return_tensors="pt", + ) + else: + processed = self.feature_extractor( + inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" + ) + if self.torch_dtype is not None: processed = processed.to(dtype=self.torch_dtype) if stride is not None: @@ -551,8 +561,12 @@ def _forward(self, model_inputs, return_timestamps=False, generate_kwargs=None): if stride is not None: generate_kwargs["num_frames"] = stride[0] // self.feature_extractor.hop_length + if self.type == "seq2seq_whisper" and inputs.shape[-1] > self.feature_extractor.nb_max_frames: + generate_kwargs["input_features"] = inputs + else: + generate_kwargs["encoder_outputs"] = encoder(inputs, attention_mask=attention_mask) + tokens = self.model.generate( - encoder_outputs=encoder(inputs, attention_mask=attention_mask), attention_mask=attention_mask, **generate_kwargs, ) diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index e26527267fcb..f77d81d76e52 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -17,6 +17,7 @@ import copy import inspect import os +import random import tempfile import time import unittest @@ -47,7 +48,7 @@ if is_datasets_available(): import datasets - from datasets import load_dataset + from datasets import Audio, load_dataset if is_torch_available(): import torch @@ -61,8 +62,81 @@ WhisperProcessor, set_seed, ) + from transformers.generation.logits_process import LogitsProcessor from transformers.models.whisper.modeling_whisper import WhisperDecoder, WhisperEncoder, sinusoids + class DummyTimestampLogitProcessor(LogitsProcessor): + """This processor fakes the correct timestamps tokens pattern [TOK_1] [TOK_2] ... 
[TOK_N] [TIME_STAMP_TOK_1] [TIME_STAMP_TOK_2] [TOK_N+1] ...""" + + def __init__( + self, timestamp_begin, vocab_size, batch_size, max_length, min_space=3, seed=0, is_length_ascending=True + ): + self.timestamp_begin = timestamp_begin + self.vocab_size = vocab_size + + self.min_space_between_timestamps = min_space + self.timestamp_tokens = torch.arange(self.timestamp_begin, self.vocab_size) + self.timestamp_tokens = self.timestamp_tokens.to(torch_device) + self.is_length_ascending = is_length_ascending + + self.no_time_stamp_counter = batch_size * [0] + self.prev_highest_timestamp = batch_size * [0] + self.batch_size = batch_size + self.max_length = max_length + self.count = 0 + + self.let_pass = [[] for _ in range(batch_size)] + for k in range(batch_size): + random.seed(seed + k) + for _ in range(10000): + self.let_pass[k].append(random.randint(1, 10) <= 3) + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # we don't want to randomly sample timestamp tokens + if input_ids.shape[-1] > 1: + scores[:, self.timestamp_begin :] = -float("inf") + + self.no_time_stamp_counter = [x + 1 for x in self.no_time_stamp_counter] + for k in range(input_ids.shape[0]): + # make sure to use correct index if a batch was removed + if self.is_length_ascending and input_ids.shape[0] < self.batch_size: + prev_k = k + self.batch_size - input_ids.shape[0] + else: + prev_k = k + + if input_ids[k, -1] == self.timestamp_begin: + self.no_time_stamp_counter[prev_k] = 0 + + can_produce = self.no_time_stamp_counter[prev_k] > self.min_space_between_timestamps + must_produce = ( + input_ids[k][2:].le(self.timestamp_begin).all() and input_ids.shape[-1] == self.max_length - 1 + ) + # produce a timestamp with ~30% probability + if (can_produce and self.let_pass[prev_k][self.count]) or must_produce: + self.no_time_stamp_counter[prev_k] = 0 + self.prev_highest_timestamp[prev_k] = max(input_ids[k].max() + 1, self.timestamp_tokens[0].item()) + + # force a timestamp + scores[k, :] = -float("inf") + scores[k, self.prev_highest_timestamp[prev_k]] = 10.0 + + if ( + input_ids.shape[-1] > 3 + and input_ids[k, -1].item() in self.timestamp_tokens + and input_ids[k, -2].item() not in self.timestamp_tokens + ): + # force the same as before + scores[k, :] = -float("inf") + scores[k, input_ids[k, -1].item()] = 10.0 + + self.count += 1 + + if torch.isinf(scores).all(): + raise ValueError("Dummy logit processor is incorrectly set up.
Scores should not be all inf.") + + return scores + + if is_flax_available(): import jax.numpy as jnp @@ -1237,6 +1311,133 @@ def test_generate_with_prompt_ids_max_length(self): model.generate(input_features, max_new_tokens=1, prompt_ids=prompt_ids) + def test_longform_generate_single_batch(self): + config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() + + model = WhisperForConditionalGeneration(config).eval().to(torch_device) + input_features = input_dict["input_features"] + + # len = 250 with num_input_frames = 60 + long_input_features = torch.cat([input_features.repeat(1, 1, 4), input_features[:, :, :10]], dim=-1) + + # force bsz=1 + long_input_features = long_input_features[:1] + vocab_size = model.config.vocab_size + + batch_size = 1 + num_timestamp_tokens = 20 + max_length = 16 + logits_processor = [ + DummyTimestampLogitProcessor( + vocab_size - num_timestamp_tokens, + vocab_size, + batch_size=batch_size, + max_length=max_length, + min_space=4, + ) + ] + + # each chunk should not be longer than 10 + model.generation_config.max_length = max_length + + # if input features are long can't set return_timestamps to False + with self.assertRaises(ValueError): + _ = model.generate(long_input_features, logits_processor=logits_processor, return_timestamps=False) + + # if input features are long need to set generation config + with self.assertRaises(ValueError): + _ = model.generate(long_input_features, logits_processor=logits_processor) + + timestamp_begin = vocab_size - num_timestamp_tokens + model.generation_config.no_timestamps_token_id = timestamp_begin - 1 + model.generation_config.eos_token_id = None + model.generation_config._detect_timestamp_from_logprob = False + # make sure that we only have the same begin token + model.generation_config.max_initial_timestamp_index = 0 + + outputs = model.generate(long_input_features, logits_processor=logits_processor, return_segments=True) + + segments = outputs["segments"][0] + + for i, segment in enumerate(segments): + assert segment["start"] <= segment["end"], "start has to be smaller equal end" + assert ( + segment["tokens"][0] == model.generation_config.decoder_start_token_id + or segment["tokens"][0] >= timestamp_begin + ), "First segment token should be a timestamp token" + assert any( + s > timestamp_begin for s in segment["tokens"][1:] + ), f"At least one segment token should be a timestamp token, but not first., {segment['tokens']}" + assert ( + segment["tokens"].shape[-1] <= max_length + ), "make sure that no segment is larger than max generation length" + + def test_longform_generate_multi_batch(self): + config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() + + model = WhisperForConditionalGeneration(config).eval().to(torch_device) + input_features = input_dict["input_features"].to(torch_device) + + # len = 250 with num_input_frames = 60 + long_input_features = torch.cat([input_features.repeat(1, 1, 4), input_features[:, :, :10]], dim=-1) + long_input_features[:1, :, :200] + input_features_2 = long_input_features[1:] + attention_mask = torch.ones( + (2, long_input_features.shape[-1]), dtype=input_features.dtype, device=input_features.device + ) + attention_mask[0, 200:] = 0 + + # force bsz=1 + vocab_size = model.config.vocab_size + + batch_size = 1 + num_timestamp_tokens = 20 + max_length = 16 + timestamp_begin = vocab_size - num_timestamp_tokens + model.generation_config.no_timestamps_token_id = timestamp_begin - 1 + model.generation_config.eos_token_id = None + 
model.generation_config._detect_timestamp_from_logprob = False + # make sure that we only have the same begin token + model.generation_config.max_initial_timestamp_index = 0 + + logits_processor = [ + DummyTimestampLogitProcessor( + vocab_size - num_timestamp_tokens, + vocab_size, + batch_size=batch_size, + max_length=max_length, + min_space=4, + seed=1, + ) + ] + outputs_2 = model.generate(input_features_2, logits_processor=logits_processor, return_segments=True) + tokens_2 = outputs_2["sequences"][0] + segments_2 = outputs_2["segments"][0] + + batch_size = 2 + logits_processor = [ + DummyTimestampLogitProcessor( + vocab_size - num_timestamp_tokens, + vocab_size, + batch_size=batch_size, + max_length=max_length, + min_space=4, + seed=0, + ) + ] + outputs = model.generate( + long_input_features, attention_mask=attention_mask, logits_processor=logits_processor, return_segments=True + ) + tokens = outputs["sequences"][1] + segments = outputs["segments"][1] + + assert tokens_2.tolist() == tokens.tolist() + + for seg1, seg2 in zip(segments_2, segments): + assert seg1["start"] == seg2["start"] + assert seg1["end"] == seg2["end"] + assert seg1["tokens"].tolist() == seg2["tokens"].tolist() + @require_torch @require_torchaudio @@ -1831,6 +2032,125 @@ def test_speculative_decoding_non_distil(self): ] assert total_time_non_assist > total_time_assist, "Make sure that assistant decoding is faster" + @slow + def test_whisper_longform_single_batch(self): + # fmt: off + EXPECTED_TEXT = [' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter\'s manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Layton\'s work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell\'s pictures are a sort of up-gards and atom paintings, and Mason\'s exquisite idles are as national as a jingo poem. Mr. Birk at Foster\'s landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. Mr. John Collier gives his sitter a cheerful slap in the back, before he says, like a shampoo or a Turkish bath. Next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. On the general principles of art, Mr. Quilter writes with equal lucidity. he tells us is of a different quality to mathematics, and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Makes the customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing upholsterer. Near the fire, any ornaments Fred brought home from India on the mantelboard. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man. And remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only, unfortunately, his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the Tupper of painting. By Harry Quilter M.A. A man said to the universe, Sir, I exist. Sweat-covered Breon\'s body trickling into the tight-lowing cloth that was the only german he wore. The cut on his chest still dripping blood. 
The ache of his overstrained eyes, even the soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were triggered his muscles into complete relaxation. Oli\'s heart and lungs worked on at a strong, measured rate. He was in reverie, sliding along the borders of consciousness. The contestants in the twenties needed undisturbed rest. Therefore, nights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, The thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency, clearly used to command. I\'m here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenties, he must have drawn his gun because the intruder said quickly, but that away you\'re being a fool. out, through his silence then, and still wondering, Breon was once more asleep. Ten seconds, he asked the handler who was needing his aching muscles. A red-haired mountain of a man, with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing. Just thrust and parry, and victory to the stronger. man who entered the twenties had his own training tricks. They were appeared to be an immediate association with the death trauma, as if the two were inextricably linked into one. The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. had died before during the 20s and death during the last round was in some ways easier than defeat. Breathing deeply, Breon\'s softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. Our role looked amazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent\'s face when the man finally recognized his error. A wave of despair rolled out from our rogue. Breon sensed it and knew the fifth point was his. Then the powerful twist that\'s rested aside, in and under the guard, because he was sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, accooing dove. He has gone, and gone for good," answered Polychrom, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with says he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has flooded disgrace, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn\'t work too hard, said Shaggy. He doesn\'t work at all. In fact, there\'s nothing he can do in these dominions as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. 
Not exactly, we\'ve turned Calico. Where is my brother now, inquired Shaggy. In the metal forest. Where is that? The middle forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh no, I\'m quite sure he didn\'t. That\'s funny, remarked Betsy thoughtfully. I don\'t believe Anne knew any magic, or she\'d have worked it before. I do not know, confess Shaggy. True, agreed Calico. Calico went to the big gong and pounded on it just as Virgato used to do, but no one answered the summons. Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the throne, wearing Virgato\'s discarded ruby crown and holding in his hand to scepter which reggative head so often thrown at his head.'] + # fmt: on + + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + model = model.to("cuda") + + ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean") + one_audio = np.concatenate([x["array"] for x in ds["validation"]["audio"]], dtype=np.float32) + + input_features = processor(one_audio, return_tensors="pt", truncation=False, padding="longest")[ + "input_features" + ] + input_features = input_features.to(device="cuda") + + result = model.generate(input_features, return_timestamps=True) + decoded = processor.batch_decode(result, skip_special_tokens=True) + + assert decoded == EXPECTED_TEXT + + @slow + def test_whisper_longform_multi_batch(self): + # fmt: off + EXPECTED_TEXT_1 = [" Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell's pictures are a sort of up-gards and atom paintings, and Mason's exquisite idles are as national as a jingo poem. Mr. Birkett Foster's landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. And Mr. John Collier gives his sitter a cheerful slap in the back, before he says, like a shampooer and a Turkish bath. Next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. On the general principles of art, Mr. Quilter writes with equal lucidity. Painting he tells us is of a different quality to mathematics, and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Mix a customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing a poster or near the fire, and the ornaments Fred brought home from India on the mental board. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man. And remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only unfortunately his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the Tupper of painting. a Harry Quilter M.A. A man said to the universe, Sir, I exist. Sweat-covered Breon's body trickling into the tight-wing cloth that was the only germany war. 
The cut on his chest still dripping blood. The ache of his overstrained eyes, even the soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were, triggered his muscles into complete relaxation. Oily his heart and lungs worked on at a strong, measured rate. He was in reverie, sliding along the borders of consciousness. The contestants in the 20s needed undisturbed rest. Therefore, knights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, the thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency, clearly used to command. I'm here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenty's he must have drawn his gun, because the intruder said quickly, but that away you're being a fool. Out there was silence then, and still wondering, Breon was once more asleep. Ten seconds he asked the handler who was needing his aching muscles. a red-haired mountain of a man with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing, just thrust and parry and victory to the stronger. Every man who entered the twenties had his own training tricks. There appeared to be an immediate association with the death trauma as if the two were andextricably linked into one. The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. Others had died before during the twenties and death during the last round was, in some ways, easier than defeat. Breeding deeply, Breon's softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. I rolled the mazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent's face when the man finally recognized his error. A wave of despair rolled out from our rogue, pre-inscented and new to fifth point was his. Then the powerful twist that's rest of the side, in and under the guard, because you were sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, a cooing dove. He has gone and gone for good, answered Polychrome, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with this, he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has flooded disgrace, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn't work too hard, since Shaggy. He doesn't work at all. 
In fact, there's nothing he can do in these dominions, as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. Not exactly, return Calico. Where is my brother now? choir-dshaggy, in the metal forest. Where is that? The metal forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh, no, I'm quite sure he didn't. That's funny, remarked Betsy thoughtfully. I don't believe and knew any magic, or she'd have worked it before. I do not know, confess shaggy. True, a great calico. Calico went to the big gong and pounded on it, just as Virgado used to do, but no one answered the summons. Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the throne, wearing Virgados discarded Ruby Crown, and holding in his hand to scepter, which Virgado had so often thrown at his head. head."] + EXPECTED_TEXT_2 = [" Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell's pictures are a sort of up-gards and atom paintings, and Mason's exquisite idles are as national as a jingo poem. Mr. Burkett Foster's landscapes smile at one much in the same way that Mr. Carker."] + EXPECTED_TEXT_3 = [" possible. Nor is Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grieved doubts whether Sir Frederick Layton's work is really greek after all, and can discover in it but little of rocky Ithaca. Linnell's pictures are a sort of up-guards and atom paintings, and Mason's exquisite idles are as national as a jingo poem. Mr. Birk at Foster's landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. And Mr. John Collier gives his sitter a cheerful slap in the back, before he says, like a shampooer and a Turkish bath, next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. Under general principles of art, Mr. Quilter writes with equal lucidity. Painting, he tells us, is of a different quality to mathematics and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Mix a customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing upholsterer. Near the fire. any ornaments Fred brought home from India on the mental board. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man, and remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only, unfortunately, his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the tupper of painting. By Harry Quilter M.A. A man said to the universe, Sir, I exist. 
Sweat-covered Breon's body trickling into the titling cloth that was the only german he wore. The cut on his chest still dripping blood. The ache of his overstrained eyes. Even to soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were triggered as muscles into complete relaxation. Oily his heart and lungs worked on at a strong measured rate. He was in In reverie, sliding along the borders of consciousness. The contestants in the 20s needed undisturbed rest. Therefore, nights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, the thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency clearly used to command. I'm here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenty's he must have drawn his gun, because the intruder said quickly, but that away you're being a fool. Out there was silence then, and still wondering, Breon was once more asleep. Ten seconds he asked the handler who was needing his aching muscles. a red-haired mountain of a man with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing, just thrust and parry and victory to the stronger. Every man who entered the twenties had his own training tricks. There appeared to be an immediate association with the death trauma as if the two were andextricably linked into one. The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. Others had died before during the twenties and death during the last round was, in some ways, easier than defeat. Breeding deeply, Breon's softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. Our role looked amazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent's face when the man finally recognized his error. A wave of despair rolled out from our rogue, re-insunced it and knew the fifth point was his. Then the powerful twist that's rest of the side, in and under the guard, because you were sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, a cooing dove. He has gone and gone for good, answered Polychrome, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with this, he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has fled and disgraced, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn't work too hard, since Shaggy. He doesn't work at all. 
In fact, there's nothing he can do in these dominions as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. Not exactly, we've turned Calico. Where is my brother now? quared shaggy. In the metal forest. Where is that? The metal forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh no, I'm quite sure he didn't. And that's funny, remarked Betsy thoughtfully. I don't believe Anne knew any magic, or she'd have worked it before. I do not know, confess Shaggy. True, a great calico. Calico went to the big gong and pounded on it, just as we're good to have used to do, but no one answered the summons. Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the thrown wearing ruggedos discarded ruby crown and holding in his hand to septor which Ruggato had so often thrown at his head."] + EXPECTED_TEXT_4 = [' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter\'s manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Layton\'s work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell\'s pictures are a sort of up-gards and atom paintings, and Mason\'s exquisite idles are as national as a jingo poem. Mr. Birk at Foster\'s landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. Mr. John Collier gives his sitter a cheerful slap in the back, before he says, like a shampoo or a Turkish bath. Next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. On the general principles of art, Mr. Quilter writes with equal lucidity. he tells us is of a different quality to mathematics, and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Makes the customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing upholsterer. Near the fire, any ornaments Fred brought home from India on the mantelboard. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man. And remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only, unfortunately, his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the Tupper of painting. By Harry Quilter M.A. A man said to the universe, Sir, I exist. Sweat-covered Breon\'s body trickling into the tight-lowing cloth that was the only german he wore. The cut on his chest still dripping blood. The ache of his overstrained eyes, even the soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were triggered his muscles into complete relaxation. Oli\'s heart and lungs worked on at a strong, measured rate. 
He was in reverie, sliding along the borders of consciousness. The contestants in the twenties needed undisturbed rest. Therefore, nights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, The thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency, clearly used to command. I\'m here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenties, he must have drawn his gun because the intruder said quickly, but that away you\'re being a fool. out, through his silence then, and still wondering, Breon was once more asleep. Ten seconds, he asked the handler who was needing his aching muscles. A red-haired mountain of a man, with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing. Just thrust and parry, and victory to the stronger. man who entered the twenties had his own training tricks. They were appeared to be an immediate association with the death trauma, as if the two were inextricably linked into one. The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. had died before during the 20s and death during the last round was in some ways easier than defeat. Breathing deeply, Breon\'s softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. Our role looked amazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent\'s face when the man finally recognized his error. A wave of despair rolled out from our rogue. Breon sensed it and knew the fifth point was his. Then the powerful twist that\'s rested aside, in and under the guard, because he was sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, accooing dove. He has gone, and gone for good," answered Polychrom, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with says he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has flooded disgrace, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn\'t work too hard, said Shaggy. He doesn\'t work at all. In fact, there\'s nothing he can do in these dominions as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. Not exactly, we\'ve turned Calico. Where is my brother now, inquired Shaggy. In the metal forest. Where is that? The middle forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh no, I\'m quite sure he didn\'t. That\'s funny, remarked Betsy thoughtfully. I don\'t believe Anne knew any magic, or she\'d have worked it before. 
I do not know, confess Shaggy. True, agreed Calico. Calico went to the big gong and pounded on it just as Virgato used to do, but no one answered the summons. Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the throne, wearing Virgato\'s discarded ruby crown and holding in his hand to scepter which reggative head so often thrown at his head.'] + # fmt: on + + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + model = model.to("cuda") + + ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean") + one_audio = np.concatenate([x["array"] for x in ds["validation"]["audio"]], dtype=np.float32) + audios = [] + audios.append(one_audio[110000:]) + audios.append(one_audio[:800000]) + audios.append(one_audio[80000:]) + audios.append(one_audio[:]) + + decoded_single = [] + for audio in audios: + inputs = processor(audio, return_tensors="pt", truncation=False) + inputs = inputs.to(device="cuda") + + result = model.generate(**inputs, return_timestamps=True) + decoded_single.append(processor.batch_decode(result, skip_special_tokens=True)) + + inputs = processor( + audios, return_tensors="pt", truncation=False, padding="longest", return_attention_mask=True + ) + inputs = inputs.to(device="cuda") + + result = model.generate(**inputs, return_timestamps=True) + decoded_all = processor.batch_decode(result, skip_special_tokens=True) + + # make sure single & batch is exactly the same + assert decoded_all[0:1] == decoded_single[0] + assert decoded_all[1:2] == decoded_single[1] + assert decoded_all[2:3] == decoded_single[2] + assert decoded_all[3:4] == decoded_single[3] + + # exact match + assert decoded_all[0:1] == EXPECTED_TEXT_1 + assert decoded_all[1:2] == EXPECTED_TEXT_2 + assert decoded_all[2:3] == EXPECTED_TEXT_3 + assert decoded_all[3:4] == EXPECTED_TEXT_4 + + @slow + def test_whisper_longform_multi_batch_hard(self): + # fmt: off + EXPECTED_TEXT = [ + " Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct dennies. set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile!", + " Folks, I spend a lot of time right over there, night after night after night, actually. Carefully selecting for you the day's noosiest, most aerodynamic headlines, stress testing, and those topical anti-lock breaks and power steering, painstakingly stitching, leather seating so soft, it would make JD power and her associates blush to create the luxury sedan that is my nightly monologue. 
But sometimes, you sometimes, folks. I lurched a consciousness in the back of an abandoned school and slap myself awake with a crusty floor mat. Before using a mouse-bitten timing belt to strap some old plywood to a couple of discarded oil drums, then by the light of a heathen moon, render a gas tank out of an empty big gulp, fill with white claw and denatured alcohol, then light a match and let her rip and the demented one man soapbox derby of news that is my segment. Me, Guadalupe! No!", + " Ladies and gentlemen, you know, I spent a lot of time right over there Raising the finest Holstein news cattle firmly yet tenderly milking the latest headlines from their jokes swollen teats Churning the daily stories into the decadent proven-style style triple cream breed that is my nightly monologue But sometimes sometimes folks I stagger home hungry after being released by the police and Root around in the neighbor's trash can for an old milk carton scrape out the blooming dairy residue into the remains of a wet cheese rod I won from a rat in a pre-donned street fight. Put it in a discarded paint can to leave it to ferment next to a trash fire then hunker down and hallucinate while eating the listeria laden demon custard of news that is my segment. You mean one of them.", + " Folks, if you watch this show, you know I spend most of my time right over there carefully sorting through the day's biggest stories and selecting only the most subtle and unblemished ostrich and crocodile news leather, which I then entrust to artisan graduates of the Ichol Gregoire Ferrandi, who carefully dye them in a palette of bright zesty shades and adorn them in the finest and most topical inlay work using hand tools and double magnifying glasses, then assemble them according to now classic and elegant geometry using our signature saddles stitching. In line it with bees, wax, coated linen, finely attached a mallet, hammered strap, pearled hardware, and close-shit to create for you the one-of-a-kind hoke couture, Erme's Birkin bag that is my monologue. But sometimes, sometimes folks, sometimes. Sometimes I wake up in the last car of an abandoned roller coaster at Coney Island where I'm I'm hiding from the triads. I have some engine lubricants out of a safe way bag and stagger down the shore to tear the sail off a beach schooner. Then I rip the coaxial cable out of an RV and elderly couple from Utah, Hank, and Mabel lovely folks. And use it to stitch the sail into a loose pouch like a rock sack. And I stow away in the back of a garbage truck to the junkyard where I pick through to the debris for only the broken toys that make me the saddest until I have loaded for you. The Hobo Fugitives bug out, bindle of news that is my segment. Me one!", + " You know, folks, I spent a lot of time crafting for you a bespoke playlist of the day's biggest stories right over there. Meticulously selecting the most topical chakra affirming scented candles, and using Feng Shui to perfectly align the joke energy in the exclusive boutique yoga retreat that is my monologue. But sometimes just sometimes I go to the dumpster behind the waffle house at three in the morning, take off my shirt, cover myself, and used fry oil, wrap my hands with some double-duct tape by stole from the broken car window. Pound a six-pack of blueberry hard-seltzer and a sack of pills I stole from a parked ambulance. Then arm wrestle a raccoon in the back alley vision quest of news that is my segment. 
Meanwhile!", + " You know, folks, I spend most of my time right over there. Mining the day's biggest, most important stories, collecting the finest, most topical iron or hand hammering it into joke panels. Then I craft sheets of bronze and blazing with patterns that tell an epic tale of conquest and glory. Then, using the Germanic tradition press-black process, I place thin sheets of foil against the scenes and by hammering or otherwise applying pressure from the back, I project these scenes into a pair of cheat cards in a faceplate and, finally, using fluted strips of white alloyed molding, I divide the designs into framed panels and hold it all together using bronze rivets to create the beautiful and intimidating, Anglo-Saxon battle helm that is my nightly monologue. Sometimes, sometimes folks. Sometimes, just sometimes, I come into my sense as fully naked on the deck of a pirate besieged melee container ship that picked me up floating on the detached door of a portapotty in the Indian Ocean. Then after a sunstroke-induced realization of the crew of this ship plans to sell me an exchange for a bag of oranges to fight off scurvy, I lead a mutiny using only a PVC pipe at a pool chain that accepting my new role as Captain and declaring myself king of the windarc seas. I grab a dirty mop bucket covered in barnacles and adorn it with the teeth of the vanquished to create the sopping wet pirate crown of news that is my segment. Meanwhile!", + " Folks, if you watch this show, you know I spend most of my time right over there carefully blending for you the day's Newsiest most topical flower eggs milk and butter and Stranding into a fine batter to make delicate and informative comedy pancakes Then I glaze them in the juice and zest of the most relevant midnight Valencia oranges and douse it all and a fine Dela main de voyage cognac Before prom baying and basting them tables. I deserve for you the James Beard award worthy crepe suzzette That is my nightly monologue, but sometimes just sometimes folks. I wake up in the baggage hold of Greyhound bus. It's being hoisted by the scrap yard claw toward the burn pit. Escape to a nearby abandoned price chopper where I scrounge for old bread scraps and busted open bags of starfruit candies and expired eggs. Chuck it all on a dirty hubcap and slap it over a tire fire before using the legs of a strain, pair of sweatpants and as oven mitts to extract and serve the demented transience poundcake of news that is my segment. Me, Guadalupe!", + " Folks, if you watched the show and I hope you do, I spent a lot of time right over there. Tiredlessly studying the lineage of the days most important thoroughbred stories and whole-stiner headlines, working with the best trainers, money can buy to rear their comedy offspring with a hand that is stern yet gentle into the triple crown winning equine specimen. That is my nightly monologue, but sometimes, sometimes, folks, I break into an unincorporated veterinary genetics lab and grab whatever test tubes I can find and then under a grow light I got from a discarded chia pet. I mixed the pilfered DNA of a horse and whatever was in a tube labeled Keith Colan extra. Slurrying the concoction with caffeine pills and a microwave red bull, I screamed, sang a prayer to Janice, initiator of human life and God of transformation as a half horse, half man, freak. Seizes to life before me and the hideous collection of loose animal parts and corrupted man tissue that is my segment. 
Meanwhile!", + ] + # fmt: on + + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + model = model.to("cuda") + + ds = load_dataset("distil-whisper/meanwhile", "default")["test"] + ds = ds.cast_column("audio", Audio(sampling_rate=16000)) + + num_samples = 8 + + audio = ds[:num_samples]["audio"] + audios = [x["array"] for x in audio] + + decoded_single = [] + for audio in audios: + inputs = processor(audio, return_tensors="pt", truncation=False, sampling_rate=16_000) + inputs = inputs.to(device="cuda") + + result = model.generate(**inputs, return_timestamps=True) + decoded_single += processor.batch_decode(result, skip_special_tokens=True) + + inputs = processor( + audios, return_tensors="pt", truncation=False, padding="longest", return_attention_mask=True + ) + inputs = inputs.to(device="cuda") + + result = model.generate(**inputs, return_timestamps=True) + decoded_all = processor.batch_decode(result, skip_special_tokens=True) + + for i in range(num_samples): + assert decoded_all[i] == decoded_single[i] + assert decoded_all[i] == EXPECTED_TEXT[i] + def prepare_whisper_encoder_inputs_dict(config, input_features, head_mask=None): if head_mask is None: diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py index b5dee1e00fc9..3276042daf43 100644 --- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py +++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py @@ -16,7 +16,7 @@ import numpy as np import pytest -from datasets import load_dataset +from datasets import Audio, load_dataset from huggingface_hub import hf_hub_download, snapshot_download from transformers import ( @@ -329,16 +329,16 @@ def test_return_timestamps_in_preprocess(self): self.assertEqual( res, { - "text": " Conquered returned to its place amidst the tents.", - "chunks": [ - {"text": " Conquered", "timestamp": (0.5, 1.2)}, - {"text": " returned", "timestamp": (1.2, 1.64)}, - {"text": " to", "timestamp": (1.64, 1.84)}, - {"text": " its", "timestamp": (1.84, 2.02)}, - {"text": " place", "timestamp": (2.02, 2.28)}, - {"text": " amidst", "timestamp": (2.28, 2.78)}, - {"text": " the", "timestamp": (2.78, 2.96)}, - {"text": " tents.", "timestamp": (2.96, 3.48)}, + 'text': ' Conquered returned to its place amidst the tents.', + 'chunks': [ + {'text': ' Conquered', 'timestamp': (0.5, 1.2)}, + {'text': ' returned', 'timestamp': (1.2, 1.64)}, + {'text': ' to', 'timestamp': (1.64, 1.84)}, + {'text': ' its', 'timestamp': (1.84, 2.02)}, + {'text': ' place', 'timestamp': (2.02, 2.28)}, + {'text': ' amidst', 'timestamp': (2.28, 2.8)}, + {'text': ' the', 'timestamp': (2.8, 2.98)}, + {'text': ' tents.', 'timestamp': (2.98, 3.48)}, ], }, ) @@ -776,27 +776,27 @@ def test_simple_whisper_asr(self): self.assertEqual( output, { - "text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.", - "chunks": [ - {'text': ' Mr.', 'timestamp': (0.0, 1.02)}, - {'text': ' Quilter', 'timestamp': (1.02, 1.18)}, + 'text': ' Mr. 
Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.', + 'chunks': [ + {'text': ' Mr.', 'timestamp': (0.38, 1.04)}, + {'text': ' Quilter', 'timestamp': (1.04, 1.18)}, {'text': ' is', 'timestamp': (1.18, 1.44)}, {'text': ' the', 'timestamp': (1.44, 1.58)}, {'text': ' apostle', 'timestamp': (1.58, 1.98)}, - {'text': ' of', 'timestamp': (1.98, 2.3)}, - {'text': ' the', 'timestamp': (2.3, 2.46)}, + {'text': ' of', 'timestamp': (1.98, 2.32)}, + {'text': ' the', 'timestamp': (2.32, 2.46)}, {'text': ' middle', 'timestamp': (2.46, 2.56)}, - {'text': ' classes,', 'timestamp': (2.56, 3.38)}, - {'text': ' and', 'timestamp': (3.38, 3.52)}, - {'text': ' we', 'timestamp': (3.52, 3.6)}, - {'text': ' are', 'timestamp': (3.6, 3.72)}, + {'text': ' classes,', 'timestamp': (2.56, 3.4)}, + {'text': ' and', 'timestamp': (3.4, 3.54)}, + {'text': ' we', 'timestamp': (3.54, 3.62)}, + {'text': ' are', 'timestamp': (3.62, 3.72)}, {'text': ' glad', 'timestamp': (3.72, 4.0)}, {'text': ' to', 'timestamp': (4.0, 4.26)}, - {'text': ' welcome', 'timestamp': (4.26, 4.54)}, - {'text': ' his', 'timestamp': (4.54, 4.92)}, - {'text': ' gospel.', 'timestamp': (4.92, 6.66)}, - ], - }, + {'text': ' welcome', 'timestamp': (4.26, 4.56)}, + {'text': ' his', 'timestamp': (4.56, 4.92)}, + {'text': ' gospel.', 'timestamp': (4.92, 5.84)} + ] + } ) # fmt: on @@ -1087,6 +1087,34 @@ def test_with_local_lm_fast(self): self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], " Date: Wed, 22 Nov 2023 08:21:36 -0800 Subject: [PATCH 240/268] Add UnivNet Vocoder Model for Tortoise TTS Diffusers Integration (#24799) * initial commit * Add inital testing files and modify __init__ files to add UnivNet imports. * Fix some bugs * Add checkpoint conversion script and add references to transformers pre-trained model. * Add UnivNet entries for auto. * Add initial docs for UnivNet. * Handle input and output shapes in UnivNetGan.forward and add initial docstrings. * Write tests and make them pass. * Write docs. * Add UnivNet doc to _toctree.yml and improve docs. * fix typo * make fixup * make fix-copies * Add upsample_rates parameter to config and improve config documentation. * make fixup * make fix-copies * Remove unused upsample_rates config parameter. * apply suggestions from review * make style * Verify and add reason for skipped tests inherited from ModelTesterMixin. * Add initial UnivNetGan integration tests * make style * Remove noise_length input to UnivNetGan and improve integration tests. * Fix bug and make style * Make UnivNet integration tests pass * Add initial code for UnivNetFeatureExtractor. * make style * Add initial tests for UnivNetFeatureExtractor. * make style * Properly initialize weights for UnivNetGan * Get feature extractor fast tests passing * make style * Get feature extractor integration tests passing * Get UnivNet integration tests passing * make style * Add UnivNetGan usage example * make style and use feature extractor from hub in integration tests * Update tips in docs * apply suggestions from review * make style * Calculate padding directly instead of using get_padding methods. * Update UnivNetFeatureExtractor.to_dict to be UnivNet-specific. * Update feature extractor to support using model(**inputs) and add the ability to generate noise and pad the end of the spectrogram in __call__. * Perform padding before generating noise to ensure the shapes are correct. * Rename UnivNetGan.forward's noise_waveform argument to noise_sequence. 
* make style * Add tests to test generating noise and padding the end for UnivNetFeatureExtractor.__call__. * Add tests for checking batched vs unbatched inputs for UnivNet feature extractor and model. * Add expected mean and stddev checks to the integration tests and make them pass. * make style * Make it possible to use model(**inputs), where inputs is the output of the feature extractor. * fix typo in UnivNetGanConfig example * Calculate spectrogram_zero from other config values. * apply suggestions from review * make style * Refactor UnivNet conversion script to use load_state_dict (following persimmon). * Rename UnivNetFeatureExtractor to UnivNetGanFeatureExtractor. * make style * Switch to using torch.tensor and torch.testing.assert_close for testing expected values/slices. * make style * Use config in UnivNetGan modeling blocks. * make style * Rename the spectrogram argument of UnivNetGan.forward to input_features, following Whisper. * make style * Improving padding documentation. * Add UnivNet usage example to the docs. * apply suggestions from review * Move dynamic_range_compression computation into the mel_spectrogram method of the feature extractor. * Improve UnivNetGan.forward return docstring. * Update table in docs/source/en/index.md. * make fix-copies * Rename UnivNet components to have pattern UnivNet*. * make style * make fix-copies * Update docs * make style * Increase tolerance on flaky unbatched integration test. * Remove torch.no_grad decorators from UnivNet integration tests to try to avoid flax/Tensorflow test errors. * Add padding_mask argument to UnivNetModel.forward and add batch_decode feature extractor method to remove padding. * Update documentation and clean up padding code. * make style * make style * Remove torch dependency from UnivNetFeatureExtractor. * make style * Fix UnivNetModel usage example * Clean up feature extractor code/docstrings. * apply suggestions from review * make style * Add comments for tests skipped via ModelTesterMixin flags. * Add comment for model parallel tests skipped via the test_model_parallel ModelTesterMixin flag. * Add # Copied from statements to copied UnivNetFeatureExtractionTest tests. * Simplify UnivNetFeatureExtractorTest.test_batch_decode. * Add support for unbatched padding_masks in UnivNetModel.forward. * Refactor unbatched padding_mask support. 
* make style --- README.md | 1 + README_es.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + docs/source/en/model_doc/univnet.md | 80 +++ src/transformers/__init__.py | 17 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 1 + src/transformers/models/univnet/__init__.py | 65 ++ .../models/univnet/configuration_univnet.py | 127 ++++ .../models/univnet/convert_univnet.py | 162 +++++ .../univnet/feature_extraction_univnet.py | 456 +++++++++++++ .../models/univnet/modeling_univnet.py | 636 ++++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 10 + tests/models/univnet/__init__.py | 0 .../test_feature_extraction_univnet.py | 365 ++++++++++ tests/models/univnet/test_modeling_univnet.py | 365 ++++++++++ 24 files changed, 2299 insertions(+) create mode 100644 docs/source/en/model_doc/univnet.md create mode 100644 src/transformers/models/univnet/__init__.py create mode 100644 src/transformers/models/univnet/configuration_univnet.py create mode 100644 src/transformers/models/univnet/convert_univnet.py create mode 100644 src/transformers/models/univnet/feature_extraction_univnet.py create mode 100644 src/transformers/models/univnet/modeling_univnet.py create mode 100644 tests/models/univnet/__init__.py create mode 100644 tests/models/univnet/test_feature_extraction_univnet.py create mode 100644 tests/models/univnet/test_modeling_univnet.py diff --git a/README.md b/README.md index 80444a56a312..e65d512defc4 100644 --- a/README.md +++ b/README.md @@ -494,6 +494,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. +1. **[UnivNet](https://huggingface.co/docs/transformers/main/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 1. 
**[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. diff --git a/README_es.md b/README_es.md index f7f4f14fb002..d42750237fc2 100644 --- a/README_es.md +++ b/README_es.md @@ -469,6 +469,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. +1. **[UnivNet](https://huggingface.co/docs/transformers/main/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 
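The README entries added in this patch introduce UnivNet as a GAN vocoder whose generator maps a conditioning log-mel spectrogram (plus a Gaussian noise sequence) to a speech waveform. As a minimal sketch of that interface, assuming the `dg845/univnet-dev` checkpoint, the `input_features`/`noise_sequence` shapes, and the `waveforms` output attribute described in the documentation and commit message added later in this patch; the random spectrogram below is only a stand-in for real features, so the synthesized audio is meaningless and the snippet just illustrates the shape contract:

```python
import torch

from transformers import UnivNetModel

# Checkpoint referenced elsewhere in this patch; assumed to be available on the Hub.
model = UnivNetModel.from_pretrained("dg845/univnet-dev")
model.eval()

# Dummy conditioning log-mel spectrogram: (batch_size, num_frames, num_mel_bins).
# Random values stand in for real features produced by UnivNetFeatureExtractor.
spectrogram = torch.randn(1, 200, model.config.num_mel_bins)

# Standard Gaussian noise with the documented shape
# (batch_size, noise_length, model_in_channels); seeding the generator makes
# the synthesized waveform reproducible across runs.
generator = torch.Generator().manual_seed(0)
noise = torch.randn(
    (1, spectrogram.shape[1], model.config.model_in_channels), generator=generator
)

with torch.no_grad():
    waveform = model(input_features=spectrogram, noise_sequence=noise).waveforms

print(waveform.shape)  # (batch_size, num_samples)
```

In practice the feature extractor's output (which already bundles generated noise and, optionally, a padding mask) can simply be passed as `model(**inputs)`, as the usage example in the `docs/source/en/model_doc/univnet.md` file added later in this patch shows.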
diff --git a/README_hd.md b/README_hd.md index d2e1ce6e9e7a..e4dd69943f4c 100644 --- a/README_hd.md +++ b/README_hd.md @@ -443,6 +443,7 @@ conda install -c huggingface transformers 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research से) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. द्वाराअनुसंधान पत्र [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) के साथ जारी किया गया 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (माइक्रोसॉफ्ट रिसर्च से) साथ में दिया गया पेपर [UniSpeech: यूनिफाइड स्पीच रिप्रेजेंटेशन लर्निंग विद लेबलेड एंड अनलेबल्ड डेटा](https:/ /arxiv.org/abs/2101.07597) चेंगई वांग, यू वू, याओ कियान, केनिची कुमातानी, शुजी लियू, फुरु वेई, माइकल ज़ेंग, ज़ुएदोंग हुआंग द्वारा। 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (माइक्रोसॉफ्ट रिसर्च से) कागज के साथ [UNISPEECH-SAT: यूनिवर्सल स्पीच रिप्रेजेंटेशन लर्निंग विद स्पीकर अवेयर प्री-ट्रेनिंग ](https://arxiv.org/abs/2110.05752) सानयुआन चेन, यू वू, चेंग्यी वांग, झेंगयांग चेन, झूओ चेन, शुजी लियू, जियान वू, याओ कियान, फुरु वेई, जिन्यु ली, जियांगज़ान यू द्वारा पोस्ट किया गया। +1. **[UnivNet](https://huggingface.co/docs/transformers/main/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (सिंघुआ यूनिवर्सिटी और ननकाई यूनिवर्सिटी से) साथ में पेपर [विजुअल अटेंशन नेटवर्क](https://arxiv.org/ pdf/2202.09741.pdf) मेंग-हाओ गुओ, चेंग-ज़े लू, झेंग-निंग लियू, मिंग-मिंग चेंग, शि-मिन हू द्वारा। 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (मल्टीमीडिया कम्प्यूटिंग ग्रुप, नानजिंग यूनिवर्सिटी से) साथ में पेपर [वीडियोएमएई: मास्क्ड ऑटोएन्कोडर स्व-पर्यवेक्षित वीडियो प्री-ट्रेनिंग के लिए डेटा-कुशल सीखने वाले हैं] (https://arxiv.org/abs/2203.12602) ज़ान टोंग, यिबिंग सॉन्ग, जुए द्वारा वांग, लिमिन वांग द्वारा पोस्ट किया गया। diff --git a/README_ja.md b/README_ja.md index 0cdd96c306a6..ea8de35ff3de 100644 --- a/README_ja.md +++ b/README_ja.md @@ -503,6 +503,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research から) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. から公開された研究論文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research から) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang から公開された研究論文: [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 1. 
**[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (Microsoft Research から) Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu から公開された研究論文: [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) +1. **[UnivNet](https://huggingface.co/docs/transformers/main/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (Peking University から) Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. から公開された研究論文 [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (Tsinghua University and Nankai University から) Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu から公開された研究論文: [Visual Attention Network](https://arxiv.org/abs/2202.09741) 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (Multimedia Computing Group, Nanjing University から) Zhan Tong, Yibing Song, Jue Wang, Limin Wang から公開された研究論文: [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) diff --git a/README_ko.md b/README_ko.md index dc72f85b5482..c88d62a9ad9f 100644 --- a/README_ko.md +++ b/README_ko.md @@ -418,6 +418,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research 에서 제공)은 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.의 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi)논문과 함께 발표했습니다. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research 에서) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 의 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 논문과 함께 발표했습니다. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (Microsoft Research 에서) Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 의 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 논문과 함께 발표했습니다. +1. **[UnivNet](https://huggingface.co/docs/transformers/main/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (Peking University 에서 제공)은 Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.의 [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221)논문과 함께 발표했습니다. 1. 
**[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (Tsinghua University and Nankai University 에서) Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 의 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 논문과 함께 발표했습니다. 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (Multimedia Computing Group, Nanjing University 에서) Zhan Tong, Yibing Song, Jue Wang, Limin Wang 의 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 논문과 함께 발표했습니다. diff --git a/README_zh-hans.md b/README_zh-hans.md index d74abb2a00e1..e6e6ab59cd06 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -442,6 +442,7 @@ conda install -c huggingface transformers 1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (来自 Google Research) 伴随论文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) 由 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant 发布。 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。 +1. **[UnivNet](https://huggingface.co/docs/transformers/main/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (来自 Peking University) 伴随论文 [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) 由 Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun 发布。 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (来自 Multimedia Computing Group, Nanjing University) 伴随论文 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 由 Zhan Tong, Yibing Song, Jue Wang, Limin Wang 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index eca50e97c7b1..21cbe14be804 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -454,6 +454,7 @@ conda install -c huggingface transformers 1. 
**[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. +1. **[UnivNet](https://huggingface.co/docs/transformers/main/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index fd64afdf486f..d7d593b21e62 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -628,6 +628,8 @@ title: UniSpeech - local: model_doc/unispeech-sat title: UniSpeech-SAT + - local: model_doc/univnet + title: UnivNet - local: model_doc/vits title: VITS - local: model_doc/wav2vec2 diff --git a/docs/source/en/index.md b/docs/source/en/index.md index b19d567f8e47..b631db63529c 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -269,6 +269,7 @@ Flax), PyTorch, and/or TensorFlow. 
| [UMT5](model_doc/umt5) | ✅ | ❌ | ❌ | | [UniSpeech](model_doc/unispeech) | ✅ | ❌ | ❌ | | [UniSpeechSat](model_doc/unispeech-sat) | ✅ | ❌ | ❌ | +| [UnivNet](model_doc/univnet) | ✅ | ❌ | ❌ | | [UPerNet](model_doc/upernet) | ✅ | ❌ | ❌ | | [VAN](model_doc/van) | ✅ | ❌ | ❌ | | [VideoMAE](model_doc/videomae) | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/univnet.md b/docs/source/en/model_doc/univnet.md new file mode 100644 index 000000000000..45bd94732773 --- /dev/null +++ b/docs/source/en/model_doc/univnet.md @@ -0,0 +1,80 @@ + + +# UnivNet + +## Overview + +The UnivNet model was proposed in [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kin, and Juntae Kim. +The UnivNet model is a generative adversarial network (GAN) trained to synthesize high fidelity speech waveforms. The UnivNet model shared in `transformers` is the *generator*, which maps a conditioning log-mel spectrogram and optional noise sequence to a speech waveform (e.g. a vocoder). Only the generator is required for inference. The *discriminator* used to train the `generator` is not implemented. + +The abstract from the paper is the following: + +*Most neural vocoders employ band-limited mel-spectrograms to generate waveforms. If full-band spectral features are used as the input, the vocoder can be provided with as much acoustic information as possible. However, in some models employing full-band mel-spectrograms, an over-smoothing problem occurs as part of which non-sharp spectrograms are generated. To address this problem, we propose UnivNet, a neural vocoder that synthesizes high-fidelity waveforms in real time. Inspired by works in the field of voice activity detection, we added a multi-resolution spectrogram discriminator that employs multiple linear spectrogram magnitudes computed using various parameter sets. Using full-band mel-spectrograms as input, we expect to generate high-resolution signals by adding a discriminator that employs spectrograms of multiple resolutions as the input. In an evaluation on a dataset containing information on hundreds of speakers, UnivNet obtained the best objective and subjective results among competing models for both seen and unseen speakers. These results, including the best subjective score for text-to-speech, demonstrate the potential for fast adaptation to new speakers without a need for training from scratch.* + +Tips: + +- The `noise_sequence` argument for [`UnivNetModel.forward`] should be standard Gaussian noise (such as from `torch.randn`) of shape `([batch_size], noise_length, model.config.model_in_channels)`, where `noise_length` should match the length dimension (dimension 1) of the `input_features` argument. If not supplied, it will be randomly generated; a `torch.Generator` can be supplied to the `generator` argument so that the forward pass can be reproduced. (Note that [`UnivNetFeatureExtractor`] will return generated noise by default, so it shouldn't be necessary to generate `noise_sequence` manually.) +- Padding added by [`UnivNetFeatureExtractor`] can be removed from the [`UnivNetModel`] output through the [`UnivNetFeatureExtractor.batch_decode`] method, as shown in the usage example below. +- Padding the end of each waveform with silence can reduce artifacts at the end of the generated audio sample. This can be done by supplying `pad_end = True` to [`UnivNetFeatureExtractor.__call__`]. 
See [this issue](https://github.com/seungwonpark/melgan/issues/8) for more details. + +Usage Example: + +```python +import torch +from scipy.io.wavfile import write +from datasets import Audio, load_dataset + +from transformers import UnivNetFeatureExtractor, UnivNetModel + +model_id_or_path = "dg845/univnet-dev" +model = UnivNetModel.from_pretrained(model_id_or_path) +feature_extractor = UnivNetFeatureExtractor.from_pretrained(model_id_or_path) + +ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") +# Resample the audio to the model and feature extractor's sampling rate. +ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate)) +# Pad the end of the converted waveforms to reduce artifacts at the end of the output audio samples. +inputs = feature_extractor( + ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], pad_end=True, return_tensors="pt" +) + +with torch.no_grad(): + audio = model(**inputs) + +# Remove the extra padding at the end of the output. +audio = feature_extractor.batch_decode(**audio)[0] +# Convert to wav file +write("sample_audio.wav", feature_extractor.sampling_rate, audio) +``` + +This model was contributed by [dg845](https://huggingface.co/dg845). +To the best of my knowledge, there is no official code release, but an unofficial implementation can be found at [maum-ai/univnet](https://github.com/maum-ai/univnet) with pretrained checkpoints [here](https://github.com/maum-ai/univnet#pre-trained-model). + + +## UnivNetConfig + +[[autodoc]] UnivNetConfig + +## UnivNetFeatureExtractor + +[[autodoc]] UnivNetFeatureExtractor + - __call__ + +## UnivNetModel + +[[autodoc]] UnivNetModel + - forward \ No newline at end of file diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 672386c7938b..e09752f5f39c 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -611,6 +611,11 @@ "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechSatConfig", ], + "models.univnet": [ + "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "UnivNetConfig", + "UnivNetFeatureExtractor", + ], "models.upernet": ["UperNetConfig"], "models.videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"], "models.vilt": [ @@ -2977,6 +2982,12 @@ "UniSpeechSatPreTrainedModel", ] ) + _import_structure["models.univnet"].extend( + [ + "UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "UnivNetModel", + ] + ) _import_structure["models.upernet"].extend( [ "UperNetForSemanticSegmentation", @@ -4817,6 +4828,11 @@ from .models.umt5 import UMT5Config from .models.unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig from .models.unispeech_sat import UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechSatConfig + from .models.univnet import ( + UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + UnivNetConfig, + UnivNetFeatureExtractor, + ) from .models.upernet import UperNetConfig from .models.videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig from .models.vilt import ( @@ -6807,6 +6823,7 @@ UniSpeechSatModel, UniSpeechSatPreTrainedModel, ) + from .models.univnet import UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST, UnivNetModel from .models.upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel from .models.videomae import ( VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 976f3f551886..997ee82b4324 100644 --- a/src/transformers/models/__init__.py +++ 
b/src/transformers/models/__init__.py @@ -211,6 +211,7 @@ umt5, unispeech, unispeech_sat, + univnet, upernet, videomae, vilt, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 5b19b842acb6..78a33270e7ac 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -218,6 +218,7 @@ ("umt5", "UMT5Config"), ("unispeech", "UniSpeechConfig"), ("unispeech-sat", "UniSpeechSatConfig"), + ("univnet", "UnivNetConfig"), ("upernet", "UperNetConfig"), ("van", "VanConfig"), ("videomae", "VideoMAEConfig"), @@ -424,6 +425,7 @@ ("tvp", "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("univnet", "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("van", "VAN_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("videomae", "VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("vilt", "VILT_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -667,6 +669,7 @@ ("umt5", "UMT5"), ("unispeech", "UniSpeech"), ("unispeech-sat", "UniSpeechSat"), + ("univnet", "UnivNet"), ("upernet", "UPerNet"), ("van", "VAN"), ("videomae", "VideoMAE"), diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 2c2699502642..395875dfa14b 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -91,6 +91,7 @@ ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), + ("univnet", "UnivNetFeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index d435a8770c9e..a62880d32696 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -204,6 +204,7 @@ ("umt5", "UMT5Model"), ("unispeech", "UniSpeechModel"), ("unispeech-sat", "UniSpeechSatModel"), + ("univnet", "UnivNetModel"), ("van", "VanModel"), ("videomae", "VideoMAEModel"), ("vilt", "ViltModel"), diff --git a/src/transformers/models/univnet/__init__.py b/src/transformers/models/univnet/__init__.py new file mode 100644 index 000000000000..afb03ee9894b --- /dev/null +++ b/src/transformers/models/univnet/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "configuration_univnet": [ + "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "UnivNetConfig", + ], + "feature_extraction_univnet": ["UnivNetFeatureExtractor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_univnet"] = [ + "UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "UnivNetModel", + ] + + +if TYPE_CHECKING: + from .configuration_univnet import ( + UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + UnivNetConfig, + ) + from .feature_extraction_univnet import UnivNetFeatureExtractor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_univnet import ( + UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST, + UnivNetModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/univnet/configuration_univnet.py b/src/transformers/models/univnet/configuration_univnet.py new file mode 100644 index 000000000000..c9dbbb532821 --- /dev/null +++ b/src/transformers/models/univnet/configuration_univnet.py @@ -0,0 +1,127 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" UnivNetModel model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "dg845/univnet-dev": "https://huggingface.co/dg845/univnet-dev/resolve/main/config.json", +} + + +class UnivNetConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`UnivNetModel`]. It is used to instantiate a + UnivNet vocoder model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the UnivNet + [dg845/univnet-dev](https://huggingface.co/dg845/univnet-dev) architecture, which corresponds to the 'c32' + architecture in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/master/config/default_c32.yaml). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + model_in_channels (`int`, *optional*, defaults to 64): + The number of input channels for the UnivNet residual network. This should correspond to + `noise_sequence.shape[1]` and the value used in the [`UnivNetFeatureExtractor`] class. + model_hidden_channels (`int`, *optional*, defaults to 32): + The number of hidden channels of each residual block in the UnivNet residual network. 
+ num_mel_bins (`int`, *optional*, defaults to 100): + The number of frequency bins in the conditioning log-mel spectrogram. This should correspond to the value + used in the [`UnivNetFeatureExtractor`] class. + resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 3, 3]`): + A tuple of integers defining the kernel sizes of the 1D convolutional layers in the UnivNet residual + network. The length of `resblock_kernel_sizes` defines the number of resnet blocks and should match that of + `resblock_stride_sizes` and `resblock_dilation_sizes`. + resblock_stride_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 4]`): + A tuple of integers defining the stride sizes of the 1D convolutional layers in the UnivNet residual + network. The length of `resblock_stride_sizes` should match that of `resblock_kernel_sizes` and + `resblock_dilation_sizes`. + resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]]`): + A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the + UnivNet residual network. The length of `resblock_dilation_sizes` should match that of + `resblock_kernel_sizes` and `resblock_stride_sizes`. The length of each nested list in + `resblock_dilation_sizes` defines the number of convolutional layers per resnet block. + kernel_predictor_num_blocks (`int`, *optional*, defaults to 3): + The number of residual blocks in the kernel predictor network, which calculates the kernel and bias for + each location variable convolution layer in the UnivNet residual network. + kernel_predictor_hidden_channels (`int`, *optional*, defaults to 64): + The number of hidden channels for each residual block in the kernel predictor network. + kernel_predictor_conv_size (`int`, *optional*, defaults to 3): + The kernel size of each 1D convolutional layer in the kernel predictor network. + kernel_predictor_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for each residual block in the kernel predictor network. + initializer_range (`float`, *optional*, defaults to 0.01): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + leaky_relu_slope (`float`, *optional*, defaults to 0.2): + The angle of the negative slope used by the leaky ReLU activation. 
+ + Example: + + ```python + >>> from transformers import UnivNetModel, UnivNetConfig + + >>> # Initializing a Tortoise TTS style configuration + >>> configuration = UnivNetConfig() + + >>> # Initializing a model (with random weights) from the Tortoise TTS style configuration + >>> model = UnivNetModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` + """ + + model_type = "univnet" + + def __init__( + self, + model_in_channels=64, + model_hidden_channels=32, + num_mel_bins=100, + resblock_kernel_sizes=[3, 3, 3], + resblock_stride_sizes=[8, 8, 4], + resblock_dilation_sizes=[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]], + kernel_predictor_num_blocks=3, + kernel_predictor_hidden_channels=64, + kernel_predictor_conv_size=3, + kernel_predictor_dropout=0.0, + initializer_range=0.01, + leaky_relu_slope=0.2, + **kwargs, + ): + if not (len(resblock_kernel_sizes) == len(resblock_stride_sizes) == len(resblock_dilation_sizes)): + raise ValueError( + "`resblock_kernel_sizes`, `resblock_stride_sizes`, and `resblock_dilation_sizes` must all have the" + " same length (which will be the number of resnet blocks in the model)." + ) + + self.model_in_channels = model_in_channels + self.model_hidden_channels = model_hidden_channels + self.num_mel_bins = num_mel_bins + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_stride_sizes = resblock_stride_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.kernel_predictor_num_blocks = kernel_predictor_num_blocks + self.kernel_predictor_hidden_channels = kernel_predictor_hidden_channels + self.kernel_predictor_conv_size = kernel_predictor_conv_size + self.kernel_predictor_dropout = kernel_predictor_dropout + self.initializer_range = initializer_range + self.leaky_relu_slope = leaky_relu_slope + super().__init__(**kwargs) diff --git a/src/transformers/models/univnet/convert_univnet.py b/src/transformers/models/univnet/convert_univnet.py new file mode 100644 index 000000000000..30520b7fa147 --- /dev/null +++ b/src/transformers/models/univnet/convert_univnet.py @@ -0,0 +1,162 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
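
The configuration constructor above enforces that the three `resblock_*` lists are equally long, since their common length is the number of LVC resnet blocks. A minimal sketch of how that plays out when instantiating the config:

```python
from transformers import UnivNetConfig

# Default 'c32'-style configuration.
config = UnivNetConfig()
print(config.resblock_stride_sizes)  # [8, 8, 4]

# Mismatched lengths are rejected up front.
try:
    UnivNetConfig(resblock_kernel_sizes=[3, 3], resblock_stride_sizes=[8, 8, 4])
except ValueError as err:
    print(err)
```
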
+ +import argparse + +import torch + +from transformers import UnivNetConfig, UnivNetModel, logging + + +logging.set_verbosity_info() +logger = logging.get_logger("transformers.models.univnet") + + +def get_kernel_predictor_key_mapping(config: UnivNetConfig, old_prefix: str = "", new_prefix: str = ""): + mapping = {} + # Initial conv layer + mapping[f"{old_prefix}.input_conv.0.weight_g"] = f"{new_prefix}.input_conv.weight_g" + mapping[f"{old_prefix}.input_conv.0.weight_v"] = f"{new_prefix}.input_conv.weight_v" + mapping[f"{old_prefix}.input_conv.0.bias"] = f"{new_prefix}.input_conv.bias" + + # Kernel predictor resnet blocks + for i in range(config.kernel_predictor_num_blocks): + mapping[f"{old_prefix}.residual_convs.{i}.1.weight_g"] = f"{new_prefix}.resblocks.{i}.conv1.weight_g" + mapping[f"{old_prefix}.residual_convs.{i}.1.weight_v"] = f"{new_prefix}.resblocks.{i}.conv1.weight_v" + mapping[f"{old_prefix}.residual_convs.{i}.1.bias"] = f"{new_prefix}.resblocks.{i}.conv1.bias" + + mapping[f"{old_prefix}.residual_convs.{i}.3.weight_g"] = f"{new_prefix}.resblocks.{i}.conv2.weight_g" + mapping[f"{old_prefix}.residual_convs.{i}.3.weight_v"] = f"{new_prefix}.resblocks.{i}.conv2.weight_v" + mapping[f"{old_prefix}.residual_convs.{i}.3.bias"] = f"{new_prefix}.resblocks.{i}.conv2.bias" + + # Kernel output conv + mapping[f"{old_prefix}.kernel_conv.weight_g"] = f"{new_prefix}.kernel_conv.weight_g" + mapping[f"{old_prefix}.kernel_conv.weight_v"] = f"{new_prefix}.kernel_conv.weight_v" + mapping[f"{old_prefix}.kernel_conv.bias"] = f"{new_prefix}.kernel_conv.bias" + + # Bias output conv + mapping[f"{old_prefix}.bias_conv.weight_g"] = f"{new_prefix}.bias_conv.weight_g" + mapping[f"{old_prefix}.bias_conv.weight_v"] = f"{new_prefix}.bias_conv.weight_v" + mapping[f"{old_prefix}.bias_conv.bias"] = f"{new_prefix}.bias_conv.bias" + + return mapping + + +def get_key_mapping(config: UnivNetConfig): + mapping = {} + + # NOTE: inital conv layer keys are the same + + # LVC Residual blocks + for i in range(len(config.resblock_stride_sizes)): + # LVCBlock initial convt layer + mapping[f"res_stack.{i}.convt_pre.1.weight_g"] = f"resblocks.{i}.convt_pre.weight_g" + mapping[f"res_stack.{i}.convt_pre.1.weight_v"] = f"resblocks.{i}.convt_pre.weight_v" + mapping[f"res_stack.{i}.convt_pre.1.bias"] = f"resblocks.{i}.convt_pre.bias" + + # Kernel predictor + kernel_predictor_mapping = get_kernel_predictor_key_mapping( + config, old_prefix=f"res_stack.{i}.kernel_predictor", new_prefix=f"resblocks.{i}.kernel_predictor" + ) + mapping.update(kernel_predictor_mapping) + + # LVC Residual blocks + for j in range(len(config.resblock_dilation_sizes[i])): + mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_g"] = f"resblocks.{i}.resblocks.{j}.conv.weight_g" + mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_v"] = f"resblocks.{i}.resblocks.{j}.conv.weight_v" + mapping[f"res_stack.{i}.conv_blocks.{j}.1.bias"] = f"resblocks.{i}.resblocks.{j}.conv.bias" + + # Output conv layer + mapping["conv_post.1.weight_g"] = "conv_post.weight_g" + mapping["conv_post.1.weight_v"] = "conv_post.weight_v" + mapping["conv_post.1.bias"] = "conv_post.bias" + + return mapping + + +def rename_state_dict(state_dict, keys_to_modify, keys_to_remove): + model_state_dict = {} + for key, value in state_dict.items(): + if key in keys_to_remove: + continue + + if key in keys_to_modify: + new_key = keys_to_modify[key] + model_state_dict[new_key] = value + else: + model_state_dict[key] = value + return model_state_dict + + +def convert_univnet_checkpoint( + checkpoint_path, + 
pytorch_dump_folder_path, + config_path=None, + repo_id=None, + safe_serialization=False, +): + model_state_dict_base = torch.load(checkpoint_path, map_location="cpu") + # Get the generator's state dict + state_dict = model_state_dict_base["model_g"] + + if config_path is not None: + config = UnivNetConfig.from_pretrained(config_path) + else: + config = UnivNetConfig() + + keys_to_modify = get_key_mapping(config) + keys_to_remove = set() + hf_state_dict = rename_state_dict(state_dict, keys_to_modify, keys_to_remove) + + model = UnivNetModel(config) + # Apply weight norm since the original checkpoint has weight norm applied + model.apply_weight_norm() + model.load_state_dict(hf_state_dict) + # Remove weight norm in preparation for inference + model.remove_weight_norm() + + model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization) + + if repo_id: + print("Pushing to the hub...") + model.push_to_hub(repo_id) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") + parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") + parser.add_argument( + "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." + ) + parser.add_argument( + "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." + ) + parser.add_argument( + "--safe_serialization", action="store_true", help="Whether to save the model using `safetensors`." + ) + + args = parser.parse_args() + + convert_univnet_checkpoint( + args.checkpoint_path, + args.pytorch_dump_folder_path, + args.config_path, + args.push_to_hub, + args.safe_serialization, + ) + + +if __name__ == "__main__": + main() diff --git a/src/transformers/models/univnet/feature_extraction_univnet.py b/src/transformers/models/univnet/feature_extraction_univnet.py new file mode 100644 index 000000000000..067aacc3d8c8 --- /dev/null +++ b/src/transformers/models/univnet/feature_extraction_univnet.py @@ -0,0 +1,456 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for UnivNetModel.""" + +from typing import Any, Dict, List, Optional, Union + +import numpy as np + +from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function +from ...feature_extraction_sequence_utils import SequenceFeatureExtractor +from ...feature_extraction_utils import BatchFeature +from ...utils import PaddingStrategy, TensorType, logging + + +logger = logging.get_logger(__name__) + + +class UnivNetFeatureExtractor(SequenceFeatureExtractor): + r""" + Constructs a UnivNet feature extractor. + + This class extracts log-mel-filter bank features from raw speech using the short time Fourier Transform (STFT). The + STFT implementation follows that of TacoTron 2 and Hifi-GAN. 
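
For reference, the conversion entry point above is normally invoked as a script (`python convert_univnet.py --checkpoint_path ... --pytorch_dump_folder_path ...`), but the function can also be called directly. A sketch, with hypothetical file names and assuming an original maum-ai/univnet generator checkpoint on disk:

```python
from transformers.models.univnet.convert_univnet import convert_univnet_checkpoint

# Both paths below are placeholders for illustration only.
convert_univnet_checkpoint(
    checkpoint_path="univnet_c32.pt",          # original checkpoint containing the "model_g" state dict
    pytorch_dump_folder_path="./univnet-dev",  # where the converted UnivNetModel is saved
    config_path=None,                          # fall back to the default UnivNetConfig ('c32')
    repo_id=None,                              # set to a hub repo name to also push the converted weights
    safe_serialization=True,                   # save as safetensors
)
```
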
+ + This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains + most of the main methods. Users should refer to this superclass for more information regarding those methods. + + Args: + feature_size (`int`, *optional*, defaults to 1): + The feature dimension of the extracted features. + sampling_rate (`int`, *optional*, defaults to 24000): + The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). + padding_value (`float`, *optional*, defaults to 0.0): + The value to pad with when applying the padding strategy defined by the `padding` argument to + [`UnivNetFeatureExtractor.__call__`]. Should correspond to audio silence. The `pad_end` argument to + `__call__` will also use this padding value. + do_normalize (`bool`, *optional*, defaults to `False`): + Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the + performance for some models. + num_mel_bins (`int`, *optional*, defaults to 100): + The number of mel-frequency bins in the extracted spectrogram features. This should match + `UnivNetModel.config.num_mel_bins`. + hop_length (`int`, *optional*, defaults to 256): + The direct number of samples between sliding windows. Otherwise referred to as "shift" in many papers. Note + that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take + the `hop_length` in ms. + win_length (`int`, *optional*, defaults to 1024): + The direct number of samples for each sliding window. Note that this is different from other audio feature + extractors such as [`SpeechT5FeatureExtractor`] which take the `win_length` in ms. + win_function (`str`, *optional*, defaults to `"hann_window"`): + Name for the window function used for windowing, must be accessible via `torch.{win_function}` + filter_length (`int`, *optional*, defaults to 1024): + The number of FFT components to use. If `None`, this is determined using + `transformers.audio_utils.optimal_fft_length`. + max_length_s (`int`, *optional*, defaults to 10): + The maximum input lenght of the model in seconds. This is used to pad the audio. + fmin (`float`, *optional*, defaults to 0.0): + Minimum mel frequency in Hz. + fmax (`float`, *optional*): + Maximum mel frequency in Hz. If not set, defaults to `sampling_rate / 2`. + mel_floor (`float`, *optional*, defaults to 1e-09): + Minimum value of mel frequency banks. Note that the way [`UnivNetFeatureExtractor`] uses `mel_floor` is + different than in [`transformers.audio_utils.spectrogram`]. + center (`bool`, *optional*, defaults to `False`): + Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame + `t` will start at time `t * hop_length`. + compression_factor (`float`, *optional*, defaults to 1.0): + The multiplicative compression factor for dynamic range compression during spectral normalization. + compression_clip_val (`float`, *optional*, defaults to 1e-05): + The clip value applied to the waveform before applying dynamic range compression during spectral + normalization. + normalize_min (`float`, *optional*, defaults to -11.512925148010254): + The min value used for Tacotron 2-style linear normalization. The default is the original value from the + Tacotron 2 implementation. + normalize_max (`float`, *optional*, defaults to 2.3143386840820312): + The max value used for Tacotron 2-style linear normalization. 
The default is the original value from the + Tacotron 2 implementation. + model_in_channels (`int`, *optional*, defaults to 64): + The number of input channels to the [`UnivNetModel`] model. This should match + `UnivNetModel.config.model_in_channels`. + pad_end_length (`int`, *optional*, defaults to 10): + If padding the end of each waveform, the number of spectrogram frames worth of samples to append. The + number of appended samples will be `pad_end_length * hop_length`. + return_attention_mask (`bool`, *optional*, defaults to `True`): + Whether or not [`~UnivNetFeatureExtractor.__call__`] should return `attention_mask`. + """ + + model_input_names = ["input_features", "noise_sequence", "padding_mask"] + + def __init__( + self, + feature_size: int = 1, + sampling_rate: int = 24000, + padding_value: float = 0.0, + do_normalize: bool = False, + num_mel_bins: int = 100, + hop_length: int = 256, + win_length: int = 1024, + win_function: str = "hann_window", + filter_length: Optional[int] = 1024, + max_length_s: int = 10, + fmin: float = 0.0, + fmax: Optional[float] = None, + mel_floor: float = 1e-9, + center: bool = False, + compression_factor: float = 1.0, + compression_clip_val: float = 1e-5, + normalize_min: float = -11.512925148010254, + normalize_max: float = 2.3143386840820312, + model_in_channels: int = 64, + pad_end_length: int = 10, + return_attention_mask=True, + **kwargs, + ): + super().__init__( + feature_size=feature_size, + sampling_rate=sampling_rate, + padding_value=padding_value, + return_attention_mask=return_attention_mask, + **kwargs, + ) + + self.do_normalize = do_normalize + + self.num_mel_bins = num_mel_bins + self.hop_length = hop_length + self.win_length = win_length + self.win_function = win_function + self.filter_length = filter_length + self.fmin = fmin + if fmax is None: + # Follows the librosa.filters.mel implementation + fmax = float(sampling_rate) / 2 + self.fmax = fmax + self.mel_floor = mel_floor + + self.max_length_s = max_length_s + self.num_max_samples = max_length_s * sampling_rate + + if self.filter_length is None: + self.n_fft = optimal_fft_length(self.win_length) + else: + self.n_fft = self.filter_length + self.n_freqs = (self.n_fft // 2) + 1 + + self.window = window_function(window_length=self.win_length, name=self.win_function, periodic=True) + + self.mel_filters = mel_filter_bank( + num_frequency_bins=self.n_freqs, + num_mel_filters=self.num_mel_bins, + min_frequency=self.fmin, + max_frequency=self.fmax, + sampling_rate=self.sampling_rate, + norm="slaney", + mel_scale="slaney", + ) + + self.center = center + self.compression_factor = compression_factor + self.compression_clip_val = compression_clip_val + self.normalize_min = normalize_min + self.normalize_max = normalize_max + self.model_in_channels = model_in_channels + self.pad_end_length = pad_end_length + + def normalize(self, spectrogram): + return 2 * ((spectrogram - self.normalize_min) / (self.normalize_max - self.normalize_min)) - 1 + + def denormalize(self, spectrogram): + return self.normalize_min + (self.normalize_max - self.normalize_min) * ((spectrogram + 1) / 2) + + def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray: + """ + Calculates log MEL spectrograms from a batch of waveforms. Note that the input waveform(s) will be padded by + `int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode. + + Args: + waveform (`np.ndarray` of shape `(length,)`): + The input waveform. This must be a single real-valued, mono waveform. 
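
With the defaults above, the derived STFT attributes set up in `__init__` work out as follows (a small sketch, assuming the default constructor):

```python
from transformers import UnivNetFeatureExtractor

feature_extractor = UnivNetFeatureExtractor()

print(feature_extractor.n_fft, feature_extractor.n_freqs)  # 1024 513 (filter_length, filter_length // 2 + 1)
print(feature_extractor.window.shape)                      # (1024,)  one Hann sample per win_length
print(feature_extractor.mel_filters.shape)                 # (513, 100) -> (n_freqs, num_mel_bins)
print(feature_extractor.num_max_samples)                   # 240000 -> max_length_s * sampling_rate
```
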
+ + Returns: + `numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`. + """ + # Do custom padding based on the official MelGAN and Hifi-GAN implementations + # See https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/utils/stft.py#L84-L86 + waveform = np.pad( + waveform, + (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)), + mode="reflect", + ) + + # Get the complex spectrogram. + # Note: waveform must be unbatched currently due to the implementation of spectrogram(...). + complex_spectrogram = spectrogram( + waveform, + window=self.window, + frame_length=self.n_fft, + hop_length=self.hop_length, + fft_length=self.n_fft, + power=None, + center=self.center, + mel_filters=None, + mel_floor=None, + ) + + # Apply the MEL filter bank and MEL floor manually since UnivNet uses a slightly different implementation + amplitude_spectrogram = np.sqrt( + np.real(complex_spectrogram) ** 2 + np.imag(complex_spectrogram) ** 2 + self.mel_floor + ) + mel_spectrogram = np.matmul(self.mel_filters.T, amplitude_spectrogram) + + # Perform spectral normalization to get the log mel spectrogram. + log_mel_spectrogram = np.log( + np.clip(mel_spectrogram, a_min=self.compression_clip_val, a_max=None) * self.compression_factor + ) + + # Return spectrogram with num_mel_bins last + return log_mel_spectrogram.T + + def generate_noise( + self, + noise_length: int, + generator: Optional[np.random.Generator] = None, + ) -> np.ndarray: + """ + Generates a random noise sequence of standard Gaussian noise for use in the `noise_sequence` argument of + [`UnivNetModel.forward`]. + + Args: + spectrogram_length (`int`): + The length (dim 0) of the generated noise. + model_in_channels (`int`, *optional*, defaults to `None`): + The number of features (dim 1) of the generated noise. This should correspond to the + `model_in_channels` of the [`UnivNetGan`] model. If not set, this will default to + `self.config.model_in_channels`. + generator (`numpy.random.Generator`, *optional*, defaults to `None`) + An optional `numpy.random.Generator` random number generator to control noise generation. If not set, a + new generator with fresh entropy will be created. + + Returns: + `numpy.ndarray`: Array containing random standard Gaussian noise of shape `(noise_length, + model_in_channels)`. + """ + if generator is None: + generator = np.random.default_rng() + + noise_shape = (noise_length, self.model_in_channels) + noise = generator.standard_normal(noise_shape, dtype=np.float32) + + return noise + + def batch_decode(self, waveforms, waveform_lengths=None) -> List[np.ndarray]: + r""" + Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D + audio waveform arrays and not a single tensor/array because in general the waveforms will have different + lengths after removing padding. + + Args: + waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + The batched output waveforms from the [`UnivNetModel`]. + waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*): + The batched lengths of each waveform before padding. + + Returns: + `List[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed. 
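
As the implementation above shows, `generate_noise` simply draws standard-normal samples of shape `(noise_length, model_in_channels)`; passing a seeded `numpy.random.Generator` makes the noise, and therefore the vocoded waveform, reproducible. A short sketch using the default feature extractor:

```python
import numpy as np

from transformers import UnivNetFeatureExtractor

feature_extractor = UnivNetFeatureExtractor()

generator = np.random.default_rng(seed=0)
noise = feature_extractor.generate_noise(noise_length=100, generator=generator)

print(noise.shape)  # (100, 64) with the default model_in_channels=64
print(noise.dtype)  # float32
```
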
+ """ + # Collapse the batched waveform tensor to a list of 1D audio waveforms + waveforms = [waveform.detach().clone().cpu().numpy() for waveform in waveforms] + + if waveform_lengths is not None: + waveforms = [waveform[: waveform_lengths[i]] for i, waveform in enumerate(waveforms)] + + return waveforms + + def __call__( + self, + raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], + sampling_rate: Optional[int] = None, + padding: Union[bool, str, PaddingStrategy] = True, + max_length: Optional[int] = None, + truncation: bool = True, + pad_to_multiple_of: Optional[int] = None, + return_noise: bool = True, + generator: Optional[np.random.Generator] = None, + pad_end: bool = False, + pad_length: Optional[int] = None, + do_normalize: Optional[str] = None, + return_attention_mask: Optional[bool] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + ) -> BatchFeature: + """ + Main method to featurize and prepare for the model one or several sequence(s). + + Args: + raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): + The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float + values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not + stereo, i.e. single float per timestep. + sampling_rate (`int`, *optional*): + The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass + `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition + pipeline. + padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): + Select a strategy to pad the input `raw_speech` waveforms (according to the model's padding side and + padding index) among: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence if provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). + + If `pad_end = True`, that padding will occur before the `padding` strategy is applied. + max_length (`int`, *optional*): + Maximum length of the returned list and optionally padding length (see above). + truncation (`bool`, *optional*, defaults to `True`): + Activates truncation to cut input sequences longer than `max_length` to `max_length`. + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. + return_noise (`bool`, *optional*, defaults to `True`): + Whether to generate and return a noise waveform for use in [`UnivNetModel.forward`]. + generator (`numpy.random.Generator`, *optional*, defaults to `None`): + An optional `numpy.random.Generator` random number generator to use when generating noise. + pad_end (`bool`, *optional*, defaults to `False`): + Whether to pad the end of each waveform with silence. This can help reduce artifacts at the end of the + generated audio sample; see https://github.com/seungwonpark/melgan/issues/8 for more details. 
This + padding will be done before the padding strategy specified in `padding` is performed. + pad_length (`int`, *optional*, defaults to `None`): + If padding the end of each waveform, the length of the padding in spectrogram frames. If not set, this + will default to `self.config.pad_end_length`. + do_normalize (`bool`, *optional*): + Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve + the performance for some models. If not set, this will default to `self.config.do_normalize`. + return_attention_mask (`bool`, *optional*): + Whether to return the attention mask. If left to the default, will return the attention mask according + to the specific feature_extractor's default. + + [What are attention masks?](../glossary#attention-mask) + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.np.array` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + """ + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + + if sampling_rate is not None: + if sampling_rate != self.sampling_rate: + raise ValueError( + f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" + f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" + f" was sampled with {self.sampling_rate} and not {sampling_rate}." + ) + else: + logger.warning( + "It is strongly recommended to pass the `sampling_rate` argument to this function. " + "Failing to do so can result in silent errors that might be hard to debug." + ) + + is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 + if is_batched_numpy and len(raw_speech.shape) > 2: + raise ValueError(f"Only mono-channel audio is supported for input to {self}") + is_batched = is_batched_numpy or ( + isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) + ) + + if is_batched: + raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] + elif not is_batched and not isinstance(raw_speech, np.ndarray): + raw_speech = np.asarray(raw_speech, dtype=np.float32) + elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): + raw_speech = raw_speech.astype(np.float32) + + # always return batch + if not is_batched: + raw_speech = [np.asarray(raw_speech, dtype=np.float32)] + + # Pad end to reduce artifacts + if pad_end: + pad_length = pad_length if pad_length is not None else self.pad_end_length + raw_speech = [ + np.pad(waveform, (0, pad_length * self.hop_length), constant_values=self.padding_value) + for waveform in raw_speech + ] + + batched_speech = BatchFeature({"input_features": raw_speech}) + + padded_inputs = self.pad( + batched_speech, + padding=padding, + max_length=max_length if max_length is not None else self.num_max_samples, + truncation=truncation, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + # make sure list is in array format + # input_features = padded_inputs.get("input_features").transpose(2, 0, 1) + input_features = padded_inputs.get("input_features") + + mel_spectrograms = [self.mel_spectrogram(waveform) for waveform in input_features] + + if isinstance(input_features[0], List): + batched_speech["input_features"] = [np.asarray(mel, dtype=np.float32) for mel in 
mel_spectrograms] + else: + batched_speech["input_features"] = [mel.astype(np.float32) for mel in mel_spectrograms] + + # convert attention_mask to correct format + attention_mask = padded_inputs.get("attention_mask") + if attention_mask is not None: + batched_speech["padding_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] + + if return_noise: + noise = [ + self.generate_noise(spectrogram.shape[0], generator) + for spectrogram in batched_speech["input_features"] + ] + batched_speech["noise_sequence"] = noise + + if do_normalize: + batched_speech["input_features"] = [ + self.normalize(spectrogram) for spectrogram in batched_speech["input_features"] + ] + + if return_tensors is not None: + batched_speech = batched_speech.convert_to_tensors(return_tensors) + + return batched_speech + + def to_dict(self) -> Dict[str, Any]: + output = super().to_dict() + + # Don't serialize these as they are derived from the other properties. + names = ["window", "mel_filters", "n_fft", "n_freqs", "num_max_samples"] + for name in names: + if name in output: + del output[name] + + return output diff --git a/src/transformers/models/univnet/modeling_univnet.py b/src/transformers/models/univnet/modeling_univnet.py new file mode 100644 index 000000000000..dc9beddec525 --- /dev/null +++ b/src/transformers/models/univnet/modeling_univnet.py @@ -0,0 +1,636 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch UnivNetModel model.""" + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...modeling_utils import ModelOutput, PreTrainedModel +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_univnet import UnivNetConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "UnivNetConfig" + +_CHECKPOINT_FOR_DOC = "dg845/univnet-dev" + +UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "dg845/univnet-dev", + # See all UnivNet models at https://huggingface.co/models?filter=univnet +] + + +@dataclass +class UnivNetModelOutput(ModelOutput): + """ + Output class for the [`UnivNetModel`], which includes the generated audio waveforms and the original unpadded + lengths of those waveforms (so that the padding can be removed by [`UnivNetModel.batch_decode`]). + + Args: + waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Batched 1D (mono-channel) output audio waveforms. + waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`): + The batched length in samples of each unpadded waveform in `waveforms`. + """ + + waveforms: torch.FloatTensor = None + waveform_lengths: torch.FloatTensor = None + + +class UnivNetKernelPredictorResidualBlock(nn.Module): + """ + Implementation of the residual block for the kernel predictor network inside each location variable convolution + block (LVCBlock). 
+ + Parameters: + config: (`UnivNetConfig`): + Config for the `UnivNetModel` model. + """ + + def __init__( + self, + config: UnivNetConfig, + ): + super().__init__() + self.channels = config.model_in_channels + self.kernel_size = config.kernel_predictor_conv_size + self.dropout_prob = config.kernel_predictor_dropout + self.leaky_relu_slope = config.leaky_relu_slope + + padding = (self.kernel_size - 1) // 2 + + self.dropout = nn.Dropout(self.dropout_prob) + self.conv1 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True) + self.conv2 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True) + + def forward(self, hidden_states: torch.FloatTensor): + # hidden_states should have shape (batch_size, channels, seq_length) + residual = hidden_states + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv1(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.conv2(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + return hidden_states + residual + + def apply_weight_norm(self): + nn.utils.weight_norm(self.conv1) + nn.utils.weight_norm(self.conv2) + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.conv1) + nn.utils.remove_weight_norm(self.conv2) + + +class UnivNetKernelPredictor(nn.Module): + """ + Implementation of the kernel predictor network which supplies the kernel and bias for the location variable + convolutional layers (LVCs) in each UnivNet LVCBlock. + + Based on the KernelPredictor implementation in + [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7). + + Parameters: + config: (`UnivNetConfig`): + Config for the `UnivNetModel` model. + conv_kernel_size (`int`, *optional*, defaults to 3): + The kernel size for the location variable convolutional layer kernels (convolutional weight tensor). + conv_layers (`int`, *optional*, defaults to 4): + The number of location variable convolutional layers to output kernels and biases for. 
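
The `apply_weight_norm`/`remove_weight_norm` helpers defined on these modules exist because the original checkpoint stores the weight-normalized parametrization (`weight_g`/`weight_v`) rather than a plain `weight`; that is why the conversion script applies weight norm before `load_state_dict` and removes it again for inference. A minimal illustration of the reparametrization on a bare convolution:

```python
from torch import nn

conv = nn.Conv1d(4, 4, kernel_size=3)
print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight']

nn.utils.weight_norm(conv)
print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight_g', 'weight_v']

nn.utils.remove_weight_norm(conv)  # folds weight_g / weight_v back into a single weight
print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight']
```
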
+ """ + + def __init__( + self, + config: UnivNetConfig, + conv_kernel_size: int = 3, + conv_layers: int = 4, + ): + super().__init__() + + self.conv_in_channels = config.model_hidden_channels + self.conv_out_channels = 2 * config.model_hidden_channels + self.conv_kernel_size = conv_kernel_size + self.conv_layers = conv_layers + + self.kernel_channels = ( + self.conv_in_channels * self.conv_out_channels * self.conv_kernel_size * self.conv_layers + ) + self.bias_channels = self.conv_out_channels * self.conv_layers + + self.resnet_in_channels = config.num_mel_bins + self.resnet_hidden_channels = config.kernel_predictor_hidden_channels + self.resnet_kernel_size = config.kernel_predictor_conv_size + self.num_blocks = config.kernel_predictor_num_blocks + + self.leaky_relu_slope = config.leaky_relu_slope + + padding = (self.resnet_kernel_size - 1) // 2 + + self.input_conv = nn.Conv1d(self.resnet_in_channels, self.resnet_hidden_channels, 5, padding=2, bias=True) + + self.resblocks = nn.ModuleList([UnivNetKernelPredictorResidualBlock(config) for _ in range(self.num_blocks)]) + + self.kernel_conv = nn.Conv1d( + self.resnet_hidden_channels, self.kernel_channels, self.resnet_kernel_size, padding=padding, bias=True + ) + self.bias_conv = nn.Conv1d( + self.resnet_hidden_channels, self.bias_channels, self.resnet_kernel_size, padding=padding, bias=True + ) + + def forward(self, spectrogram: torch.FloatTensor): + """ + Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location + variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels, + seq_length). + + Args: + spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`): + Tensor containing the log-mel spectrograms. + + Returns: + Tuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of + location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels, + self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of + location variable convolution biases of shape `(batch_size, self.conv_layers. self.conv_out_channels, + seq_length)`. 
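
Concretely, with the default config and the `conv_kernel_size=3`, `conv_layers=4` that each LVC block requests, the predictor turns a `(batch, num_mel_bins, length)` spectrogram into per-frame kernels and biases with the shapes described above. A quick shape check, sketched by instantiating the module straight from the modeling file:

```python
import torch

from transformers import UnivNetConfig
from transformers.models.univnet.modeling_univnet import UnivNetKernelPredictor

config = UnivNetConfig()
predictor = UnivNetKernelPredictor(config, conv_kernel_size=3, conv_layers=4)

spectrogram = torch.randn(1, config.num_mel_bins, 10)  # (batch, num_mel_bins, seq_length)
kernels, biases = predictor(spectrogram)

print(kernels.shape)  # torch.Size([1, 4, 32, 64, 3, 10]) -> (batch, layers, in, out, kernel, frames)
print(biases.shape)   # torch.Size([1, 4, 64, 10])        -> (batch, layers, out, frames)
```
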
+ """ + batch_size, _, seq_length = spectrogram.shape + + hidden_states = self.input_conv(spectrogram) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + + for resblock in self.resblocks: + hidden_states = resblock(hidden_states) + + kernel_hidden_states = self.kernel_conv(hidden_states) + bias_hidden_states = self.bias_conv(hidden_states) + + # Reshape kernels and biases to appropriate shape + kernels = kernel_hidden_states.view( + batch_size, + self.conv_layers, + self.conv_in_channels, + self.conv_out_channels, + self.conv_kernel_size, + seq_length, + ).contiguous() + biases = bias_hidden_states.view( + batch_size, + self.conv_layers, + self.conv_out_channels, + seq_length, + ).contiguous() + + return kernels, biases + + def apply_weight_norm(self): + nn.utils.weight_norm(self.input_conv) + for layer in self.resblocks: + layer.apply_weight_norm() + nn.utils.weight_norm(self.kernel_conv) + nn.utils.weight_norm(self.bias_conv) + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.input_conv) + for layer in self.resblocks: + layer.remove_weight_norm() + nn.utils.remove_weight_norm(self.kernel_conv) + nn.utils.remove_weight_norm(self.bias_conv) + + +class UnivNetLvcResidualBlock(nn.Module): + """ + Implementation of the location variable convolution (LVC) residual block for the UnivNet residual network. + + Parameters: + config: (`UnivNetConfig`): + Config for the `UnivNetModel` model. + kernel_size (`int`): + The kernel size for the dilated 1D convolutional layer. + dilation (`int`): + The dilation for the dilated 1D convolutional layer. + """ + + def __init__( + self, + config: UnivNetConfig, + kernel_size: int, + dilation: int, + ): + super().__init__() + self.hidden_channels = config.model_hidden_channels + self.kernel_size = kernel_size + self.dilation = dilation + self.leaky_relu_slope = config.leaky_relu_slope + + padding = self.dilation * (self.kernel_size - 1) // 2 + + self.conv = nn.Conv1d( + self.hidden_channels, + self.hidden_channels, + self.kernel_size, + padding=padding, + dilation=self.dilation, + ) + + def forward(self, hidden_states, kernel, bias, hop_size=256): + residual = hidden_states + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.conv(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.location_variable_convolution(hidden_states, kernel, bias, hop_size=hop_size) + # Gated activation unit + hidden_states = torch.sigmoid(hidden_states[:, : self.hidden_channels, :]) * torch.tanh( + hidden_states[:, self.hidden_channels :, :] + ) + # Skip connection + hidden_states = residual + hidden_states + + return hidden_states + + # Based on https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L171 + def location_variable_convolution( + self, + hidden_states: torch.FloatTensor, + kernel: torch.FloatTensor, + bias: torch.FloatTensor, + dilation: int = 1, + hop_size: int = 256, + ): + """ + Performs location-variable convolution operation on the input sequence (hidden_states) using the local + convolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform + Generation](https://arxiv.org/abs/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao. + + Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100. 
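
The shape bookkeeping of `location_variable_convolution` is easiest to see on toy dimensions: the hidden states must be exactly `kernel_length * hop_size` samples long (one predicted kernel per hop), and the output has `2 * hidden_channels` channels feeding the gated activation. A sketch with small, made-up sizes:

```python
import torch

from transformers import UnivNetConfig
from transformers.models.univnet.modeling_univnet import UnivNetLvcResidualBlock

config = UnivNetConfig()  # model_hidden_channels = 32 by default
block = UnivNetLvcResidualBlock(config, kernel_size=3, dilation=1)

hop_size, kernel_length = 8, 4  # toy values; the real hop sizes are 8, 64, 256
in_channels = config.model_hidden_channels
hidden = torch.randn(1, in_channels, kernel_length * hop_size)
kernel = torch.randn(1, in_channels, 2 * in_channels, 3, kernel_length)
bias = torch.randn(1, 2 * in_channels, kernel_length)

out = block.location_variable_convolution(hidden, kernel, bias, dilation=1, hop_size=hop_size)
print(out.shape)  # torch.Size([1, 64, 32]) -> (batch, 2 * hidden_channels, kernel_length * hop_size)
```
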
+ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`): + The input sequence of shape (batch, in_channels, in_length). + kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`): + The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length). + bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`): + The bias for the local convolution of shape (batch, out_channels, kernel_length). + dilation (`int`, *optional*, defaults to 1): + The dilation of convolution. + hop_size (`int`, *optional*, defaults to 256): + The hop_size of the conditioning sequence. + Returns: + `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size, + out_channels, in_length). + """ + batch, _, in_length = hidden_states.shape + batch, _, out_channels, kernel_size, kernel_length = kernel.shape + if in_length != (kernel_length * hop_size): + raise ValueError( + f"Dim 2 of `hidden_states` should be {kernel_length * hop_size}) but got {in_length}. Please check" + " `hidden_states` or `kernel` and `hop_size` to make sure they are correct." + ) + + padding = dilation * int((kernel_size - 1) / 2) + + # (batch, in_channels, in_length + 2*padding) + hidden_states = nn.functional.pad(hidden_states, (padding, padding), "constant", 0) + # (batch, in_channels, kernel_length, hop_size + 2*padding) + hidden_states = hidden_states.unfold(2, hop_size + 2 * padding, hop_size) + + if hop_size < dilation: + hidden_states = nn.functional.pad(hidden_states, (0, dilation), "constant", 0) + # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation) + hidden_states = hidden_states.unfold(3, dilation, dilation) + hidden_states = hidden_states[:, :, :, :, :hop_size] + # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation) + hidden_states = hidden_states.transpose(3, 4) + # (batch, in_channels, kernel_length, dilation, _, kernel_size) + hidden_states = hidden_states.unfold(4, kernel_size, 1) + + # Apply local convolution kernel to hidden_states. + output_hidden_states = torch.einsum("bildsk,biokl->bolsd", hidden_states, kernel) + + output_hidden_states = output_hidden_states.to(memory_format=torch.channels_last_3d) + bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d) + output_hidden_states = output_hidden_states + bias + output_hidden_states = output_hidden_states.contiguous().view(batch, out_channels, -1) + + return output_hidden_states + + def apply_weight_norm(self): + nn.utils.weight_norm(self.conv) + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.conv) + + +class UnivNetLvcBlock(nn.Module): + """ + Implementation of the location variable convolution (LVC) residual block of the UnivNet residual block. Includes a + `UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers. + + Based on LVCBlock in + [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98) + + Parameters: + config (`UnivNetConfig`): + Config for the `UnivNetModel` model. + layer_id (`int`): + An integer corresponding to the index of the current LVC resnet block layer. This should be between 0 and + `len(config.resblock_stride_sizes) - 1)` inclusive. + lvc_hop_size (`int`, *optional*, defaults to 256): + The hop size for the location variable convolutional layers. 
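
The `lvc_hop_size` passed to each block is the cumulative product of the strides up to and including that block, as computed in `UnivNetModel.__init__` further below; after a block's `convt_pre` upsamples by its stride, its hidden states again satisfy the `kernel_length * hop_size` constraint of the LVC layers. A sketch of that bookkeeping with the default strides:

```python
resblock_stride_sizes = [8, 8, 4]  # UnivNetConfig defaults

hop_length = 1
hop_lengths = []
for stride in resblock_stride_sizes:
    hop_length *= stride
    hop_lengths.append(hop_length)

print(hop_lengths)  # [8, 64, 256] -> lvc_hop_size for LVC blocks 0, 1 and 2
```
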
+ """ + + def __init__( + self, + config: UnivNetConfig, + layer_id: int, + lvc_hop_size: int = 256, + ): + super().__init__() + self.hidden_channels = config.model_hidden_channels + self.kernel_size = config.resblock_kernel_sizes[layer_id] + self.stride = config.resblock_stride_sizes[layer_id] + self.dilations = config.resblock_dilation_sizes[layer_id] + self.cond_hop_length = lvc_hop_size + self.leaky_relu_slope = config.leaky_relu_slope + self.num_blocks = len(self.dilations) + + self.convt_pre = nn.ConvTranspose1d( + self.hidden_channels, + self.hidden_channels, + 2 * self.stride, + stride=self.stride, + padding=self.stride // 2 + self.stride % 2, + output_padding=self.stride % 2, + ) + + self.kernel_predictor = UnivNetKernelPredictor(config, self.kernel_size, self.num_blocks) + + self.resblocks = nn.ModuleList( + [UnivNetLvcResidualBlock(config, self.kernel_size, self.dilations[i]) for i in range(self.num_blocks)] + ) + + def forward(self, hidden_states: torch.FloatTensor, spectrogram: torch.FloatTensor): + # hidden_states: (batch_size, hidden_channels, seq_length) + # spectrogram: (batch_size, cond_channels, cond_length) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.convt_pre(hidden_states) + + kernels, biases = self.kernel_predictor(spectrogram) + + for i, resblock in enumerate(self.resblocks): + kernel = kernels[:, i, :, :, :, :] + bias = biases[:, i, :, :] + hidden_states = resblock(hidden_states, kernel, bias, hop_size=self.cond_hop_length) + + return hidden_states + + def apply_weight_norm(self): + nn.utils.weight_norm(self.convt_pre) + self.kernel_predictor.apply_weight_norm() + for layer in self.resblocks: + layer.apply_weight_norm() + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.convt_pre) + self.kernel_predictor.remove_weight_norm() + for layer in self.resblocks: + layer.remove_weight_norm() + + +UNIVNET_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`UnivNetConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +UNIVNET_INPUTS_DOCSTRING = r""" + Converts a noise waveform and a conditioning spectrogram to a speech waveform. Passing a batch of log-mel + spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a + single, un-batched speech waveform. + + Args: + input_features (`torch.FloatTensor`): + Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length, + config.num_mel_channels)`, or un-batched and of shape `(sequence_length, config.num_mel_channels)`. + noise_sequence (`torch.FloatTensor`, *optional*): + Tensor containing a noise sequence of standard Gaussian noise. 
Can be batched and of shape `(batch_size, + sequence_length, config.model_in_channels)`, or un-batched and of shape (sequence_length, + config.model_in_channels)`. If not supplied, will be randomly generated. + padding_mask (`torch.BoolTensor`, *optional*): + Mask indicating which parts of each sequence are padded. Mask values are selected in `[0, 1]`: + + - 1 for tokens that are **not masked** + - 0 for tokens that are **masked** + + The mask can be batched and of shape `(batch_size, sequence_length)` or un-batched and of shape + `(sequence_length,)`. + generator (`torch.Generator`, *optional*): + A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation + deterministic. + return_dict: + Whether to return a [`~utils.ModelOutput`] subclass instead of a plain tuple. +""" + + +@add_start_docstrings( + """UnivNet GAN vocoder.""", + UNIVNET_START_DOCSTRING, +) +class UnivNetModel(PreTrainedModel): + config_class = UnivNetConfig + main_input_name = "input_features" + + def __init__(self, config: UnivNetConfig): + super().__init__(config) + + self.num_kernels = len(config.resblock_kernel_sizes) + self.leaky_relu_slope = config.leaky_relu_slope + + self.conv_pre = nn.Conv1d( + config.model_in_channels, + config.model_hidden_channels, + kernel_size=7, + stride=1, + padding=3, + padding_mode="reflect", + ) + + # Initialize location-variable convolution ResNet Blocks. + num_layers = len(config.resblock_stride_sizes) + hop_length = 1 + hop_lengths = [] + for stride in config.resblock_stride_sizes: + hop_length = hop_length * stride + hop_lengths.append(hop_length) + + self.resblocks = nn.ModuleList( + [ + UnivNetLvcBlock( + config, + layer_id=i, + lvc_hop_size=hop_lengths[i], + ) + for i in range(num_layers) + ] + ) + + self.conv_post = nn.Conv1d(config.model_hidden_channels, 1, 7, padding=3, padding_mode="reflect") + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(UNIVNET_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=UnivNetModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_features: torch.FloatTensor, + noise_sequence: Optional[torch.FloatTensor] = None, + padding_mask: Optional[torch.FloatTensor] = None, + generator: Optional[torch.Generator] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], UnivNetModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import UnivNetFeatureExtractor, UnivNetModel + >>> from datasets import load_dataset, Audio + + >>> model = UnivNetModel.from_pretrained("dg845/univnet-dev") + >>> feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev") + + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> # Resample the audio to the feature extractor's sampling rate. + >>> ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate)) + >>> inputs = feature_extractor( + ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" + ... 
) + >>> audio = model(**inputs).waveforms + >>> list(audio.shape) + [1, 140288] + ``` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # Resolve batch sizes for noise_sequence and spectrogram + spectrogram_batched = input_features.dim() == 3 + if not spectrogram_batched: + input_features = input_features.unsqueeze(0) + spectrogram_batch_size, spectrogram_length, _ = input_features.shape + + if noise_sequence is not None: + noise_sequence_batched = noise_sequence.dim() == 3 + if not noise_sequence_batched: + noise_sequence = noise_sequence.unsqueeze(0) + else: + # Randomly generate noise_sequence + noise_sequence_shape = (spectrogram_batch_size, spectrogram_length, self.config.model_in_channels) + noise_sequence = torch.randn( + noise_sequence_shape, generator=generator, dtype=input_features.dtype, device=input_features.device + ) + noise_sequence_batch_size = noise_sequence.shape[0] + + if spectrogram_batch_size > 1 and noise_sequence_batch_size == 1: + # Repeat noise_sequence spectrogram_batch_size times + noise_sequence = noise_sequence.repeat(spectrogram_batch_size, 1, 1) + elif noise_sequence_batch_size > 1 and spectrogram_batch_size == 1: + # Repeat spectrogram noise_sequence_batch_size times + input_features = input_features.repeat(noise_sequence_batch_size, 1, 1) + + if noise_sequence_batch_size != spectrogram_batch_size: + raise ValueError( + f"The batch size of `noise_sequence` is {noise_sequence_batch_size} and the batch size of" + f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal." + ) + + if padding_mask is not None: + if padding_mask.dim() == 1: + padding_mask = padding_mask.unsqueeze(0) + padding_mask_batch_size = padding_mask.shape[0] + if padding_mask_batch_size != spectrogram_batch_size: + raise ValueError( + f"The batch size of `padding_mask` is {padding_mask_batch_size} and the batch size of" + f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal." + ) + + # Change shapes to have channels before sequence lengths + hidden_states = noise_sequence.transpose(2, 1) + input_features = input_features.transpose(2, 1) + + hidden_states = self.conv_pre(hidden_states) + + for resblock in self.resblocks: + hidden_states = resblock(hidden_states, input_features) + + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.conv_post(hidden_states) + hidden_states = torch.tanh(hidden_states) + + # Remove sequence length dimension since this collapses to 1 + # NOTE: keep waveforms batched even if there's only one + waveform = hidden_states.squeeze(1) + + # Get sequence lengths for UnivNetFeatureExtractor.batch_decode. 
+ waveform_lengths = None + if padding_mask is not None: + # Padding is always contiguous and added on the right + waveform_lengths = torch.sum(padding_mask, dim=1) + + if not return_dict: + outputs = (waveform, waveform_lengths) + return outputs + + return UnivNetModelOutput( + waveforms=waveform, + waveform_lengths=waveform_lengths, + ) + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + + def apply_weight_norm(self): + nn.utils.weight_norm(self.conv_pre) + for layer in self.resblocks: + layer.apply_weight_norm() + nn.utils.weight_norm(self.conv_post) + + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.conv_pre) + for layer in self.resblocks: + layer.remove_weight_norm() + nn.utils.remove_weight_norm(self.conv_post) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 2de58f74595d..278a97592c77 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -7985,6 +7985,16 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UnivNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class UperNetForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] diff --git a/tests/models/univnet/__init__.py b/tests/models/univnet/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/univnet/test_feature_extraction_univnet.py b/tests/models/univnet/test_feature_extraction_univnet.py new file mode 100644 index 000000000000..dfa335d15383 --- /dev/null +++ b/tests/models/univnet/test_feature_extraction_univnet.py @@ -0,0 +1,365 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
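
Before the tests, it is worth sketching the batched end-to-end flow that ties the pieces above together: the feature extractor's `padding_mask` lets the model report `waveform_lengths`, which `UnivNetFeatureExtractor.batch_decode` then uses to strip the padded samples again. A sketch, assuming the `dg845/univnet-dev` checkpoint and 24 kHz mono inputs (random samples here, just to exercise the shapes):

```python
import numpy as np
import torch

from transformers import UnivNetFeatureExtractor, UnivNetModel

feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev")
model = UnivNetModel.from_pretrained("dg845/univnet-dev")

# Two mono waveforms of different lengths.
audio = [np.random.randn(24000).astype(np.float32), np.random.randn(16000).astype(np.float32)]
inputs = feature_extractor(audio, sampling_rate=24000, pad_end=True, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# padding_mask -> waveform_lengths -> batch_decode strips the padded samples again.
waveforms = feature_extractor.batch_decode(outputs.waveforms, outputs.waveform_lengths)
print([waveform.shape for waveform in waveforms])
```
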
+ +import itertools +import os +import random +import tempfile +import unittest + +import numpy as np +from datasets import Audio, load_dataset + +from transformers import UnivNetFeatureExtractor +from transformers.testing_utils import check_json_file_has_correct_format, require_torch, slow +from transformers.utils.import_utils import is_torch_available + +from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin + + +if is_torch_available(): + import torch + + +global_rng = random.Random() + + +# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list +def floats_list(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + values = [] + for batch_idx in range(shape[0]): + values.append([]) + for _ in range(shape[1]): + values[-1].append(rng.random() * scale) + + return values + + +class UnivNetFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + min_seq_length=400, + max_seq_length=2000, + feature_size=1, + sampling_rate=24000, + padding_value=0.0, + do_normalize=True, + num_mel_bins=100, + hop_length=256, + win_length=1024, + win_function="hann_window", + filter_length=1024, + max_length_s=10, + fmin=0.0, + fmax=12000, + mel_floor=1e-9, + center=False, + compression_factor=1.0, + compression_clip_val=1e-5, + normalize_min=-11.512925148010254, + normalize_max=2.3143386840820312, + model_in_channels=64, + pad_end_length=10, + ): + self.parent = parent + self.batch_size = batch_size + self.min_seq_length = min_seq_length + self.max_seq_length = max_seq_length + self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) + + self.feature_size = feature_size + self.sampling_rate = sampling_rate + self.padding_value = padding_value + self.do_normalize = do_normalize + self.num_mel_bins = num_mel_bins + self.hop_length = hop_length + self.win_length = win_length + self.win_function = win_function + self.filter_length = filter_length + self.max_length_s = max_length_s + self.fmin = fmin + self.fmax = fmax + self.mel_floor = mel_floor + self.center = center + self.compression_factor = compression_factor + self.compression_clip_val = compression_clip_val + self.normalize_min = normalize_min + self.normalize_max = normalize_max + self.model_in_channels = model_in_channels + self.pad_end_length = pad_end_length + + def prepare_feat_extract_dict(self): + return { + "feature_size": self.feature_size, + "sampling_rate": self.sampling_rate, + "padding_value": self.padding_value, + "do_normalize": self.do_normalize, + "num_mel_bins": self.num_mel_bins, + "hop_length": self.hop_length, + "win_length": self.win_length, + "win_function": self.win_function, + "filter_length": self.filter_length, + "max_length_s": self.max_length_s, + "fmin": self.fmin, + "fmax": self.fmax, + "mel_floor": self.mel_floor, + "center": self.center, + "compression_factor": self.compression_factor, + "compression_clip_val": self.compression_clip_val, + "normalize_min": self.normalize_min, + "normalize_max": self.normalize_max, + "model_in_channels": self.model_in_channels, + "pad_end_length": self.pad_end_length, + } + + def prepare_inputs_for_common(self, equal_length=False, numpify=False): + def _flatten(list_of_lists): + return list(itertools.chain(*list_of_lists)) + + if equal_length: + speech_inputs = floats_list((self.batch_size, self.max_seq_length)) + else: + # make sure that inputs increase in size + speech_inputs = [ + 
_flatten(floats_list((x, self.feature_size))) + for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) + ] + + if numpify: + speech_inputs = [np.asarray(x) for x in speech_inputs] + + return speech_inputs + + +class UnivNetFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): + feature_extraction_class = UnivNetFeatureExtractor + + def setUp(self): + self.feat_extract_tester = UnivNetFeatureExtractionTester(self) + + # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_from_and_save_pretrained + def test_feat_extract_from_and_save_pretrained(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] + check_json_file_has_correct_format(saved_file) + feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + mel_1 = feat_extract_first.mel_filters + mel_2 = feat_extract_second.mel_filters + self.assertTrue(np.allclose(mel_1, mel_2)) + self.assertEqual(dict_first, dict_second) + + # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_to_json_file + def test_feat_extract_to_json_file(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + json_file_path = os.path.join(tmpdirname, "feat_extract.json") + feat_extract_first.to_json_file(json_file_path) + feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + mel_1 = feat_extract_first.mel_filters + mel_2 = feat_extract_second.mel_filters + self.assertTrue(np.allclose(mel_1, mel_2)) + self.assertEqual(dict_first, dict_second) + + def test_call(self): + # Tests that all call wrap to encode_plus and batch_encode_plus + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + # create three inputs of length 800, 1000, and 1200 + speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] + np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] + + # Test feature size + input_features = feature_extractor( + np_speech_inputs, padding="max_length", max_length=1600, return_tensors="np" + ).input_features + self.assertTrue(input_features.ndim == 3) + # Note: for some reason I get a weird padding error when feature_size > 1 + # self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) + # Note: we use the shape convention (batch_size, seq_len, num_mel_bins) + self.assertTrue(input_features.shape[-1] == feature_extractor.num_mel_bins) + + # Test not batched input + encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features + self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) + + # Test batched + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + 
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test 2-D numpy arrays are batched. + speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] + np_speech_inputs = np.asarray(speech_inputs) + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test truncation required + speech_inputs = [ + floats_list((1, x))[0] + for x in range((feature_extractor.num_max_samples - 100), (feature_extractor.num_max_samples + 500), 200) + ] + np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] + + speech_inputs_truncated = [x[: feature_extractor.num_max_samples] for x in speech_inputs] + np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] + + encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + def test_batched_unbatched_consistency(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + speech_inputs = floats_list((1, 800))[0] + np_speech_inputs = np.asarray(speech_inputs) + + # Test unbatched vs batched list + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor([speech_inputs], return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test np.ndarray vs List[np.ndarray] + encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor([np_speech_inputs], return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test unbatched np.ndarray vs batched np.ndarray + encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor( + np.expand_dims(np_speech_inputs, axis=0), return_tensors="np" + ).input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + def test_generate_noise(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] + + features = feature_extractor(speech_inputs, return_noise=True) + input_features = features.input_features + noise_features = features.noise_sequence + + for spectrogram, noise in zip(input_features, noise_features): + self.assertEqual(spectrogram.shape[0], noise.shape[0]) + + def test_pad_end(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] + + input_features1 = feature_extractor(speech_inputs, padding=False, pad_end=False).input_features + input_features2 = feature_extractor(speech_inputs, padding=False, 
pad_end=True).input_features + + for spectrogram1, spectrogram2 in zip(input_features1, input_features2): + self.assertEqual(spectrogram1.shape[0] + self.feat_extract_tester.pad_end_length, spectrogram2.shape[0]) + + def test_generate_noise_and_pad_end(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] + + features = feature_extractor(speech_inputs, padding=False, return_noise=True, pad_end=True) + input_features = features.input_features + noise_features = features.noise_sequence + + for spectrogram, noise in zip(input_features, noise_features): + self.assertEqual(spectrogram.shape[0], noise.shape[0]) + + @require_torch + def test_batch_decode(self): + import torch + + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + input_lengths = list(range(800, 1400, 200)) + pad_samples = feature_extractor.pad_end_length * feature_extractor.hop_length + output_features = { + "waveforms": torch.tensor(floats_list((3, max(input_lengths) + pad_samples))), + "waveform_lengths": torch.tensor(input_lengths), + } + waveforms = feature_extractor.batch_decode(**output_features) + + for input_length, waveform in zip(input_lengths, waveforms): + self.assertTrue(len(waveform.shape) == 1, msg="Individual output waveforms should be 1D") + self.assertEqual(waveform.shape[0], input_length) + + @require_torch + # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad + def test_double_precision_pad(self): + import torch + + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + np_speech_inputs = np.random.rand(100, 32).astype(np.float64) + py_speech_inputs = np_speech_inputs.tolist() + + for inputs in [py_speech_inputs, np_speech_inputs]: + np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") + self.assertTrue(np_processed.input_features.dtype == np.float32) + pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") + self.assertTrue(pt_processed.input_features.dtype == torch.float32) + + def _load_datasamples(self, num_samples): + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + ds = ds.cast_column("audio", Audio(sampling_rate=self.feat_extract_tester.sampling_rate)) + # automatic decoding with librispeech + speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] + + return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] + + @slow + @require_torch + def test_integration(self): + # fmt: off + EXPECTED_INPUT_FEATURES = torch.tensor( + [ + -5.0229, -6.1358, -5.8346, -5.4447, -5.6707, -5.8577, -5.0464, -5.0058, + -5.6015, -5.6410, -5.4325, -5.6116, -5.3700, -5.7956, -5.3196, -5.3274, + -5.9655, -5.6057, -5.8382, -5.9602, -5.9005, -5.9123, -5.7669, -6.1441, + -5.5168, -5.1405, -5.3927, -6.0032, -5.5784, -5.3728 + ], + ) + # fmt: on + + input_speech, sr = self._load_datasamples(1) + + feature_extractor = UnivNetFeatureExtractor() + input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features + self.assertEqual(input_features.shape, (1, 548, 100)) + + input_features_mean = torch.mean(input_features) + input_features_stddev = torch.std(input_features) + + EXPECTED_MEAN = torch.tensor(-6.18862009) + EXPECTED_STDDEV = torch.tensor(2.80845642) + + 
torch.testing.assert_close(input_features_mean, EXPECTED_MEAN, atol=5e-5, rtol=5e-6) + torch.testing.assert_close(input_features_stddev, EXPECTED_STDDEV) + torch.testing.assert_close(input_features[0, :30, 0], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-5) diff --git a/tests/models/univnet/test_modeling_univnet.py b/tests/models/univnet/test_modeling_univnet.py new file mode 100644 index 000000000000..feec48121752 --- /dev/null +++ b/tests/models/univnet/test_modeling_univnet.py @@ -0,0 +1,365 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import random +import unittest + +from datasets import Audio, load_dataset + +from transformers import UnivNetConfig, UnivNetFeatureExtractor +from transformers.testing_utils import ( + is_torch_available, + require_torch, + require_torch_gpu, + slow, + torch_device, +) + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ( + ModelTesterMixin, + floats_tensor, +) + + +if is_torch_available(): + import torch + + from transformers import UnivNetModel + + +class UnivNetModelTester: + def __init__( + self, + parent, + batch_size=2, + seq_length=7, + in_channels=8, + hidden_channels=8, + num_mel_bins=20, + kernel_predictor_hidden_channels=8, + seed=0, + is_training=False, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.num_mel_bins = num_mel_bins + self.kernel_predictor_hidden_channels = kernel_predictor_hidden_channels + self.seed = seed + self.is_training = is_training + + def prepare_noise_sequence(self): + generator = torch.manual_seed(self.seed) + noise_shape = (self.seq_length, self.in_channels) + # Create noise on CPU for reproducibility + noise_sequence = torch.randn(noise_shape, generator=generator, dtype=torch.float) + return noise_sequence + + def prepare_config_and_inputs(self): + spectrogram = floats_tensor([self.seq_length, self.num_mel_bins], scale=1.0) + noise_sequence = self.prepare_noise_sequence() + noise_sequence = noise_sequence.to(spectrogram.device) + config = self.get_config() + return config, spectrogram, noise_sequence + + def get_config(self): + return UnivNetConfig( + model_in_channels=self.in_channels, + model_hidden_channels=self.hidden_channels, + num_mel_bins=self.num_mel_bins, + kernel_predictor_hidden_channels=self.kernel_predictor_hidden_channels, + ) + + def create_and_check_model(self, config, spectrogram, noise_sequence): + model = UnivNetModel(config=config).to(torch_device).eval() + result = model(spectrogram, noise_sequence)[0] + self.parent.assertEqual(result.shape, (1, self.seq_length * 256)) + + def prepare_config_and_inputs_for_common(self): + config, spectrogram, noise_sequence = self.prepare_config_and_inputs() + inputs_dict = {"input_features": spectrogram, "noise_sequence": noise_sequence} + return config, inputs_dict + + +@require_torch +class 
UnivNetModelTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = (UnivNetModel,) if is_torch_available() else () + # UnivNetModel currently cannot be traced with torch.jit.trace. + test_torchscript = False + # The UnivNetModel is not a transformer and does not use any attention mechanisms, so skip transformer/attention + # related tests. + test_pruning = False + test_resize_embeddings = False + test_resize_position_embeddings = False + test_head_masking = False + # UnivNetModel is not a sequence classification model. + test_mismatched_shapes = False + # UnivNetModel does not have a base_model_prefix attribute. + test_missing_keys = False + # UnivNetModel does not implement a parallelize method. + test_model_parallel = False + is_encoder_decoder = False + has_attentions = False + + input_name = "input_features" + + def setUp(self): + self.model_tester = UnivNetModelTester(self) + self.config_tester = ConfigTester(self, config_class=UnivNetConfig) + + def test_config(self): + self.config_tester.create_and_test_config_to_json_string() + self.config_tester.create_and_test_config_to_json_file() + self.config_tester.create_and_test_config_from_and_save_pretrained() + self.config_tester.create_and_test_config_from_and_save_pretrained_subfolder() + self.config_tester.create_and_test_config_with_num_labels() + self.config_tester.check_config_can_be_init_without_params() + self.config_tester.check_config_arguments_init() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = [ + "input_features", + ] + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + + @unittest.skip(reason="UnivNetModel does not output hidden_states.") + def test_hidden_states_output(self): + pass + + @unittest.skip(reason="UnivNetModel.forward does not accept an inputs_embeds argument.") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="UnivNetModel does not use input embeddings and thus has no get_input_embeddings method.") + def test_model_common_attributes(self): + pass + + @unittest.skip(reason="UnivNetModel does not support all arguments tested, such as output_hidden_states.") + def test_model_outputs_equivalence(self): + pass + + @unittest.skip(reason="UnivNetModel does not output hidden_states.") + def test_retain_grad_hidden_states_attentions(self): + pass + + def test_batched_inputs_outputs(self): + config, inputs = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + + batched_spectrogram = inputs["input_features"].unsqueeze(0).repeat(2, 1, 1) + batched_noise_sequence = inputs["noise_sequence"].unsqueeze(0).repeat(2, 1, 1) + with torch.no_grad(): + batched_outputs = model( + batched_spectrogram.to(torch_device), + batched_noise_sequence.to(torch_device), + )[0] + + self.assertEqual( + batched_spectrogram.shape[0], + batched_outputs.shape[0], + msg="Got different batch dims for input and output", + ) + + def test_unbatched_inputs_outputs(self): + config, inputs = 
self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(inputs["input_features"].to(torch_device), inputs["noise_sequence"].to(torch_device))[ + 0 + ] + self.assertTrue(outputs.shape[0] == 1, msg="Unbatched input should create batched output with bsz = 1") + + def test_unbatched_batched_outputs_consistency(self): + config, inputs = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + + unbatched_spectrogram = inputs["input_features"].detach().clone() + unbatched_noise_sequence = inputs["noise_sequence"].detach().clone() + batched_spectrogram = inputs["input_features"].unsqueeze(0) + batched_noise_sequence = inputs["noise_sequence"].unsqueeze(0) + + with torch.no_grad(): + unbatched_outputs = model( + unbatched_spectrogram.to(torch_device), + unbatched_noise_sequence.to(torch_device), + )[0] + + batched_outputs = model( + batched_spectrogram.to(torch_device), + batched_noise_sequence.to(torch_device), + )[0] + + torch.testing.assert_close(unbatched_outputs, batched_outputs) + + +@require_torch_gpu +@slow +class UnivNetModelIntegrationTests(unittest.TestCase): + def tearDown(self): + super().tearDown() + gc.collect() + torch.cuda.empty_cache() + + def _load_datasamples(self, num_samples, sampling_rate=24000): + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + ds = ds.cast_column("audio", Audio(sampling_rate=sampling_rate)) + # automatic decoding with librispeech + speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] + + return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] + + def get_inputs(self, device, num_samples: int = 3, noise_length: int = 10, seed: int = 0): + generator = torch.manual_seed(seed) + # Note: hardcode model_in_channels -> 64 + if num_samples == 1: + noise_sequence_shape = (64, noise_length) + else: + noise_sequence_shape = (num_samples, 64, noise_length) + # Explicity generate noise_sequence on CPU for consistency. + noise_sequence = torch.randn(noise_sequence_shape, generator=generator, dtype=torch.float32, device="cpu") + # Put noise_sequence on the desired device. + noise_sequence = noise_sequence.to(device) + + # Note: hardcode num_mel_channels -> 100 + if num_samples == 1: + spectrogram_shape = [100, noise_length] + else: + spectrogram_shape = [num_samples, 100, noise_length] + spectrogram = floats_tensor(spectrogram_shape, scale=1.0, rng=random.Random(seed)) + # Note: spectrogram should already be on torch_device + + # Permute to match diffusers implementation + if num_samples == 1: + noise_sequence = noise_sequence.transpose(1, 0) + spectrogram = spectrogram.transpose(1, 0) + else: + noise_sequence = noise_sequence.transpose(2, 1) + spectrogram = spectrogram.transpose(2, 1) + + inputs = { + "input_features": spectrogram, + "noise_sequence": noise_sequence, + "generator": generator, + } + + return inputs + + def test_model_inference_batched(self): + # Load sample checkpoint from Tortoise TTS + model = UnivNetModel.from_pretrained("dg845/univnet-dev") + model.eval().to(torch_device) + + # Get batched noise and spectrogram inputs. 
+ input_speech = self.get_inputs(torch_device, num_samples=3) + + with torch.no_grad(): + waveform = model(**input_speech)[0] + waveform = waveform.cpu() + + waveform_mean = torch.mean(waveform) + waveform_stddev = torch.std(waveform) + waveform_slice = waveform[-1, -9:].flatten() + + EXPECTED_MEAN = torch.tensor(-0.19989729) + EXPECTED_STDDEV = torch.tensor(0.35230172) + EXPECTED_SLICE = torch.tensor([-0.3408, -0.6045, -0.5052, 0.1160, -0.1556, -0.0405, -0.3024, -0.5290, -0.5019]) + + torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=1e-4, rtol=1e-5) + torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5) + torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=5e-4, rtol=1e-5) + + def test_model_inference_unbatched(self): + # Load sample checkpoint from Tortoise TTS + model = UnivNetModel.from_pretrained("dg845/univnet-dev") + model.eval().to(torch_device) + + # Get unbatched noise and spectrogram inputs. + input_speech = self.get_inputs(torch_device, num_samples=1) + + with torch.no_grad(): + waveform = model(**input_speech)[0] + waveform = waveform.cpu() + + waveform_mean = torch.mean(waveform) + waveform_stddev = torch.std(waveform) + waveform_slice = waveform[-1, -9:].flatten() + + EXPECTED_MEAN = torch.tensor(-0.22895093) + EXPECTED_STDDEV = torch.tensor(0.33986747) + EXPECTED_SLICE = torch.tensor([-0.3276, -0.5504, -0.3484, 0.3574, -0.0373, -0.1826, -0.4880, -0.6431, -0.5162]) + + torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=1e-4, rtol=1e-5) + torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5) + torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=1e-3, rtol=1e-5) + + def test_integration(self): + feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev") + model = UnivNetModel.from_pretrained("dg845/univnet-dev") + model.eval().to(torch_device) + + audio, sr = self._load_datasamples(1, sampling_rate=feature_extractor.sampling_rate) + + input_features = feature_extractor(audio, sampling_rate=sr[0], return_tensors="pt").input_features + input_features = input_features.to(device=torch_device) + + input_speech = self.get_inputs(torch_device, num_samples=1, noise_length=input_features.shape[1]) + input_speech["input_features"] = input_features + + with torch.no_grad(): + waveform = model(**input_speech)[0] + waveform = waveform.cpu() + + waveform_mean = torch.mean(waveform) + waveform_stddev = torch.std(waveform) + waveform_slice = waveform[-1, -9:].flatten() + + EXPECTED_MEAN = torch.tensor(0.00051374) + EXPECTED_STDDEV = torch.tensor(0.058105603) + # fmt: off + EXPECTED_SLICE = torch.tensor([-4.3934e-04, -1.8203e-04, -3.3033e-04, -3.8716e-04, -1.6125e-04, 3.5389e-06, -3.3149e-04, -3.7613e-04, -2.3331e-04]) + # fmt: on + + torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=5e-6, rtol=1e-5) + torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5) + torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=5e-6, rtol=1e-5) From 8aca43bdb3cb9a5020f6d57589d85679dc873b1c Mon Sep 17 00:00:00 2001 From: Strive-for-excellence <1505656319@qq.com> Date: Thu, 23 Nov 2023 00:28:27 +0800 Subject: [PATCH 241/268] update Openai API call method (#27628) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 张兴言 --- src/transformers/tools/agents.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/tools/agents.py 
b/src/transformers/tools/agents.py index 51e3f6db0c25..3e423ebb3055 100644 --- a/src/transformers/tools/agents.py +++ b/src/transformers/tools/agents.py @@ -440,13 +440,13 @@ def generate_one(self, prompt, stop): return self._completion_generate([prompt], stop)[0] def _chat_generate(self, prompt, stop): - result = openai.ChatCompletion.create( + result = openai.chat.completions.create( model=self.model, messages=[{"role": "user", "content": prompt}], temperature=0, stop=stop, ) - return result["choices"][0]["message"]["content"] + return result.choices[0].message.content def _completion_generate(self, prompts, stop): result = openai.Completion.create( From 1ddc4fa60e8cbf91367e33053361201dbffc193e Mon Sep 17 00:00:00 2001 From: Jialong Wu Date: Thu, 23 Nov 2023 16:09:56 +0800 Subject: [PATCH 242/268] update d_kv'annotation in mt5'configuration (#27585) * update d_kv'annotation in mt5'configuration * update d_kv'annotation in mt5'configuration * update d_kv'annotation in mt5'configuration --- src/transformers/models/mt5/configuration_mt5.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/mt5/configuration_mt5.py b/src/transformers/models/mt5/configuration_mt5.py index aab93711dfc6..9464979a2b8e 100644 --- a/src/transformers/models/mt5/configuration_mt5.py +++ b/src/transformers/models/mt5/configuration_mt5.py @@ -40,8 +40,8 @@ class MT5Config(PretrainedConfig): d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): - Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // - num_heads`. + Size of the key, query, value projections per attention head. In the conventional context, it is typically expected that `d_kv` has to be equal to `d_model // num_heads`. + But in the architecture of mt5-small, `d_kv` is not equal to `d_model //num_heads`. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 1024): Size of the intermediate feed forward layer in each `T5Block`. num_layers (`int`, *optional*, defaults to 8): From 3bc50d81e6c70d63e59d635106bac6a561b47681 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Thu, 23 Nov 2023 15:46:51 +0530 Subject: [PATCH 243/268] [`FA2`] Add flash attention for opt (#26414) * added flash attention for opt * added to list * fix use cache (#3) * style fix * fix text * test fix2 * reverted until 689f599 * torch fx tests are working now! * small fix * added TODO docstring * changes * comments and .md file modification --------- Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> --- docs/source/en/model_doc/opt.md | 49 ++++ src/transformers/models/opt/modeling_opt.py | 304 ++++++++++++++++++-- 2 files changed, 323 insertions(+), 30 deletions(-) diff --git a/docs/source/en/model_doc/opt.md b/docs/source/en/model_doc/opt.md index 68da201f99bc..3da7b22fab74 100644 --- a/docs/source/en/model_doc/opt.md +++ b/docs/source/en/model_doc/opt.md @@ -62,6 +62,55 @@ The resource should ideally demonstrate something new instead of duplicating an - A blog post on [How 🤗 Accelerate runs very large models thanks to PyTorch](https://huggingface.co/blog/accelerate-large-models) with OPT. + +## Combining OPT and Flash Attention 2 + +First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature. 
+
+```bash
+pip install -U flash-attn --no-build-isolation
+```
+
+Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the flash-attn repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
+
+To load and run a model using Flash Attention 2, refer to the snippet below:
+
+```python
+>>> import torch
+>>> from transformers import OPTForCausalLM, GPT2Tokenizer
+>>> device = "cuda" # the device to load the model onto
+
+>>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.float16, use_flash_attention_2=True)
+>>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
+
+>>> prompt = ("A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I am the "
+              "Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have you lived "
+              "there?")
+
+>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
+>>> model.to(device)
+
+>>> generated_ids = model.generate(**model_inputs, max_new_tokens=30, do_sample=False)
+>>> tokenizer.batch_decode(generated_ids)[0]
+'A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I am the Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have you lived there?\nStatue: I have lived here for about a year.\nHuman: What is your favorite place to eat?\nStatue: I love'
+```
+
+### Expected speedups
+
+Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using `facebook/opt-2.7b` checkpoint and the Flash Attention 2 version of the model using two different sequence lengths.
+
+
+ +
+ +Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using `facebook/opt-350m` checkpoint and the Flash Attention 2 version of the model using two different sequence lengths. + +
+ +
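A rough sketch of how such a speedup could be measured locally is shown below; the checkpoint, prompt, batch size, and generation length are illustrative choices rather than the exact benchmark configuration behind the figures above:

```python
import time

import torch
from transformers import GPT2Tokenizer, OPTForCausalLM


def time_generation(use_flash_attention_2: bool) -> float:
    # Load the model once per setting, in half-precision, on GPU.
    model = OPTForCausalLM.from_pretrained(
        "facebook/opt-350m", torch_dtype=torch.float16, use_flash_attention_2=use_flash_attention_2
    ).to("cuda")
    tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
    inputs = tokenizer(
        ["A chat between a curious human and the Statue of Liberty."] * 8, return_tensors="pt"
    ).to("cuda")

    torch.cuda.synchronize()
    start = time.perf_counter()
    model.generate(**inputs, max_new_tokens=128, do_sample=False)
    torch.cuda.synchronize()
    return time.perf_counter() - start


print(f"eager: {time_generation(False):.2f}s, flash-attention-2: {time_generation(True):.2f}s")
```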
+ + + ## OPTConfig [[autodoc]] OPTConfig diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 5b56ff9e618d..2192f327bc49 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -16,6 +16,7 @@ from typing import List, Optional, Tuple, Union import torch +import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -33,12 +34,18 @@ add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, + is_flash_attn_2_available, logging, replace_return_docstrings, ) from .configuration_opt import OPTConfig +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/opt-350m" @@ -64,6 +71,19 @@ ] +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + class OPTLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. @@ -93,30 +113,49 @@ class OPTAttention(nn.Module): def __init__( self, - embed_dim: int, - num_heads: int, - dropout: float = 0.0, + config: OPTConfig, is_decoder: bool = False, - bias: bool = True, + **kwargs, ): super().__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = dropout - self.head_dim = embed_dim // num_heads + self.config = config + + def _handle_deprecated_argument(config_arg_name, config, fn_arg_name, kwargs): + """ + If a the deprecated argument `fn_arg_name` is passed, raise a deprecation + warning and return that value, otherwise take the equivalent config.config_arg_name + """ + val = None + if fn_arg_name in kwargs: + logging.warning( + "Passing in {} to {self.__class__.__name__} is deprecated and won't be supported from v4.38." + " Please set it in the config instead" + ) + val = kwargs.pop(fn_arg_name) + else: + val = getattr(config, config_arg_name) + return val - if (self.head_dim * num_heads) != self.embed_dim: + self.embed_dim = _handle_deprecated_argument("hidden_size", config, "embed_dim", kwargs) + self.num_heads = _handle_deprecated_argument("num_attention_heads", config, "num_heads", kwargs) + self.dropout = _handle_deprecated_argument("attention_dropout", config, "dropout", kwargs) + self.enable_bias = _handle_deprecated_argument("enable_bias", config, "bias", kwargs) + + self.head_dim = self.embed_dim // self.num_heads + self.is_causal = True + + if (self.head_dim * self.num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" - f" and `num_heads`: {num_heads})." + f" and `num_heads`: {self.num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder - self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() @@ -242,17 +281,210 @@ def forward( return attn_output, attn_weights_reshaped, past_key_value +class OptFlashAttention2(OPTAttention): + """ + OPT flash attention module. This module inherits from `OPTAttention` as the weights of the module stays untouched. + The only required change would be on the forward pass where it needs to correctly call the public API of flash + attention and deal with padding tokens in case the input contains any of them. + """ + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, _, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + query_length = query_states.shape[1] + tgt_len = key_states.shape[-2] + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + query_states = query_states.view(bsz, query_length, self.num_heads, self.head_dim) + key_states = key_states.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + value_states = value_states.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + + attn_dropout = self.dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. + input_dtype = query_states.dtype + if input_dtype == torch.float32: + # Handle the case where the model is quantized + if hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, query_length, dropout=attn_dropout + ) + + attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim) + attn_output = self.out_proj(attn_weights_reshaped) + + if not output_attentions: + attn_weights_reshaped = None + + return attn_output, attn_weights_reshaped, past_key_value + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. 
Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=self.is_causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=self.is_causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + class OPTDecoderLayer(nn.Module): def __init__(self, config: OPTConfig): super().__init__() self.embed_dim = config.hidden_size - self.self_attn = OPTAttention( - embed_dim=self.embed_dim, - num_heads=config.num_attention_heads, - dropout=config.attention_dropout, - is_decoder=True, - bias=config.enable_bias, - ) + + if not getattr(config, "_flash_attn_2_enabled", False): + self.self_attn = OPTAttention(config=config, is_decoder=True) + else: + self.self_attn = OptFlashAttention2(config=config, is_decoder=True) + self.do_layer_norm_before = config.do_layer_norm_before self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] @@ -368,6 +600,7 @@ class OPTPreTrainedModel(PreTrainedModel): base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["OPTDecoderLayer"] + _supports_flash_attn_2 = True def _init_weights(self, module): std = self.config.init_std @@ -581,16 +814,27 @@ def forward( mask_seq_length = past_key_values_length + seq_length # embed positions - if attention_mask is None: - attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) - elif attention_mask.shape[1] != mask_seq_length: - raise ValueError( - f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " - f"{mask_seq_length} (sum of the lengths of current and past inputs)" + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + causal_attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + attention_mask = ( + torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) + if attention_mask is None + else attention_mask ) - causal_attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, input_shape, inputs_embeds, past_key_values_length - ) + else: + # 4d mask is passed through the layers + if attention_mask is None: + attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) + elif attention_mask.shape[1] != mask_seq_length: + raise ValueError( + f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " + f"{mask_seq_length} (sum of the lengths of current and past inputs)" + ) + causal_attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + pos_embeds = self.embed_positions(attention_mask, past_key_values_length) if self.project_in is not None: From baabd3877a6a2e7bc5c9fd9ea62bbe1cd19edacf Mon Sep 17 00:00:00 2001 From: Merve Noyan Date: Thu, 23 Nov 2023 16:58:21 +0100 Subject: [PATCH 244/268] Extended semantic segmentation to image segmentation (#27039) * Extended semantic segmentation * Update image_segmentation.md * Changed title * Update docs/source/en/tasks/semantic_segmentation.md Co-authored-by: Pedro Cuenca * Update docs/source/en/tasks/semantic_segmentation.md Co-authored-by: Pedro Cuenca * Update docs/source/en/tasks/semantic_segmentation.md Co-authored-by: Pedro Cuenca * Update docs/source/en/tasks/semantic_segmentation.md Co-authored-by: Pedro Cuenca * Update docs/source/en/tasks/semantic_segmentation.md Co-authored-by: Pedro Cuenca * 
Update semantic_segmentation.md * Update docs/source/en/tasks/semantic_segmentation.md Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update docs/source/en/tasks/semantic_segmentation.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Addressed Niels' and Maria's comments * Added detail on panoptic segmentation * Added redirection and renamed the file * Update _toctree.yml * Update _redirects.yml * Rename image_segmentation.md to semantic_segmentation.md --------- Co-authored-by: Pedro Cuenca Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> --- docs/source/en/_redirects.yml | 2 +- docs/source/en/_toctree.yml | 2 +- docs/source/en/tasks/semantic_segmentation.md | 240 +++++++++++++----- 3 files changed, 184 insertions(+), 60 deletions(-) diff --git a/docs/source/en/_redirects.yml b/docs/source/en/_redirects.yml index 0dd4d2bfb34b..b6575a6b02f2 100644 --- a/docs/source/en/_redirects.yml +++ b/docs/source/en/_redirects.yml @@ -1,3 +1,3 @@ # Optimizing inference -perf_infer_gpu_many: perf_infer_gpu_one \ No newline at end of file +perf_infer_gpu_many: perf_infer_gpu_one diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index d7d593b21e62..af29898966ba 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -60,7 +60,7 @@ - local: tasks/image_classification title: Image classification - local: tasks/semantic_segmentation - title: Semantic segmentation + title: Image segmentation - local: tasks/video_classification title: Video classification - local: tasks/object_detection diff --git a/docs/source/en/tasks/semantic_segmentation.md b/docs/source/en/tasks/semantic_segmentation.md index 2895a1977721..ce6fb70c8244 100644 --- a/docs/source/en/tasks/semantic_segmentation.md +++ b/docs/source/en/tasks/semantic_segmentation.md @@ -14,29 +14,17 @@ rendered properly in your Markdown viewer. --> -# Semantic segmentation +# Image Segmentation [[open-in-colab]] -Semantic segmentation assigns a label or class to each individual pixel of an image. There are several types of segmentation, and in the case of semantic segmentation, no distinction is made between unique instances of the same object. Both objects are given the same label (for example, "car" instead of "car-1" and "car-2"). Common real-world applications of semantic segmentation include training self-driving cars to identify pedestrians and important traffic information, identifying cells and abnormalities in medical imagery, and monitoring environmental changes from satellite imagery. +Image segmentation models separate areas corresponding to different areas of interest in an image. These models work by assigning a label to each pixel. There are several types of segmentation: semantic segmentation, instance segmentation, and panoptic segmentation. -This guide will show you how to: - -1. Finetune [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer) on the [SceneParse150](https://huggingface.co/datasets/scene_parse_150) dataset. -2. Use your finetuned model for inference. 
- - -The task illustrated in this tutorial is supported by the following model architectures: - - - -[BEiT](../model_doc/beit), [Data2VecVision](../model_doc/data2vec-vision), [DPT](../model_doc/dpt), [MobileNetV2](../model_doc/mobilenet_v2), [MobileViT](../model_doc/mobilevit), [MobileViTV2](../model_doc/mobilevitv2), [SegFormer](../model_doc/segformer), [UPerNet](../model_doc/upernet) - - - - +In this guide, we will: +1. [Take a look at different types of segmentation](#Types-of-Segmentation), +2. [Have an end-to-end fine-tuning example for semantic segmentation](#Fine-tuning-a-Model-for-Segmentation). Before you begin, make sure you have all the necessary libraries installed: @@ -52,7 +40,178 @@ We encourage you to log in to your Hugging Face account so you can upload and sh >>> notebook_login() ``` -## Load SceneParse150 dataset +## Types of Segmentation + +Semantic segmentation assigns a label or class to every single pixel in an image. Let's take a look at a semantic segmentation model output. It will assign the same class to every instance of an object it comes across in an image, for example, all cats will be labeled as "cat" instead of "cat-1", "cat-2". +We can use transformers' image segmentation pipeline to quickly infer a semantic segmentation model. Let's take a look at the example image. + +```python +from transformers import pipeline +from PIL import Image +import requests + +url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/segmentation_input.jpg" +image = Image.open(requests.get(url, stream=True).raw) +image +``` + +
+ Segmentation Input +
+ +We will use [nvidia/segformer-b1-finetuned-cityscapes-1024-1024](https://huggingface.co/nvidia/segformer-b1-finetuned-cityscapes-1024-1024). + +```python +semantic_segmentation = pipeline("image-segmentation", "nvidia/segformer-b1-finetuned-cityscapes-1024-1024") +results = semantic_segmentation(image) +results +``` + +The segmentation pipeline output includes a mask for every predicted class. +```bash +[{'score': None, + 'label': 'road', + 'mask': }, + {'score': None, + 'label': 'sidewalk', + 'mask': }, + {'score': None, + 'label': 'building', + 'mask': }, + {'score': None, + 'label': 'wall', + 'mask': }, + {'score': None, + 'label': 'pole', + 'mask': }, + {'score': None, + 'label': 'traffic sign', + 'mask': }, + {'score': None, + 'label': 'vegetation', + 'mask': }, + {'score': None, + 'label': 'terrain', + 'mask': }, + {'score': None, + 'label': 'sky', + 'mask': }, + {'score': None, + 'label': 'car', + 'mask': }] +``` + +Taking a look at the mask for the car class, we can see every car is classified with the same mask. + +```python +results[-1]["mask"] +``` +
+ Semantic Segmentation Output +
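Since semantic segmentation assigns exactly one class to each pixel, the per-class masks returned above can be collapsed into a single label map. A minimal sketch of that step, assuming the `results` list returned by the pipeline call above (the variable names are illustrative):

```python
import numpy as np

# Collapse the per-class masks into one label map: every pixel ends up with the
# index of the class whose mask covers it.
label_map = np.zeros(np.array(results[0]["mask"]).shape, dtype=np.uint8)
id2label = {}
for class_idx, result in enumerate(results):
    label_map[np.array(result["mask"]) > 0] = class_idx
    id2label[class_idx] = result["label"]

print(np.unique(label_map), id2label)
```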
+
+In instance segmentation, the goal is not to classify every pixel, but to predict a mask for **every instance of an object** in a given image. It works very similarly to object detection, but instead of a bounding box for every instance, there is a segmentation mask. We will use [facebook/mask2former-swin-large-cityscapes-instance](https://huggingface.co/facebook/mask2former-swin-large-cityscapes-instance) for this.
+
+```python
+instance_segmentation = pipeline("image-segmentation", "facebook/mask2former-swin-large-cityscapes-instance")
+results = instance_segmentation(image)
+results
+```
+
+As you can see below, multiple car instances are detected, and no class is predicted for pixels other than those belonging to the car and person instances.
+
+```bash
+[{'score': 0.999944,
+  'label': 'car',
+  'mask': },
+ {'score': 0.999945,
+  'label': 'car',
+  'mask': },
+ {'score': 0.999652,
+  'label': 'car',
+  'mask': },
+ {'score': 0.903529,
+  'label': 'person',
+  'mask': }]
+```
+Let's check out one of the car masks below.
+
+```python
+results[2]["mask"]
+```
+
+ Semantic Segmentation Output +
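Because each entry above corresponds to a single object instance, an instance mask can also be reduced to the kind of bounding box an object detector would return. A minimal sketch, assuming the instance segmentation `results` from above:

```python
import numpy as np

# Derive a bounding box (x_min, y_min, x_max, y_max) from one predicted instance mask.
instance = results[2]
mask = np.array(instance["mask"]) > 0
ys, xs = np.nonzero(mask)
box = (int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max()))
print(instance["label"], instance["score"], box)
```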
+
+Panoptic segmentation combines semantic segmentation and instance segmentation: every pixel is classified into a class and an instance of that class, and there is a separate mask for each instance. We can use [facebook/mask2former-swin-large-cityscapes-panoptic](https://huggingface.co/facebook/mask2former-swin-large-cityscapes-panoptic) for this.
+
+```python
+panoptic_segmentation = pipeline("image-segmentation", "facebook/mask2former-swin-large-cityscapes-panoptic")
+results = panoptic_segmentation(image)
+results
+```
+As you can see below, there are more classes this time. Every pixel is classified into one of them, as the side-by-side comparison below illustrates.
+
+```bash
+[{'score': 0.999981,
+  'label': 'car',
+  'mask': },
+ {'score': 0.999958,
+  'label': 'car',
+  'mask': },
+ {'score': 0.99997,
+  'label': 'vegetation',
+  'mask': },
+ {'score': 0.999575,
+  'label': 'pole',
+  'mask': },
+ {'score': 0.999958,
+  'label': 'building',
+  'mask': },
+ {'score': 0.999634,
+  'label': 'road',
+  'mask': },
+ {'score': 0.996092,
+  'label': 'sidewalk',
+  'mask': },
+ {'score': 0.999221,
+  'label': 'car',
+  'mask': },
+ {'score': 0.99987,
+  'label': 'sky',
+  'mask': }]
+```
+
+Let's have a side-by-side comparison of all types of segmentation.
+
+
+ Segmentation Maps Compared +
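The comparison above is a pre-rendered figure; below is a rough sketch of how a similar overlay could be drawn locally with matplotlib, assuming the `image` and one of the `results` lists from above (colors are chosen at random):

```python
import matplotlib.pyplot as plt
import numpy as np

# Overlay every predicted mask from one pipeline run on top of the input image.
plt.imshow(image)
for result in results:
    mask = np.array(result["mask"]) > 0
    overlay = np.zeros((*mask.shape, 4))
    overlay[mask] = (*np.random.rand(3), 0.5)  # random color at 50% opacity
    plt.imshow(overlay)
plt.axis("off")
plt.show()
```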
+ +Seeing all types of segmentation, let's have a deep dive on fine-tuning a model for semantic segmentation. + +Common real-world applications of semantic segmentation include training self-driving cars to identify pedestrians and important traffic information, identifying cells and abnormalities in medical imagery, and monitoring environmental changes from satellite imagery. + +## Fine-tuning a Model for Segmentation + +We will now: + +1. Finetune [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer) on the [SceneParse150](https://huggingface.co/datasets/scene_parse_150) dataset. +2. Use your fine-tuned model for inference. + + +The task illustrated in this tutorial is supported by the following model architectures: + + + +[BEiT](../model_doc/beit), [Data2VecVision](../model_doc/data2vec-vision), [DPT](../model_doc/dpt), [MobileNetV2](../model_doc/mobilenet_v2), [MobileViT](../model_doc/mobilevit), [MobileViTV2](../model_doc/mobilevitv2), [SegFormer](../model_doc/segformer), [UPerNet](../model_doc/upernet) + + + + + + +### Load SceneParse150 dataset Start by loading a smaller subset of the SceneParse150 dataset from the 🤗 Datasets library. This'll give you a chance to experiment and make sure everything works before spending more time training on the full dataset. @@ -97,7 +256,7 @@ You'll also want to create a dictionary that maps a label id to a label class wh >>> num_labels = len(id2label) ``` -## Preprocess +### Preprocess The next step is to load a SegFormer image processor to prepare the images and annotations for the model. Some datasets, like this one, use the zero-index as the background class. However, the background class isn't actually included in the 150 classes, so you'll need to set `reduce_labels=True` to subtract one from all the labels. The zero-index is replaced by `255` so it's ignored by SegFormer's loss function: @@ -204,7 +363,7 @@ The transform is applied on the fly which is faster and consumes less disk space
-## Evaluate +### Evaluate Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [mean Intersection over Union](https://huggingface.co/spaces/evaluate-metric/accuracy) (IoU) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric): @@ -289,7 +448,7 @@ logits first, and then reshaped to match the size of the labels before you can c Your `compute_metrics` function is ready to go now, and you'll return to it when you setup your training. -## Train +### Train @@ -453,7 +612,7 @@ Congratulations! You have fine-tuned your model and shared it on the 🤗 Hub. Y -## Inference +### Inference Great, now that you've finetuned a model, you can use it for inference! @@ -470,43 +629,8 @@ Load an image for inference: -The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for image segmentation with your model, and pass your image to it: - -```py ->>> from transformers import pipeline - ->>> segmenter = pipeline("image-segmentation", model="my_awesome_seg_model") ->>> segmenter(image) -[{'score': None, - 'label': 'wall', - 'mask': }, - {'score': None, - 'label': 'sky', - 'mask': }, - {'score': None, - 'label': 'floor', - 'mask': }, - {'score': None, - 'label': 'ceiling', - 'mask': }, - {'score': None, - 'label': 'bed ', - 'mask': }, - {'score': None, - 'label': 'windowpane', - 'mask': }, - {'score': None, - 'label': 'cabinet', - 'mask': }, - {'score': None, - 'label': 'chair', - 'mask': }, - {'score': None, - 'label': 'armchair', - 'mask': }] -``` -You can also manually replicate the results of the `pipeline` if you'd like. Process the image with an image processor and place the `pixel_values` on a GPU: +We will now see how to infer without a pipeline. Process the image with an image processor and place the `pixel_values` on a GPU: ```py >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use GPU if available, otherwise use a CPU From b406c4d2611d9425dabf927d3de0fdf7981de2cb Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 23 Nov 2023 17:02:16 +0000 Subject: [PATCH 245/268] Update TVP arxiv link (#27672) Update arxiv link --- docs/source/en/model_doc/tvp.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/en/model_doc/tvp.md b/docs/source/en/model_doc/tvp.md index 1e733db6c8cb..22b400a06c73 100644 --- a/docs/source/en/model_doc/tvp.md +++ b/docs/source/en/model_doc/tvp.md @@ -23,9 +23,9 @@ The abstract from the paper is the following: This research addresses temporal video grounding (TVG), which is the process of pinpointing the start and end times of specific events in a long video, as described by a text sentence. Text-visual prompting (TVP), is proposed to enhance TVG. TVP involves integrating specially designed patterns, known as 'prompts', into both the visual (image-based) and textual (word-based) input components of a TVG model. These prompts provide additional spatial-temporal context, improving the model's ability to accurately determine event timings in the video. The approach employs 2D visual inputs in place of 3D ones. Although 3D inputs offer more spatial-temporal detail, they are also more time-consuming to process. 
The use of 2D inputs with the prompting method aims to provide similar levels of context and accuracy more efficiently. +alt="drawing" width="600"/> - TVP architecture. Taken from the original paper. + TVP architecture. Taken from the original paper. This model was contributed by [Jiqing Feng](https://huggingface.co/Jiqing). The original code can be found [here](https://github.com/intel/TVP). @@ -183,4 +183,4 @@ Tips: ## TvpForVideoGrounding [[autodoc]] TvpForVideoGrounding - - forward \ No newline at end of file + - forward From fe1c16e95a1c2f7730b5d7340b669a63ab0b8ded Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Thu, 23 Nov 2023 18:44:08 +0100 Subject: [PATCH 246/268] [DPT, Dinov2] Add resources (#27655) * Add resources * Remove script * Update docs/source/en/model_doc/dinov2.md Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --------- Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- docs/source/en/model_doc/dinov2.md | 12 ++++++++++++ docs/source/en/model_doc/dpt.md | 15 +++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/docs/source/en/model_doc/dinov2.md b/docs/source/en/model_doc/dinov2.md index 72a0478924f4..dca94786773d 100644 --- a/docs/source/en/model_doc/dinov2.md +++ b/docs/source/en/model_doc/dinov2.md @@ -55,6 +55,18 @@ with torch.no_grad(): print((last_hidden_states - traced_outputs[0]).abs().max()) ``` +## Resources + +A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DPT. + +- Demo notebooks for DINOv2 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DINOv2). 🌎 + + + +- [`Dinov2ForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). +- See also: [Image classification task guide](../tasks/image_classification) + +If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## Dinov2Config diff --git a/docs/source/en/model_doc/dpt.md b/docs/source/en/model_doc/dpt.md index 5e3e25343cdd..a02313a31235 100644 --- a/docs/source/en/model_doc/dpt.md +++ b/docs/source/en/model_doc/dpt.md @@ -32,6 +32,21 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/isl-org/DPT). +## Usage tips + +DPT is compatible with the [`AutoBackbone`] class. This allows to use the DPT framework with various computer vision backbones available in the library, such as [`VitDetBackbone`] or [`Dinov2Backbone`]. 
One can create it as follows: + +```python +from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation + +# initialize with a Transformer-based backbone such as DINOv2 +# in that case, we also specify `reshape_hidden_states=False` to get feature maps of shape (batch_size, num_channels, height, width) +backbone_config = Dinov2Config.from_pretrained("facebook/dinov2-base", out_features=["stage1", "stage2", "stage3", "stage4"], reshape_hidden_states=False) + +config = DPTConfig(backbone_config=backbone_config) +model = DPTForDepthEstimation(config=config) +``` + ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DPT. From b8db265bc6d0c9208ee465a12c6497149b4ee725 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 23 Nov 2023 21:00:39 +0100 Subject: [PATCH 247/268] Update tiny model summary file (#27388) * update * fix --------- Co-authored-by: ydshieh --- .../models/auto/image_processing_auto.py | 1 + tests/models/clvp/test_modeling_clvp.py | 4 +- tests/models/fuyu/test_modeling_fuyu.py | 5 +- tests/models/kosmos2/test_modeling_kosmos2.py | 14 +- .../test_modeling_seamless_m4t.py | 18 +- tests/models/swin2sr/test_modeling_swin2sr.py | 6 +- tests/models/whisper/test_modeling_whisper.py | 1 + .../test_pipelines_text_generation.py | 7 +- tests/utils/tiny_model_summary.json | 231 +++++++++++++++++- 9 files changed, 277 insertions(+), 10 deletions(-) diff --git a/src/transformers/models/auto/image_processing_auto.py b/src/transformers/models/auto/image_processing_auto.py index 168b7a5dff3a..7d26d668ab2a 100644 --- a/src/transformers/models/auto/image_processing_auto.py +++ b/src/transformers/models/auto/image_processing_auto.py @@ -71,6 +71,7 @@ ("idefics", "IdeficsImageProcessor"), ("imagegpt", "ImageGPTImageProcessor"), ("instructblip", "BlipImageProcessor"), + ("kosmos-2", "CLIPImageProcessor"), ("layoutlmv2", "LayoutLMv2ImageProcessor"), ("layoutlmv3", "LayoutLMv3ImageProcessor"), ("levit", "LevitImageProcessor"), diff --git a/tests/models/clvp/test_modeling_clvp.py b/tests/models/clvp/test_modeling_clvp.py index 1b3ab79034a9..3ebe5fe357d6 100644 --- a/tests/models/clvp/test_modeling_clvp.py +++ b/tests/models/clvp/test_modeling_clvp.py @@ -38,6 +38,7 @@ ids_tensor, random_attention_mask, ) +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -281,9 +282,10 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else () all_generative_model_classes = (ClvpForCausalLM,) if is_torch_available() else () + pipeline_model_mapping = {"feature-extraction": ClvpModelForConditionalGeneration} if is_torch_available() else {} test_pruning = False diff --git a/tests/models/fuyu/test_modeling_fuyu.py b/tests/models/fuyu/test_modeling_fuyu.py index d475e1e0ca04..84c912889277 100644 --- a/tests/models/fuyu/test_modeling_fuyu.py +++ b/tests/models/fuyu/test_modeling_fuyu.py @@ -24,6 +24,7 @@ from transformers.utils import cached_property from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask +from ...test_pipeline_mixin import PipelineTesterMixin if is_vision_available(): @@ -262,9 +263,9 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class 
FuyuModelTest(ModelTesterMixin, unittest.TestCase): +class FuyuModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FuyuForCausalLM,) if is_torch_available() else () - pipeline_model_mapping = {"image-to-text": FuyuForCausalLM} if is_torch_available() else {} + pipeline_model_mapping = {"text-generation": FuyuForCausalLM} if is_torch_available() else {} test_head_masking = False test_pruning = False diff --git a/tests/models/kosmos2/test_modeling_kosmos2.py b/tests/models/kosmos2/test_modeling_kosmos2.py index 5491ded1bc81..dd953eedc881 100644 --- a/tests/models/kosmos2/test_modeling_kosmos2.py +++ b/tests/models/kosmos2/test_modeling_kosmos2.py @@ -37,6 +37,7 @@ ids_tensor, random_attention_mask, ) +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -244,15 +245,26 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class Kosmos2ModelTest(ModelTesterMixin, unittest.TestCase): +class Kosmos2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Kosmos2Model, Kosmos2ForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (Kosmos2ForConditionalGeneration,) if is_torch_available() else () + pipeline_model_mapping = ( + {"feature-extraction": Kosmos2Model, "image-to-text": Kosmos2ForConditionalGeneration} + if is_torch_available() + else {} + ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False + # TODO: `image-to-text` pipeline for this model needs Processor. + def is_pipeline_test_to_skip( + self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name + ): + return pipeline_test_casse_name == "ImageToTextPipelineTests" + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) diff --git a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py index ab7a48694d2f..4eb4c7359fee 100644 --- a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py +++ b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py @@ -34,6 +34,7 @@ ids_tensor, random_attention_mask, ) +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -616,7 +617,9 @@ def test_attention_outputs(self): @require_torch -class SeamlessM4TModelWithTextInputTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class SeamlessM4TModelWithTextInputTest( + ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase +): is_encoder_decoder = True fx_compatible = False test_missing_keys = False @@ -636,6 +639,19 @@ class SeamlessM4TModelWithTextInputTest(ModelTesterMixin, GenerationTesterMixin, else () ) all_generative_model_classes = (SeamlessM4TForTextToText,) if is_torch_available() else () + pipeline_model_mapping = ( + { + "automatic-speech-recognition": SeamlessM4TForSpeechToText, + "conversational": SeamlessM4TForTextToText, + "feature-extraction": SeamlessM4TModel, + "summarization": SeamlessM4TForTextToText, + "text-to-audio": SeamlessM4TForTextToSpeech, + "text2text-generation": SeamlessM4TForTextToText, + "translation": SeamlessM4TForTextToText, + } + if is_torch_available() + else {} + ) def setUp(self): self.model_tester = SeamlessM4TModelTester(self, input_modality="text") diff --git a/tests/models/swin2sr/test_modeling_swin2sr.py b/tests/models/swin2sr/test_modeling_swin2sr.py index 
f94e11ad6460..730689603684 100644 --- a/tests/models/swin2sr/test_modeling_swin2sr.py +++ b/tests/models/swin2sr/test_modeling_swin2sr.py @@ -162,7 +162,11 @@ def prepare_config_and_inputs_for_common(self): @require_torch class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": Swin2SRModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} + if is_torch_available() + else {} + ) fx_compatible = False test_pruning = False diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index f77d81d76e52..6f01cfdac29f 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -367,6 +367,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi "audio-classification": WhisperForAudioClassification, "automatic-speech-recognition": WhisperForConditionalGeneration, "feature-extraction": WhisperModel, + "text-generation": WhisperForCausalLM, } if is_torch_available() else {} diff --git a/tests/pipelines/test_pipelines_text_generation.py b/tests/pipelines/test_pipelines_text_generation.py index b9a5febb5609..dc77204f3e13 100644 --- a/tests/pipelines/test_pipelines_text_generation.py +++ b/tests/pipelines/test_pipelines_text_generation.py @@ -242,7 +242,12 @@ def run_pipeline_test(self, text_generator, _): # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
- EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"] + EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [ + "RwkvForCausalLM", + "XGLMForCausalLM", + "GPTNeoXForCausalLM", + "FuyuForCausalLM", + ] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS diff --git a/tests/utils/tiny_model_summary.json b/tests/utils/tiny_model_summary.json index 2a1efa7d88d2..5f2c6c0b4e74 100644 --- a/tests/utils/tiny_model_summary.json +++ b/tests/utils/tiny_model_summary.json @@ -877,6 +877,16 @@ ], "sha": "a7874595b900f9b2ddc79130dafc3ff48f4fbfb9" }, + "ClvpModelForConditionalGeneration": { + "tokenizer_classes": [ + "ClvpTokenizer" + ], + "processor_classes": [ + "ClvpFeatureExtractor" + ], + "model_classes": [], + "sha": "45df7581535be337ff781707b6c20994ca221f05" + }, "CodeGenForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", @@ -1039,7 +1049,8 @@ "ConvNextImageProcessor" ], "model_classes": [ - "ConvNextV2ForImageClassification" + "ConvNextV2ForImageClassification", + "TFConvNextV2ForImageClassification" ], "sha": "ee22bae1cbb87d66fc7f62f7e15a43d6ff80d3cc" }, @@ -1049,7 +1060,8 @@ "ConvNextImageProcessor" ], "model_classes": [ - "ConvNextV2Model" + "ConvNextV2Model", + "TFConvNextV2Model" ], "sha": "c4dd68ee1102cba05bcc483da2a88e39427b7249" }, @@ -2136,6 +2148,56 @@ ], "sha": "683f6f73a2ab87801f1695a72d1af63cf173ab7c" }, + "FalconForCausalLM": { + "tokenizer_classes": [ + "PreTrainedTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "FalconForCausalLM" + ], + "sha": "60076d5dafc5e33ba9c90dcd05e7c0834e44049a" + }, + "FalconForQuestionAnswering": { + "tokenizer_classes": [ + "PreTrainedTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "FalconForQuestionAnswering" + ], + "sha": "b1ee9cd5fad2d177ea5a46df4611cd02f66ae788" + }, + "FalconForSequenceClassification": { + "tokenizer_classes": [ + "PreTrainedTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "FalconForSequenceClassification" + ], + "sha": "007838c0991c2b6a87dc49a8a5c20f29149a00fa" + }, + "FalconForTokenClassification": { + "tokenizer_classes": [ + "PreTrainedTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "FalconForTokenClassification" + ], + "sha": "0ea6ae548773daa6e3317fddc058957e956eebf4" + }, + "FalconModel": { + "tokenizer_classes": [ + "PreTrainedTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "FalconModel" + ], + "sha": "ca15a579c946eb00c5b39cc8e0ea63d0c1460f84" + }, "FlaubertForMultipleChoice": { "tokenizer_classes": [ "FlaubertTokenizer" @@ -2364,6 +2426,18 @@ ], "sha": "bfbaa8fa21c3abf80b94e7168b5ecff8ec5b5f76" }, + "FuyuForCausalLM": { + "tokenizer_classes": [ + "LlamaTokenizerFast" + ], + "processor_classes": [ + "FuyuImageProcessor" + ], + "model_classes": [ + "FuyuForCausalLM" + ], + "sha": "685d78258ea95c5c82e0e4555d0d4a2270ab8bff" + }, "GLPNForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ @@ -2866,6 +2940,30 @@ ], "sha": "5a7983e48d5841704733dd0756177680ed50c074" }, + "Kosmos2ForConditionalGeneration": { + "tokenizer_classes": [ + "XLMRobertaTokenizerFast" + ], + "processor_classes": [ + "CLIPImageProcessor" + ], + "model_classes": [ + "Kosmos2ForConditionalGeneration" + ], + "sha": "d1d4607782b911411676f1ee79997dee645def58" + }, + "Kosmos2Model": { + "tokenizer_classes": [ + "XLMRobertaTokenizerFast" + ], + "processor_classes": [ + "CLIPImageProcessor" + ], + "model_classes": [ + "Kosmos2Model" 
+ ], + "sha": "379d8944a65312094d9ab1c4b8a82058a2d3274e" + }, "LEDForConditionalGeneration": { "tokenizer_classes": [ "LEDTokenizer", @@ -3820,6 +3918,39 @@ ], "sha": "f197d5bfa1fe27b5f28a6e6d4e3ad229b753450a" }, + "MistralForCausalLM": { + "tokenizer_classes": [ + "LlamaTokenizer", + "LlamaTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "MistralForCausalLM" + ], + "sha": "f7e06aeedbba8f4f665b438b868ed932d451f64b" + }, + "MistralForSequenceClassification": { + "tokenizer_classes": [ + "LlamaTokenizer", + "LlamaTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "MistralForSequenceClassification" + ], + "sha": "65045444ea1933309270d8b08b21d3fa94a84290" + }, + "MistralModel": { + "tokenizer_classes": [ + "LlamaTokenizer", + "LlamaTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "MistralModel" + ], + "sha": "becd727ad72b1e8a7c0fa0ea39b61904fa68aeac" + }, "MobileBertForMaskedLM": { "tokenizer_classes": [ "MobileBertTokenizer", @@ -4558,6 +4689,32 @@ ], "sha": "f0e27b2b4e53ba70e05d13dcfea8e85272b292a5" }, + "Owlv2ForObjectDetection": { + "tokenizer_classes": [ + "CLIPTokenizer", + "CLIPTokenizerFast" + ], + "processor_classes": [ + "Owlv2ImageProcessor" + ], + "model_classes": [ + "Owlv2ForObjectDetection" + ], + "sha": "30439c0b2749726468dc13a755261e8101170052" + }, + "Owlv2Model": { + "tokenizer_classes": [ + "CLIPTokenizer", + "CLIPTokenizerFast" + ], + "processor_classes": [ + "Owlv2ImageProcessor" + ], + "model_classes": [ + "Owlv2Model" + ], + "sha": "7aeebdad5f72b36cb07c74355afad8e6052e2377" + }, "PLBartForCausalLM": { "tokenizer_classes": [ "PLBartTokenizer" @@ -4760,6 +4917,50 @@ ], "sha": "b8c8d479e29e9ee048e2d0b05b001ac835ad8859" }, + "PhiForCausalLM": { + "tokenizer_classes": [ + "CodeGenTokenizer", + "CodeGenTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "PhiForCausalLM" + ], + "sha": "3fecc0109a4a3a230e3a5509eaf47a26eba85d79" + }, + "PhiForSequenceClassification": { + "tokenizer_classes": [ + "CodeGenTokenizer", + "CodeGenTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "PhiForSequenceClassification" + ], + "sha": "e1c9f8ebf1317516acc1cd6338de71a53e770245" + }, + "PhiForTokenClassification": { + "tokenizer_classes": [ + "CodeGenTokenizer", + "CodeGenTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "PhiForTokenClassification" + ], + "sha": "d3a8054903753b5c96c05eaf9877905a116a1d5e" + }, + "PhiModel": { + "tokenizer_classes": [ + "CodeGenTokenizer", + "CodeGenTokenizerFast" + ], + "processor_classes": [], + "model_classes": [ + "PhiModel" + ], + "sha": "99c38d5ce7ace35127d00ed3eeb3561308ea6b21" + }, "Pix2StructForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" @@ -4768,7 +4969,9 @@ "Pix2StructImageProcessor", "Pix2StructProcessor" ], - "model_classes": [], + "model_classes": [ + "Pix2StructForConditionalGeneration" + ], "sha": "42b3de00ad535076c4893e4ac5ae2d2748cc4ccb" }, "PoolFormerForImageClassification": { @@ -5691,6 +5894,16 @@ ], "sha": "25ba2d88c770533f8c69811d2a454a00c1d09f5d" }, + "Swin2SRForImageSuperResolution": { + "tokenizer_classes": [], + "processor_classes": [ + "Swin2SRImageProcessor" + ], + "model_classes": [ + "Swin2SRForImageSuperResolution" + ], + "sha": "3a2780de0b455084c018ac8a62b56040969e26ec" + }, "Swin2SRModel": { "tokenizer_classes": [], "processor_classes": [ @@ -6625,6 +6838,18 @@ ], "sha": "d71b13674b1a67443cd19d0594a3b5b1e5968f0d" }, + "WhisperForCausalLM": { + "tokenizer_classes": [ + "WhisperTokenizer" + ], + 
"processor_classes": [ + "WhisperFeatureExtractor" + ], + "model_classes": [ + "WhisperForCausalLM" + ], + "sha": "e7febfd7f4512e029293c677e6d2633e23fc459a" + }, "WhisperForConditionalGeneration": { "tokenizer_classes": [ "WhisperTokenizer", From a761d6e9a02598690e9eceeec7a8b662149d7bdb Mon Sep 17 00:00:00 2001 From: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Date: Fri, 24 Nov 2023 11:40:52 +0530 Subject: [PATCH 248/268] Refactoring Trainer, adds `save_only_model` arg and simplifying FSDP integration (#27652) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add code changes 1. Refactor FSDP 2. Add `--save_only_model` option: When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state. 3. Bump up the minimum `accelerate` version to `0.21.0` * quality * fix quality? * Revert "fix quality?" This reverts commit 149330a6abc078827be274db84c8a2d26a76eba1. * fix fsdp doc strings * fix quality * Update src/transformers/training_args.py Co-authored-by: Zach Mueller * please fix the quality issue 😅 * Apply suggestions from code review Co-authored-by: Benjamin Bossan * address comment * simplify conditional check as per the comment * update documentation --------- Co-authored-by: Zach Mueller Co-authored-by: Benjamin Bossan --- docs/source/en/main_classes/trainer.md | 33 ++- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- src/transformers/modeling_utils.py | 39 +-- src/transformers/trainer.py | 226 +++++++----------- src/transformers/trainer_utils.py | 2 + src/transformers/training_args.py | 32 ++- src/transformers/utils/import_utils.py | 2 +- 8 files changed, 164 insertions(+), 174 deletions(-) diff --git a/docs/source/en/main_classes/trainer.md b/docs/source/en/main_classes/trainer.md index 7f85d6d72ad0..7304de8174dc 100644 --- a/docs/source/en/main_classes/trainer.md +++ b/docs/source/en/main_classes/trainer.md @@ -426,8 +426,7 @@ To read more about it and the benefits, check out the [Fully Sharded Data Parall We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature. All you need to do is enable it through the config. -**Required PyTorch version for FSDP support**: PyTorch Nightly (or 1.12.0 if you read this after it has been released) -as the model saving with FSDP activated is only available with recent fixes. +**Required PyTorch version for FSDP support**: PyTorch >=2.1.0 **Usage**: @@ -440,6 +439,8 @@ as the model saving with FSDP activated is only available with recent fixes. - SHARD_GRAD_OP : Shards optimizer states + gradients across data parallel workers/GPUs. For this, add `--fsdp shard_grad_op` to the command line arguments. - NO_SHARD : No sharding. For this, add `--fsdp no_shard` to the command line arguments. + - HYBRID_SHARD : No sharding. For this, add `--fsdp hybrid_shard` to the command line arguments. + - HYBRID_SHARD_ZERO2 : No sharding. For this, add `--fsdp hybrid_shard_zero2` to the command line arguments. - To offload the parameters and gradients to the CPU, add `--fsdp "full_shard offload"` or `--fsdp "shard_grad_op offload"` to the command line arguments. - To automatically recursively wrap layers with FSDP using `default_auto_wrap_policy`, @@ -449,18 +450,18 @@ as the model saving with FSDP activated is only available with recent fixes. - Remaining FSDP config is passed via `--fsdp_config `. It is either a location of FSDP json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`. 
- If auto wrapping is enabled, you can either use transformer based auto wrap policy or size based auto wrap policy. - - For transformer based auto wrap policy, it is recommended to specify `fsdp_transformer_layer_cls_to_wrap` in the config file. If not specified, the default value is `model._no_split_modules` when available. + - For transformer based auto wrap policy, it is recommended to specify `transformer_layer_cls_to_wrap` in the config file. If not specified, the default value is `model._no_split_modules` when available. This specifies the list of transformer layer class name (case-sensitive) to wrap ,e.g, [`BertLayer`], [`GPTJBlock`], [`T5Block`] .... This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by couple of MLP layers. Remaining layers including the shared embeddings are conveniently wrapped in same outermost FSDP unit. Therefore, use this for transformer based models. - - For size based auto wrap policy, please add `fsdp_min_num_params` in the config file. + - For size based auto wrap policy, please add `min_num_params` in the config file. It specifies FSDP's minimum number of parameters for auto wrapping. - - `fsdp_backward_prefetch` can be specified in the config file. It controls when to prefetch next set of parameters. + - `backward_prefetch` can be specified in the config file. It controls when to prefetch next set of parameters. `backward_pre` and `backward_pos` are available options. For more information refer `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch` - - `fsdp_forward_prefetch` can be specified in the config file. It controls when to prefetch next set of parameters. + - `forward_prefetch` can be specified in the config file. It controls when to prefetch next set of parameters. If `"True"`, FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. - `limit_all_gathers` can be specified in the config file. If `"True"`, FSDP explicitly synchronizes the CPU thread to prevent too many in-flight all-gathers. @@ -468,6 +469,20 @@ as the model saving with FSDP activated is only available with recent fixes. If `"True"`, FSDP activation checkpointing is a technique to reduce memory usage by clearing activations of certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time for reduced memory usage. + - `use_orig_params` can be specified in the config file. + If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres. Useful in cases such as parameter-efficient fine-tuning. This also enables to have different optimizer param groups. This should be `True` when creating optimizer object before preparing/wrapping the model with FSDP. + Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). + +**Saving and loading** +Saving entire intermediate checkpoints using `FULL_STATE_DICT` state_dict_type with CPU offloading on rank 0 takes a lot of time and often results in NCCL Timeout errors due to indefinite hanging during broadcasting. However, at the end of training, we want the whole model state dict instead of the sharded state dict which is only compatible with FSDP. 
Use `SHARDED_STATE_DICT` (default) state_dict_type to save the intermediate checkpoints and optimizer states in this format recommended by the PyTorch team. + +Saving the final checkpoint in transformers format using default `safetensors` format requires below changes. +```python +if trainer.is_fsdp_enabled: + trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT") + +trainer.save_model(script_args.output_dir) +``` **Few caveats to be aware of** - it is incompatible with `generate`, thus is incompatible with `--predict_with_generate` @@ -492,15 +507,15 @@ Pass `--fsdp "full shard"` along with following changes to be made in `--fsdp_co https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py). - `xla_fsdp_grad_ckpt`. When `True`, uses gradient checkpointing over each nested XLA FSDP wrapped layer. This setting can only be used when the xla flag is set to true, and an auto wrapping policy is specified through - `fsdp_min_num_params` or `fsdp_transformer_layer_cls_to_wrap`. + `min_num_params` or `transformer_layer_cls_to_wrap`. - You can either use transformer based auto wrap policy or size based auto wrap policy. - - For transformer based auto wrap policy, it is recommended to specify `fsdp_transformer_layer_cls_to_wrap` in the config file. If not specified, the default value is `model._no_split_modules` when available. + - For transformer based auto wrap policy, it is recommended to specify `transformer_layer_cls_to_wrap` in the config file. If not specified, the default value is `model._no_split_modules` when available. This specifies the list of transformer layer class name (case-sensitive) to wrap ,e.g, [`BertLayer`], [`GPTJBlock`], [`T5Block`] .... This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by couple of MLP layers. Remaining layers including the shared embeddings are conveniently wrapped in same outermost FSDP unit. Therefore, use this for transformer based models. - - For size based auto wrap policy, please add `fsdp_min_num_params` in the config file. + - For size based auto wrap policy, please add `min_num_params` in the config file. It specifies FSDP's minimum number of parameters for auto wrapping. diff --git a/setup.py b/setup.py index a51f2a7a5266..eb240c8172f0 100644 --- a/setup.py +++ b/setup.py @@ -96,7 +96,7 @@ # 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py _deps = [ "Pillow>=10.0.1,<=15.0", - "accelerate>=0.20.3", + "accelerate>=0.21.0", "av==9.2.0", # Latest version of PyAV (10.0.0) has issues with audio stream. "beautifulsoup4", "codecarbon==1.2.0", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 08fddd2e1ecc..5bef2ec9e22e 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -3,7 +3,7 @@ # 2. 
run `make deps_table_update`` deps = { "Pillow": "Pillow>=10.0.1,<=15.0", - "accelerate": "accelerate>=0.20.3", + "accelerate": "accelerate>=0.21.0", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "codecarbon": "codecarbon==1.2.0", diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index e2b27de7d1e5..d60d795a0f93 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -132,8 +132,12 @@ def is_fsdp_enabled(): ) -def is_fsdp_enabled_and_dist_rank_0(): - return is_fsdp_enabled() and int(os.environ.get("LOCAL_RANK", -1)) == 0 +def is_local_dist_rank_0(): + return ( + torch.distributed.is_available() + and torch.distributed.is_initialized() + and int(os.environ.get("LOCAL_RANK", -1)) == 0 + ) if is_sagemaker_mp_enabled(): @@ -474,13 +478,12 @@ def load_state_dict(checkpoint_file: Union[str, os.PathLike]): return safe_load_file(checkpoint_file) try: if ( - (is_deepspeed_zero3_enabled() or is_fsdp_enabled()) - and torch.distributed.is_initialized() - and torch.distributed.get_rank() > 0 - ): + is_deepspeed_zero3_enabled() and torch.distributed.is_initialized() and torch.distributed.get_rank() > 0 + ) or (is_fsdp_enabled() and not is_local_dist_rank_0()): map_location = "meta" else: map_location = "cpu" + return torch.load(checkpoint_file, map_location=map_location) except Exception as e: try: @@ -3904,7 +3907,18 @@ def _find_mismatched_keys( ignore_mismatched_sizes, ) if low_cpu_mem_usage: - if not is_fsdp_enabled() or is_fsdp_enabled_and_dist_rank_0(): + if is_fsdp_enabled() and not is_local_dist_rank_0(): + for key, param in model_to_load.state_dict().items(): + if param.device == torch.device("meta"): + if not (is_quantized): + set_module_tensor_to_device( + model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype) + ) + else: + set_module_quantized_tensor_to_device( + model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype) + ) + else: new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model( model_to_load, state_dict, @@ -3922,17 +3936,6 @@ def _find_mismatched_keys( keep_in_fp32_modules=keep_in_fp32_modules, ) error_msgs += new_error_msgs - else: - for key, param in model_to_load.state_dict().items(): - if param.device == torch.device("meta"): - if not (is_quantized): - set_module_tensor_to_device( - model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype) - ) - else: - set_module_quantized_tensor_to_device( - model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype) - ) else: error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 0bb123d0e7c2..7a4fcd129cb3 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -99,7 +99,6 @@ BestRun, EvalLoopOutput, EvalPrediction, - FSDPOption, HPSearchBackend, HubStrategy, IntervalStrategy, @@ -193,15 +192,15 @@ if is_accelerate_available(): from accelerate import Accelerator, skip_first_batches from accelerate import __version__ as accelerate_version - from accelerate.utils import DistributedDataParallelKwargs, GradientAccumulationPlugin - - if version.parse(accelerate_version) > version.parse("0.20.3"): - from accelerate.utils import ( - load_fsdp_model, - load_fsdp_optimizer, - save_fsdp_model, - save_fsdp_optimizer, - ) + from accelerate.utils import ( + DistributedDataParallelKwargs, + GradientAccumulationPlugin, + load_fsdp_model, + load_fsdp_optimizer, + save_fsdp_model, + 
save_fsdp_optimizer, + ) + DATA_SAMPLERS = [RandomSampler] if version.parse(accelerate_version) > version.parse("0.23.0"): from accelerate.data_loader import SeedableRandomSampler @@ -226,6 +225,7 @@ OPTIMIZER_NAME_BIN = "optimizer.bin" SCHEDULER_NAME = "scheduler.pt" SCALER_NAME = "scaler.pt" +FSDP_MODEL_NAME = "pytorch_model_fsdp" class Trainer: @@ -415,7 +415,7 @@ def __init__( " model, please make sure that you have installed `bitsandbytes>=0.37.0`. " ) - self.fsdp = None + self.is_fsdp_xla_enabled = args.fsdp_config["xla"] if len(args.fsdp) > 0: if self.is_deepspeed_enabled: raise ValueError( @@ -424,32 +424,6 @@ def __init__( if not args.fsdp_config["xla"] and args.parallel_mode != ParallelMode.DISTRIBUTED: raise ValueError("Using fsdp only works in distributed training.") - # dep_version_check("torch>=1.12.0") - # Would have to update setup.py with torch>=1.12.0 - # which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0 - # below is the current alternative. - if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"): - raise ValueError("FSDP requires PyTorch >= 1.12.0") - - from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy - - if FSDPOption.FULL_SHARD in args.fsdp: - self.fsdp = ShardingStrategy.FULL_SHARD - elif FSDPOption.SHARD_GRAD_OP in args.fsdp: - self.fsdp = ShardingStrategy.SHARD_GRAD_OP - elif FSDPOption.NO_SHARD in args.fsdp: - self.fsdp = ShardingStrategy.NO_SHARD - - self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE - if "backward_prefetch" in self.args.fsdp_config and "backward_post" in self.args.fsdp_config.get( - "backward_prefetch", [] - ): - self.backward_prefetch = BackwardPrefetch.BACKWARD_POST - - self.limit_all_gathers = False - if self.args.fsdp_config.get("limit_all_gathers", False): - self.limit_all_gathers = True - # one place to sort out whether to place the model on device or not # postpone switching model to cuda when: # 1. MP - since we are trying to fit a much bigger than 1 gpu model @@ -462,7 +436,7 @@ def __init__( self.is_model_parallel or self.is_deepspeed_enabled or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train) - or (self.fsdp is not None) + or self.is_fsdp_xla_enabled or self.is_fsdp_enabled ): self.place_model_on_device = False @@ -513,7 +487,7 @@ def __init__( " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." 
) - if (self.is_deepspeed_enabled or (self.fsdp is not None)) and ( + if (self.is_deepspeed_enabled or self.is_fsdp_xla_enabled or self.is_fsdp_enabled) and ( self.optimizer is not None or self.lr_scheduler is not None ): raise RuntimeError( @@ -1367,7 +1341,7 @@ def _wrap_model(self, model, training=True, dataloader=None): # Distributed training (should be after apex fp16 initialization) # Distributed training using PyTorch FSDP - if self.fsdp is not None and self.args.fsdp_config["xla"]: + if self.is_fsdp_xla_enabled: try: from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP from torch_xla.distributed.fsdp import checkpoint_module @@ -1626,7 +1600,7 @@ def _inner_training_loop( else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa - delay_optimizer_creation = is_sagemaker_mp_enabled() or self.fsdp is not None or self.is_fsdp_enabled + delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled # We need to reset the scheduler, as its parameters may be different on subsequent calls if self._created_lr_scheduler: @@ -1676,8 +1650,6 @@ def _inner_training_loop( use_accelerator_prepare = True if model is self.model else False if delay_optimizer_creation: - if use_accelerator_prepare: - self.model = self.accelerator.prepare(self.model) self.create_optimizer_and_scheduler(num_training_steps=max_steps) # prepare using `accelerator` prepare @@ -1895,9 +1867,7 @@ def _inner_training_loop( ): # the `or` condition of `is_last_step_and_steps_less_than_grad_acc` is not covered # in accelerate. So, explicitly enable sync gradients to True in that case. - if is_last_step_and_steps_less_than_grad_acc or ( - version.parse(accelerate_version) <= version.parse("0.20.3") - ): + if is_last_step_and_steps_less_than_grad_acc: self.accelerator.gradient_state._set_sync_gradients(True) # Gradient clipping @@ -2051,7 +2021,7 @@ def _load_from_checkpoint(self, resume_from_checkpoint, model=None): safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME) safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME) is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and any( - WEIGHTS_NAME.split(".")[0] in folder_name + FSDP_MODEL_NAME in folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) ) @@ -2360,56 +2330,12 @@ def _save_checkpoint(self, model, trial, metrics=None): run_dir = self._get_output_dir(trial=trial) output_dir = os.path.join(run_dir, checkpoint_folder) self.save_model(output_dir, _internal_call=True) - if self.is_deepspeed_enabled: - # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed - # config `stage3_gather_16bit_weights_on_model_save` is True - self.model_wrapped.save_checkpoint(output_dir) - # Save optimizer and scheduler - if self.fsdp or self.is_fsdp_enabled: - if self.is_fsdp_enabled: - save_fsdp_optimizer( - self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir - ) - else: - # FSDP has a different interface for saving optimizer states. - # Needs to be called on all ranks to gather all states. - # full_optim_state_dict will be deprecated after Pytorch 2.2! 
- full_osd = self.model.__class__.full_optim_state_dict(self.model, self.optimizer) - torch.save(full_osd, os.path.join(output_dir, OPTIMIZER_NAME)) - - if is_torch_tpu_available(): - xm.rendezvous("saving_optimizer_states") - xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) - with warnings.catch_warnings(record=True) as caught_warnings: - xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) - reissue_pt_warnings(caught_warnings) - elif is_sagemaker_mp_enabled(): - opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) - smp.barrier() - if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: - smp.save( - opt_state_dict, - os.path.join(output_dir, OPTIMIZER_NAME), - partial=True, - v3=smp.state.cfg.shard_optimizer_state, - ) - elif self.args.should_save and not self.is_deepspeed_enabled and not (self.fsdp or self.is_fsdp_enabled): - # deepspeed.save_checkpoint above saves model/optim/sched - torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) - - # Save SCHEDULER & SCALER - is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance( - self.lr_scheduler, DeepSpeedSchedulerWrapper - ) - if ( - self.args.should_save - and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) - and not is_torch_tpu_available() - ): - with warnings.catch_warnings(record=True) as caught_warnings: - torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) - reissue_pt_warnings(caught_warnings) + if not self.args.save_only_model: + # Save optimizer and scheduler + self._save_optimizer_and_scheduler(output_dir) + # Save RNG state + self._save_rng_state(output_dir) # Determine the new best metric / best model checkpoint if metrics is not None and self.args.metric_for_best_model is not None: @@ -2431,6 +2357,14 @@ def _save_checkpoint(self, model, trial, metrics=None): if self.args.should_save: self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) + if self.args.push_to_hub: + self._push_from_checkpoint(output_dir) + + # Maybe delete some older checkpoints. + if self.args.should_save: + self._rotate_checkpoints(use_mtime=True, output_dir=run_dir) + + def _save_rng_state(self, output_dir): # Save RNG state in non-distributed training rng_states = { "python": random.getstate(), @@ -2462,12 +2396,49 @@ def _save_checkpoint(self, model, trial, metrics=None): else: torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth")) - if self.args.push_to_hub: - self._push_from_checkpoint(output_dir) + def _save_optimizer_and_scheduler(self, output_dir): + if is_torch_tpu_available(): + xm.rendezvous("saving_optimizer_states") + xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + with warnings.catch_warnings(record=True) as caught_warnings: + xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + elif is_sagemaker_mp_enabled(): + opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) + smp.barrier() + if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: + smp.save( + opt_state_dict, + os.path.join(output_dir, OPTIMIZER_NAME), + partial=True, + v3=smp.state.cfg.shard_optimizer_state, + ) + elif self.is_deepspeed_enabled: + # under zero3 model file itself doesn't get saved since it's bogus! 
Unless deepspeed + # config `stage3_gather_16bit_weights_on_model_save` is True + self.model_wrapped.save_checkpoint(output_dir) + elif self.is_fsdp_enabled: + # save fsdp specific ckpt for resuming from ckpt + save_fsdp_model(self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir) + save_fsdp_optimizer( + self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir + ) + elif self.args.should_save: + # deepspeed.save_checkpoint above saves model/optim/sched + torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) - # Maybe delete some older checkpoints. - if self.args.should_save: - self._rotate_checkpoints(use_mtime=True, output_dir=run_dir) + # Save SCHEDULER & SCALER + is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance( + self.lr_scheduler, DeepSpeedSchedulerWrapper + ) + if ( + self.args.should_save + and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) + and not is_torch_tpu_available() + ): + with warnings.catch_warnings(record=True) as caught_warnings: + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) def _load_optimizer_and_scheduler(self, checkpoint): """If optimizer and scheduler states exist, load them.""" @@ -2535,23 +2506,14 @@ def opt_load_hook(mod, opt): # In distributed training however, we load directly on each GPU and risk the GPU OOM as it's more # likely to get OOM on CPU (since we load num_gpu times the optimizer state map_location = self.args.device if self.args.world_size > 1 else "cpu" - if self.fsdp or self.is_fsdp_enabled: - if self.is_fsdp_enabled: - load_fsdp_optimizer( - self.accelerator.state.fsdp_plugin, - self.accelerator, - self.optimizer, - self.model, - checkpoint, - ) - else: - full_osd = None - # In FSDP, we need to load the full optimizer state dict on rank 0 and then shard it - if self.args.process_index == 0: - full_osd = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME)) - # call scatter_full_optim_state_dict on all ranks - sharded_osd = self.model.__class__.scatter_full_optim_state_dict(full_osd, self.model) - self.optimizer.load_state_dict(sharded_osd) + if self.is_fsdp_enabled: + load_fsdp_optimizer( + self.accelerator.state.fsdp_plugin, + self.accelerator, + self.optimizer, + self.model, + checkpoint, + ) else: self.optimizer.load_state_dict( torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location) @@ -2826,19 +2788,14 @@ def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = Fa if IS_SAGEMAKER_MP_POST_1_10: # 'user_content.pt' indicates model state_dict saved with smp >= 1.10 Path(os.path.join(output_dir, "user_content.pt")).touch() - elif self.fsdp is not None or self.is_fsdp_enabled: - state_dict = self.model.state_dict() if not self.is_fsdp_enabled else {} - if self.args.should_save: - self._save(output_dir, state_dict=state_dict) - if self.is_fsdp_enabled: - # remove the dummy state_dict - remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) - save_fsdp_model(self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir) - + elif self.is_fsdp_enabled: + if ("FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)) and ( + version.parse(accelerate_version) > version.parse("0.24.1") + ): + state_dict = self.accelerator.get_state_dict(self.model) + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) elif 
self.is_deepspeed_enabled: - # this takes care of everything as long as we aren't under zero3 - if version.parse(accelerate_version) <= version.parse("0.20.3"): - raise ValueError("Install Accelerate from main branch") try: state_dict = self.accelerator.get_state_dict(self.deepspeed) if self.args.should_save: @@ -3247,11 +3204,7 @@ def evaluation_loop( self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. - if ( - args.eval_accumulation_steps is not None - and (step + 1) % args.eval_accumulation_steps == 0 - and (self.accelerator.sync_gradients or version.parse(accelerate_version) > version.parse("0.20.3")) - ): + if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: if losses_host is not None: losses = nested_numpify(losses_host) all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) @@ -3877,8 +3830,7 @@ def _add_sm_patterns_to_gitignore(self) -> None: def create_accelerator_and_postprocess(self): grad_acc_kwargs = {"num_steps": self.args.gradient_accumulation_steps} - if version.parse(accelerate_version) > version.parse("0.20.3"): - grad_acc_kwargs["sync_with_dataloader"] = False + grad_acc_kwargs["sync_with_dataloader"] = False gradient_accumulation_plugin = GradientAccumulationPlugin(**grad_acc_kwargs) # create accelerator object diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index e6f26d0df519..dbd868d11202 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -727,6 +727,8 @@ class FSDPOption(ExplicitEnum): FULL_SHARD = "full_shard" SHARD_GRAD_OP = "shard_grad_op" NO_SHARD = "no_shard" + HYBRID_SHARD = "hybrid_shard" + HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2" OFFLOAD = "offload" AUTO_WRAP = "auto_wrap" diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 430ec257426d..1ee8b2c241a6 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -304,6 +304,11 @@ class TrainingArguments: This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node. + save_only_model (`bool`, *optional*, defaults to `False`): + When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state. + Note that when this is true, you won't be able to resume training from checkpoint. + This enables you to save storage by not storing the optimizer, scheduler & rng state. + You can only load the model using `from_pretrained` with this option set to `True`. use_cpu (`bool`, *optional*, defaults to `False`): Whether or not to use cpu. If set to False, we will use cuda or mps device if available. seed (`int`, *optional*, defaults to 42): @@ -418,12 +423,14 @@ class TrainingArguments: - `"full_shard"`: Shard parameters, gradients and optimizer states. - `"shard_grad_op"`: Shard optimizer states and gradients. + - `"hybrid_shard"`: Apply `FULL_SHARD` within a node, and replicate parameters across nodes. + - `"hybrid_shard_zero2"`: Apply `SHARD_GRAD_OP` within a node, and replicate parameters across nodes. - `"offload"`: Offload parameters and gradients to CPUs (only compatible with `"full_shard"` and `"shard_grad_op"`). - `"auto_wrap"`: Automatically recursively wrap layers with FSDP using `default_auto_wrap_policy`. 
fsdp_config (`str` or `dict`, *optional*): Config to be used with fsdp (Pytorch Distributed Parallel Training). The value is either a location of - deepspeed json config file (e.g., `ds_config.json`) or an already loaded json file as `dict`. + fsdp json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`. A List of config and its options: - min_num_params (`int`, *optional*, defaults to `0`): @@ -452,7 +459,7 @@ class TrainingArguments: FSDP's limit_all_gathers (useful only when `fsdp` field is passed). If `"True"`, FSDP explicitly synchronizes the CPU thread to prevent too many in-flight all-gathers. - - use_orig_params (`bool`, *optional*, defaults to `False`) + - use_orig_params (`bool`, *optional*, defaults to `True`) If `"True"`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres. Useful in cases such as parameter-efficient fine-tuning. Please refer this @@ -460,6 +467,10 @@ class TrainingArguments: - sync_module_states (`bool`, *optional*, defaults to `True`) If `"True"`, each individually wrapped FSDP unit will broadcast module parameters from rank 0 to ensure they are the same across all ranks after initialization + - activation_checkpointing (`bool`, *optional*, defaults to `False`): + If `"True"`, activation checkpointing is a technique to reduce memory usage by clearing activations of + certain layers and recomputing them during a backward pass. Effectively, this trades extra + computation time for reduced memory usage. - xla (`bool`, *optional*, defaults to `False`): Whether to use PyTorch/XLA Fully Sharded Data Parallel Training. This is an experimental feature and its API may evolve in the future. @@ -472,10 +483,6 @@ class TrainingArguments: Will use gradient checkpointing over each nested XLA FSDP wrapped layer. This setting can only be used when the xla flag is set to true, and an auto wrapping policy is specified through fsdp_min_num_params or fsdp_transformer_layer_cls_to_wrap. - - activation_checkpointing (`bool`, *optional*, defaults to `False`): - If True, activation checkpointing is a technique to reduce memory usage by clearing activations of - certain layers and recomputing them during a backward pass. Effectively, this trades extra - computation time for reduced memory usage. deepspeed (`str` or `dict`, *optional*): Use [Deepspeed](https://github.com/microsoft/deepspeed). This is an experimental feature and its API may @@ -835,6 +842,17 @@ class TrainingArguments: ) }, ) + save_only_model: bool = field( + default=False, + metadata={ + "help": ( + "When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state." + "Note that when this is true, you won't be able to resume training from checkpoint." + "This enables you to save storage by not storing the optimizer, scheduler & rng state." + "You can only load the model using from_pretrained with this option set to True." + ) + }, + ) no_cuda: bool = field( default=False, metadata={"help": "This argument is deprecated. 
It will be removed in version 5.0 of 🤗 Transformers."}, @@ -1670,7 +1688,7 @@ def __post_init__(self): os.environ[f"{prefix}BACKWARD_PREFETCH"] = prefetch_policy.upper() os.environ[f"{prefix}FORWARD_PREFETCH"] = self.fsdp_config.get("forward_prefect", "false") os.environ[f"{prefix}SYNC_MODULE_STATES"] = self.fsdp_config.get("sync_module_states", "true") - os.environ[f"{prefix}USE_ORIG_PARAMS"] = self.fsdp_config.get("use_orig_params", "false") + os.environ[f"{prefix}USE_ORIG_PARAMS"] = self.fsdp_config.get("use_orig_params", "true") if self.tpu_metrics_debug: warnings.warn( diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index c4862b197c97..beb6c4779573 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -652,7 +652,7 @@ def is_protobuf_available(): return importlib.util.find_spec("google.protobuf") is not None -def is_accelerate_available(min_version: str = None): +def is_accelerate_available(min_version: str = "0.21.0"): if min_version is not None: return _accelerate_available and version.parse(_accelerate_version) >= version.parse(min_version) return _accelerate_available From 623432dcc925627c1925511298d1c11161cce81f Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 24 Nov 2023 09:43:20 +0100 Subject: [PATCH 249/268] Skip pipeline tests for 2 models for now (#27687) fix Co-authored-by: ydshieh --- tests/models/falcon/test_modeling_falcon.py | 6 ++++++ tests/models/mistral/test_modeling_mistral.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/tests/models/falcon/test_modeling_falcon.py b/tests/models/falcon/test_modeling_falcon.py index 5956a9ed6bf0..75204637bd07 100644 --- a/tests/models/falcon/test_modeling_falcon.py +++ b/tests/models/falcon/test_modeling_falcon.py @@ -298,6 +298,12 @@ class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix test_headmasking = False test_pruning = False + # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 + def is_pipeline_test_to_skip( + self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name + ): + return True + def setUp(self): self.model_tester = FalconModelTester(self) self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37) diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index 0c28f46d5ec2..cedcdeb4b9f3 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -301,6 +301,12 @@ class MistralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi test_headmasking = False test_pruning = False + # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 + def is_pipeline_test_to_skip( + self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name + ): + return True + def setUp(self): self.model_tester = MistralModelTester(self) self.config_tester = ConfigTester(self, config_class=MistralConfig, hidden_size=37) From 7293fdc5b9cc809c2aa2ceb84f903ad47e5c06f0 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 24 Nov 2023 11:48:02 +0100 Subject: [PATCH 250/268] Deprecate `TransfoXL` (#27607) * fix * fix * trigger * Apply suggestions from code review Co-authored-by: Lysandre Debut * tic * revert * revert --------- Co-authored-by: ydshieh Co-authored-by: Lysandre Debut --- docs/source/en/model_doc/transfo-xl.md | 31 +- .../source/es/converting_tensorflow_models.md | 14 - .../source/it/converting_tensorflow_models.md | 15 - .../source/pt/converting_tensorflow_models.md | 14 - src/transformers/__init__.py | 104 ++-- src/transformers/commands/convert.py | 21 +- src/transformers/models/__init__.py | 1 - .../models/auto/configuration_auto.py | 3 + .../{ => deprecated}/transfo_xl/__init__.py | 2 +- .../transfo_xl/configuration_transfo_xl.py | 10 +- ...fo_xl_original_tf_checkpoint_to_pytorch.py | 4 +- .../transfo_xl/modeling_tf_transfo_xl.py | 6 +- .../modeling_tf_transfo_xl_utilities.py | 2 +- .../transfo_xl/modeling_transfo_xl.py | 4 +- .../modeling_transfo_xl_utilities.py | 0 .../transfo_xl/tokenization_transfo_xl.py | 4 +- src/transformers/utils/dummy_pt_objects.py | 84 +-- src/transformers/utils/dummy_tf_objects.py | 90 +-- tests/generation/test_utils.py | 6 +- tests/models/transfo_xl/__init__.py | 0 .../transfo_xl/test_modeling_tf_transfo_xl.py | 282 --------- .../transfo_xl/test_modeling_transfo_xl.py | 533 ------------------ .../test_tokenization_transfo_xl.py | 156 ----- tests/test_modeling_common.py | 1 - tests/test_modeling_tf_common.py | 1 - utils/check_config_attributes.py | 1 - utils/check_docstrings.py | 1 - utils/check_repo.py | 2 - utils/not_doctested.txt | 12 +- 29 files changed, 194 insertions(+), 1210 deletions(-) rename src/transformers/models/{ => deprecated}/transfo_xl/__init__.py (96%) rename src/transformers/models/{ => deprecated}/transfo_xl/configuration_transfo_xl.py (97%) rename src/transformers/models/{ => deprecated}/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py (95%) mode change 100755 => 100644 rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_tf_transfo_xl.py (99%) rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_tf_transfo_xl_utilities.py (99%) rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_transfo_xl.py (99%) rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_transfo_xl_utilities.py (100%) rename src/transformers/models/{ => deprecated}/transfo_xl/tokenization_transfo_xl.py (99%) delete mode 100644 tests/models/transfo_xl/__init__.py delete mode 100644 tests/models/transfo_xl/test_modeling_tf_transfo_xl.py delete mode 100644 tests/models/transfo_xl/test_modeling_transfo_xl.py delete mode 100644 tests/models/transfo_xl/test_tokenization_transfo_xl.py diff --git a/docs/source/en/model_doc/transfo-xl.md b/docs/source/en/model_doc/transfo-xl.md index d75e3a37b990..05afc76f1114 100644 --- a/docs/source/en/model_doc/transfo-xl.md +++ b/docs/source/en/model_doc/transfo-xl.md @@ -16,6 +16,29 @@ rendered properly in your 
Markdown viewer. # Transformer XL + + +This model is in maintenance mode only, so we won't accept any new PRs changing its code. This model was deprecated due to security issues linked to `pickle.load`. + +We recommend switching to more recent models for improved security. + +In case you would still like to use `TransfoXL` in your experiments, we recommend using the [Hub checkpoint](https://huggingface.co/transfo-xl-wt103) with a specific revision to ensure you are downloading safe files from the Hub: + +``` +from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel + +checkpoint = 'transfo-xl-wt103' +revision = '40a186da79458c9f9de846edfaea79c412137f97' + +tokenizer = TransfoXLTokenizer.from_pretrained(checkpoint, revision=revision) +model = TransfoXLLMHeadModel.from_pretrained(checkpoint, revision=revision) +``` + +If you run into any issues running this model, please reinstall the last version that supported this model: v4.35.0. +You can do so by running the following command: `pip install -U transformers==4.35.0`. + + +
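For illustration, a minimal sketch of the "switch to a more recent model" path recommended in the tip above, assuming the `gpt2` checkpoint and the standard `Auto*` classes (an example substitute, not an official drop-in replacement and not part of this patch):

```python
# Illustrative only: replace the deprecated TransfoXL checkpoint with a maintained
# causal LM. "gpt2" is just an example checkpoint, not an exact equivalent.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("In 1991, the remains of Russian Tsar Nicholas II", return_tensors="pt")
# Greedy decoding, mirroring the do_sample=False generation used in the removed TransfoXL tests.
output_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```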
Models @@ -79,13 +102,13 @@ TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyT ## TransfoXL specific outputs -[[autodoc]] models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput -[[autodoc]] models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput -[[autodoc]] models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput -[[autodoc]] models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput diff --git a/docs/source/es/converting_tensorflow_models.md b/docs/source/es/converting_tensorflow_models.md index c7e22bddac70..8e5b1ad1e288 100644 --- a/docs/source/es/converting_tensorflow_models.md +++ b/docs/source/es/converting_tensorflow_models.md @@ -96,20 +96,6 @@ transformers-cli convert --model_type gpt2 \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] ``` -## Transformer-XL - -Aquí hay un ejemplo del proceso para convertir un modelo Transformer-XL pre-entrenado (más información [aquí](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models)): - -```bash -export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint - -transformers-cli convert --model_type transfo_xl \ - --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \ - --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ - [--config TRANSFO_XL_CONFIG] \ - [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK] -``` - ## XLNet Aquí hay un ejemplo del proceso para convertir un modelo XLNet pre-entrenado: diff --git a/docs/source/it/converting_tensorflow_models.md b/docs/source/it/converting_tensorflow_models.md index 04398636359c..f6326daa735f 100644 --- a/docs/source/it/converting_tensorflow_models.md +++ b/docs/source/it/converting_tensorflow_models.md @@ -104,21 +104,6 @@ transformers-cli convert --model_type gpt2 \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] ``` -## Transformer-XL - - -Ecco un esempio del processo di conversione di un modello Transformer-XL pre-allenato -(vedi [qui](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models)): - -```bash -export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint -transformers-cli convert --model_type transfo_xl \ - --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \ - --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ - [--config TRANSFO_XL_CONFIG] \ - [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK] -``` - ## XLNet Ecco un esempio del processo di conversione di un modello XLNet pre-allenato: diff --git a/docs/source/pt/converting_tensorflow_models.md b/docs/source/pt/converting_tensorflow_models.md index ac1271d2764b..97767b2ad420 100644 --- a/docs/source/pt/converting_tensorflow_models.md +++ b/docs/source/pt/converting_tensorflow_models.md @@ -109,20 +109,6 @@ transformers-cli convert --model_type gpt2 \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] ``` -## Transformer-XL - -Aqui está um exemplo do processo de conversão para um modelo Transformer-XL pré-treinado (consulte [aqui](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-modelos-sota)) - -```bash -export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint - 
-transformers-cli convert --model_type transfo_xl \ - --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \ - --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ - [--config TRANSFO_XL_CONFIG] \ - [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK] -``` - ## XLNet Aqui está um exemplo do processo de conversão para um modelo XLNet pré-treinado: diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index e09752f5f39c..3c3b70fd7244 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -307,6 +307,12 @@ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], + "models.deprecated.transfo_xl": [ + "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TransfoXLConfig", + "TransfoXLCorpus", + "TransfoXLTokenizer", + ], "models.deprecated.van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"], "models.deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"], "models.detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig"], @@ -580,12 +586,6 @@ ], "models.timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"], "models.timm_backbone": ["TimmBackboneConfig"], - "models.transfo_xl": [ - "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TransfoXLConfig", - "TransfoXLCorpus", - "TransfoXLTokenizer", - ], "models.trocr": [ "TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig", @@ -1661,6 +1661,17 @@ "TrajectoryTransformerPreTrainedModel", ] ) + _import_structure["models.deprecated.transfo_xl"].extend( + [ + "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", + "AdaptiveEmbedding", + "TransfoXLForSequenceClassification", + "TransfoXLLMHeadModel", + "TransfoXLModel", + "TransfoXLPreTrainedModel", + "load_tf_weights_in_transfo_xl", + ] + ) _import_structure["models.deprecated.van"].extend( [ "VAN_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -2919,17 +2930,6 @@ ] ) _import_structure["models.timm_backbone"].extend(["TimmBackbone"]) - _import_structure["models.transfo_xl"].extend( - [ - "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", - "AdaptiveEmbedding", - "TransfoXLForSequenceClassification", - "TransfoXLLMHeadModel", - "TransfoXLModel", - "TransfoXLPreTrainedModel", - "load_tf_weights_in_transfo_xl", - ] - ) _import_structure["models.trocr"].extend( ["TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel"] ) @@ -3525,6 +3525,17 @@ "TFDeiTPreTrainedModel", ] ) + _import_structure["models.deprecated.transfo_xl"].extend( + [ + "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFAdaptiveEmbedding", + "TFTransfoXLForSequenceClassification", + "TFTransfoXLLMHeadModel", + "TFTransfoXLMainLayer", + "TFTransfoXLModel", + "TFTransfoXLPreTrainedModel", + ] + ) _import_structure["models.distilbert"].extend( [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3889,17 +3900,6 @@ "TFTapasPreTrainedModel", ] ) - _import_structure["models.transfo_xl"].extend( - [ - "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", - "TFAdaptiveEmbedding", - "TFTransfoXLForSequenceClassification", - "TFTransfoXLLMHeadModel", - "TFTransfoXLMainLayer", - "TFTransfoXLModel", - "TFTransfoXLPreTrainedModel", - ] - ) _import_structure["models.vision_encoder_decoder"].extend(["TFVisionEncoderDecoderModel"]) _import_structure["models.vision_text_dual_encoder"].extend(["TFVisionTextDualEncoderModel"]) _import_structure["models.vit"].extend( @@ -4552,6 +4552,12 @@ TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) + from .models.deprecated.transfo_xl import ( + TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, + TransfoXLConfig, + 
TransfoXLCorpus, + TransfoXLTokenizer, + ) from .models.deprecated.van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig from .models.deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig @@ -4812,12 +4818,6 @@ ) from .models.timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig from .models.timm_backbone import TimmBackboneConfig - from .models.transfo_xl import ( - TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, - TransfoXLConfig, - TransfoXLCorpus, - TransfoXLTokenizer, - ) from .models.trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig, TrOCRProcessor from .models.tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig, TvltFeatureExtractor, TvltProcessor from .models.tvp import ( @@ -5746,6 +5746,15 @@ TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, ) + from .models.deprecated.transfo_xl import ( + TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + AdaptiveEmbedding, + TransfoXLForSequenceClassification, + TransfoXLLMHeadModel, + TransfoXLModel, + TransfoXLPreTrainedModel, + load_tf_weights_in_transfo_xl, + ) from .models.deprecated.van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, @@ -6774,15 +6783,6 @@ TimesformerPreTrainedModel, ) from .models.timm_backbone import TimmBackbone - from .models.transfo_xl import ( - TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, - AdaptiveEmbedding, - TransfoXLForSequenceClassification, - TransfoXLLMHeadModel, - TransfoXLModel, - TransfoXLPreTrainedModel, - load_tf_weights_in_transfo_xl, - ) from .models.trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel from .models.tvlt import ( TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, @@ -7269,6 +7269,15 @@ TFDeiTModel, TFDeiTPreTrainedModel, ) + from .models.deprecated.transfo_xl import ( + TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFAdaptiveEmbedding, + TFTransfoXLForSequenceClassification, + TFTransfoXLLMHeadModel, + TFTransfoXLMainLayer, + TFTransfoXLModel, + TFTransfoXLPreTrainedModel, + ) from .models.distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, @@ -7554,15 +7563,6 @@ TFTapasModel, TFTapasPreTrainedModel, ) - from .models.transfo_xl import ( - TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, - TFAdaptiveEmbedding, - TFTransfoXLForSequenceClassification, - TFTransfoXLLMHeadModel, - TFTransfoXLMainLayer, - TFTransfoXLModel, - TFTransfoXLPreTrainedModel, - ) from .models.vision_encoder_decoder import TFVisionEncoderDecoderModel from .models.vision_text_dual_encoder import TFVisionTextDualEncoderModel from .models.vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel diff --git a/src/transformers/commands/convert.py b/src/transformers/commands/convert.py index b46e14f5a673..77df8ea11064 100644 --- a/src/transformers/commands/convert.py +++ b/src/transformers/commands/convert.py @@ -123,23 +123,6 @@ def run(self): ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "transfo_xl": - try: - from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( - convert_transfo_xl_checkpoint_to_pytorch, - ) - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - if "ckpt" in self._tf_checkpoint.lower(): - TF_CHECKPOINT = self._tf_checkpoint - TF_DATASET_FILE = "" - else: - TF_DATASET_FILE = self._tf_checkpoint - TF_CHECKPOINT = "" - convert_transfo_xl_checkpoint_to_pytorch( - 
TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE - ) elif self._model_type == "gpt2": try: from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import ( @@ -179,6 +162,4 @@ def run(self): convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) else: - raise ValueError( - "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" - ) + raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, t5, xlnet, xlm, lxmert]") diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 997ee82b4324..317402650e54 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -204,7 +204,6 @@ time_series_transformer, timesformer, timm_backbone, - transfo_xl, trocr, tvlt, tvp, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 78a33270e7ac..663d95159992 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -706,6 +706,8 @@ ] ) +# This is tied to the processing `-` -> `_` in `model_type_to_module_name`. For example, instead of putting +# `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`. DEPRECATED_MODELS = [ "bort", "mctct", @@ -714,6 +716,7 @@ "retribert", "tapex", "trajectory_transformer", + "transfo_xl", "van", ] diff --git a/src/transformers/models/transfo_xl/__init__.py b/src/transformers/models/deprecated/transfo_xl/__init__.py similarity index 96% rename from src/transformers/models/transfo_xl/__init__.py rename to src/transformers/models/deprecated/transfo_xl/__init__.py index ce4215b0217b..f3674e19665c 100644 --- a/src/transformers/models/transfo_xl/__init__.py +++ b/src/transformers/models/deprecated/transfo_xl/__init__.py @@ -14,7 +14,7 @@ from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = { diff --git a/src/transformers/models/transfo_xl/configuration_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py similarity index 97% rename from src/transformers/models/transfo_xl/configuration_transfo_xl.py rename to src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py index 8550e7180286..842c1643a00b 100644 --- a/src/transformers/models/transfo_xl/configuration_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py @@ -15,8 +15,8 @@ # limitations under the License. """ Transformer XL configuration""" -from ...configuration_utils import PretrainedConfig -from ...utils import logging +from ....configuration_utils import PretrainedConfig +from ....utils import logging logger = logging.get_logger(__name__) @@ -74,7 +74,7 @@ class TransfoXLConfig(PretrainedConfig): Whether or not to use adaptive softmax. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - dropatt (`float`, *optional*, defaults to 0): + dropatt (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. untie_r (`boolean`, *optional*, defaults to `True`): Whether ot not to untie relative position biases. 
@@ -86,8 +86,10 @@ class TransfoXLConfig(PretrainedConfig): Parameters initialized by N(0, init_std) init_std (`float`, *optional*, defaults to 0.02): Parameters initialized by N(0, init_std) - layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): + layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers + eos_token_id (`int`, *optional*, defaults to 0): + End of stream token id. Examples: diff --git a/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py old mode 100755 new mode 100644 similarity index 95% rename from src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py index 646c8a2342fc..d2693ac333b8 --- a/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py @@ -23,8 +23,8 @@ import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl -from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils -from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES +from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils +from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py similarity index 99% rename from src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py rename to src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py index 88005b7e0600..45a4ea56fd78 100644 --- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py @@ -25,7 +25,7 @@ import numpy as np import tensorflow as tf -from ...modeling_tf_utils import ( +from ....modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, @@ -33,8 +33,8 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax -from ...utils import ( +from ....tf_utils import shape_list, stable_softmax +from ....utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py similarity index 99% rename from src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py rename to src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py index dcfa84d0f94b..c6a380842e48 100644 --- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py +++ b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py @@ -20,7 +20,7 @@ import tensorflow as tf -from ...tf_utils import shape_list +from ....tf_utils import shape_list class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer): diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py 
b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py similarity index 99% rename from src/transformers/models/transfo_xl/modeling_transfo_xl.py rename to src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py index 1e944c335ae7..2d343bd71571 100644 --- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py @@ -25,8 +25,8 @@ from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss -from ...modeling_utils import PreTrainedModel -from ...utils import ( +from ....modeling_utils import PreTrainedModel +from ....utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py similarity index 100% rename from src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py rename to src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py diff --git a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py similarity index 99% rename from src/transformers/models/transfo_xl/tokenization_transfo_xl.py rename to src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py index eaa5ecee4ba3..7f1ab6cd13c8 100644 --- a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py @@ -27,8 +27,8 @@ import numpy as np -from ...tokenization_utils import PreTrainedTokenizer -from ...utils import ( +from ....tokenization_utils import PreTrainedTokenizer +from ....utils import ( cached_file, is_sacremoses_available, is_torch_available, diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 278a97592c77..80f61489382d 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -2676,6 +2676,48 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AdaptiveEmbedding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_transfo_xl(*args, **kwargs): + requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) + + VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None @@ -7739,48 +7781,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -class AdaptiveEmbedding(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class 
TransfoXLForSequenceClassification(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class TransfoXLLMHeadModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class TransfoXLModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class TransfoXLPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -def load_tf_weights_in_transfo_xl(*args, **kwargs): - requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) - - TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 5bc238f54278..2099c18bcd71 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -1075,6 +1075,51 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFAdaptiveEmbedding(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None @@ -2613,51 +2658,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -class TFAdaptiveEmbedding(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLForSequenceClassification(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLLMHeadModel(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLMainLayer(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLModel(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLPreTrainedModel(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - class TFVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 729c7f873404..f4050c582b8f 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -104,11 +104,7 @@ def 
_get_input_ids_and_config(self, batch_size=2): if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] - # TransfoXL has no attention mask - if "transfoxl" in config.__class__.__name__.lower(): - attention_mask = None - else: - attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:batch_size, :sequence_length] + attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:batch_size, :sequence_length] return config, input_ids, attention_mask, max_length diff --git a/tests/models/transfo_xl/__init__.py b/tests/models/transfo_xl/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py b/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py deleted file mode 100644 index fdbff90b24b0..000000000000 --- a/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py +++ /dev/null @@ -1,282 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import annotations - -import random -import unittest - -from transformers import TransfoXLConfig, is_tf_available -from transformers.testing_utils import require_tf, slow - -from ...test_configuration_common import ConfigTester -from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor -from ...test_pipeline_mixin import PipelineTesterMixin - - -if is_tf_available(): - import tensorflow as tf - - from transformers import ( - TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, - TFTransfoXLForSequenceClassification, - TFTransfoXLLMHeadModel, - TFTransfoXLModel, - ) - - -class TFTransfoXLModelTester: - def __init__( - self, - parent, - ): - self.parent = parent - self.batch_size = 13 - self.seq_length = 7 - self.mem_len = 30 - self.key_length = self.seq_length + self.mem_len - self.clamp_len = 15 - self.is_training = True - self.use_labels = True - self.vocab_size = 99 - self.cutoffs = [10, 50, 80] - self.hidden_size = 32 - self.d_embed = 32 - self.num_attention_heads = 4 - self.d_head = 8 - self.d_inner = 128 - self.div_val = 2 - self.num_hidden_layers = 2 - self.scope = None - self.seed = 1 - self.eos_token_id = 0 - self.num_labels = 3 - self.pad_token_id = self.vocab_size - 1 - self.init_range = 0.01 - - def prepare_config_and_inputs(self): - input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - lm_labels = None - if self.use_labels: - lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - config = TransfoXLConfig( - vocab_size=self.vocab_size, - mem_len=self.mem_len, - clamp_len=self.clamp_len, - cutoffs=self.cutoffs, - d_model=self.hidden_size, - d_embed=self.d_embed, - n_head=self.num_attention_heads, - d_head=self.d_head, - d_inner=self.d_inner, - div_val=self.div_val, - n_layer=self.num_hidden_layers, - eos_token_id=self.eos_token_id, - 
pad_token_id=self.vocab_size - 1, - init_range=self.init_range, - num_labels=self.num_labels, - ) - - return (config, input_ids_1, input_ids_2, lm_labels) - - def set_seed(self): - random.seed(self.seed) - tf.random.set_seed(self.seed) - - def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): - model = TFTransfoXLModel(config) - - hidden_states_1, mems_1 = model(input_ids_1).to_tuple() - - inputs = {"input_ids": input_ids_2, "mems": mems_1} - - hidden_states_2, mems_2 = model(inputs).to_tuple() - - self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertListEqual( - [mem.shape for mem in mems_1], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - self.parent.assertListEqual( - [mem.shape for mem in mems_2], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): - model = TFTransfoXLLMHeadModel(config) - - lm_logits_1, mems_1 = model(input_ids_1).to_tuple() - - inputs = {"input_ids": input_ids_1, "labels": lm_labels} - _, mems_1 = model(inputs).to_tuple() - - lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple() - - inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels} - - _, mems_2 = model(inputs).to_tuple() - - self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size)) - self.parent.assertListEqual( - [mem.shape for mem in mems_1], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size)) - self.parent.assertListEqual( - [mem.shape for mem in mems_2], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels): - model = TFTransfoXLForSequenceClassification(config) - result = model(input_ids_1) - self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) - - def prepare_config_and_inputs_for_common(self): - config_and_inputs = self.prepare_config_and_inputs() - (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs - inputs_dict = {"input_ids": input_ids_1} - return config, inputs_dict - - -@require_tf -class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): - all_model_classes = ( - (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () - ) - all_generative_model_classes = () if is_tf_available() else () - pipeline_model_mapping = ( - { - "feature-extraction": TFTransfoXLModel, - "text-classification": TFTransfoXLForSequenceClassification, - "text-generation": TFTransfoXLLMHeadModel, - "zero-shot": TFTransfoXLForSequenceClassification, - } - if is_tf_available() - else {} - ) - # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented - test_resize_embeddings = False - test_head_masking = False - test_onnx = False - test_mismatched_shapes = False - - # TODO: Fix the failed tests - def is_pipeline_test_to_skip( - self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name - ): - if pipeline_test_casse_name == 
"TextGenerationPipelineTests": - # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. - # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple - # tokenizer. - return True - - return False - - def setUp(self): - self.model_tester = TFTransfoXLModelTester(self) - self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37) - - def test_config(self): - self.config_tester.run_common_tests() - - def test_transfo_xl_model(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs) - - def test_transfo_xl_lm_head(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs) - - def test_transfo_xl_sequence_classification_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs) - - def test_model_common_attributes(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification] - - for model_class in self.all_model_classes: - model = model_class(config) - assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) - if model_class in list_other_models_with_output_ebd: - x = model.get_output_embeddings() - assert isinstance(x, tf.keras.layers.Layer) - name = model.get_bias() - assert name is None - else: - x = model.get_output_embeddings() - assert x is None - name = model.get_bias() - assert name is None - - def test_xla_mode(self): - # TODO JP: Make TransfoXL XLA compliant - pass - - @slow - def test_model_from_pretrained(self): - for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: - model = TFTransfoXLModel.from_pretrained(model_name) - self.assertIsNotNone(model) - - @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.") - def test_dataset_conversion(self): - pass - - -@require_tf -class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase): - @unittest.skip("Skip test until #12651 is resolved.") - @slow - def test_lm_generate_transfo_xl_wt103(self): - model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") - input_ids = tf.convert_to_tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]]) # fmt: skip - # In 1991 , the remains of Russian Tsar Nicholas II and his family - # ( except for Alexei and Maria ) are discovered . - # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the - # remainder of the story . 1883 Western Siberia , - # a young Grigori Rasputin is asked by his father and a group of men to perform magic . 
- # Rasputin has a vision and denounces one of the men as a horse thief . Although his - # father initially slaps him for making such an accusation , Rasputin watches as the - # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of - # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , - # with people , even a bishop , begging for his blessing . - - expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10] # fmt: skip - # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for - # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei - # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young - # Grigori Rasputin is asked by his father and a group of men to perform magic. - # Rasputin has a vision and denounces one of the men as a horse thief. Although - # his father initially slaps him for making such an accusation, Rasputin watches - # as the man is chased outside and beaten. Twenty years later, Rasputin sees a - # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly - # becomes famous, with people, even a bishop, begging for his blessing. In the - # early 20th century, Rasputin became a symbol of the Russian Orthodox Church. - # The image of Rasputin was used in the Russian national anthem, " Nearer, My God, - # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit - # of Heaven " - - output_ids = model.generate(input_ids, max_length=200, do_sample=False) - self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) diff --git a/tests/models/transfo_xl/test_modeling_transfo_xl.py b/tests/models/transfo_xl/test_modeling_transfo_xl.py deleted file mode 100644 index 9534b13c8526..000000000000 --- a/tests/models/transfo_xl/test_modeling_transfo_xl.py +++ /dev/null @@ -1,533 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -import random -import unittest - -from transformers import TransfoXLConfig, is_torch_available -from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device - -from ...generation.test_utils import GenerationTesterMixin -from ...test_configuration_common import ConfigTester -from ...test_modeling_common import ModelTesterMixin, ids_tensor -from ...test_pipeline_mixin import PipelineTesterMixin - - -if is_torch_available(): - import torch - from torch import nn - - from transformers import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel - from transformers.models.transfo_xl.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST - - -class TransfoXLModelTester: - def __init__( - self, - parent, - batch_size=14, - seq_length=7, - mem_len=30, - clamp_len=15, - is_training=False, - use_labels=True, - vocab_size=99, - cutoffs=[10, 50, 80], - hidden_size=32, - d_embed=32, - num_attention_heads=4, - d_head=8, - d_inner=128, - div_val=2, - num_hidden_layers=2, - scope=None, - seed=1, - eos_token_id=0, - num_labels=3, - ): - self.parent = parent - self.batch_size = batch_size - self.seq_length = seq_length - self.mem_len = mem_len - self.key_length = self.seq_length + self.mem_len - self.clamp_len = clamp_len - self.is_training = is_training - self.use_labels = use_labels - self.vocab_size = vocab_size - self.cutoffs = cutoffs - self.hidden_size = hidden_size - self.d_embed = d_embed - self.num_attention_heads = num_attention_heads - self.d_head = d_head - self.d_inner = d_inner - self.div_val = div_val - self.num_hidden_layers = num_hidden_layers - self.scope = scope - self.seed = seed - self.eos_token_id = eos_token_id - self.num_labels = num_labels - self.pad_token_id = self.vocab_size - 1 - - def prepare_config_and_inputs(self): - input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - lm_labels = None - if self.use_labels: - lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - config = self.get_config() - - return (config, input_ids_1, input_ids_2, lm_labels) - - def get_config(self): - return TransfoXLConfig( - vocab_size=self.vocab_size, - mem_len=self.mem_len, - clamp_len=self.clamp_len, - cutoffs=self.cutoffs, - d_model=self.hidden_size, - d_embed=self.d_embed, - n_head=self.num_attention_heads, - d_head=self.d_head, - d_inner=self.d_inner, - div_val=self.div_val, - n_layer=self.num_hidden_layers, - eos_token_id=self.eos_token_id, - pad_token_id=self.pad_token_id, - ) - - def set_seed(self): - random.seed(self.seed) - torch.manual_seed(self.seed) - - def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): - model = TransfoXLModel(config) - model.to(torch_device) - model.eval() - - outputs1 = model(input_ids_1) - outputs2 = model(input_ids_2, outputs1["mems"]) - outputs = { - "hidden_states_1": outputs1["last_hidden_state"], - "mems_1": outputs1["mems"], - "hidden_states_2": outputs2["last_hidden_state"], - "mems_2": outputs2["mems"], - } - return outputs - - def check_transfo_xl_model_output(self, result): - self.parent.assertEqual(result["hidden_states_1"].shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertEqual(result["hidden_states_2"].shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_1"]], - [(self.mem_len, self.batch_size, 
self.hidden_size)] * self.num_hidden_layers, - ) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_2"]], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): - model = TransfoXLLMHeadModel(config) - model.to(torch_device) - model.eval() - - lm_logits_1 = model(input_ids_1)["prediction_scores"] - outputs1 = model(input_ids_1, labels=lm_labels) - lm_logits_2 = model(input_ids_2, mems=outputs1["mems"])["prediction_scores"] - outputs2 = model(input_ids_2, labels=lm_labels, mems=outputs1["mems"]) - - outputs = { - "loss_1": outputs1["loss"], - "losses_1": outputs1["losses"], - "mems_1": outputs1["mems"], - "lm_logits_1": lm_logits_1, - "loss_2": outputs2["loss"], - "losses_2": outputs2["losses"], - "mems_2": outputs2["mems"], - "lm_logits_2": lm_logits_2, - } - return outputs - - def check_transfo_xl_lm_head_output(self, result): - self.parent.assertEqual(result["loss_1"].shape, ()) - self.parent.assertEqual(result["losses_1"].shape, (self.batch_size, self.seq_length - 1)) - self.parent.assertEqual(result["lm_logits_1"].shape, (self.batch_size, self.seq_length, self.vocab_size)) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_1"]], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - self.parent.assertEqual(result["loss_2"].shape, ()) - self.parent.assertEqual(result["losses_2"].shape, (self.batch_size, self.seq_length - 1)) - self.parent.assertEqual(result["lm_logits_2"].shape, (self.batch_size, self.seq_length, self.vocab_size)) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_2"]], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_transfo_xl_lm_head_trainer_compatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels): - config.trainer_compatible = True - model = TransfoXLLMHeadModel(config) - model.to(torch_device) - model.eval() - - lm_logits_1 = model(input_ids_1, return_dict=False)[0] - outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False) - loss_1, _, losses_1, mems_1 = outputs1[:4] - lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0] - outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1, return_dict=False) - loss_2, _, losses_2, mems_2 = outputs2[:4] - - outputs = { - "losses_1": losses_1, - "mems_1": mems_1, - "lm_logits_1": lm_logits_1, - "loss_1": loss_1, - "losses_2": losses_2, - "mems_2": mems_2, - "lm_logits_2": lm_logits_2, - "loss_2": loss_2, - } - - config.trainer_compatible = None - return outputs - - def create_transfo_xl_lm_head_trainer_incompatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels): - config.trainer_compatible = False - model = TransfoXLLMHeadModel(config) - model.to(torch_device) - model.eval() - - lm_logits_1 = model(input_ids_1, return_dict=False)[0] - outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False) - losses_1, _, mems_1 = outputs1[:3] - loss_1 = outputs1[-1] - lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0] - outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1) - losses_2, _, mems_2 = outputs2[:3] - loss_2 = outputs2[-1] - - outputs = { - "losses_1": losses_1, - "mems_1": mems_1, - "lm_logits_1": lm_logits_1, - "loss_1": loss_1, - "losses_2": losses_2, - "mems_2": mems_2, - "lm_logits_2": lm_logits_2, - "loss_2": loss_2, - } - - config.trainer_compatible = None - return outputs - - def 
create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels): - config.num_labels = self.num_labels - model = TransfoXLForSequenceClassification(config) - model.to(torch_device) - model.eval() - result = model(input_ids_1) - self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) - - def prepare_config_and_inputs_for_common(self): - config_and_inputs = self.prepare_config_and_inputs() - (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs - inputs_dict = {"input_ids": input_ids_1} - return config, inputs_dict - - -@require_torch -class TransfoXLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): - all_model_classes = ( - (TransfoXLModel, TransfoXLLMHeadModel, TransfoXLForSequenceClassification) if is_torch_available() else () - ) - all_generative_model_classes = (TransfoXLLMHeadModel,) if is_torch_available() else () - pipeline_model_mapping = ( - { - "feature-extraction": TransfoXLModel, - "text-classification": TransfoXLForSequenceClassification, - "text-generation": TransfoXLLMHeadModel, - "zero-shot": TransfoXLForSequenceClassification, - } - if is_torch_available() - else {} - ) - test_pruning = False - test_resize_embeddings = True - test_mismatched_shapes = False - - # TODO: Fix the failed tests - def is_pipeline_test_to_skip( - self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name - ): - if pipeline_test_casse_name == "TextGenerationPipelineTests": - # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. - # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple - # tokenizer. - return True - - return False - - def check_cutoffs_and_n_token( - self, copied_cutoffs, layer, model_embed, model, model_class, resized_value, vocab_size - ): - # Check that the cutoffs were modified accordingly - for i in range(len(copied_cutoffs)): - if i < layer: - self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i]) - if model_class == TransfoXLLMHeadModel: - self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i]) - if i < len(model.config.cutoffs): - self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i]) - else: - self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i] + resized_value) - if model_class == TransfoXLLMHeadModel: - self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i] + resized_value) - if i < len(model.config.cutoffs): - self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i] + resized_value) - - self.assertEqual(model_embed.n_token, vocab_size + resized_value) - if model_class == TransfoXLLMHeadModel: - self.assertEqual(model.crit.n_token, vocab_size + resized_value) - - def setUp(self): - self.model_tester = TransfoXLModelTester(self) - self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37) - - def test_config(self): - self.config_tester.run_common_tests() - - def test_transfo_xl_model(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - output_result = self.model_tester.create_transfo_xl_model(*config_and_inputs) - self.model_tester.check_transfo_xl_model_output(output_result) - - def test_transfo_xl_lm_head(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - - output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs) - 
self.model_tester.check_transfo_xl_lm_head_output(output_result) - - output_result = self.model_tester.create_transfo_xl_lm_head_trainer_compatible_tuple(*config_and_inputs) - self.model_tester.check_transfo_xl_lm_head_output(output_result) - - output_result = self.model_tester.create_transfo_xl_lm_head_trainer_incompatible_tuple(*config_and_inputs) - self.model_tester.check_transfo_xl_lm_head_output(output_result) - - def test_transfo_xl_sequence_classification_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs) - - def test_retain_grad_hidden_states_attentions(self): - # xlnet cannot keep gradients in attentions or hidden states - return - - @require_torch_multi_gpu - def test_multi_gpu_data_parallel_forward(self): - # Opt-out of this test. - pass - - @slow - def test_model_from_pretrained(self): - for model_name in TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: - model = TransfoXLModel.from_pretrained(model_name) - self.assertIsNotNone(model) - - def test_resize_tokens_embeddings(self): - (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() - if not self.test_resize_embeddings: - return - - for model_class in self.all_model_classes: - config = copy.deepcopy(original_config) - model = model_class(config) - model.to(torch_device) - - if self.model_tester.is_training is False: - model.eval() - - model_vocab_size = config.vocab_size - # Retrieve the embeddings and clone theme - model_embed = model.resize_token_embeddings(model_vocab_size) - cloned_embeddings = [emb.weight.clone() for emb in model_embed.emb_layers] - # Retrieve the cutoffs and copy them - copied_cutoffs = copy.copy(model_embed.cutoffs) - - test_layers = list(range(config.div_val)) - for layer in test_layers: - # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size - model_embed = model.resize_token_embeddings(model_vocab_size + 10, layer) - self.assertEqual(model.config.vocab_size, model_vocab_size + 10) - # Check that it actually resizes the embeddings matrix - self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] + 10) - # Check that the cutoffs were modified accordingly - self.check_cutoffs_and_n_token( - copied_cutoffs, layer, model_embed, model, model_class, 10, model_vocab_size - ) - - # Check that the model can still do a forward pass successfully (every parameter should be resized) - model(**inputs_dict) - - # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size - model_embed = model.resize_token_embeddings(model_vocab_size - 5, layer) - self.assertEqual(model.config.vocab_size, model_vocab_size - 5) - # Check that it actually resizes the embeddings matrix - self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] - 5) - # Check that the cutoffs were modified accordingly - self.check_cutoffs_and_n_token( - copied_cutoffs, layer, model_embed, model, model_class, -5, model_vocab_size - ) - - # Check that the model can still do a forward pass successfully (every parameter should be resized) - # Input ids should be clamped to the maximum size of the vocabulary - inputs_dict["input_ids"].clamp_(max=model_vocab_size - 5 - 1) - model(**inputs_dict) - - # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
- models_equal = True - for p1, p2 in zip(cloned_embeddings[layer], model_embed.emb_layers[layer].weight): - if p1.data.ne(p2.data).sum() > 0: - models_equal = False - - self.assertTrue(models_equal) - - # Reset model embeddings to original size - model.resize_token_embeddings(model_vocab_size, layer) - self.assertEqual(model_vocab_size, model.config.vocab_size) - self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0]) - - def test_resize_embeddings_untied(self): - # transfo-xl requires special resize for lm-head - return - - def _check_attentions_for_generate( - self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 - ): - self.assertIsInstance(attentions, tuple) - self.assertListEqual( - [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) - ) - self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) - - for idx, iter_attentions in enumerate(attentions): - tgt_len = min_length if idx == 0 else (min_length - 2) - src_len = (min_length + config.mem_len) if idx == 0 else (min_length + config.mem_len - 2) - - expected_shape = ( - batch_size * num_beam_groups, - config.num_attention_heads, - tgt_len, - src_len, - ) - - # check attn size - self.assertListEqual( - [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) - ) - - def _check_hidden_states_for_generate( - self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 - ): - self.assertIsInstance(hidden_states, tuple) - self.assertListEqual( - [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], - [True] * len(hidden_states), - ) - self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) - - for idx, iter_hidden_states in enumerate(hidden_states): - seq_len = min_length if idx == 0 else min_length - 2 - expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) - # check hidden size - self.assertListEqual( - [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], - [expected_shape] * len(iter_hidden_states), - ) - - # overwrite from test_modeling_common - def _mock_init_weights(self, module): - if hasattr(module, "weight") and module.weight is not None: - module.weight.data.fill_(3) - if hasattr(module, "cluster_weight") and module.cluster_weight is not None: - module.cluster_weight.data.fill_(3) - if hasattr(module, "bias") and module.bias is not None: - module.bias.data.fill_(3) - if hasattr(module, "cluster_bias") and module.cluster_bias is not None: - module.cluster_bias.data.fill_(3) - - if hasattr(module, "emb_projs"): - for i in range(len(module.emb_projs)): - if module.emb_projs[i] is not None: - nn.init.constant_(module.emb_projs[i], 0.0003) - if hasattr(module, "out_projs"): - for i in range(len(module.out_projs)): - if module.out_projs[i] is not None: - nn.init.constant_(module.out_projs[i], 0.0003) - - for param in ["r_emb", "r_w_bias", "r_r_bias", "r_bias"]: - if hasattr(module, param) and getattr(module, param) is not None: - weight = getattr(module, param) - weight.data.fill_(3) - - @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) - def test_left_padding_compatibility(self): - pass - - @unittest.skip("This test is currently broken because of safetensors.") - def test_tf_from_pt_safetensors(self): - pass - - -@require_torch -class 
TransfoXLModelLanguageGenerationTest(unittest.TestCase): - @slow - def test_lm_generate_transfo_xl_wt103(self): - model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") - model.to(torch_device) - - input_ids = torch.tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]], dtype=torch.long,device=torch_device) # fmt: skip - # In 1991 , the remains of Russian Tsar Nicholas II and his family - # ( except for Alexei and Maria ) are discovered . - # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the - # remainder of the story . 1883 Western Siberia , - # a young Grigori Rasputin is asked by his father and a group of men to perform magic . - # Rasputin has a vision and denounces one of the men as a horse thief . Although his - # father initially slaps him for making such an accusation , Rasputin watches as the - # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of - # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , - # with people , even a bishop , begging for his blessing . - - expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10] # fmt: skip - # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for - # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei - # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young - # Grigori Rasputin is asked by his father and a group of men to perform magic. - # Rasputin has a vision and denounces one of the men as a horse thief. Although - # his father initially slaps him for making such an accusation, Rasputin watches - # as the man is chased outside and beaten. Twenty years later, Rasputin sees a - # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly - # becomes famous, with people, even a bishop, begging for his blessing. In the - # early 20th century, Rasputin became a symbol of the Russian Orthodox Church. 
- # The image of Rasputin was used in the Russian national anthem, " Nearer, My God, - # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit - # of Heaven " - - output_ids = model.generate(input_ids, max_length=200, do_sample=False) - self.assertListEqual(output_ids[0].tolist(), expected_output_ids) diff --git a/tests/models/transfo_xl/test_tokenization_transfo_xl.py b/tests/models/transfo_xl/test_tokenization_transfo_xl.py deleted file mode 100644 index d8835a164c61..000000000000 --- a/tests/models/transfo_xl/test_tokenization_transfo_xl.py +++ /dev/null @@ -1,156 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -import pickle -import unittest -from collections import Counter, OrderedDict - -from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer - -from ...test_tokenization_common import TokenizerTesterMixin - - -class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase): - tokenizer_class = TransfoXLTokenizer - test_rust_tokenizer = False - test_seq2seq = False - - def setUp(self): - super().setUp() - - vocab_tokens = [ - "", - "[CLS]", - "[SEP]", - "want", - "unwanted", - "wa", - "un", - "running", - ",", - "low", - "l", - ] - self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) - with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: - vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) - - saved_dict = { - "eos_idx": 0, - "min_freq": 0, - "vocab_file": None, - "counter": Counter(["welcome home"]), - "sym2idx": OrderedDict([("", 0), ("welcome", 1), ("home", 2)]), - "delimiter": None, - "idx2sym": ["", "welcome", "home"], - "max_size": None, - "lower_case": False, - "special": [""], - } - self.pretrained_vocab_file = os.path.join( - self.tmpdirname, "mock_folder", VOCAB_FILES_NAMES["pretrained_vocab_file"] - ) - os.makedirs(os.path.dirname(self.pretrained_vocab_file), exist_ok=True) - with open(self.pretrained_vocab_file, "wb") as f: - pickle.dump(saved_dict, f) - - def get_tokenizer(self, **kwargs): - kwargs["lower_case"] = True - return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs) - - def get_input_output_texts(self, tokenizer): - input_text = " UNwanted , running" - output_text = " unwanted, running" - return input_text, output_text - - def test_full_tokenizer(self): - tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True) - - tokens = tokenizer.tokenize(" UNwanted , running") - self.assertListEqual(tokens, ["", "unwanted", ",", "running"]) - - self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7]) - - def test_full_tokenizer_lower(self): - tokenizer = TransfoXLTokenizer(lower_case=True) - - self.assertListEqual( - tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? 
"), ["hello", "!", "how", "are", "you", "?"] - ) - - def test_full_tokenizer_no_lower(self): - tokenizer = TransfoXLTokenizer(lower_case=False) - - self.assertListEqual( - tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] - ) - - def test_full_tokenizer_moses_numbers(self): - tokenizer = TransfoXLTokenizer(lower_case=False) - text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" - tokens_out = [ - "Hello", - "(", - "bracket", - ")", - "and", - "side", - "@-@", - "scrolled", - "[", - "and", - "]", - "Henry", - "'s", - "$", - "5", - "@,@", - "000", - "with", - "3", - "@.@", - "34", - "m", - ".", - "What", - "'s", - "up", - "!", - "?", - ] - - self.assertListEqual(tokenizer.tokenize(text_in), tokens_out) - - self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in) - - def test_move_added_token(self): - tokenizer = self.get_tokenizer() - original_len = len(tokenizer) - - tokenizer.add_tokens(["new1", "new2"]) - tokenizer.move_added_token("new1", 1) - - # Check that moved token is not copied (duplicate) - self.assertEqual(len(tokenizer), original_len + 2) - # Check that token is moved to specified id - self.assertEqual(tokenizer.encode("new1"), [1]) - self.assertEqual(tokenizer.decode([1]), "new1") - - def test_from_pretrained_vocab_file(self): - tokenizer = TransfoXLTokenizer.from_pretrained(os.path.join(self.tmpdirname, "mock_folder")) - sentence = "welcome home" - self.assertEqual(tokenizer.decode(tokenizer.encode(sentence)), sentence) diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index c69b5ed77fe5..06f05ec86c0f 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -1927,7 +1927,6 @@ def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_cla "FunnelForPreTraining", "ElectraForPreTraining", "XLMWithLMHeadModel", - "TransfoXLLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index d7cd62b41a02..7ac744263cc0 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -465,7 +465,6 @@ def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_cla "TFFunnelForPreTraining", "TFElectraForPreTraining", "TFXLMWithLMHeadModel", - "TFTransfoXLLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: diff --git a/utils/check_config_attributes.py b/utils/check_config_attributes.py index 7a116d5af317..a00606033512 100644 --- a/utils/check_config_attributes.py +++ b/utils/check_config_attributes.py @@ -127,7 +127,6 @@ "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, - "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py index cddd04ac6516..f5f01a0d83f7 100644 --- a/utils/check_docstrings.py +++ b/utils/check_docstrings.py @@ -738,7 +738,6 @@ "TrainerState", "TrainingArguments", "TrajectoryTransformerConfig", - "TransfoXLConfig", "TranslationPipeline", "TvltImageProcessor", "UMT5Config", diff --git a/utils/check_repo.py b/utils/check_repo.py index cac78bfe80c6..fd5b4c255ab6 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -397,13 +397,11 @@ def get_model_modules() -> List[str]: "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_timm_backbone", - 
"modeling_transfo_xl_utilities", "modeling_tf_auto", "modeling_tf_encoder_decoder", "modeling_tf_outputs", "modeling_tf_pytorch_utils", "modeling_tf_utils", - "modeling_tf_transfo_xl_utilities", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index 07775fe823a4..070d0453a454 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -496,6 +496,11 @@ src/transformers/models/deprecated/tapex/tokenization_tapex.py src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py +src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py +src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py +src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py +src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py src/transformers/models/deprecated/van/configuration_van.py src/transformers/models/deprecated/van/convert_van_to_pytorch.py src/transformers/models/deprecated/van/modeling_van.py @@ -818,11 +823,6 @@ src/transformers/models/tapas/modeling_tf_tapas.py src/transformers/models/timesformer/convert_timesformer_to_pytorch.py src/transformers/models/timm_backbone/configuration_timm_backbone.py src/transformers/models/timm_backbone/modeling_timm_backbone.py -src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py -src/transformers/models/transfo_xl/modeling_transfo_xl.py -src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py src/transformers/models/tvlt/configuration_tvlt.py src/transformers/models/tvlt/modeling_tvlt.py @@ -990,4 +990,4 @@ src/transformers/utils/peft_utils.py src/transformers/utils/quantization_config.py src/transformers/utils/sentencepiece_model_pb2.py src/transformers/utils/sentencepiece_model_pb2_new.py -src/transformers/utils/versions.py \ No newline at end of file +src/transformers/utils/versions.py From 80e9f76857c3f16dc0448ee2c12568f75e7544e9 Mon Sep 17 00:00:00 2001 From: liuxueyang Date: Fri, 24 Nov 2023 19:24:04 +0800 Subject: [PATCH 251/268] Fix typo in warning message (#27055) * Fix typo in warning message The path of `default_cache_path` is hf_cache_home/hub. There is no directory named transformers under hf_cache_home * Fix a typo in comment * Update the version number v4.22.0 is the earlist version that contains those changes in PR #18492 --- src/transformers/utils/hub.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 8d2f77da6845..6ab1670ea37c 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -104,10 +104,10 @@ def is_offline_mode(): and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( - "In Transformers v4.0.0, the default path to cache downloaded models changed from" - " '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. 
Since you don't seem to have" + "In Transformers v4.22.0, the default path to cache downloaded models changed from" + " '~/.cache/torch/transformers' to '~/.cache/huggingface/hub'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" - " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" + " '~/.cache/huggingface/hub' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, constants.HF_HUB_CACHE) @@ -1126,7 +1126,7 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): if new_cache_dir is None: new_cache_dir = TRANSFORMERS_CACHE if cache_dir is None: - # Migrate from old cache in .cache/huggingface/hub + # Migrate from old cache in .cache/huggingface/transformers old_cache = Path(TRANSFORMERS_CACHE).parent / "transformers" if os.path.isdir(str(old_cache)): cache_dir = str(old_cache) From 181f85da24f4cf5b0ce466e0be7fbe276a790edc Mon Sep 17 00:00:00 2001 From: yoinked Date: Fri, 24 Nov 2023 03:34:24 -0800 Subject: [PATCH 252/268] Docs/Add conversion code to the musicgen docs (#27665) * Update musicgen.md please make it less hidden * Add cleaner formatting --- docs/source/en/model_doc/musicgen.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/source/en/model_doc/musicgen.md b/docs/source/en/model_doc/musicgen.md index d9e84294852d..bc2234ce3c41 100644 --- a/docs/source/en/model_doc/musicgen.md +++ b/docs/source/en/model_doc/musicgen.md @@ -46,6 +46,16 @@ This model was contributed by [sanchit-gandhi](https://huggingface.co/sanchit-ga [here](https://github.com/facebookresearch/audiocraft). The pre-trained checkpoints can be found on the [Hugging Face Hub](https://huggingface.co/models?sort=downloads&search=facebook%2Fmusicgen-). +## Usage tips + +- After downloading the original checkpoints from [here](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md#importing--exporting-models) , you can convert them using the **conversion script** available at +`src/transformers/models/musicgen/convert_musicgen_transformers.py` with the following command: + +```bash +python src/transformers/models/musicgen/convert_musicgen_transformers.py \ + --checkpoint small --pytorch_dump_folder /output/path --safe_serialization +``` + ## Generation MusicGen is compatible with two generation modes: greedy and sampling. In practice, sampling leads to significantly From 2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff Mon Sep 17 00:00:00 2001 From: Anirudh Haritas Murali <49116134+anihm136@users.noreply.github.com> Date: Fri, 24 Nov 2023 17:11:16 +0530 Subject: [PATCH 253/268] Fix semantic error in evaluation section (#27675) Change "convert predictions to logits" to "convert logits to predictions" to fix semantic error in the evaluation section. Logits need to be converted to predictions to evaluate the accuracy, not the other way round --- docs/source/en/training.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/training.md b/docs/source/en/training.md index 1744a441535d..8e81048bf54e 100644 --- a/docs/source/en/training.md +++ b/docs/source/en/training.md @@ -119,7 +119,7 @@ Specify where to save the checkpoints from your training: >>> metric = evaluate.load("accuracy") ``` -Call [`~evaluate.compute`] on `metric` to calculate the accuracy of your predictions. 
Before passing your predictions to `compute`, you need to convert the predictions to logits (remember all 🤗 Transformers models return logits): +Call [`~evaluate.compute`] on `metric` to calculate the accuracy of your predictions. Before passing your predictions to `compute`, you need to convert the logits to predictions (remember all 🤗 Transformers models return logits): ```py >>> def compute_metrics(eval_pred): From a6d178e23892a2b833523bb1ce4611b32e171f4f Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Fri, 24 Nov 2023 16:30:05 +0100 Subject: [PATCH 254/268] [`DocString`] Support a revision in the docstring `add_code_sample_docstrings` to facilitate integrations (#27645) * initial commit * dummy changes * style * Update src/transformers/utils/doc.py Co-authored-by: Alex McKinney <44398246+vvvm23@users.noreply.github.com> * nits * nit use ` if re.match(r'^refs/pr/\d*', revision):` * restrict * nit * test the doc vuilder * wow * oke the order was wrong --------- Co-authored-by: Alex McKinney <44398246+vvvm23@users.noreply.github.com> --- src/transformers/modeling_flax_utils.py | 3 ++- src/transformers/models/albert/modeling_flax_albert.py | 4 +++- src/transformers/utils/doc.py | 10 ++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index d4617c111bb2..d48c991319ac 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -1267,13 +1267,14 @@ def overwrite_call_docstring(model_class, docstring): model_class.__call__ = add_start_docstrings_to_model_forward(docstring)(model_class.__call__) -def append_call_sample_docstring(model_class, checkpoint, output_type, config_class, mask=None): +def append_call_sample_docstring(model_class, checkpoint, output_type, config_class, mask=None, revision=None): model_class.__call__ = copy_func(model_class.__call__) model_class.__call__ = add_code_sample_docstrings( checkpoint=checkpoint, output_type=output_type, config_class=config_class, model_cls=model_class.__name__, + revision=revision, )(model_class.__call__) diff --git a/src/transformers/models/albert/modeling_flax_albert.py b/src/transformers/models/albert/modeling_flax_albert.py index ba054cb17932..6333f0bd3ac2 100644 --- a/src/transformers/models/albert/modeling_flax_albert.py +++ b/src/transformers/models/albert/modeling_flax_albert.py @@ -829,7 +829,9 @@ class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMaskedLMModule -append_call_sample_docstring(FlaxAlbertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC) +append_call_sample_docstring( + FlaxAlbertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC, revision="refs/pr/11" +) class FlaxAlbertForSequenceClassificationModule(nn.Module): diff --git a/src/transformers/utils/doc.py b/src/transformers/utils/doc.py index ac35542f7c30..23679f31a3e2 100644 --- a/src/transformers/utils/doc.py +++ b/src/transformers/utils/doc.py @@ -1075,6 +1075,7 @@ def add_code_sample_docstrings( expected_output=None, expected_loss=None, real_checkpoint=None, + revision=None, ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise @@ -1143,6 +1144,15 @@ def docstring_decorator(fn): func_doc = (fn.__doc__ or "") + "".join(docstr) output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class) built_doc = 
code_sample.format(**doc_kwargs) + if revision is not None: + if re.match(r"^refs/pr/\\d+", revision): + raise ValueError( + f"The provided revision '{revision}' is incorrect. It should point to" + " a pull request reference on the hub like 'refs/pr/6'" + ) + built_doc = built_doc.replace( + f'from_pretrained("{checkpoint}")', f'from_pretrained("{checkpoint}", revision="{revision}")' + ) fn.__doc__ = func_doc + output_doc + built_doc return fn From c13a43aaf2194b5d05272af5c9c7e394688b72df Mon Sep 17 00:00:00 2001 From: fxmarty <9808326+fxmarty@users.noreply.github.com> Date: Fri, 24 Nov 2023 16:59:17 +0100 Subject: [PATCH 255/268] Reflect RoCm support in the documentation (#27636) * reflect RoCm support in the documentation * Update docs/source/en/main_classes/trainer.md Co-authored-by: Lysandre Debut * fix review comments * use ROCm instead of RoCm --------- Co-authored-by: Lysandre Debut --- docs/source/en/main_classes/trainer.md | 6 +++--- docs/source/en/model_doc/t5.md | 2 +- docs/source/en/perf_infer_gpu_one.md | 8 ++++---- docs/source/en/perf_train_gpu_one.md | 4 ++-- docs/source/en/tasks/text-to-speech.md | 8 +++++++- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/docs/source/en/main_classes/trainer.md b/docs/source/en/main_classes/trainer.md index 7304de8174dc..d46892c08e81 100644 --- a/docs/source/en/main_classes/trainer.md +++ b/docs/source/en/main_classes/trainer.md @@ -26,7 +26,7 @@ If you're looking to fine-tune a language model like Llama-2 or Mistral on a tex Before instantiating your [`Trainer`], create a [`TrainingArguments`] to access all the points of customization during training. -The API supports distributed training on multiple GPUs/TPUs, mixed precision through [NVIDIA Apex](https://github.com/NVIDIA/apex) and Native AMP for PyTorch. +The API supports distributed training on multiple GPUs/TPUs, mixed precision through [NVIDIA Apex](https://github.com/NVIDIA/apex) for NVIDIA GPUs, [ROCm APEX](https://github.com/ROCmSoftwarePlatform/apex) for AMD GPUs, and Native AMP for PyTorch. The [`Trainer`] contains the basic training loop which supports the above features. To inject custom behavior you can subclass them and override the following methods: @@ -272,7 +272,7 @@ but this approach can be confusing since you may forget you set up the environme There is an additional environment variable `CUDA_DEVICE_ORDER` that controls how the physical devices are ordered. The two choices are: -1. ordered by PCIe bus IDs (matches `nvidia-smi`'s order) - this is the default. +1. ordered by PCIe bus IDs (matches `nvidia-smi` and `rocm-smi`'s order) - this is the default. ```bash export CUDA_DEVICE_ORDER=PCI_BUS_ID ``` @@ -284,7 +284,7 @@ export CUDA_DEVICE_ORDER=PCI_BUS_ID export CUDA_DEVICE_ORDER=FASTEST_FIRST ``` -Most of the time you don't need to care about this environment variable, but it's very helpful if you have a lopsided setup where you have an old and a new GPUs physically inserted in such a way so that the slow older card appears to be first.
One way to fix that is to swap the cards. But if you can't swap the cards (e.g., if the cooling of the devices gets impacted) then setting `CUDA_DEVICE_ORDER=FASTEST_FIRST` will always put the newer faster card first. It'll be somewhat confusing though since `nvidia-smi` (or `rocm-smi`) will still report them in the PCIe order. The other solution to swapping the order is to use: diff --git a/docs/source/en/model_doc/t5.md b/docs/source/en/model_doc/t5.md index 704d05987b9b..a7e78976cf94 100644 --- a/docs/source/en/model_doc/t5.md +++ b/docs/source/en/model_doc/t5.md @@ -314,7 +314,7 @@ The predicted tokens will then be placed between the sentinel tokens. ## Performance -If you'd like a faster training and inference performance, install [apex](https://github.com/NVIDIA/apex#quick-start) and then the model will automatically use `apex.normalization.FusedRMSNorm` instead of `T5LayerNorm`. The former uses an optimized fused kernel which is several times faster than the latter. +If you'd like a faster training and inference performance, install [NVIDIA APEX](https://github.com/NVIDIA/apex#quick-start) for NVIDIA GPUs, or [ROCm APEX](https://github.com/ROCmSoftwarePlatform/apex) for AMD GPUs and then the model will automatically use `apex.normalization.FusedRMSNorm` instead of `T5LayerNorm`. The former uses an optimized fused kernel which is several times faster than the latter. ## Resources diff --git a/docs/source/en/perf_infer_gpu_one.md b/docs/source/en/perf_infer_gpu_one.md index ba339c1a3068..82ec39441f39 100644 --- a/docs/source/en/perf_infer_gpu_one.md +++ b/docs/source/en/perf_infer_gpu_one.md @@ -15,7 +15,7 @@ rendered properly in your Markdown viewer. # GPU inference -GPUs are the standard choice of hardware for machine learning, unlike CPUs, because they are optimized for memory bandwidth and parallelism. To keep up with the larger sizes of modern models or to run these large models on existing and older hardware, there are several optimizations you can use to speed up GPU inference. In this guide, you'll learn how to use FlashAttention-2 (a more memory-efficient attention mechanism), BetterTransformer (a PyTorch native fastpath execution), and bitsandbytes to quantize your model to a lower precision. Finally, learn how to use 🤗 Optimum to accelerate inference with ONNX Runtime on Nvidia GPUs. +GPUs are the standard choice of hardware for machine learning, unlike CPUs, because they are optimized for memory bandwidth and parallelism. To keep up with the larger sizes of modern models or to run these large models on existing and older hardware, there are several optimizations you can use to speed up GPU inference. In this guide, you'll learn how to use FlashAttention-2 (a more memory-efficient attention mechanism), BetterTransformer (a PyTorch native fastpath execution), and bitsandbytes to quantize your model to a lower precision. Finally, learn how to use 🤗 Optimum to accelerate inference with ONNX Runtime on Nvidia and AMD GPUs. @@ -276,13 +276,13 @@ Feel free to try running a 11 billion parameter [T5 model](https://colab.researc -Learn more details about using ORT with 🤗 Optimum in the [Accelerated inference on NVIDIA GPUs](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#accelerated-inference-on-nvidia-gpus) guide. This section only provides a brief and simple example. 
+Learn more details about using ORT with 🤗 Optimum in the [Accelerated inference on NVIDIA GPUs](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#accelerated-inference-on-nvidia-gpus) and [Accelerated inference on AMD GPUs](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/amdgpu#accelerated-inference-on-amd-gpus) guides. This section only provides a brief and simple example. -ONNX Runtime (ORT) is a model accelerator that supports accelerated inference on Nvidia GPUs. ORT uses optimization techniques like fusing common operations into a single node and constant folding to reduce the number of computations performed and speedup inference. ORT also places the most computationally intensive operations on the GPU and the rest on the CPU to intelligently distribute the workload between the two devices. +ONNX Runtime (ORT) is a model accelerator that supports accelerated inference on Nvidia GPUs, and AMD GPUs that use [ROCm](https://www.amd.com/en/products/software/rocm.html) stack. ORT uses optimization techniques like fusing common operations into a single node and constant folding to reduce the number of computations performed and speedup inference. ORT also places the most computationally intensive operations on the GPU and the rest on the CPU to intelligently distribute the workload between the two devices. -ORT is supported by 🤗 Optimum which can be used in 🤗 Transformers. You'll need to use an [`~optimum.onnxruntime.ORTModel`] for the task you're solving, and specify the `provider` parameter which can be set to either [`CUDAExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#cudaexecutionprovider) or [`TensorrtExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#tensorrtexecutionprovider). If you want to load a model that was not yet exported to ONNX, you can set `export=True` to convert your model on-the-fly to the ONNX format : +ORT is supported by 🤗 Optimum which can be used in 🤗 Transformers. You'll need to use an [`~optimum.onnxruntime.ORTModel`] for the task you're solving, and specify the `provider` parameter which can be set to either [`CUDAExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#cudaexecutionprovider), [`ROCMExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/amdgpu) or [`TensorrtExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#tensorrtexecutionprovider). If you want to load a model that was not yet exported to ONNX, you can set `export=True` to convert your model on-the-fly to the ONNX format: ```py from optimum.onnxruntime import ORTModelForSequenceClassification diff --git a/docs/source/en/perf_train_gpu_one.md b/docs/source/en/perf_train_gpu_one.md index 25117241f78f..089c9905caba 100644 --- a/docs/source/en/perf_train_gpu_one.md +++ b/docs/source/en/perf_train_gpu_one.md @@ -237,7 +237,7 @@ You can speedup the training throughput by using Flash Attention 2 integration i The most common optimizer used to train transformer models is Adam or AdamW (Adam with weight decay). Adam achieves good convergence by storing the rolling average of the previous gradients; however, it adds an additional memory footprint of the order of the number of model parameters. To remedy this, you can use an alternative optimizer. 
-For example if you have [NVIDIA/apex](https://github.com/NVIDIA/apex) installed, `adamw_apex_fused` will give you the +For example if you have [NVIDIA/apex](https://github.com/NVIDIA/apex) installed for NVIDIA GPUs, or [ROCmSoftwarePlatform/apex](https://github.com/ROCmSoftwarePlatform/apex) for AMD GPUs, `adamw_apex_fused` will give you the fastest training experience among all supported AdamW optimizers. [`Trainer`] integrates a variety of optimizers that can be used out of box: `adamw_hf`, `adamw_torch`, `adamw_torch_fused`, @@ -529,4 +529,4 @@ By default, in training mode, the BetterTransformer integration **drops the mask -Check out this [blogpost](https://pytorch.org/blog/out-of-the-box-acceleration/) to learn more about acceleration and memory-savings with SDPA. \ No newline at end of file +Check out this [blogpost](https://pytorch.org/blog/out-of-the-box-acceleration/) to learn more about acceleration and memory-savings with SDPA. diff --git a/docs/source/en/tasks/text-to-speech.md b/docs/source/en/tasks/text-to-speech.md index 86a0d49fd04d..216c3c1f1133 100644 --- a/docs/source/en/tasks/text-to-speech.md +++ b/docs/source/en/tasks/text-to-speech.md @@ -74,6 +74,12 @@ To follow this guide you will need a GPU. If you're working in a notebook, run t !nvidia-smi ``` +or alternatively for AMD GPUs: + +```bash +!rocm-smi +``` + We encourage you to log in to your Hugging Face account to upload and share your model with the community. When prompted, enter your token to log in: @@ -630,4 +636,4 @@ see if this improves the results. Finally, it is essential to consider ethical considerations. Although TTS technology has numerous useful applications, it may also be used for malicious purposes, such as impersonating someone's voice without their knowledge or consent. Please -use TTS judiciously and responsibly. \ No newline at end of file +use TTS judiciously and responsibly. From 29c94808ea4bdfc3f13f3c718fba99dc00441d7a Mon Sep 17 00:00:00 2001 From: Chi Date: Fri, 24 Nov 2023 22:25:08 +0530 Subject: [PATCH 256/268] Successfully Resolved The ZeroDivisionError Exception. (#27524) * Successfully resolved the ZeroDivisionError exception in the utils.notebook.y file. 
* Now I update little code mentioned by Peter * Using Black package to reformat my file * Now I using ruff libary to reformated my file --- src/transformers/utils/notebook.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/transformers/utils/notebook.py b/src/transformers/utils/notebook.py index c97849d02dbe..f7396642732e 100644 --- a/src/transformers/utils/notebook.py +++ b/src/transformers/utils/notebook.py @@ -161,7 +161,7 @@ def update(self, value: int, force_update: bool = False, comment: str = None): self.update_bar(value) self.last_value = value self.last_time = current_time - if self.average_time_per_item is None: + if (self.average_time_per_item is None) or (self.average_time_per_item == 0): self.wait_for = 1 else: self.wait_for = max(int(self.update_every / self.average_time_per_item), 1) @@ -177,7 +177,11 @@ def update_bar(self, value, comment=None): f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <" f" {format_time(self.predicted_remaining)}" ) - self.label += f", {1/self.average_time_per_item:.2f} it/s" + if self.average_time_per_item == 0: + self.label += ", +inf it/s" + else: + self.label += f", {1/self.average_time_per_item:.2f} it/s" + self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]" self.display() @@ -367,6 +371,8 @@ def on_evaluate(self, args, state, control, metrics=None, **kwargs): def on_train_end(self, args, state, control, **kwargs): self.training_tracker.update( - state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True + state.global_step, + comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", + force_update=True, ) self.training_tracker = None From 35551f9a0f66a22de4971b4a51b3c172d3b87f95 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 24 Nov 2023 19:47:50 +0100 Subject: [PATCH 257/268] Fix `TVPModelTest` (#27695) * fix * fix * fix * fix * fix --------- Co-authored-by: ydshieh --- src/transformers/models/tvp/modeling_tvp.py | 13 +++++++++---- tests/models/tvp/test_modeling_tvp.py | 3 +++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/tvp/modeling_tvp.py b/src/transformers/models/tvp/modeling_tvp.py index bf1fc5afbaac..65fac8b3a0e0 100644 --- a/src/transformers/models/tvp/modeling_tvp.py +++ b/src/transformers/models/tvp/modeling_tvp.py @@ -606,12 +606,15 @@ def __init__(self, config): def forward(self, pixel_values): if self.visual_prompter_apply != "add": - visual_prompt_mask = torch.ones([self.max_img_size, self.max_img_size], dtype=pixel_values.dtype) + visual_prompt_mask = torch.ones( + [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device + ) visual_prompt_mask[self.max_img_size - self.visual_prompt_size : self.max_img_size, :] = 0.0 pixel_values *= visual_prompt_mask if self.visual_prompter_apply != "remove": prompt = torch.zeros( - [pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size] + [pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size], + device=pixel_values.device, ) start_point = self.max_img_size - self.visual_prompt_size prompt[:, :, :, start_point : self.max_img_size, :] = self.pad_down @@ -667,10 +670,12 @@ def forward(self, pixel_values): if self.visual_prompter_apply not in ("add", "remove", "replace"): raise ValueError(f"Invalid visual_prompter_apply value {self.visual_prompter_apply}") if 
self.visual_prompter_apply in ("replace", "remove"): - visual_prompt_mask = torch.ones([self.max_img_size, self.max_img_size], dtype=pixel_values.dtype) + visual_prompt_mask = torch.ones( + [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device + ) pixel_values *= visual_prompt_mask if self.visual_prompter_apply in ("replace", "add"): - base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size) + base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size, device=pixel_values.device) prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4) prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3) prompt = torch.cat(pixel_values.size(0) * [prompt]) diff --git a/tests/models/tvp/test_modeling_tvp.py b/tests/models/tvp/test_modeling_tvp.py index b81635888c74..ebdd4fb0b769 100644 --- a/tests/models/tvp/test_modeling_tvp.py +++ b/tests/models/tvp/test_modeling_tvp.py @@ -176,6 +176,9 @@ class TVPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else {} ) + # TODO: Enable this once this model gets more usage + test_torchscript = False + def setUp(self): self.model_tester = TVPModelTester(self) From f70db28322150dd986298cc1d1be8bc144cc1a88 Mon Sep 17 00:00:00 2001 From: Ilya Gusev Date: Sun, 26 Nov 2023 16:28:37 +0100 Subject: [PATCH 258/268] Fix sliding_window hasattr in Mistral (#27041) * Fix sliding_window hasattr in Mistral * hasattr -> getattr for sliding_window in Mistral --------- Co-authored-by: Ilya Gusev --- src/transformers/models/mistral/modeling_mistral.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index e56ebc031066..72525e665aef 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -351,7 +351,7 @@ def forward( use_sliding_windows = ( _flash_supports_window_size - and hasattr(self.config, "sliding_window") is not None + and getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window ) @@ -363,7 +363,7 @@ def forward( if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute - if hasattr(self.config, "sliding_window") and kv_seq_len > self.config.sliding_window: + if getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window: slicing_tokens = kv_seq_len - self.config.sliding_window past_key = past_key_value[0] From e11e26df93f98104d6ef99c9458059fa787b857a Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 27 Nov 2023 09:11:58 +0100 Subject: [PATCH 259/268] Fix Past CI (#27696) fix Co-authored-by: ydshieh --- .github/workflows/self-past.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index 6a154544df8b..2ece4388d27c 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -88,6 +88,10 @@ jobs: working-directory: /transformers run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . + - name: Update some packages + working-directory: /transformers + run: python3 -m pip install -U datasets + - name: Echo folder ${{ matrix.folders }} shell: bash # For folders like `models/bert`, set an env. var. 
(`matrix_folders`) to `models_bert`, which will be used to @@ -164,6 +168,10 @@ jobs: working-directory: /transformers run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . + - name: Update some packages + working-directory: /transformers + run: python3 -m pip install -U datasets + - name: Echo folder ${{ matrix.folders }} shell: bash # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to @@ -240,6 +248,10 @@ jobs: working-directory: /transformers run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . + - name: Update some packages + working-directory: /transformers + run: python3 -m pip install -U datasets + - name: Install working-directory: /transformers run: | From 5c30dd40e768f6f30cc58128c711563cc8cf5b21 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Mon, 27 Nov 2023 09:14:40 +0100 Subject: [PATCH 260/268] fix warning (#27689) --- src/transformers/generation/configuration_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py index 6b0b434ec726..cb9240d3bf33 100644 --- a/src/transformers/generation/configuration_utils.py +++ b/src/transformers/generation/configuration_utils.py @@ -410,7 +410,7 @@ def validate(self, is_init=False): # 2. detect beam-only parameterization when not in beam mode if self.num_beams is None: - logging.warning("`num_beams` is set to None - defaulting to 1.", UserWarning) + warnings.warn("`num_beams` is set to None - defaulting to 1.", UserWarning) self.num_beams = 1 if self.num_beams == 1: From 27b752bcf16708dc062a1d69325a07e4560aa75a Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Mon, 27 Nov 2023 09:38:18 +0100 Subject: [PATCH 261/268] Reorder the code on the Hub to explicit that sharing on the Hub isn't a requirement (#27691) Reorder --- docs/source/en/custom_models.md | 46 +++++++++++++++++---------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/docs/source/en/custom_models.md b/docs/source/en/custom_models.md index 4abc3ce5773a..22ba58b9d9dd 100644 --- a/docs/source/en/custom_models.md +++ b/docs/source/en/custom_models.md @@ -14,7 +14,7 @@ rendered properly in your Markdown viewer. --> -# Sharing custom models +# Building custom models The 🤗 Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs. @@ -22,7 +22,8 @@ of the repository with no abstraction, so you can easily copy a modeling file an If you are writing a brand new model, it might be easier to start from scratch. In this tutorial, we will show you how to write a custom model and its configuration so it can be used inside Transformers, and how you can share it with the community (with the code it relies on) so that anyone can use it, even if it's not present in the 🤗 -Transformers library. +Transformers library. We'll see how to build upon transformers and extend the framework with your hooks and +custom code. We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the [timm library](https://github.com/rwightman/pytorch-image-models) into a [`PreTrainedModel`]. 
@@ -218,6 +219,27 @@ resnet50d.model.load_state_dict(pretrained_model.state_dict()) Now let's see how to make sure that when we do [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`], the code of the model is saved. +## Registering a model with custom code to the auto classes + +If you are writing a library that extends 🤗 Transformers, you may want to extend the auto classes to include your own +model. This is different from pushing the code to the Hub in the sense that users will need to import your library to +get the custom models (contrarily to automatically downloading the model code from the Hub). + +As long as your config has a `model_type` attribute that is different from existing model types, and that your model +classes have the right `config_class` attributes, you can just add them to the auto classes like this: + +```py +from transformers import AutoConfig, AutoModel, AutoModelForImageClassification + +AutoConfig.register("resnet", ResnetConfig) +AutoModel.register(ResnetConfig, ResnetModel) +AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification) +``` + +Note that the first argument used when registering your custom config to [`AutoConfig`] needs to match the `model_type` +of your custom config, and the first argument used when registering your custom models to any auto model class needs +to match the `config_class` of those models. + ## Sending the code to the Hub @@ -350,23 +372,3 @@ model = AutoModelForImageClassification.from_pretrained( Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit hash of any commit. -## Registering a model with custom code to the auto classes - -If you are writing a library that extends 🤗 Transformers, you may want to extend the auto classes to include your own -model. This is different from pushing the code to the Hub in the sense that users will need to import your library to -get the custom models (contrarily to automatically downloading the model code from the Hub). - -As long as your config has a `model_type` attribute that is different from existing model types, and that your model -classes have the right `config_class` attributes, you can just add them to the auto classes like this: - -```py -from transformers import AutoConfig, AutoModel, AutoModelForImageClassification - -AutoConfig.register("resnet", ResnetConfig) -AutoModel.register(ResnetConfig, ResnetModel) -AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification) -``` - -Note that the first argument used when registering your custom config to [`AutoConfig`] needs to match the `model_type` -of your custom config, and the first argument used when registering your custom models to any auto model class needs -to match the `config_class` of those models. 
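The registration pattern documented above is not specific to the ResNet example. Below is a minimal, self-contained sketch of the same mechanism; the `ToyConfig`/`ToyModel` classes and the `toy-mlp` model type are hypothetical stand-ins, chosen only so the registered type does not clash with an existing Transformers model type:

```py
import torch.nn as nn

from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel


class ToyConfig(PretrainedConfig):
    # The model_type must be different from every existing model type.
    model_type = "toy-mlp"

    def __init__(self, hidden_size=16, **kwargs):
        self.hidden_size = hidden_size
        super().__init__(**kwargs)


class ToyModel(PreTrainedModel):
    # config_class ties the model to its configuration for the auto classes.
    config_class = ToyConfig

    def __init__(self, config):
        super().__init__(config)
        self.layer = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, tensor):
        return self.layer(tensor)


# Same registration calls as in the guide: model_type first, then config -> model.
AutoConfig.register("toy-mlp", ToyConfig)
AutoModel.register(ToyConfig, ToyModel)

# The auto classes now resolve the custom type like any built-in one,
# including after a save/reload round trip.
model = AutoModel.from_config(ToyConfig(hidden_size=32))
model.save_pretrained("toy-mlp-checkpoint")
reloaded = AutoModel.from_pretrained("toy-mlp-checkpoint")
```

In practice the two classes would live in their own module, as the ResNet example does, so any script that needs the model can import them before calling the auto classes.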
From b09912c8f452ac485933ac0f86937aa01de3c398 Mon Sep 17 00:00:00 2001 From: Yanan Xie <108375850+lorabit110@users.noreply.github.com> Date: Mon, 27 Nov 2023 01:18:41 -0800 Subject: [PATCH 262/268] Fix mistral generate for long prompt / response (#27548) * Fix mistral generate for long prompt / response * Add unit test * fix linter * fix linter * fix test * add assisted generation test for mistral and load the model in 4 bit + fa2 --- .../models/mistral/modeling_mistral.py | 2 +- tests/models/mistral/test_modeling_mistral.py | 30 +++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 72525e665aef..9c6300ab5e0e 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -364,7 +364,7 @@ def forward( if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute if getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window: - slicing_tokens = kv_seq_len - self.config.sliding_window + slicing_tokens = 1 - self.config.sliding_window past_key = past_key_value[0] past_value = past_key_value[1] diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index cedcdeb4b9f3..dba013b20574 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -24,6 +24,7 @@ from transformers import AutoTokenizer, MistralConfig, is_torch_available from transformers.testing_utils import ( backend_empty_cache, + require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, @@ -494,3 +495,32 @@ def test_model_7b_generation(self): del model backend_empty_cache(torch_device) gc.collect() + + @require_bitsandbytes + @slow + @require_flash_attn + def test_model_7b_long_prompt(self): + EXPECTED_OUTPUT_TOKEN_IDS = [306, 338] + # An input with 4097 tokens that is above the size of the sliding window + input_ids = [1] + [306, 338] * 2048 + model = MistralForCausalLM.from_pretrained( + "mistralai/Mistral-7B-v0.1", + device_map="auto", + load_in_4bit=True, + use_flash_attention_2=True, + ) + input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) + generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) + self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) + + # Assisted generation (pass the assistant model to generate so the assisted path is actually exercised) + assistant_model = model + assistant_model.generation_config.num_assistant_tokens = 2 + assistant_model.generation_config.num_assistant_tokens_schedule = "constant" + generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0, assistant_model=assistant_model) + self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) + + del assistant_model + del model + backend_empty_cache(torch_device) + gc.collect() From 307cf3a2ab3faf50fed5b3aabf180a4c98188587 Mon Sep 17 00:00:00 2001 From: yhshin11 Date: Mon, 27 Nov 2023 22:59:59 +0900 Subject: [PATCH 263/268] Fix oneformer instance segmentation RuntimeError (#27725) --- .../models/oneformer/image_processing_oneformer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/oneformer/image_processing_oneformer.py b/src/transformers/models/oneformer/image_processing_oneformer.py index da060fa0514f..c42001a96252 100644 --- a/src/transformers/models/oneformer/image_processing_oneformer.py +++ 
b/src/transformers/models/oneformer/image_processing_oneformer.py @@ -1167,6 +1167,7 @@ def post_process_instance_segmentation( class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + device = masks_queries_logits.device batch_size = class_queries_logits.shape[0] num_queries = class_queries_logits.shape[1] num_classes = class_queries_logits.shape[-1] - 1 @@ -1177,7 +1178,7 @@ def post_process_instance_segmentation( for i in range(batch_size): # [Q, K] scores = torch.nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1] - labels = torch.arange(num_classes).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) + labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) From 1d7f406e19d2983fcb623c6cd8357c13a0be07b7 Mon Sep 17 00:00:00 2001 From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com> Date: Mon, 27 Nov 2023 22:23:54 +0800 Subject: [PATCH 264/268] fix assisted decoding assistant model inputs (#27503) * fix assisted decoding attention_cat * fix attention_mask for assisted decoding * fix attention_mask len * fix attn len * Use a more clean way to prepare assistant models inputs * fix param meaning * fix param name * fix assistant model inputs * update token type ids * fix assistant kwargs copy * add encoder-decoder tests of assisted decoding * check if assistant kwargs contains updated keys * revert test * fix whisper tests * fix assistant kwargs * revert whisper test * delete _extend funcs --- src/transformers/generation/utils.py | 177 +++++++++--------- .../models/nllb_moe/test_modeling_nllb_moe.py | 4 - .../test_modeling_switch_transformers.py | 4 - tests/models/t5/test_modeling_t5.py | 4 - 4 files changed, 86 insertions(+), 103 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 077bc16aff8b..424eb4fa7e50 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -1391,43 +1391,6 @@ def _validate_generated_length(self, generation_config, input_ids_length, has_de UserWarning, ) - def _extend_attention_mask(self, model_kwargs: Dict[str, Any], new_mask_length: int) -> Dict[str, Any]: - if self.config.is_encoder_decoder: - key = "decoder_attention_mask" - else: - key = "attention_mask" - - if key not in model_kwargs: - return model_kwargs - - mask = model_kwargs[key] - mask_extension_length = new_mask_length - mask.shape[1] - - if mask_extension_length < 0: - raise ValueError("Cannot extend attention mask to a length less than it already is") - - model_kwargs[key] = torch.cat( - [mask, mask.new_ones((mask.shape[0], mask_extension_length))], - dim=-1, - ) - - return model_kwargs - - def _extend_token_type_ids(self, model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]: - if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None: - return model_kwargs - - token_type_ids = model_kwargs["token_type_ids"] - final_token_type = token_type_ids[:, -1].unsqueeze(-1) - extension_length = new_length - token_type_ids.shape[1] - token_type_copies = final_token_type.repeat(1, extension_length) - model_kwargs["token_type_ids"] = torch.cat( - [model_kwargs["token_type_ids"], token_type_copies], - dim=-1, - ) - 
- return model_kwargs - @torch.no_grad() def generate( self, @@ -4505,11 +4468,6 @@ def assisted_decoding( else: num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens - # check if assistant model accepts encoder_outputs - assistant_accepts_encoder_outputs = "encoder_outputs" in set( - inspect.signature(assistant_model.forward).parameters.keys() - ) - # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() @@ -4547,20 +4505,32 @@ def assisted_decoding( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) + # prepare assistant model's keys of inputs + assistant_kwargs = copy.copy(model_kwargs) + if assistant_model.config.is_encoder_decoder: + # both are encoder-decoder + input_ids_key = "decoder_input_ids" + attention_key = "decoder_attention_mask" + assistant_kwargs["encoder_outputs"] = assistant_kwargs.pop("assistant_encoder_outputs") + elif "assistant_encoder_outputs" in assistant_kwargs: + # special case for encoder-decoder with decoder-only assistant (like DistilWhisper) + input_ids_key = "input_ids" + attention_key = "attention_mask" + assistant_kwargs["attention_mask"] = assistant_kwargs.get( + "decoder_attention_mask", + torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long), + ) + assistant_kwargs["encoder_outputs"] = assistant_kwargs.pop("assistant_encoder_outputs") + else: + # both are decoder-only + input_ids_key = "input_ids" + attention_key = "attention_mask" + # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) # other auxiliary variables max_len = stopping_criteria[0].max_length - assistant_kv_indexing = ( - 1 - if "bloom" in assistant_model.__class__.__name__.lower() - or ( - assistant_model.config.architectures is not None - and "bloom" in assistant_model.config.architectures[0].lower() - ) - else 0 - ) this_peer_finished = False # used by synced_gpus only while True: @@ -4582,44 +4552,21 @@ def assisted_decoding( # need access to the assistant cache to secure strong speedups. candidate_input_ids = input_ids for _ in range(int(num_assistant_tokens)): - # 1.1. 
use the assistant model to obtain the next candidate logits - if "assistant_past_key_values" in model_kwargs: - prev_seq_len = model_kwargs["assistant_past_key_values"][0][assistant_kv_indexing].shape[-2] - # `new_token_len` can be 1 or 2 (next token in assistant + last token picked by the larger model) - new_token_len = candidate_input_ids.shape[1] - prev_seq_len - assist_inputs = candidate_input_ids[:, -new_token_len:] - # TODO (joao): make it compatible with models that use unconventional fwd pass logic, like blip2 - if assistant_model.config.is_encoder_decoder: - assistant_model_outputs = assistant_model( - decoder_input_ids=assist_inputs, - past_key_values=model_kwargs["assistant_past_key_values"], - encoder_outputs=model_kwargs["assistant_encoder_outputs"], - ) - else: - encoder_kwargs = {} - - if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: - encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] - - assistant_model_outputs = assistant_model( - assist_inputs, past_key_values=model_kwargs["assistant_past_key_values"], **encoder_kwargs - ) - else: - if assistant_model.config.is_encoder_decoder: - assistant_model_outputs = assistant_model( - decoder_input_ids=candidate_input_ids, - encoder_outputs=model_kwargs["assistant_encoder_outputs"], - ) - else: - encoder_kwargs = {} + # 1.1 prepare assistant model inputs + assistant_inputs = assistant_model.prepare_inputs_for_generation( + candidate_input_ids, + **assistant_kwargs, + ) - if assistant_accepts_encoder_outputs and "assistant_encoder_outputs" in model_kwargs: - encoder_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] + # 1.2. check if the input ids length is correct + has_past_key_values = assistant_inputs.get("past_key_values", None) is not None + if has_past_key_values and assistant_inputs[input_ids_key].shape[-1] not in (1, 2): + raise ValueError("The length of the input ids in assistant inputs should be 1 or 2") - assistant_model_outputs = assistant_model(candidate_input_ids, **encoder_kwargs) + # 1.3. use the assistant model to obtain the next candidate logits + assistant_model_outputs = assistant_model(**assistant_inputs) - # 1.2. greedily select the next candidate token - model_kwargs["assistant_past_key_values"] = assistant_model_outputs.past_key_values + # 1.4. greedily select the next candidate token if len(logits_processor) > 0: assistant_model_outputs.logits[:, -1, :] = logits_processor( candidate_input_ids, assistant_model_outputs.logits[:, -1, :] @@ -4627,7 +4574,13 @@ def assisted_decoding( new_token = assistant_model_outputs.logits[:, -1, :].argmax(dim=-1) candidate_input_ids = torch.cat((candidate_input_ids, new_token[:, None]), dim=-1) - # 1.3. stop assistant generation on EOS + # 1.5. update assistant model inputs + if assistant_kwargs.get(attention_key, None) is not None: + mask = assistant_kwargs[attention_key] + assistant_kwargs[attention_key] = torch.cat([mask, mask.new_ones((mask.shape[0], 1))], dim=-1) + assistant_kwargs["past_key_values"] = assistant_model_outputs.past_key_values + + # 1.6. stop assistant generation on EOS if eos_token_id_tensor is not None: last_assistant_token_is_eos = new_token.tile(eos_token_id_tensor.shape[0], 1) last_assistant_token_is_eos = ( @@ -4646,8 +4599,10 @@ def assisted_decoding( # 2.1. 
Prepare the model inputs candidate_kwargs = copy.copy(model_kwargs) - candidate_kwargs = self._extend_attention_mask(candidate_kwargs, candidate_input_ids.shape[1]) - candidate_kwargs = self._extend_token_type_ids(candidate_kwargs, candidate_input_ids.shape[1]) + candidate_kwargs = _prepare_attention_mask( + candidate_kwargs, candidate_input_ids.shape[1], self.config.is_encoder_decoder + ) + candidate_kwargs = _prepare_token_type_ids(candidate_kwargs, candidate_input_ids.shape[1]) model_inputs = self.prepare_inputs_for_generation(candidate_input_ids, **candidate_kwargs) @@ -4699,8 +4654,8 @@ def assisted_decoding( # 5.3. Discard past key values relative to unused assistant tokens new_cache_size = new_cur_len - 1 outputs.past_key_values = _crop_past_key_values(self, outputs.past_key_values, new_cache_size) - model_kwargs["assistant_past_key_values"] = _crop_past_key_values( - assistant_model, model_kwargs["assistant_past_key_values"], new_cache_size - 1 + assistant_kwargs["past_key_values"] = _crop_past_key_values( + assistant_model, assistant_kwargs["past_key_values"], new_cache_size - 1 ) # the assistant does not have the token after the last match, hence the -1 # 6. Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic, @@ -4761,6 +4716,12 @@ def assisted_decoding( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) + # Update assistant_kwargs for the assistant's next round of generations + assistant_kwargs = _prepare_attention_mask( + assistant_kwargs, new_cur_len, assistant_model.config.is_encoder_decoder + ) + assistant_kwargs = _prepare_token_type_ids(assistant_kwargs, new_cur_len) + # if eos_token was found in one sentence, set sentence to finished if eos_token_id_tensor is not None: unfinished_sequences = unfinished_sequences.mul( @@ -4938,3 +4899,37 @@ def _ranking_fast( contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K] _, selected_idx = contrastive_score.max(dim=-1) # [B] return selected_idx + + +def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]: + """Expands or crops the model's mask for decoding purposes, to the defined length""" + + mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask" + if mask_key not in model_kwargs: + return model_kwargs + + mask = model_kwargs[mask_key] + mask_length_diff = new_length - mask.shape[1] + + if mask_length_diff < 0: + model_kwargs[mask_key] = mask[:, :mask_length_diff] + elif mask_length_diff > 0: + model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1) + return model_kwargs + + +def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]: + """Expands or crops the model's token_type_ids for decoding purposes, to the defined length""" + if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None: + return model_kwargs + + token_type_ids = model_kwargs["token_type_ids"] + final_token_type = token_type_ids[:, -1].unsqueeze(-1) + type_length_diff = new_length - token_type_ids.shape[1] + + if type_length_diff < 0: + token_type_ids = token_type_ids[:, :type_length_diff] + elif type_length_diff > 0: + token_type_copies = final_token_type.repeat(1, type_length_diff) + model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1) + return model_kwargs diff --git a/tests/models/nllb_moe/test_modeling_nllb_moe.py 
b/tests/models/nllb_moe/test_modeling_nllb_moe.py index 1109948e0e70..2e8ba30ce675 100644 --- a/tests/models/nllb_moe/test_modeling_nllb_moe.py +++ b/tests/models/nllb_moe/test_modeling_nllb_moe.py @@ -348,10 +348,6 @@ def test_get_loss(self): self.assertIsNotNone(model(**input_dict)["encoder_router_logits"][1]) self.assertIsNotNone(model(**input_dict)["decoder_router_logits"][0]) - @unittest.skip("Test does not fail individually but fails on the CI @ArthurZucker looking into it") - def test_assisted_decoding_sample(self): - pass - @require_torch @require_sentencepiece diff --git a/tests/models/switch_transformers/test_modeling_switch_transformers.py b/tests/models/switch_transformers/test_modeling_switch_transformers.py index 5458b5666679..aa226f82ae36 100644 --- a/tests/models/switch_transformers/test_modeling_switch_transformers.py +++ b/tests/models/switch_transformers/test_modeling_switch_transformers.py @@ -726,10 +726,6 @@ def test_generate_with_head_masking(self): def test_disk_offload(self): pass - @unittest.skip("Test does not fail individually but fails on the CI @ArthurZucker looking into it") - def test_assisted_decoding_sample(self): - pass - class SwitchTransformersEncoderOnlyModelTester: def __init__( diff --git a/tests/models/t5/test_modeling_t5.py b/tests/models/t5/test_modeling_t5.py index fe0983047359..68b9f45e155b 100644 --- a/tests/models/t5/test_modeling_t5.py +++ b/tests/models/t5/test_modeling_t5.py @@ -1036,10 +1036,6 @@ def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) - @unittest.skip("Test does not fail individually but fails on the CI @ArthurZucker looking into it") - def test_assisted_decoding_sample(self): - pass - def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) From 59499bbe8b8ae4c76732afbe0279cbb6c5df1db3 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 27 Nov 2023 15:48:17 +0100 Subject: [PATCH 265/268] Update forward signature test for vision models (#27681) * Update forward signature * Empty-Commit --- tests/models/beit/test_modeling_beit.py | 13 ------------- tests/models/bit/test_modeling_bit.py | 13 ------------- tests/models/convnext/test_modeling_convnext.py | 13 ------------- tests/models/convnextv2/test_modeling_convnextv2.py | 13 ------------- tests/models/cvt/test_modeling_cvt.py | 13 ------------- .../data2vec/test_modeling_data2vec_vision.py | 13 ------------- tests/models/deit/test_modeling_deit.py | 13 ------------- tests/models/dinat/test_modeling_dinat.py | 13 ------------- tests/models/dinov2/test_modeling_dinov2.py | 13 ------------- tests/models/donut/test_modeling_donut_swin.py | 13 ------------- tests/models/dpt/test_modeling_dpt.py | 13 ------------- tests/models/dpt/test_modeling_dpt_auto_backbone.py | 13 ------------- tests/models/dpt/test_modeling_dpt_hybrid.py | 13 ------------- .../test_modeling_efficientformer.py | 13 ------------- .../efficientnet/test_modeling_efficientnet.py | 13 ------------- tests/models/focalnet/test_modeling_focalnet.py | 13 ------------- tests/models/glpn/test_modeling_glpn.py | 13 ------------- tests/models/levit/test_modeling_levit.py | 13 ------------- .../models/mask2former/test_modeling_mask2former.py | 13 ------------- tests/models/maskformer/test_modeling_maskformer.py | 13 ------------- .../maskformer/test_modeling_maskformer_swin.py | 13 ------------- 
tests/models/mgp_str/test_modeling_mgp_str.py | 13 ------------- .../mobilenet_v1/test_modeling_mobilenet_v1.py | 13 ------------- .../mobilenet_v2/test_modeling_mobilenet_v2.py | 13 ------------- tests/models/mobilevit/test_modeling_mobilevit.py | 13 ------------- .../models/mobilevitv2/test_modeling_mobilevitv2.py | 13 ------------- tests/models/nat/test_modeling_nat.py | 13 ------------- tests/models/poolformer/test_modeling_poolformer.py | 13 ------------- tests/models/pvt/test_modeling_pvt.py | 13 ------------- tests/models/regnet/test_modeling_regnet.py | 13 ------------- tests/models/resnet/test_modeling_resnet.py | 13 ------------- tests/models/sam/test_modeling_sam.py | 13 ------------- tests/models/segformer/test_modeling_segformer.py | 13 ------------- .../models/swiftformer/test_modeling_swiftformer.py | 13 ------------- tests/models/swin/test_modeling_swin.py | 13 ------------- tests/models/swin2sr/test_modeling_swin2sr.py | 13 ------------- tests/models/swinv2/test_modeling_swinv2.py | 13 ------------- .../models/timesformer/test_modeling_timesformer.py | 13 ------------- tests/models/upernet/test_modeling_upernet.py | 13 ------------- tests/models/videomae/test_modeling_videomae.py | 13 ------------- tests/models/vit/test_modeling_vit.py | 13 ------------- tests/models/vit_hybrid/test_modeling_vit_hybrid.py | 13 ------------- tests/models/vit_mae/test_modeling_vit_mae.py | 13 ------------- tests/models/vit_msn/test_modeling_vit_msn.py | 13 ------------- tests/models/vitdet/test_modeling_vitdet.py | 13 ------------- tests/models/vitmatte/test_modeling_vitmatte.py | 13 ------------- tests/models/yolos/test_modeling_yolos.py | 13 ------------- tests/test_modeling_common.py | 2 +- 48 files changed, 1 insertion(+), 612 deletions(-) diff --git a/tests/models/beit/test_modeling_beit.py b/tests/models/beit/test_modeling_beit.py index 8774503e7694..4ed7da6b10c8 100644 --- a/tests/models/beit/test_modeling_beit.py +++ b/tests/models/beit/test_modeling_beit.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch BEiT model. """ -import inspect import unittest from datasets import load_dataset @@ -236,18 +235,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/bit/test_modeling_bit.py b/tests/models/bit/test_modeling_bit.py index d7d2c60347f6..03e2bd109519 100644 --- a/tests/models/bit/test_modeling_bit.py +++ b/tests/models/bit/test_modeling_bit.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Bit model. 
""" -import inspect import unittest from transformers import BitConfig @@ -202,18 +201,6 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/convnext/test_modeling_convnext.py b/tests/models/convnext/test_modeling_convnext.py index 397fa596f102..ac2b6f927c8d 100644 --- a/tests/models/convnext/test_modeling_convnext.py +++ b/tests/models/convnext/test_modeling_convnext.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ConvNext model. """ -import inspect import unittest from transformers import ConvNextConfig @@ -212,18 +211,6 @@ def test_model_common_attributes(self): def test_feed_forward_chunking(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/convnextv2/test_modeling_convnextv2.py b/tests/models/convnextv2/test_modeling_convnextv2.py index c3f8804f1cca..694901a18469 100644 --- a/tests/models/convnextv2/test_modeling_convnextv2.py +++ b/tests/models/convnextv2/test_modeling_convnextv2.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ConvNextV2 model. """ -import inspect import unittest from transformers import ConvNextV2Config @@ -265,18 +264,6 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/cvt/test_modeling_cvt.py b/tests/models/cvt/test_modeling_cvt.py index 6f4f63f0f9df..4abeb5571c7b 100644 --- a/tests/models/cvt/test_modeling_cvt.py +++ b/tests/models/cvt/test_modeling_cvt.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch CvT model. 
""" -import inspect import unittest from math import floor @@ -191,18 +190,6 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/data2vec/test_modeling_data2vec_vision.py b/tests/models/data2vec/test_modeling_data2vec_vision.py index 69a763a4f2ec..bdb95588ac5c 100644 --- a/tests/models/data2vec/test_modeling_data2vec_vision.py +++ b/tests/models/data2vec/test_modeling_data2vec_vision.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Data2VecVision model. """ -import inspect import unittest from transformers import Data2VecVisionConfig @@ -220,18 +219,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py index 4a9945a731fd..9cd5be8fd375 100644 --- a/tests/models/deit/test_modeling_deit.py +++ b/tests/models/deit/test_modeling_deit.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch DeiT model. """ -import inspect import unittest import warnings @@ -238,18 +237,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/dinat/test_modeling_dinat.py b/tests/models/dinat/test_modeling_dinat.py index a7e0b7d0650e..c824060cf816 100644 --- a/tests/models/dinat/test_modeling_dinat.py +++ b/tests/models/dinat/test_modeling_dinat.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Dinat model. 
""" import collections -import inspect import unittest from transformers import DinatConfig @@ -264,18 +263,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_attention_outputs(self): self.skipTest("Dinat's attention operation is handled entirely by NATTEN.") diff --git a/tests/models/dinov2/test_modeling_dinov2.py b/tests/models/dinov2/test_modeling_dinov2.py index 4e3839749bdd..4586f68ac5ad 100644 --- a/tests/models/dinov2/test_modeling_dinov2.py +++ b/tests/models/dinov2/test_modeling_dinov2.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Dinov2 model. """ -import inspect import unittest from transformers import Dinov2Config @@ -265,18 +264,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/donut/test_modeling_donut_swin.py b/tests/models/donut/test_modeling_donut_swin.py index 2a0d9f5e17cb..e52e679e42e6 100644 --- a/tests/models/donut/test_modeling_donut_swin.py +++ b/tests/models/donut/test_modeling_donut_swin.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Donut Swin model. """ import collections -import inspect import unittest from transformers import DonutSwinConfig @@ -186,18 +185,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 100803bcdebf..0b398c923e68 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch DPT model. 
""" -import inspect import unittest from transformers import DPTConfig @@ -195,18 +194,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/dpt/test_modeling_dpt_auto_backbone.py b/tests/models/dpt/test_modeling_dpt_auto_backbone.py index 95e3128ff0ed..76ab220583fe 100644 --- a/tests/models/dpt/test_modeling_dpt_auto_backbone.py +++ b/tests/models/dpt/test_modeling_dpt_auto_backbone.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch DPT model. """ -import inspect import unittest from transformers import Dinov2Config, DPTConfig @@ -154,18 +153,6 @@ def test_config(self): def test_inputs_embeds(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) diff --git a/tests/models/dpt/test_modeling_dpt_hybrid.py b/tests/models/dpt/test_modeling_dpt_hybrid.py index 82055b210557..689863795141 100644 --- a/tests/models/dpt/test_modeling_dpt_hybrid.py +++ b/tests/models/dpt/test_modeling_dpt_hybrid.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch DPT model. """ -import inspect import unittest from transformers import DPTConfig @@ -209,18 +208,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/efficientformer/test_modeling_efficientformer.py b/tests/models/efficientformer/test_modeling_efficientformer.py index 2774a210da56..73283fbbf600 100644 --- a/tests/models/efficientformer/test_modeling_efficientformer.py +++ b/tests/models/efficientformer/test_modeling_efficientformer.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch EfficientFormer model. 
""" -import inspect import unittest import warnings from typing import List @@ -223,18 +222,6 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py index 38a359c574f8..32050e3d21a5 100644 --- a/tests/models/efficientnet/test_modeling_efficientnet.py +++ b/tests/models/efficientnet/test_modeling_efficientnet.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch EfficientNet model. """ -import inspect import unittest from transformers import EfficientNetConfig @@ -172,18 +171,6 @@ def test_model_common_attributes(self): def test_feed_forward_chunking(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/focalnet/test_modeling_focalnet.py b/tests/models/focalnet/test_modeling_focalnet.py index ce96f0ade414..6de095d97523 100644 --- a/tests/models/focalnet/test_modeling_focalnet.py +++ b/tests/models/focalnet/test_modeling_focalnet.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch FocalNet model. """ import collections -import inspect import unittest from transformers import FocalNetConfig @@ -299,18 +298,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes[:-1]: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) diff --git a/tests/models/glpn/test_modeling_glpn.py b/tests/models/glpn/test_modeling_glpn.py index 60e29b739f26..138a8cf2832e 100644 --- a/tests/models/glpn/test_modeling_glpn.py +++ b/tests/models/glpn/test_modeling_glpn.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch GLPN model. 
""" -import inspect import unittest from transformers import is_torch_available, is_vision_available @@ -177,18 +176,6 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True diff --git a/tests/models/levit/test_modeling_levit.py b/tests/models/levit/test_modeling_levit.py index 0e46f6f56dd7..d569b2b53852 100644 --- a/tests/models/levit/test_modeling_levit.py +++ b/tests/models/levit/test_modeling_levit.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch LeViT model. """ -import inspect import unittest import warnings from math import ceil, floor @@ -218,18 +217,6 @@ def test_model_common_attributes(self): def test_attention_outputs(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) diff --git a/tests/models/mask2former/test_modeling_mask2former.py b/tests/models/mask2former/test_modeling_mask2former.py index b2fc84e7d324..8dc70d7c750d 100644 --- a/tests/models/mask2former/test_modeling_mask2former.py +++ b/tests/models/mask2former/test_modeling_mask2former.py @@ -14,7 +14,6 @@ # limitations under the License. """ Testing suite for the PyTorch Mask2Former model. """ -import inspect import unittest import numpy as np @@ -242,18 +241,6 @@ def test_resize_tokens_embeddings(self): def test_multi_gpu_data_parallel_forward(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - @slow def test_model_from_pretrained(self): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: diff --git a/tests/models/maskformer/test_modeling_maskformer.py b/tests/models/maskformer/test_modeling_maskformer.py index fe1cc3423e0f..ffa77a051259 100644 --- a/tests/models/maskformer/test_modeling_maskformer.py +++ b/tests/models/maskformer/test_modeling_maskformer.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch MaskFormer model. 
""" import copy -import inspect import unittest import numpy as np @@ -266,18 +265,6 @@ def test_resize_tokens_embeddings(self): def test_multi_gpu_data_parallel_forward(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - @slow def test_model_from_pretrained(self): for model_name in ["facebook/maskformer-swin-small-coco"]: diff --git a/tests/models/maskformer/test_modeling_maskformer_swin.py b/tests/models/maskformer/test_modeling_maskformer_swin.py index 4125f36db798..8d29e8ebee09 100644 --- a/tests/models/maskformer/test_modeling_maskformer_swin.py +++ b/tests/models/maskformer/test_modeling_maskformer_swin.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch MaskFormer Swin model. """ import collections -import inspect import unittest from typing import Dict, List, Tuple @@ -234,18 +233,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def test_attention_outputs(self): pass diff --git a/tests/models/mgp_str/test_modeling_mgp_str.py b/tests/models/mgp_str/test_modeling_mgp_str.py index d8ba50a35002..a7fd95a1311c 100644 --- a/tests/models/mgp_str/test_modeling_mgp_str.py +++ b/tests/models/mgp_str/test_modeling_mgp_str.py @@ -14,7 +14,6 @@ # limitations under the License. """ Testing suite for the PyTorch MGP-STR model. """ -import inspect import unittest import requests @@ -151,18 +150,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - @unittest.skip(reason="MgpstrModel does not support feedforward chunking") def test_feed_forward_chunking(self): pass diff --git a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py index 8c24935800b9..35848da3161d 100644 --- a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch MobileNetV1 model. 
""" -import inspect import unittest from transformers import MobileNetV1Config @@ -177,18 +176,6 @@ def test_model_common_attributes(self): def test_attention_outputs(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py index 06b2bd9d3fc4..bbd83408853c 100644 --- a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch MobileNetV2 model. """ -import inspect import unittest from transformers import MobileNetV2Config @@ -228,18 +227,6 @@ def test_model_common_attributes(self): def test_attention_outputs(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/mobilevit/test_modeling_mobilevit.py b/tests/models/mobilevit/test_modeling_mobilevit.py index 2c01ea0c99bb..563bee802322 100644 --- a/tests/models/mobilevit/test_modeling_mobilevit.py +++ b/tests/models/mobilevit/test_modeling_mobilevit.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch MobileViT model. """ -import inspect import unittest from transformers import MobileViTConfig @@ -221,18 +220,6 @@ def test_model_common_attributes(self): def test_attention_outputs(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py index b1961b2e6d4a..192cf3a9e1e8 100644 --- a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py +++ b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch MobileViTV2 model. 
""" -import inspect import unittest from transformers import MobileViTV2Config @@ -228,18 +227,6 @@ def test_attention_outputs(self): def test_multi_gpu_data_parallel_forward(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/nat/test_modeling_nat.py b/tests/models/nat/test_modeling_nat.py index a27b087ce519..3ab49d2d9557 100644 --- a/tests/models/nat/test_modeling_nat.py +++ b/tests/models/nat/test_modeling_nat.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Nat model. """ import collections -import inspect import unittest from transformers import NatConfig @@ -261,18 +260,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_attention_outputs(self): self.skipTest("Nat's attention operation is handled entirely by NATTEN.") diff --git a/tests/models/poolformer/test_modeling_poolformer.py b/tests/models/poolformer/test_modeling_poolformer.py index 99667d6f1b45..070564e718bf 100644 --- a/tests/models/poolformer/test_modeling_poolformer.py +++ b/tests/models/poolformer/test_modeling_poolformer.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch PoolFormer model. """ -import inspect import unittest from transformers import is_torch_available, is_vision_available @@ -208,18 +207,6 @@ def test_training(self): loss = model(**inputs).loss loss.backward() - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - @slow def test_model_from_pretrained(self): for model_name in POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: diff --git a/tests/models/pvt/test_modeling_pvt.py b/tests/models/pvt/test_modeling_pvt.py index 04ce21530531..e174b67a0788 100644 --- a/tests/models/pvt/test_modeling_pvt.py +++ b/tests/models/pvt/test_modeling_pvt.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Pvt model. 
""" -import inspect import unittest from transformers import is_torch_available, is_vision_available @@ -253,18 +252,6 @@ def test_training(self): loss = model(**inputs).loss loss.backward() - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - @slow def test_model_from_pretrained(self): for model_name in PVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: diff --git a/tests/models/regnet/test_modeling_regnet.py b/tests/models/regnet/test_modeling_regnet.py index debd8271b5c6..9840575f317e 100644 --- a/tests/models/regnet/test_modeling_regnet.py +++ b/tests/models/regnet/test_modeling_regnet.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch RegNet model. """ -import inspect import unittest from transformers import RegNetConfig @@ -161,18 +160,6 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/resnet/test_modeling_resnet.py b/tests/models/resnet/test_modeling_resnet.py index 4cfa18d6bcf4..bae9eb6d24c8 100644 --- a/tests/models/resnet/test_modeling_resnet.py +++ b/tests/models/resnet/test_modeling_resnet.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ResNet model. 
""" -import inspect import unittest from transformers import ResNetConfig @@ -206,18 +205,6 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/sam/test_modeling_sam.py b/tests/models/sam/test_modeling_sam.py index a84e3695c13f..3e63edb23a0c 100644 --- a/tests/models/sam/test_modeling_sam.py +++ b/tests/models/sam/test_modeling_sam.py @@ -16,7 +16,6 @@ import gc -import inspect import unittest import requests @@ -338,18 +337,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/segformer/test_modeling_segformer.py b/tests/models/segformer/test_modeling_segformer.py index 0506be9b1f11..d9a4dce9ffeb 100644 --- a/tests/models/segformer/test_modeling_segformer.py +++ b/tests/models/segformer/test_modeling_segformer.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch SegFormer model. 
""" -import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available @@ -212,18 +211,6 @@ def test_inputs_embeds(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True diff --git a/tests/models/swiftformer/test_modeling_swiftformer.py b/tests/models/swiftformer/test_modeling_swiftformer.py index 3e286cc32048..83b6aa3510d9 100644 --- a/tests/models/swiftformer/test_modeling_swiftformer.py +++ b/tests/models/swiftformer/test_modeling_swiftformer.py @@ -16,7 +16,6 @@ import copy -import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig @@ -177,18 +176,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/swin/test_modeling_swin.py b/tests/models/swin/test_modeling_swin.py index 383f0fe867d4..e82c13f8db27 100644 --- a/tests/models/swin/test_modeling_swin.py +++ b/tests/models/swin/test_modeling_swin.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch Swin model. """ import collections -import inspect import unittest from transformers import SwinConfig @@ -300,18 +299,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True diff --git a/tests/models/swin2sr/test_modeling_swin2sr.py b/tests/models/swin2sr/test_modeling_swin2sr.py index 730689603684..581e8debc7e7 100644 --- a/tests/models/swin2sr/test_modeling_swin2sr.py +++ b/tests/models/swin2sr/test_modeling_swin2sr.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Swin2SR model. 
""" -import inspect import unittest from transformers import Swin2SRConfig @@ -232,18 +231,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - @slow def test_model_from_pretrained(self): for model_name in SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: diff --git a/tests/models/swinv2/test_modeling_swinv2.py b/tests/models/swinv2/test_modeling_swinv2.py index 5a771b2c4b63..9b9d08b39fd1 100644 --- a/tests/models/swinv2/test_modeling_swinv2.py +++ b/tests/models/swinv2/test_modeling_swinv2.py @@ -14,7 +14,6 @@ # limitations under the License. """ Testing suite for the PyTorch Swinv2 model. """ import collections -import inspect import unittest from transformers import Swinv2Config @@ -220,18 +219,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True diff --git a/tests/models/timesformer/test_modeling_timesformer.py b/tests/models/timesformer/test_modeling_timesformer.py index 2b7a5e279fe7..d4e71c8c5999 100644 --- a/tests/models/timesformer/test_modeling_timesformer.py +++ b/tests/models/timesformer/test_modeling_timesformer.py @@ -16,7 +16,6 @@ import copy -import inspect import unittest import numpy as np @@ -204,18 +203,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/upernet/test_modeling_upernet.py b/tests/models/upernet/test_modeling_upernet.py index 84c32f7233e7..aeeba191b67a 100644 --- a/tests/models/upernet/test_modeling_upernet.py +++ b/tests/models/upernet/test_modeling_upernet.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch UperNet framework. 
""" -import inspect import unittest from huggingface_hub import hf_hub_download @@ -170,18 +169,6 @@ def test_config(self): def create_and_test_config_common_properties(self): return - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) diff --git a/tests/models/videomae/test_modeling_videomae.py b/tests/models/videomae/test_modeling_videomae.py index 9fb9c9e7f376..2fd9f90c3085 100644 --- a/tests/models/videomae/test_modeling_videomae.py +++ b/tests/models/videomae/test_modeling_videomae.py @@ -16,7 +16,6 @@ import copy -import inspect import unittest import numpy as np @@ -228,18 +227,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/vit/test_modeling_vit.py b/tests/models/vit/test_modeling_vit.py index d1e887183329..2e9a632a3719 100644 --- a/tests/models/vit/test_modeling_vit.py +++ b/tests/models/vit/test_modeling_vit.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ViT model. """ -import inspect import unittest from transformers import ViTConfig @@ -224,18 +223,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/vit_hybrid/test_modeling_vit_hybrid.py b/tests/models/vit_hybrid/test_modeling_vit_hybrid.py index 3ea407eafd4e..870a4c833583 100644 --- a/tests/models/vit_hybrid/test_modeling_vit_hybrid.py +++ b/tests/models/vit_hybrid/test_modeling_vit_hybrid.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ViT Hybrid model. 
""" -import inspect import unittest from transformers import ViTHybridConfig @@ -185,18 +184,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/vit_mae/test_modeling_vit_mae.py b/tests/models/vit_mae/test_modeling_vit_mae.py index 89a3a0d803e4..21a66b8a6d92 100644 --- a/tests/models/vit_mae/test_modeling_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_vit_mae.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ViTMAE model. """ -import inspect import math import tempfile import unittest @@ -192,18 +191,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/vit_msn/test_modeling_vit_msn.py b/tests/models/vit_msn/test_modeling_vit_msn.py index a53163775150..2125e897b9ca 100644 --- a/tests/models/vit_msn/test_modeling_vit_msn.py +++ b/tests/models/vit_msn/test_modeling_vit_msn.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ViTMSN model. """ -import inspect import unittest from transformers import ViTMSNConfig @@ -183,18 +182,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/vitdet/test_modeling_vitdet.py b/tests/models/vitdet/test_modeling_vitdet.py index 361e563d58d4..a18fdda96a5d 100644 --- a/tests/models/vitdet/test_modeling_vitdet.py +++ b/tests/models/vitdet/test_modeling_vitdet.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch ViTDet model. 
""" -import inspect import unittest from transformers import VitDetConfig @@ -210,18 +209,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/vitmatte/test_modeling_vitmatte.py b/tests/models/vitmatte/test_modeling_vitmatte.py index fcc99de0ba93..c9446b116f1e 100644 --- a/tests/models/vitmatte/test_modeling_vitmatte.py +++ b/tests/models/vitmatte/test_modeling_vitmatte.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch VitMatte model. """ -import inspect import unittest from huggingface_hub import hf_hub_download @@ -189,18 +188,6 @@ def test_training_gradient_checkpointing_use_reentrant_false(self): def test_model_common_attributes(self): pass - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/models/yolos/test_modeling_yolos.py b/tests/models/yolos/test_modeling_yolos.py index c1fb50e30b76..73281df02560 100644 --- a/tests/models/yolos/test_modeling_yolos.py +++ b/tests/models/yolos/test_modeling_yolos.py @@ -15,7 +15,6 @@ """ Testing suite for the PyTorch YOLOS model. 
""" -import inspect import unittest from transformers import YolosConfig @@ -217,18 +216,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_forward_signature(self): - config, _ = self.model_tester.prepare_config_and_inputs_for_common() - - for model_class in self.all_model_classes: - model = model_class(config) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["pixel_values"] - self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 06f05ec86c0f..79c630c0d537 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -543,7 +543,7 @@ def test_forward_signature(self): ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: - expected_arg_names = ["input_ids"] + expected_arg_names = [model.main_input_name] self.assertListEqual(arg_names[:1], expected_arg_names) def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): From 334a6d18a105598fae26889992985e56fe7aad31 Mon Sep 17 00:00:00 2001 From: Yixiao Yuan <57313880+eshoyuan@users.noreply.github.com> Date: Mon, 27 Nov 2023 10:25:46 -0500 Subject: [PATCH 266/268] Modify group_sub_entities in TokenClassification Pipeline to support label with "-" (#27325) * fix group_sub_entities bug * add space --- src/transformers/pipelines/token_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/pipelines/token_classification.py b/src/transformers/pipelines/token_classification.py index a32a9aa9ad8b..42c5d927079c 100644 --- a/src/transformers/pipelines/token_classification.py +++ b/src/transformers/pipelines/token_classification.py @@ -503,7 +503,7 @@ def group_sub_entities(self, entities: List[dict]) -> dict: entities (`dict`): The entities predicted by the pipeline. 
""" # Get the first entity in the entity group - entity = entities[0]["entity"].split("-")[-1] + entity = entities[0]["entity"].split("-", 1)[-1] scores = np.nanmean([entity["score"] for entity in entities]) tokens = [entity["word"] for entity in entities] From c832bcb812fc962830c11ea64c5ff623240a3d6d Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 27 Nov 2023 16:29:07 +0100 Subject: [PATCH 267/268] Fix owlv2 code snippet (#27698) * Fix code snippet * Improve code snippet --- .../models/owlv2/modeling_owlv2.py | 76 ++++++++++++++----- 1 file changed, 56 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index 9c7cede8fbf9..6cc996966bd7 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -1544,19 +1544,38 @@ def image_guided_detection( >>> import requests >>> from PIL import Image >>> import torch + >>> import numpy as np >>> from transformers import AutoProcessor, Owlv2ForObjectDetection + >>> from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble") >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble") + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg" >>> query_image = Image.open(requests.get(query_url, stream=True).raw) >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt") + + >>> # forward pass >>> with torch.no_grad(): ... outputs = model.image_guided_detection(**inputs) - >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2] - >>> target_sizes = torch.Tensor([image.size[::-1]]) + + >>> # Note: boxes need to be visualized on the padded, unnormalized image + >>> # hence we'll set the target image sizes (height, width) based on that + + >>> def get_preprocessed_image(pixel_values): + ... pixel_values = pixel_values.squeeze().numpy() + ... unnormalized_image = (pixel_values * np.array(OPENAI_CLIP_STD)[:, None, None]) + np.array(OPENAI_CLIP_MEAN)[:, None, None] + ... unnormalized_image = (unnormalized_image * 255).astype(np.uint8) + ... unnormalized_image = np.moveaxis(unnormalized_image, 0, -1) + ... unnormalized_image = Image.fromarray(unnormalized_image) + ... return unnormalized_image + + >>> unnormalized_image = get_preprocessed_image(inputs.pixel_values) + + >>> target_sizes = torch.Tensor([unnormalized_image.size[::-1]]) + >>> # Convert outputs (bounding boxes and class logits) to COCO API >>> results = processor.post_process_image_guided_detection( ... outputs=outputs, threshold=0.9, nms_threshold=0.3, target_sizes=target_sizes @@ -1566,19 +1585,19 @@ def image_guided_detection( >>> for box, score in zip(boxes, scores): ... box = [round(i, 2) for i in box.tolist()] ... 
print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}") - Detected similar object with confidence 0.938 at location [327.31, 54.94, 547.39, 268.06] - Detected similar object with confidence 0.959 at location [5.78, 360.65, 619.12, 366.39] - Detected similar object with confidence 0.902 at location [2.85, 360.01, 627.63, 380.79] - Detected similar object with confidence 0.985 at location [176.97, -29.45, 672.69, 182.83] - Detected similar object with confidence 1.0 at location [6.53, 14.35, 624.87, 470.82] - Detected similar object with confidence 0.998 at location [579.98, 29.14, 615.49, 489.05] - Detected similar object with confidence 0.985 at location [206.15, 10.53, 247.74, 466.01] - Detected similar object with confidence 0.947 at location [18.62, 429.72, 646.5, 457.72] - Detected similar object with confidence 0.996 at location [523.88, 20.69, 586.84, 483.18] - Detected similar object with confidence 0.998 at location [3.39, 360.59, 617.29, 499.21] - Detected similar object with confidence 0.969 at location [4.47, 449.05, 614.5, 474.76] - Detected similar object with confidence 0.966 at location [31.44, 463.65, 654.66, 471.07] - Detected similar object with confidence 0.924 at location [30.93, 468.07, 635.35, 475.39] + Detected similar object with confidence 0.938 at location [490.96, 109.89, 821.09, 536.11] + Detected similar object with confidence 0.959 at location [8.67, 721.29, 928.68, 732.78] + Detected similar object with confidence 0.902 at location [4.27, 720.02, 941.45, 761.59] + Detected similar object with confidence 0.985 at location [265.46, -58.9, 1009.04, 365.66] + Detected similar object with confidence 1.0 at location [9.79, 28.69, 937.31, 941.64] + Detected similar object with confidence 0.998 at location [869.97, 58.28, 923.23, 978.1] + Detected similar object with confidence 0.985 at location [309.23, 21.07, 371.61, 932.02] + Detected similar object with confidence 0.947 at location [27.93, 859.45, 969.75, 915.44] + Detected similar object with confidence 0.996 at location [785.82, 41.38, 880.26, 966.37] + Detected similar object with confidence 0.998 at location [5.08, 721.17, 925.93, 998.41] + Detected similar object with confidence 0.969 at location [6.7, 898.1, 921.75, 949.51] + Detected similar object with confidence 0.966 at location [47.16, 927.29, 981.99, 942.14] + Detected similar object with confidence 0.924 at location [46.4, 936.13, 953.02, 950.78] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( @@ -1650,8 +1669,10 @@ def forward( ```python >>> import requests >>> from PIL import Image + >>> import numpy as np >>> import torch >>> from transformers import AutoProcessor, Owlv2ForObjectDetection + >>> from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble") >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble") @@ -1660,10 +1681,25 @@ def forward( >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = [["a photo of a cat", "a photo of a dog"]] >>> inputs = processor(text=texts, images=image, return_tensors="pt") - >>> outputs = model(**inputs) - >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2] - >>> target_sizes = torch.Tensor([image.size[::-1]]) + >>> # forward pass + >>> with torch.no_grad(): + ... 
outputs = model(**inputs) + + >>> # Note: boxes need to be visualized on the padded, unnormalized image + >>> # hence we'll set the target image sizes (height, width) based on that + + >>> def get_preprocessed_image(pixel_values): + ... pixel_values = pixel_values.squeeze().numpy() + ... unnormalized_image = (pixel_values * np.array(OPENAI_CLIP_STD)[:, None, None]) + np.array(OPENAI_CLIP_MEAN)[:, None, None] + ... unnormalized_image = (unnormalized_image * 255).astype(np.uint8) + ... unnormalized_image = np.moveaxis(unnormalized_image, 0, -1) + ... unnormalized_image = Image.fromarray(unnormalized_image) + ... return unnormalized_image + + >>> unnormalized_image = get_preprocessed_image(inputs.pixel_values) + + >>> target_sizes = torch.Tensor([unnormalized_image.size[::-1]]) >>> # Convert outputs (bounding boxes and class logits) to final bounding boxes and scores >>> results = processor.post_process_object_detection( ... outputs=outputs, threshold=0.2, target_sizes=target_sizes @@ -1676,8 +1712,8 @@ def forward( >>> for box, score, label in zip(boxes, scores, labels): ... box = [round(i, 2) for i in box.tolist()] ... print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}") - Detected a photo of a cat with confidence 0.614 at location [341.67, 17.54, 642.32, 278.51] - Detected a photo of a cat with confidence 0.665 at location [6.75, 38.97, 326.62, 354.85] + Detected a photo of a cat with confidence 0.614 at location [512.5, 35.08, 963.48, 557.02] + Detected a photo of a cat with confidence 0.665 at location [10.13, 77.94, 489.93, 709.69] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( From ce315081340fdf6846f16c321eb53878b6272d53 Mon Sep 17 00:00:00 2001 From: Peter Pan Date: Tue, 28 Nov 2023 00:26:33 +0800 Subject: [PATCH 268/268] docs: replace torch.distributed.run by torchrun (#27528) * docs: replace torch.distributed.run by torchrun `transformers` now officially support pytorch >= 1.10. The entrypoint `torchrun`` is present from 1.10 onwards. 
Signed-off-by: Peter Pan * Update src/transformers/trainer.py with @ArthurZucker's suggestion Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --------- Signed-off-by: Peter Pan Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- ISSUES.md | 2 +- docs/source/de/run_scripts.md | 2 +- docs/source/en/main_classes/deepspeed.md | 2 +- docs/source/en/main_classes/trainer.md | 8 ++++---- docs/source/en/perf_hardware.md | 4 ++-- docs/source/en/perf_train_gpu_many.md | 4 ++-- docs/source/en/run_scripts.md | 2 +- docs/source/es/run_scripts.md | 2 +- docs/source/it/perf_hardware.md | 4 ++-- docs/source/it/run_scripts.md | 2 +- docs/source/ja/main_classes/trainer.md | 8 ++++---- docs/source/ja/perf_hardware.md | 4 ++-- docs/source/ja/perf_train_gpu_many.md | 4 ++-- docs/source/ja/run_scripts.md | 2 +- docs/source/ko/perf_hardware.md | 4 ++-- docs/source/ko/perf_train_gpu_many.md | 4 ++-- docs/source/ko/run_scripts.md | 2 +- docs/source/pt/run_scripts.md | 2 +- docs/source/zh/perf_hardware.md | 4 ++-- docs/source/zh/run_scripts.md | 2 +- examples/legacy/question-answering/README.md | 6 +++--- examples/legacy/seq2seq/README.md | 4 ++-- examples/pytorch/README.md | 4 ++-- examples/pytorch/speech-recognition/README.md | 8 ++++---- src/transformers/trainer.py | 2 +- 25 files changed, 46 insertions(+), 46 deletions(-) diff --git a/ISSUES.md b/ISSUES.md index 95f2334b26c8..a5969a3027f8 100644 --- a/ISSUES.md +++ b/ISSUES.md @@ -152,7 +152,7 @@ You are not required to read the following guidelines before opening an issue. H ```bash cd examples/seq2seq - python -m torch.distributed.launch --nproc_per_node=2 ./finetune_trainer.py \ + torchrun --nproc_per_node=2 ./finetune_trainer.py \ --model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \ --output_dir output_dir --overwrite_output_dir \ --do_train --n_train 500 --num_train_epochs 1 \ diff --git a/docs/source/de/run_scripts.md b/docs/source/de/run_scripts.md index 2902d4c08414..4afe72dae6d6 100644 --- a/docs/source/de/run_scripts.md +++ b/docs/source/de/run_scripts.md @@ -130,7 +130,7 @@ Der [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) unt - Legen Sie die Anzahl der zu verwendenden GPUs mit dem Argument `nproc_per_node` fest. ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/docs/source/en/main_classes/deepspeed.md b/docs/source/en/main_classes/deepspeed.md index 277610ce9cda..8133f6c097c9 100644 --- a/docs/source/en/main_classes/deepspeed.md +++ b/docs/source/en/main_classes/deepspeed.md @@ -287,7 +287,7 @@ The information in this section isn't not specific to the DeepSpeed integration For the duration of this section let's assume that you have 2 nodes with 8 gpus each. And you can reach the first node with `ssh hostname1` and second node with `ssh hostname2`, and both must be able to reach each other via ssh locally without a password. Of course, you will need to rename these host (node) names to the actual host names you are working with. 
-#### The torch.distributed.run launcher +#### The torch.distributed.run(torchrun) launcher For example, to use `torch.distributed.run`, you could do: diff --git a/docs/source/en/main_classes/trainer.md b/docs/source/en/main_classes/trainer.md index d46892c07e81..cf1dd672d3d4 100644 --- a/docs/source/en/main_classes/trainer.md +++ b/docs/source/en/main_classes/trainer.md @@ -206,7 +206,7 @@ Let's discuss how you can tell your program which GPUs are to be used and in wha When using [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) to use only a subset of your GPUs, you simply specify the number of GPUs to use. For example, if you have 4 GPUs, but you wish to use the first 2 you can do: ```bash -python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ... +torchrun --nproc_per_node=2 trainer-program.py ... ``` if you have either [`accelerate`](https://github.com/huggingface/accelerate) or [`deepspeed`](https://github.com/microsoft/DeepSpeed) installed you can also accomplish the same by using one of: @@ -233,7 +233,7 @@ If you have multiple GPUs and you'd like to use only 1 or a few of those GPUs, s For example, let's say you have 4 GPUs: 0, 1, 2 and 3. To run only on the physical GPUs 0 and 2, you can do: ```bash -CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ... +CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ... ``` So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to `cuda:0` and `cuda:1` correspondingly. @@ -241,7 +241,7 @@ So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped You can even change their order: ```bash -CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ... +CUDA_VISIBLE_DEVICES=2,0 torchrun trainer-program.py ... ``` Here your physical GPUs 0 and 2 are mapped to `cuda:1` and `cuda:0` correspondingly. @@ -263,7 +263,7 @@ As with any environment variable you can, of course, export those instead of add ```bash export CUDA_VISIBLE_DEVICES=0,2 -python -m torch.distributed.launch trainer-program.py ... +torchrun trainer-program.py ... ``` but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it's a common practice to set the environment variable just for a specific run on the same command line as it's shown in most examples of this section. 
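A minimal interactive check of the GPU remapping described in the `trainer.md` hunks above (an illustrative sketch, assuming a machine with at least three CUDA GPUs and `CUDA_VISIBLE_DEVICES=0,2` exported before Python is started):

```python
import torch

# With CUDA_VISIBLE_DEVICES=0,2 set in the environment, PyTorch only enumerates the
# two exposed devices and renumbers them in the order given by the variable, so
# physical GPU 0 becomes cuda:0 and physical GPU 2 becomes cuda:1.
print(torch.cuda.device_count())      # 2
print(torch.cuda.get_device_name(0))  # reports physical GPU 0
print(torch.cuda.get_device_name(1))  # reports physical GPU 2
```

This is also why the documentation snippets above place the variable on the same command line as the launcher: it has to be present in the environment of the process that initializes CUDA.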
diff --git a/docs/source/en/perf_hardware.md b/docs/source/en/perf_hardware.md index a28824346e4b..18c70e1b30a5 100644 --- a/docs/source/en/perf_hardware.md +++ b/docs/source/en/perf_hardware.md @@ -134,7 +134,7 @@ Here is the full benchmark code and outputs: ```bash # DDP w/ NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -143,7 +143,7 @@ rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch # DDP w/o NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/en/perf_train_gpu_many.md b/docs/source/en/perf_train_gpu_many.md index 1795782949d1..4d89cf341a55 100644 --- a/docs/source/en/perf_train_gpu_many.md +++ b/docs/source/en/perf_train_gpu_many.md @@ -153,7 +153,7 @@ python examples/pytorch/language-modeling/run_clm.py \ ``` rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ -python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ +torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -164,7 +164,7 @@ python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language- ``` rm -r /tmp/test-clm; NCCL_P2P_DISABLE=1 CUDA_VISIBLE_DEVICES=0,1 \ -python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ +torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/en/run_scripts.md b/docs/source/en/run_scripts.md index 3b40b6ea0672..0652bb1da5e4 100644 --- a/docs/source/en/run_scripts.md +++ b/docs/source/en/run_scripts.md @@ -130,7 +130,7 @@ The [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) sup - Set the number of GPUs to use with the `nproc_per_node` argument. ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/docs/source/es/run_scripts.md b/docs/source/es/run_scripts.md index a66fd1e47e13..8b762fdddc28 100644 --- a/docs/source/es/run_scripts.md +++ b/docs/source/es/run_scripts.md @@ -130,7 +130,7 @@ python examples/tensorflow/summarization/run_summarization.py \ - Establece la cantidad de GPU que se usará con el argumento `nproc_per_node`. 
```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/docs/source/it/perf_hardware.md b/docs/source/it/perf_hardware.md index a579362e2b1b..dd1187a01b59 100644 --- a/docs/source/it/perf_hardware.md +++ b/docs/source/it/perf_hardware.md @@ -134,7 +134,7 @@ Ecco il codice benchmark completo e gli output: ```bash # DDP w/ NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -143,7 +143,7 @@ rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch # DDP w/o NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/it/run_scripts.md b/docs/source/it/run_scripts.md index 327eb9374d38..c376ff32c2a8 100644 --- a/docs/source/it/run_scripts.md +++ b/docs/source/it/run_scripts.md @@ -130,7 +130,7 @@ Il [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) supp - Imposta un numero di GPU da usare con l'argomento `nproc_per_node`. ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/docs/source/ja/main_classes/trainer.md b/docs/source/ja/main_classes/trainer.md index f05aa83eadc1..f18c1b86809f 100644 --- a/docs/source/ja/main_classes/trainer.md +++ b/docs/source/ja/main_classes/trainer.md @@ -196,7 +196,7 @@ _python_、_numpy_、および _pytorch_ の RNG 状態は、そのチェック [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.Parallel.DistributedDataParallel.html) を使用して GPU のサブセットのみを使用する場合、使用する GPU の数を指定するだけです。 。たとえば、GPU が 4 つあるが、最初の 2 つを使用したい場合は、次のようにします。 ```bash -python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ... +torchrun --nproc_per_node=2 trainer-program.py ... ``` [`accelerate`](https://github.com/huggingface/accelerate) または [`deepspeed`](https://github.com/microsoft/DeepSpeed) がインストールされている場合は、次を使用して同じことを達成することもできます。の一つ: @@ -223,7 +223,7 @@ deepspeed --num_gpus 2 trainer-program.py ... たとえば、4 つの GPU (0、1、2、3) があるとします。物理 GPU 0 と 2 のみで実行するには、次のようにします。 ```bash -CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ... +CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ... ``` したがって、pytorch は 2 つの GPU のみを認識し、物理 GPU 0 と 2 はそれぞれ `cuda:0` と `cuda:1` にマッピングされます。 @@ -231,7 +231,7 @@ CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py . 順序を変更することもできます。 ```bash -CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ... +CUDA_VISIBLE_DEVICES=2,0 torchrun trainer-program.py ... ``` ここでは、物理 GPU 0 と 2 がそれぞれ`cuda:1`と`cuda:0`にマッピングされています。 @@ -253,7 +253,7 @@ CUDA_VISIBLE_DEVICES= python trainer-program.py ... 
```bash export CUDA_VISIBLE_DEVICES=0,2 -python -m torch.distributed.launch trainer-program.py ... +torchrun trainer-program.py ... ``` ただし、この方法では、以前に環境変数を設定したことを忘れて、なぜ間違った GPU が使用されているのか理解できない可能性があるため、混乱を招く可能性があります。したがって、このセクションのほとんどの例で示されているように、同じコマンド ラインで特定の実行に対してのみ環境変数を設定するのが一般的です。 diff --git a/docs/source/ja/perf_hardware.md b/docs/source/ja/perf_hardware.md index b58db6e76d09..a0db527a94b6 100644 --- a/docs/source/ja/perf_hardware.md +++ b/docs/source/ja/perf_hardware.md @@ -139,7 +139,7 @@ NVLinkを使用すると、トレーニングが約23%速く完了すること ```bash # DDP w/ NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -148,7 +148,7 @@ rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch # DDP w/o NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/ja/perf_train_gpu_many.md b/docs/source/ja/perf_train_gpu_many.md index fd7713c49369..71d6c2805865 100644 --- a/docs/source/ja/perf_train_gpu_many.md +++ b/docs/source/ja/perf_train_gpu_many.md @@ -143,7 +143,7 @@ python examples/pytorch/language-modeling/run_clm.py \ # DDP w/ NVlink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ -python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ +torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -151,7 +151,7 @@ python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language- # DDP w/o NVlink rm -r /tmp/test-clm; NCCL_P2P_DISABLE=1 CUDA_VISIBLE_DEVICES=0,1 \ -python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ +torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/ja/run_scripts.md b/docs/source/ja/run_scripts.md index 1fde9afc0c6e..a7cc89d13484 100644 --- a/docs/source/ja/run_scripts.md +++ b/docs/source/ja/run_scripts.md @@ -140,7 +140,7 @@ python examples/tensorflow/summarization/run_summarization.py \ 以下は提供されたBashコードです。このコードの日本語訳をMarkdown形式で記載します。 ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/docs/source/ko/perf_hardware.md b/docs/source/ko/perf_hardware.md index e715b39487f3..bb35e6fae2f2 100644 --- a/docs/source/ko/perf_hardware.md +++ b/docs/source/ko/perf_hardware.md @@ -135,7 +135,7 @@ NVLink 사용 시 훈련이 약 23% 더 빠르게 완료됨을 확인할 수 있 ```bash # DDP w/ NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 
python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -144,7 +144,7 @@ rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch # DDP w/o NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/ko/perf_train_gpu_many.md b/docs/source/ko/perf_train_gpu_many.md index 9d80fd65727d..706832a8a1dc 100644 --- a/docs/source/ko/perf_train_gpu_many.md +++ b/docs/source/ko/perf_train_gpu_many.md @@ -145,7 +145,7 @@ python examples/pytorch/language-modeling/run_clm.py \ # DDP w/ NVlink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ -python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ +torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -153,7 +153,7 @@ python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language- # DDP w/o NVlink rm -r /tmp/test-clm; NCCL_P2P_DISABLE=1 CUDA_VISIBLE_DEVICES=0,1 \ -python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ +torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/ko/run_scripts.md b/docs/source/ko/run_scripts.md index c1af1677183b..f88e8e8252f9 100644 --- a/docs/source/ko/run_scripts.md +++ b/docs/source/ko/run_scripts.md @@ -141,7 +141,7 @@ python examples/tensorflow/summarization/run_summarization.py \ - `nproc_per_node` 인수를 추가해 사용할 GPU 개수를 설정합니다. ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/docs/source/pt/run_scripts.md b/docs/source/pt/run_scripts.md index 8d87c10c2713..ff3110817e8a 100644 --- a/docs/source/pt/run_scripts.md +++ b/docs/source/pt/run_scripts.md @@ -131,7 +131,7 @@ O [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) ofere - Defina o número de GPUs a serem usadas com o argumento `nproc_per_node`. 
```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/docs/source/zh/perf_hardware.md b/docs/source/zh/perf_hardware.md index f49e9a582963..ce7ab36151bf 100644 --- a/docs/source/zh/perf_hardware.md +++ b/docs/source/zh/perf_hardware.md @@ -135,7 +135,7 @@ GPU1 PHB X 0-11 N/A ```bash # DDP w/ NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 @@ -144,7 +144,7 @@ rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch # DDP w/o NVLink -rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 python -m torch.distributed.launch \ +rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 diff --git a/docs/source/zh/run_scripts.md b/docs/source/zh/run_scripts.md index e5cc56487dab..0a0121c32f0b 100644 --- a/docs/source/zh/run_scripts.md +++ b/docs/source/zh/run_scripts.md @@ -133,7 +133,7 @@ python examples/tensorflow/summarization/run_summarization.py \ ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path t5-small \ diff --git a/examples/legacy/question-answering/README.md b/examples/legacy/question-answering/README.md index 494ae4ffd7ee..905fabf35bdf 100644 --- a/examples/legacy/question-answering/README.md +++ b/examples/legacy/question-answering/README.md @@ -18,7 +18,7 @@ in Huang et al. [Improve Transformer Models with Better Relative Position Embedd ```bash export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 -python -m torch.distributed.launch --nproc_per_node=8 ./examples/question-answering/run_squad.py \ +torchrun --nproc_per_node=8 ./examples/question-answering/run_squad.py \ --model_name_or_path zhiheng-huang/bert-base-uncased-embedding-relative-key-query \ --dataset_name squad \ --do_train \ @@ -46,7 +46,7 @@ gpu training leads to the f1 score of 90.71. 
```bash export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 -python -m torch.distributed.launch --nproc_per_node=8 ./examples/question-answering/run_squad.py \ +torchrun --nproc_per_node=8 ./examples/question-answering/run_squad.py \ --model_name_or_path zhiheng-huang/bert-large-uncased-whole-word-masking-embedding-relative-key-query \ --dataset_name squad \ --do_train \ @@ -68,7 +68,7 @@ Training with the above command leads to the f1 score of 93.52, which is slightl Here is an example using distributed training on 8 V100 GPUs and Bert Whole Word Masking uncased model to reach a F1 > 93 on SQuAD1.1: ```bash -python -m torch.distributed.launch --nproc_per_node=8 ./examples/question-answering/run_squad.py \ +torchrun --nproc_per_node=8 ./examples/question-answering/run_squad.py \ --model_name_or_path bert-large-uncased-whole-word-masking \ --dataset_name squad \ --do_train \ diff --git a/examples/legacy/seq2seq/README.md b/examples/legacy/seq2seq/README.md index 5a3c2dbd3506..347a980a74da 100644 --- a/examples/legacy/seq2seq/README.md +++ b/examples/legacy/seq2seq/README.md @@ -140,7 +140,7 @@ python finetune_trainer.py --help For multi-gpu training use `torch.distributed.launch`, e.g. with 2 gpus: ```bash -python -m torch.distributed.launch --nproc_per_node=2 finetune_trainer.py ... +torchrun --nproc_per_node=2 finetune_trainer.py ... ``` **At the moment, `Seq2SeqTrainer` does not support *with teacher* distillation.** @@ -214,7 +214,7 @@ because it uses SortishSampler to minimize padding. You can also use it on 1 GPU `{type_path}.source` and `{type_path}.target`. Run `./run_distributed_eval.py --help` for all clargs. ```bash -python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \ +torchrun --nproc_per_node=8 run_distributed_eval.py \ --model_name sshleifer/distilbart-large-xsum-12-3 \ --save_dir xsum_generations \ --data_dir xsum \ diff --git a/examples/pytorch/README.md b/examples/pytorch/README.md index fd98a8e9180d..ab2f05337c5d 100644 --- a/examples/pytorch/README.md +++ b/examples/pytorch/README.md @@ -98,7 +98,7 @@ the [Trainer API](https://huggingface.co/transformers/main_classes/trainer.html) use the following command: ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node number_of_gpu_you_have path_to_script.py \ --all_arguments_of_the_script ``` @@ -107,7 +107,7 @@ As an example, here is how you would fine-tune the BERT large model (with whole classification MNLI task using the `run_glue` script, with 8 GPUs: ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 pytorch/text-classification/run_glue.py \ --model_name_or_path bert-large-uncased-whole-word-masking \ --task_name mnli \ diff --git a/examples/pytorch/speech-recognition/README.md b/examples/pytorch/speech-recognition/README.md index 99eec85537b2..32fa9ac8b8e3 100644 --- a/examples/pytorch/speech-recognition/README.md +++ b/examples/pytorch/speech-recognition/README.md @@ -100,7 +100,7 @@ of **0.35**. The following command shows how to fine-tune [XLSR-Wav2Vec2](https://huggingface.co/transformers/main/model_doc/xlsr_wav2vec2.html) on [Common Voice](https://huggingface.co/datasets/common_voice) using 8 GPUs in half-precision. 
```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 run_speech_recognition_ctc.py \ --dataset_name="common_voice" \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ @@ -147,7 +147,7 @@ However, the `--shuffle_buffer_size` argument controls how many examples we can ```bash -**python -m torch.distributed.launch \ +**torchrun \ --nproc_per_node 4 run_speech_recognition_ctc_streaming.py \ --dataset_name="common_voice" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ @@ -404,7 +404,7 @@ If training on a different language, you should be sure to change the `language` #### Multi GPU Whisper Training The following example shows how to fine-tune the [Whisper small](https://huggingface.co/openai/whisper-small) checkpoint on the Hindi subset of [Common Voice 11](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) using 2 GPU devices in half-precision: ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 2 run_speech_recognition_seq2seq.py \ --model_name_or_path="openai/whisper-small" \ --dataset_name="mozilla-foundation/common_voice_11_0" \ @@ -572,7 +572,7 @@ cross-entropy loss of **0.405** and word error rate of **0.0728**. The following command shows how to fine-tune [XLSR-Wav2Vec2](https://huggingface.co/transformers/main/model_doc/xlsr_wav2vec2.html) on [Common Voice](https://huggingface.co/datasets/common_voice) using 8 GPUs in half-precision. ```bash -python -m torch.distributed.launch \ +torchrun \ --nproc_per_node 8 run_speech_recognition_seq2seq.py \ --dataset_name="librispeech_asr" \ --model_name_or_path="./" \ diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 7a4fcd129cb3..bd6dd6fe9581 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1595,7 +1595,7 @@ def _inner_training_loop( # references registered here no longer work on other gpus, breaking the module raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" - " (torch.distributed.launch)." + " (torchrun or torch.distributed.launch (deprecated))." ) else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa
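The updated `ValueError` above steers users toward `torchrun` for DDP runs. As a minimal sketch of what a `torchrun`-launched worker sees (illustrative only; the filename `ddp_hello.py` and the `gloo` backend are assumptions, not part of this patch series):

```python
import os

import torch.distributed as dist

# torchrun (the console script for torch.distributed.run) exports the rendezvous and
# rank information as environment variables, instead of passing a --local_rank
# argument the way the deprecated torch.distributed.launch did by default.
local_rank = int(os.environ["LOCAL_RANK"])
world_size = int(os.environ["WORLD_SIZE"])

# The default init_method="env://" picks up MASTER_ADDR, MASTER_PORT, RANK and
# WORLD_SIZE from the environment prepared by torchrun.
dist.init_process_group(backend="gloo")
print(f"worker {dist.get_rank()}/{world_size} running on local rank {local_rank}")
dist.destroy_process_group()
```

Launched as `torchrun --nproc_per_node 2 ddp_hello.py`, each of the two workers prints its own rank; the same script would need an explicit `--local_rank` argument parser under the old launcher unless `--use_env` was passed.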