From 7667cfcb41dfeb8f217e4314dcf2d561b8ca41d2 Mon Sep 17 00:00:00 2001
From: Steven Liu <59462357+stevhliu@users.noreply.github.com>
Date: Mon, 16 Dec 2024 15:36:26 -0800
Subject: [PATCH] [docs] Add missing AttnProcessors (#10246)

* attnprocessors

* lora

* make style

* fix

* fix

* sana

* typo
---
 docs/source/en/api/attnprocessor.md          | 115 ++++++++++++++++++--
 src/diffusers/models/attention_processor.py  |  16 +++
 2 files changed, 120 insertions(+), 11 deletions(-)

diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md
index 5b1f0be72ae6..fee0d7e35764 100644
--- a/docs/source/en/api/attnprocessor.md
+++ b/docs/source/en/api/attnprocessor.md
@@ -15,40 +15,133 @@ specific language governing permissions and limitations under the License.
 An attention processor is a class for applying different types of attention mechanisms.
 
 ## AttnProcessor
+
 [[autodoc]] models.attention_processor.AttnProcessor
 
-## AttnProcessor2_0
 [[autodoc]] models.attention_processor.AttnProcessor2_0
 
-## AttnAddedKVProcessor
 [[autodoc]] models.attention_processor.AttnAddedKVProcessor
 
-## AttnAddedKVProcessor2_0
 [[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0
+
+[[autodoc]] models.attention_processor.AttnProcessorNPU
+
+[[autodoc]] models.attention_processor.FusedAttnProcessor2_0
+
+## Allegro
+
+[[autodoc]] models.attention_processor.AllegroAttnProcessor2_0
+
+## AuraFlow
+
+[[autodoc]] models.attention_processor.AuraFlowAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.FusedAuraFlowAttnProcessor2_0
+
+## CogVideoX
+
+[[autodoc]] models.attention_processor.CogVideoXAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.FusedCogVideoXAttnProcessor2_0
 
 ## CrossFrameAttnProcessor
+
 [[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor
 
-## CustomDiffusionAttnProcessor
+## Custom Diffusion
+
 [[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor
 
-## CustomDiffusionAttnProcessor2_0
 [[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor2_0
 
-## CustomDiffusionXFormersAttnProcessor
 [[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor
 
-## FusedAttnProcessor2_0
-[[autodoc]] models.attention_processor.FusedAttnProcessor2_0
+## Flux
+
+[[autodoc]] models.attention_processor.FluxAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.FusedFluxAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.FluxSingleAttnProcessor2_0
+
+## Hunyuan
+
+[[autodoc]] models.attention_processor.HunyuanAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.FusedHunyuanAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGHunyuanAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGCFGHunyuanAttnProcessor2_0
+
+## IdentitySelfAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGIdentitySelfAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0
+
+## IP-Adapter
+
+[[autodoc]] models.attention_processor.IPAdapterAttnProcessor
+
+[[autodoc]] models.attention_processor.IPAdapterAttnProcessor2_0
+
+## JointAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.JointAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGJointAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGCFGJointAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.FusedJointAttnProcessor2_0
+
+## LoRA
+
+[[autodoc]] models.attention_processor.LoRAAttnProcessor
+
+[[autodoc]] models.attention_processor.LoRAAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor
+
+[[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor
+
+## Lumina-T2X
+
+[[autodoc]] models.attention_processor.LuminaAttnProcessor2_0
+
+## Mochi
+
+[[autodoc]] models.attention_processor.MochiAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.MochiVaeAttnProcessor2_0
+
+## Sana
+
+[[autodoc]] models.attention_processor.SanaLinearAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.SanaMultiscaleAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGCFGSanaLinearAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.PAGIdentitySanaLinearAttnProcessor2_0
+
+## Stable Audio
+
+[[autodoc]] models.attention_processor.StableAudioAttnProcessor2_0
 
 ## SlicedAttnProcessor
+
 [[autodoc]] models.attention_processor.SlicedAttnProcessor
 
-## SlicedAttnAddedKVProcessor
 [[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor
 
 ## XFormersAttnProcessor
+
 [[autodoc]] models.attention_processor.XFormersAttnProcessor
 
-## AttnProcessorNPU
-[[autodoc]] models.attention_processor.AttnProcessorNPU
+[[autodoc]] models.attention_processor.XFormersAttnAddedKVProcessor
+
+## XLAFlashAttnProcessor2_0
+
+[[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0
diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index ee6b010519e2..be8d654ca66a 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -5423,21 +5423,37 @@ def __call__(self, attn: SanaMultiscaleLinearAttention, hidden_states: torch.Ten
 
 
 class LoRAAttnProcessor:
+    r"""
+    Processor for implementing attention with LoRA.
+    """
+
     def __init__(self):
         pass
 
 
 class LoRAAttnProcessor2_0:
+    r"""
+    Processor for implementing attention with LoRA (enabled by default if you're using PyTorch 2.0).
+    """
+
     def __init__(self):
         pass
 
 
 class LoRAXFormersAttnProcessor:
+    r"""
+    Processor for implementing attention with LoRA using xFormers.
+    """
+
     def __init__(self):
         pass
 
 
 class LoRAAttnAddedKVProcessor:
+    r"""
+    Processor for implementing attention with LoRA with extra learnable key and value matrices for the text encoder.
+    """
+
     def __init__(self):
         pass
 
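For context on how the classes documented in this patch are used: an attention processor is attached to a model with `set_attn_processor`. Below is a minimal usage sketch, not part of the patch itself; the checkpoint id is illustrative and any `UNet2DConditionModel` checkpoint works the same way.

```python
import torch
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor2_0

# Illustrative checkpoint; substitute any UNet2DConditionModel checkpoint.
unet = UNet2DConditionModel.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    subfolder="unet",
    torch_dtype=torch.float16,
)

# Passing a single processor instance applies it to every attention layer.
unet.set_attn_processor(AttnProcessor2_0())

# attn_processors maps each attention layer name to its current processor.
print(sorted({type(p).__name__ for p in unet.attn_processors.values()}))
```

Passing a dict keyed by layer name (the keys of `unet.attn_processors`) instead of a single instance sets processors per layer, which is how per-layer variants such as the IP-Adapter and Custom Diffusion processors are typically attached.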