dev_support_diffusers_ipa #837

Open · wants to merge 37 commits into base: main
Changes from 31 commits
Commits (37)
cc53cd6  dev_support_diffusers_ipa (ccssu, Apr 24, 2024)
007c140  support ipa (ccssu, Apr 24, 2024)
7de9ecf  Merge branch 'main' into dev_support_diffusers_ipa (ccssu, Apr 24, 2024)
31518db  Merge branch 'main' into dev_support_diffusers_ipa (doombeaker, May 11, 2024)
9ef38de  Merge branch 'main' into dev_support_diffusers_ipa (lijunliangTG, May 13, 2024)
c3a66fc  Merge branch 'main' into dev_support_diffusers_ipa (strint, May 15, 2024)
72ebe66  Merge branch 'main' into dev_support_diffusers_ipa (ccssu, May 21, 2024)
d50ae6c  refine (ccssu, May 21, 2024)
f3f7e4e  Merge branch 'main' into dev_support_diffusers_ipa (lijunliangTG, May 27, 2024)
8a4c3e7  Merge branch 'main' into dev_support_diffusers_ipa (ccssu, Jun 5, 2024)
df74cc1  Merge branch 'main' into dev_support_diffusers_ipa (lijunliangTG, Jul 16, 2024)
8504d7e  add nexfort ipadapter demo (marigoold, Jul 17, 2024)
ab9446b  refine (marigoold, Jul 17, 2024)
f207f39  refine (marigoold, Jul 17, 2024)
01a78f2  Merge branch 'add-ipadapter-nexfort-demo' into dev_support_diffusers_ipa (marigoold, Jul 17, 2024)
3ad5e25  refine (marigoold, Jul 17, 2024)
d71772f  multi scale run (marigoold, Jul 18, 2024)
59d0690  update example, add todo for loadpipe bug fixing (marigoold, Jul 19, 2024)
016de27  add multi resolution (marigoold, Jul 19, 2024)
45ff683  Merge branch 'main' into dev_support_diffusers_ipa (marigoold, Jul 19, 2024)
f42589b  merge main, resolve conflict (marigoold, Jul 25, 2024)
430cbec  fix bug of custom transform (marigoold, Jul 25, 2024)
5dd0e32  merge main, resolve conflict (marigoold, Jul 25, 2024)
a6ab752  support dynamic shape (marigoold, Jul 26, 2024)
d5368a6  merge main (marigoold, Jul 26, 2024)
05d895f  refine (marigoold, Jul 26, 2024)
b144ed8  format (marigoold, Jul 26, 2024)
b1847a0  refine (marigoold, Jul 26, 2024)
ee2f10f  Merge branch 'main' into dev_support_diffusers_ipa (marigoold, Jul 26, 2024)
a6c7c13  remove load pipe (marigoold, Jul 26, 2024)
1e18d3e  Merge branch 'dev_support_diffusers_ipa' of github.com:siliconflow/on… (marigoold, Jul 26, 2024)
73d65cc  add doc and profile (marigoold, Jul 26, 2024)
3fa2ff7  format (marigoold, Jul 26, 2024)
1246acc  update readme (marigoold, Jul 27, 2024)
ca07001  Merge branch 'main' into dev_support_diffusers_ipa (marigoold, Jul 27, 2024)
fc3b587  Merge branch 'main' into dev_support_diffusers_ipa (marigoold, Jul 29, 2024)
e2db704  Merge branch 'main' into dev_support_diffusers_ipa (marigoold, Aug 1, 2024)
155 changes: 155 additions & 0 deletions onediff_diffusers_extensions/examples/text_to_image_ip_adapter.py
@@ -0,0 +1,155 @@
import argparse
Collaborator


Following the sdxl example, please give this its own folder with a README that serves as the acceleration report.

import json
import os
from pathlib import Path

import torch

from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image
from onediffx import compile_pipe, load_pipe, save_pipe

nexfort_options = {
"mode": "cudagraphs:benchmark:max-autotune:low-precision:cache-all",
"memory_format": "channels_last",
"options": {
"inductor.optimize_linear_epilogue": False,
"overrides.conv_benchmark": True,
"overrides.matmul_allow_tf32": True,
},
}

parser = argparse.ArgumentParser()
parser.add_argument("--base", type=str, default="runwayml/stable-diffusion-v1-5")
parser.add_argument("--ipadapter", type=str, default="h94/IP-Adapter")
parser.add_argument("--subfolder", type=str, default="models")
parser.add_argument("--weight_name", type=str, default="ip-adapter_sd15.bin")
parser.add_argument("--scale", type=float, nargs="+", default=0.5)
parser.add_argument(
"--input_image",
type=str,
default="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_diner.png",
)
parser.add_argument(
"--prompt",
default="a polar bear sitting in a chair drinking a milkshake",
help="Prompt",
)
parser.add_argument(
"--negative-prompt",
default="deformed, ugly, wrong proportion, low res, bad anatomy, worst quality, low quality",
help="Negative prompt",
)
parser.add_argument("--height", type=int, default=512)
parser.add_argument("--width", type=int, default=512)
parser.add_argument("--n_steps", type=int, default=100)
parser.add_argument(
"--saved_image", type=str, required=False, default="ip-adapter-out.png"
)
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--warmup", type=int, default=1)
parser.add_argument("--run", type=int, default=3)
parser.add_argument(
"--compile",
type=(lambda x: str(x).lower() in ["true", "1", "yes"]),
default=True,
)
parser.add_argument(
"--compiler", type=str, default="oneflow", choices=["nexfort", "oneflow"]
)
parser.add_argument("--compile-options", type=str, default=nexfort_options)
parser.add_argument("--cache-dir", default="./onediff_cache", help="cache directory")
parser.add_argument("--multi-resolution", action="store_true")
args = parser.parse_args()

# load an image
ip_adapter_image = load_image(args.input_image)

# load stable diffusion and ip-adapter
pipe = AutoPipelineForText2Image.from_pretrained(
args.base,
torch_dtype=torch.float16,
variant="fp16",
)
pipe.load_ip_adapter(
args.ipadapter, subfolder=args.subfolder, weight_name=args.weight_name
)

# Set ipadapter scale as a tensor instead of a float
# If scale is a float, it cannot be modified after the graph is traced
ipadapter_scale = torch.tensor(0.6, dtype=torch.float, device="cuda")
pipe.set_ip_adapter_scale(ipadapter_scale)
pipe.to("cuda")


if args.compiler == "nexfort":
compile_options = args.compile_options
if isinstance(compile_options, str):
compile_options = json.loads(compile_options)
os.environ.setdefault("TORCHINDUCTOR_CACHE_DIR", "./.torchinductor")
else:
compile_options = None

cache_path = os.path.join(args.cache_dir, type(pipe).__name__)

if args.compile:
pipe = compile_pipe(pipe, backend=args.compiler, options=compile_options)
if args.compiler == "oneflow" and os.path.exists(cache_path):
# TODO(WangYi): load pipe has bug here, which makes scale unchangeable
# load_pipe(pipe, cache_path)
pass


# generate image
print("Warmup")
for i in range(args.warmup):
images = pipe(
prompt=args.prompt,
height=args.height,
width=args.width,
ip_adapter_image=ip_adapter_image,
num_inference_steps=args.n_steps,
).images

print("Run")
scales = args.scale if isinstance(args.scale, list) else [args.scale]
for scale in scales:
# Use ipadapter_scale.copy_ instead of pipeline.set_ip_adapter_scale to modify scale
ipadapter_scale.copy_(torch.tensor(scale, dtype=torch.float, device="cuda"))
pipe.set_ip_adapter_scale(ipadapter_scale)
image = pipe(
prompt=args.prompt,
ip_adapter_image=ip_adapter_image,
negative_prompt=args.negative_prompt,
height=args.height,
width=args.width,
num_inference_steps=args.n_steps,
generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
image_path = (
f"{Path(args.saved_image).stem}_{scale}" + Path(args.saved_image).suffix
)
print(f"save output image to {image_path}")
image.save(image_path)

if args.multi_resolution:
from itertools import product

sizes = [1024, 512, 768, 256]
for h, w in product(sizes, sizes):
image = pipe(
prompt=args.prompt,
ip_adapter_image=ip_adapter_image,
negative_prompt=args.negative_prompt,
height=h,
width=w,
num_inference_steps=args.n_steps,
generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
print(f"Running at resolution: {h}x{w}")


if args.compiler == "oneflow":
if not os.path.exists(cache_path):
os.makedirs(cache_path)
save_pipe(pipe, cache_path)
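
The core of the example above is keeping the IP-Adapter scale mutable after the graph has been traced: a plain float would be baked into the compiled graph, while a tensor can be updated in place. Below is a minimal sketch of that trick using the script's default checkpoints and reference image; the step count and the particular scale sweep are illustrative and not taken from the PR.

import torch
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image
from onediffx import compile_pipe

pipe = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
)
pipe.load_ip_adapter(
    "h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin"
)

# Register the scale as a tensor so it can be mutated after tracing;
# one compiled graph then serves every scale value.
ipadapter_scale = torch.tensor(0.5, dtype=torch.float, device="cuda")
pipe.set_ip_adapter_scale(ipadapter_scale)
pipe.to("cuda")
pipe = compile_pipe(pipe, backend="oneflow")

ref_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_diner.png"
)
for scale in (0.2, 0.5, 0.8):
    # Update the scale in place; no retracing or recompilation is needed.
    ipadapter_scale.copy_(torch.tensor(scale, device="cuda"))
    image = pipe(
        prompt="a polar bear sitting in a chair drinking a milkshake",
        ip_adapter_image=ref_image,
        num_inference_steps=20,
    ).images[0]
    image.save(f"ip-adapter-out_{scale}.png")

The full script can be run directly, for example with python onediff_diffusers_extensions/examples/text_to_image_ip_adapter.py --compiler nexfort --scale 0.5 0.8, which sweeps the listed adapter scales through a single compiled pipeline; adding --multi-resolution additionally exercises dynamic shapes across several heights and widths.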
17 changes: 17 additions & 0 deletions src/infer_compiler_registry/register_diffusers/__init__.py
@@ -70,6 +70,7 @@
from .attention_processor_oflow import (
Attention as AttentionOflow,
AttnProcessor as AttnProcessorOflow,
is_ip_adapter_available,
LoRAAttnProcessor2_0 as LoRAAttnProcessorOflow,
)
from .resnet_oflow import Upsample2D as Upsample2DOflow
@@ -102,6 +103,22 @@
LoRAAttnProcessor2_0: LoRAAttnProcessorOflow,
}

if is_ip_adapter_available():
from diffusers.models.attention_processor import (
IPAdapterAttnProcessor,
IPAdapterAttnProcessor2_0,
)

from .attention_processor_oflow import (
IPAdapterAttnProcessor as IPAdapterAttnProcessorOflow,
IPAdapterAttnProcessor2_0 as IPAdapterLoRAAttnProcessor2_0Oflow,
)

torch2oflow_class_map.update({IPAdapterAttnProcessor: IPAdapterAttnProcessorOflow})
torch2oflow_class_map.update(
{IPAdapterAttnProcessor2_0: IPAdapterLoRAAttnProcessor2_0Oflow}
)

torch2oflow_class_map.update({Transformer2DModel: Transformer2DModelOflow})
torch2oflow_class_map.update({UNet2DConditionModel: UNet2DConditionModelOflow})
torch2oflow_class_map.update({AttnUpBlock2D: AttnUpBlock2DOflow})
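
The registry hunk above gates the new mappings on is_ip_adapter_available, imported from attention_processor_oflow; its implementation is not shown in this diff. A plausible, purely hypothetical shape for it is an import probe, since the IP-Adapter processors only exist in newer diffusers releases:

def is_ip_adapter_available() -> bool:
    # Hypothetical helper: probe for the IP-Adapter attention processors
    # instead of pinning a minimum diffusers version.
    try:
        from diffusers.models.attention_processor import (  # noqa: F401
            IPAdapterAttnProcessor,
            IPAdapterAttnProcessor2_0,
        )
        return True
    except ImportError:
        return False

When the check passes, the torch processors are added to torch2oflow_class_map, so compile_pipe can swap them for the oneflow implementations during graph conversion.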