diff --git a/docker_images/diffusers/app/pipelines/image_to_image.py b/docker_images/diffusers/app/pipelines/image_to_image.py
index d8fcffc2..2c991415 100644
--- a/docker_images/diffusers/app/pipelines/image_to_image.py
+++ b/docker_images/diffusers/app/pipelines/image_to_image.py
@@ -233,6 +233,9 @@ def _process_req(self, image, prompt, **kwargs):
                 "negative_prompt": kwargs.get("negative_prompt", None),
                 "guidance_scale": kwargs.get("guidance_scale", 7),
             }
+            if int(kwargs.get("num_inference_steps", 20)) <= 4 and "guidance_scale" not in kwargs:
+                kwargs["guidance_scale"] = 0
+                prior_args["guidance_scale"] = 0
             image_emb, zero_image_emb = self.prior(prompt, **prior_args).to_tuple()
             images = self.ldm(
                 prompt,
diff --git a/docker_images/diffusers/app/pipelines/text_to_image.py b/docker_images/diffusers/app/pipelines/text_to_image.py
index 7fafe10b..42e16452 100644
--- a/docker_images/diffusers/app/pipelines/text_to_image.py
+++ b/docker_images/diffusers/app/pipelines/text_to_image.py
@@ -169,6 +169,9 @@ def _process_req(self, inputs, **kwargs):
            kwargs["num_inference_steps"] = 20
        # Else, don't specify anything, leave the default behaviour
 
+        if int(kwargs.get("num_inference_steps", 20)) <= 4 and "guidance_scale" not in kwargs:
+            kwargs["guidance_scale"] = 0
+
        if "seed" in kwargs:
            seed = int(kwargs["seed"])
            generator = torch.Generator().manual_seed(seed)
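
Both hunks apply the same defaulting rule: when a request asks for four or fewer inference steps (as few-step models such as LCMs typically do) and does not set guidance_scale explicitly, guidance is disabled by forcing it to 0; an explicitly passed guidance_scale is always respected. The sketch below isolates that rule for illustration only; the helper name apply_guidance_default and the 20-step fallback are hypothetical, not part of the repo's API.

```python
# Illustrative sketch of the defaulting rule added in _process_req.
# `apply_guidance_default` is a hypothetical helper, not an existing function.


def apply_guidance_default(kwargs: dict, default_steps: int = 20) -> dict:
    """Disable classifier-free guidance for few-step requests unless the
    caller set guidance_scale explicitly."""
    steps = int(kwargs.get("num_inference_steps", default_steps))
    if steps <= 4 and "guidance_scale" not in kwargs:
        kwargs["guidance_scale"] = 0
    return kwargs


# A 4-step request without guidance_scale gets 0 ...
assert apply_guidance_default({"num_inference_steps": 4})["guidance_scale"] == 0
# ... while an explicit guidance_scale is left untouched.
assert apply_guidance_default(
    {"num_inference_steps": 4, "guidance_scale": 7.5}
)["guidance_scale"] == 7.5
```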