diff --git a/.gitignore b/.gitignore
index 765a2d99..a82d44f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,13 @@
.idea/
+idea/
.venv/
+venv/
.gitignore/
+gitignore/
TechnicalFiles/pip_cache/
TechnicalFiles/cache/
TechnicalFiles/temp/
TechnicalFiles/logs/
inputs/
-outputs/
\ No newline at end of file
+outputs/
+ThirdPartyRepository/
\ No newline at end of file
diff --git a/LaunchFile/app.py b/LaunchFile/app.py
index 42095751..de8f6bb0 100644
--- a/LaunchFile/app.py
+++ b/LaunchFile/app.py
@@ -19,6 +19,12 @@
temp_dir = os.path.join("TechnicalFiles/temp")
os.makedirs(temp_dir, exist_ok=True)
os.environ["TMPDIR"] = temp_dir
+unet_path = os.path.join("inputs/image/sd_models/rembg")
+os.makedirs(unet_path, exist_ok=True)
+os.environ["U2NET_HOME"] = unet_path
+roop_path = os.path.join("ThirdPartyRepository/insightface_models")
+os.makedirs(roop_path, exist_ok=True)
+os.environ["insightface"] = roop_path  # NOTE(review): insightface reads the root= argument (passed to FaceAnalysis below), not an "insightface" env var — confirm this line is needed
sys.modules['triton'] = None
from threading import Thread
import gradio as gr
@@ -512,10 +518,6 @@ def parse_pdf(pdf_path):
def remove_bg(src_img_path, out_img_path):
- model_path = "inputs/image/sd_models/rembg"
- os.makedirs(model_path, exist_ok=True)
-
- os.environ["U2NET_HOME"] = model_path
with open(src_img_path, "rb") as input_file:
input_data = input_file.read()
@@ -1056,7 +1058,7 @@ def transcribe_audio(audio_file_path):
else:
device = "cpu"
- whisper_model_path = "inputs/text/whisper-medium"
+ whisper_model_path = "inputs/text/whisper-large-v3-turbo"
if not os.path.exists(whisper_model_path):
gr.Info("Downloading Whisper...")
os.makedirs(whisper_model_path, exist_ok=True)
@@ -1094,7 +1096,7 @@ def load_freevc_model():
def load_whisper_model():
- whisper_model_path = "inputs/text/whisper-medium"
+ whisper_model_path = "inputs/text/whisper-large-v3-turbo"
if not os.path.exists(whisper_model_path):
gr.Info("Downloading Whisper...")
os.makedirs(whisper_model_path, exist_ok=True)
@@ -6219,7 +6221,7 @@ def generate_image_ip_adapter_faceid(prompt, negative_prompt, face_image, s_scal
torch_dtype = torch.float32
variant = "fp32"
- app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+ app = FaceAnalysis(name="buffalo_l", root=roop_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))
image = cv2.imread(face_image)
@@ -11226,85 +11228,130 @@ def open_outputs_folder():
os.system(f'open "{outputs_folder}"' if os.name == "darwin" else f'xdg-open "{outputs_folder}"')
-llm_models_list = [None, "Moondream2-Image", "LLaVA-NeXT-Video", "Qwen2-Audio"] + [model for model in os.listdir("inputs/text/llm_models") if not model.endswith(".txt") and model != "vikhyatk" and model != "lora"]
-llm_lora_models_list = [None] + [model for model in os.listdir("inputs/text/llm_models/lora") if not model.endswith(".txt")]
-speaker_wavs_list = [None] + [wav for wav in os.listdir("inputs/audio/voices") if not wav.endswith(".txt")]
-stable_diffusion_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models")
- if (model.endswith(".safetensors") or model.endswith(".ckpt") or model.endswith(".gguf") or not model.endswith(".txt") and not model.endswith(".py") and not os.path.isdir(os.path.join("inputs/image/sd_models")))]
+def get_llm_models():
+ return [None, "Moondream2-Image", "LLaVA-NeXT-Video", "Qwen2-Audio"] + [
+ model for model in os.listdir("inputs/text/llm_models")
+ if not model.endswith(".txt") and model != "vikhyatk" and model != "lora" and model != "avatars"
+ ]
+
+
+def get_llm_lora_models():
+ return [None] + [
+ model for model in os.listdir("inputs/text/llm_models/lora")
+ if not model.endswith(".txt")
+ ]
+
+
+def get_speaker_wavs():
+ return [None] + [
+ wav for wav in os.listdir("inputs/audio/voices")
+ if not wav.endswith(".txt")
+ ]
+
+
+def get_stable_diffusion_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/sd_models")
+ if model.endswith((".safetensors", ".ckpt", ".gguf"))
+ or (not model.endswith((".txt", ".py")) and not os.path.isdir(os.path.join("inputs/image/sd_models", model)))
+ ]
+
+
audiocraft_models_list = [None] + ["musicgen-stereo-medium", "audiogen-medium", "musicgen-stereo-melody", "musicgen-medium", "musicgen-melody", "musicgen-large",
"hybrid-magnet-medium", "magnet-medium-30sec", "magnet-medium-10sec", "audio-magnet-medium"]
-vae_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models/vae") if
- model.endswith(".safetensors") or not model.endswith(".txt")]
-flux_vae_models_list = [None] + [model for model in os.listdir("inputs/image/flux/flux-vae") if
- model.endswith(".safetensors") or not model.endswith(".txt")]
-lora_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models/lora") if
- model.endswith(".safetensors") or model.endswith(".pt")]
-quantized_flux_models_list = [None] + [model for model in os.listdir("inputs/image/flux/quantize-flux") if
- model.endswith(".gguf") or model.endswith(".safetensors") or not model.endswith(".txt") and not model.endswith(".safetensors") and not model.endswith(".py")]
-flux_lora_models_list = [None] + [model for model in os.listdir("inputs/image/flux/flux-lora") if
- model.endswith(".safetensors")]
-auraflow_lora_models_list = [None] + [model for model in os.listdir("inputs/image/auraflow-lora") if
- model.endswith(".safetensors")]
-kolors_lora_models_list = [None] + [model for model in os.listdir("inputs/image/kolors-lora") if
- model.endswith(".safetensors")]
-textual_inversion_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models/embedding") if model.endswith(".pt") or model.endswith(".safetensors")]
-inpaint_models_list = [None] + [model for model in
- os.listdir("inputs/image/sd_models/inpaint")
- if (model.endswith(".safetensors") or model.endswith(".ckpt") or not model.endswith(".txt"))]
+
+
+def get_vae_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/sd_models/vae")
+ if model.endswith(".safetensors") or not model.endswith(".txt")
+ ]
+
+
+def get_flux_vae_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/flux/flux-vae")
+ if model.endswith(".safetensors") or not model.endswith(".txt")
+ ]
+
+
+def get_lora_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/sd_models/lora")
+ if model.endswith((".safetensors", ".pt"))
+ ]
+
+
+def get_quantized_flux_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/flux/quantize-flux")
+ if model.endswith((".gguf", ".safetensors"))
+        or (not model.endswith((".txt", ".py")))  # .safetensors is already accepted by the clause above
+ ]
+
+
+def get_flux_lora_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/flux/flux-lora")
+ if model.endswith(".safetensors")
+ ]
+
+
+def get_auraflow_lora_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/auraflow-lora")
+ if model.endswith(".safetensors")
+ ]
+
+
+def get_kolors_lora_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/kolors-lora")
+ if model.endswith(".safetensors")
+ ]
+
+
+def get_textual_inversion_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/sd_models/embedding")
+ if model.endswith((".pt", ".safetensors"))
+ ]
+
+
+def get_inpaint_models():
+ return [None] + [
+ model for model in os.listdir("inputs/image/sd_models/inpaint")
+        if model.endswith((".safetensors", ".ckpt")) or not model.endswith(".txt")  # NOTE(review): "or not .txt" admits every non-.txt entry (dirs too); kept to match original behavior — confirm intent
+ ]
+
+
controlnet_models_list = [None, "openpose", "depth", "canny", "lineart", "scribble"]
-rvc_models_list = [model_folder for model_folder in os.listdir("inputs/audio/rvc_models")
- if os.path.isdir(os.path.join("inputs/audio/rvc_models", model_folder))
- and any(file.endswith('.pth') for file in os.listdir(os.path.join("inputs/audio/rvc_models", model_folder)))]
-
-
-def reload_model_lists():
- global llm_models_list, llm_lora_models_list, speaker_wavs_list, stable_diffusion_models_list, vae_models_list, lora_models_list, quantized_flux_models_list, flux_lora_models_list, auraflow_lora_models_list, kolors_lora_models_list, textual_inversion_models_list, inpaint_models_list, rvc_models_list
-
- llm_models_list = [None, "Moondream2-Image", "LLaVA-NeXT-Video", "Qwen2-Audio"] + [model for model in os.listdir("inputs/text/llm_models") if
- not model.endswith(".txt") and model != "vikhyatk" and model != "lora"]
- llm_lora_models_list = [None] + [model for model in os.listdir("inputs/text/llm_models/lora") if
- not model.endswith(".txt")]
- speaker_wavs_list = [None] + [wav for wav in os.listdir("inputs/audio/voices") if not wav.endswith(".txt")]
- stable_diffusion_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models")
- if (model.endswith(".safetensors") or model.endswith(
- ".ckpt") or model.endswith(".gguf") or not model.endswith(".txt") and not model.endswith(
- ".py") and not os.path.isdir(os.path.join("inputs/image/sd_models")))]
- vae_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models/vae") if
- model.endswith(".safetensors") or not model.endswith(".txt")]
- flux_vae_models_list = [None] + [model for model in os.listdir("inputs/image/flux/flux-vae") if
- model.endswith(".safetensors") or not model.endswith(".txt")]
- lora_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models/lora") if
- model.endswith(".safetensors") or model.endswith(".pt")]
- quantized_flux_models_list = [None] + [model for model in os.listdir("inputs/image/flux/quantize-flux") if
- model.endswith(".gguf") or model.endswith(".safetensors") or not model.endswith(".txt") and not model.endswith(
- ".safetensors") and not model.endswith(".py")]
- flux_lora_models_list = [None] + [model for model in os.listdir("inputs/image/flux/flux-lora") if
- model.endswith(".safetensors")]
- auraflow_lora_models_list = [None] + [model for model in os.listdir("inputs/image/auraflow-lora") if
- model.endswith(".safetensors")]
- kolors_lora_models_list = [None] + [model for model in os.listdir("inputs/image/kolors-lora") if
- model.endswith(".safetensors")]
- textual_inversion_models_list = [None] + [model for model in os.listdir("inputs/image/sd_models/embedding") if
- model.endswith(".pt") or model.endswith(".safetensors")]
- inpaint_models_list = [None] + [model for model in
- os.listdir("inputs/image/sd_models/inpaint")
- if (model.endswith(".safetensors") or model.endswith(".ckpt") or not model.endswith(
- ".txt"))]
- rvc_models_list = [model_folder for model_folder in os.listdir("inputs/audio/rvc_models")
- if os.path.isdir(os.path.join("inputs/audio/rvc_models", model_folder))
- and any(
- file.endswith('.pth') for file in os.listdir(os.path.join("inputs/audio/rvc_models", model_folder)))]
-
- chat_files = get_existing_chats()
-
- gallery_files = get_output_files()
-
- return [llm_models_list, llm_lora_models_list, speaker_wavs_list, stable_diffusion_models_list, vae_models_list, flux_vae_models_list, lora_models_list, quantized_flux_models_list, flux_lora_models_list, auraflow_lora_models_list, kolors_lora_models_list, textual_inversion_models_list, inpaint_models_list, rvc_models_list, chat_files, gallery_files]
-
-
-def reload_interface():
- updated_lists = reload_model_lists()[:16]
- return [gr.Dropdown(choices=list) for list in updated_lists]
+
+
+def get_rvc_models():
+ return [
+ model_folder for model_folder in os.listdir("inputs/audio/rvc_models")
+ if os.path.isdir(os.path.join("inputs/audio/rvc_models", model_folder))
+ and any(file.endswith('.pth') for file in os.listdir(os.path.join("inputs/audio/rvc_models", model_folder)))
+ ]
+
+
+model_lists = {
+ "stable_diffusion": get_stable_diffusion_models(),
+ "llm": get_llm_models(),
+ "llm_lora": get_llm_lora_models(),
+ "speaker_wavs": get_speaker_wavs(),
+ "vae": get_vae_models(),
+ "flux_vae": get_flux_vae_models(),
+ "lora": get_lora_models(),
+ "quantized_flux": get_quantized_flux_models(),
+ "flux_lora": get_flux_lora_models(),
+ "auraflow_lora": get_auraflow_lora_models(),
+ "kolors_lora": get_kolors_lora_models(),
+ "textual_inversion": get_textual_inversion_models(),
+ "inpaint": get_inpaint_models(),
+ "rvc": get_rvc_models(),
+}
def create_footer():
@@ -11315,7 +11362,7 @@ def create_footer():
🦙 llama-cpp-python: 0.3.1
🖼️ stable-diffusion-cpp-python: 0.2.1
🎵 rvc-python: 0.1.5
- ℹ️ gradio: 5.4.0
+ ℹ️ gradio: 5.5.0
"""
return gr.Markdown(footer_html)
@@ -11333,8 +11380,8 @@ def create_footer():
gr.Textbox(label=_("Enter your system prompt", lang)),
gr.Audio(type="filepath", label=_("Record your request (optional)", lang)),
gr.Radio(choices=["Transformers", "GPTQ", "AWQ", "BNB", "Llama", "ExLlamaV2"], label=_("Select model type", lang), value="Transformers"),
- gr.Dropdown(choices=llm_models_list, label=_("Select LLM model", lang), value=None),
- gr.Dropdown(choices=llm_lora_models_list, label=_("Select LoRA model (optional)", lang), value=None),
+ gr.Dropdown(choices=model_lists["llm"], label=_("Select LLM model", lang), value=None),
+ gr.Dropdown(choices=model_lists["llm_lora"], label=_("Select LoRA model (optional)", lang), value=None),
gr.Dropdown(choices=get_existing_chats(), label=_("Select existing chat (optional)", lang), value=None)
],
additional_inputs=[
@@ -11375,7 +11422,7 @@ def create_footer():
gr.Slider(minimum=1, maximum=10, value=1, step=1, label=_("Num return sequences", lang)),
gr.Radio(choices=["txt", "json"], label=_("Select chat history format", lang), value="txt", interactive=True),
gr.HTML(_("
TTS Settings
", lang)),
- gr.Dropdown(choices=speaker_wavs_list, label=_("Select voice", lang), interactive=True),
+ gr.Dropdown(choices=model_lists["speaker_wavs"], label=_("Select voice", lang), interactive=True),
gr.Dropdown(choices=["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn", "ja", "hu", "ko", "hi"], label=_("Select language", lang), interactive=True),
gr.Slider(minimum=0.1, maximum=1.9, value=1.0, step=0.1, label=_("TTS Temperature", lang), interactive=True),
gr.Slider(minimum=0.01, maximum=1.0, value=0.9, step=0.01, label=_("TTS Top P", lang), interactive=True),
@@ -11407,7 +11454,7 @@ def create_footer():
],
additional_inputs=[
gr.HTML(_("TTS Settings
", lang)),
- gr.Dropdown(choices=speaker_wavs_list, label=_("Select voice", lang), interactive=True),
+ gr.Dropdown(choices=model_lists["speaker_wavs"], label=_("Select voice", lang), interactive=True),
gr.Dropdown(choices=["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn", "ja", "hu", "ko", "hi"], label=_("Select language", lang), interactive=True),
gr.Slider(minimum=0.1, maximum=1.9, value=1.0, step=0.1, label=_("TTS Temperature", lang), interactive=True),
gr.Slider(minimum=0.01, maximum=1.0, value=0.9, step=0.01, label=_("TTS Top P", lang), interactive=True),
@@ -11540,12 +11587,12 @@ def create_footer():
gr.Textbox(label=_("Enter your prompt", lang), placeholder=_("+ and - for Weighting; ('p', 'p').blend(0.x, 0.x) for Blending; ['p', 'p', 'p'].and() for Conjunction", lang)),
gr.Textbox(label=_("Enter your negative prompt", lang), value=""),
gr.Dropdown(choices=list(styles.keys()), label=_("Select Style (optional)", lang), value=None),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Checkbox(label=_("Enable Quantize", lang), value=False),
- gr.Dropdown(choices=vae_models_list, label=_("Select VAE model (optional)", lang), value=None),
- gr.Dropdown(choices=lora_models_list, label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
+ gr.Dropdown(choices=model_lists["vae"], label=_("Select VAE model (optional)", lang), value=None),
+ gr.Dropdown(choices=model_lists["lora"], label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
gr.Textbox(label=_("LoRA Scales", lang)),
- gr.Dropdown(choices=textual_inversion_models_list, label=_("Select Embedding models (optional)", lang), value=None, multiselect=True),
+ gr.Dropdown(choices=model_lists["textual_inversion"], label=_("Select Embedding models (optional)", lang), value=None, multiselect=True),
gr.HTML(_("StableDiffusion Settings
", lang)),
gr.Radio(choices=["SD", "SD2", "SDXL"], label=_("Select model type", lang), value="SD"),
gr.Dropdown(choices=[
@@ -11612,12 +11659,12 @@ def create_footer():
gr.Image(label=_("Initial image", lang), type="filepath"),
gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.01, label=_("Strength (Initial image)", lang)),
gr.Radio(choices=["SD", "SD2", "SDXL"], label=_("Select model type", lang), value="SD"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Checkbox(label=_("Enable Quantize", lang), value=False),
- gr.Dropdown(choices=vae_models_list, label=_("Select VAE model (optional)", lang), value=None),
- gr.Dropdown(choices=lora_models_list, label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
+ gr.Dropdown(choices=model_lists["vae"], label=_("Select VAE model (optional)", lang), value=None),
+ gr.Dropdown(choices=model_lists["lora"], label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
gr.Textbox(label=_("LoRA Scales", lang)),
- gr.Dropdown(choices=textual_inversion_models_list, label=_("Select Embedding models (optional)", lang), value=None, multiselect=True),
+ gr.Dropdown(choices=model_lists["textual_inversion"], label=_("Select Embedding models (optional)", lang), value=None, multiselect=True),
gr.Textbox(label=_("Seed (optional)", lang), value=""),
gr.Button(value=_("Stop generation", lang), interactive=True, variant="stop")
],
@@ -11739,7 +11786,7 @@ def create_footer():
"UniPCMultistepScheduler", "LCMScheduler", "DPMSolverSDEScheduler",
"TCDScheduler", "DDIMScheduler", "PNDMScheduler", "DDPMScheduler"
], label=_("Select scheduler", lang), value="EulerDiscreteScheduler"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Dropdown(choices=controlnet_models_list, label=_("Select ControlNet model", lang), value=None),
gr.Textbox(label=_("Seed (optional)", lang), value=""),
gr.Button(value=_("Stop generation", lang), interactive=True, variant="stop")
@@ -11830,8 +11877,8 @@ def create_footer():
"UniPCMultistepScheduler", "LCMScheduler", "DPMSolverSDEScheduler",
"TCDScheduler", "DDIMScheduler", "PNDMScheduler", "DDPMScheduler"
], label=_("Select scheduler", lang), value="EulerDiscreteScheduler"),
- gr.Dropdown(choices=inpaint_models_list, label=_("Select Inpaint model", lang), value=None),
- gr.Dropdown(choices=vae_models_list, label=_("Select VAE model (optional)", lang), value=None),
+ gr.Dropdown(choices=model_lists["inpaint"], label=_("Select Inpaint model", lang), value=None),
+ gr.Dropdown(choices=model_lists["vae"], label=_("Select VAE model (optional)", lang), value=None),
gr.Textbox(label=_("Seed (optional)", lang), value=""),
gr.Button(value=_("Stop generation", lang), interactive=True, variant="stop")
],
@@ -11864,7 +11911,7 @@ def create_footer():
gr.Textbox(label=_("Enter your negative prompt", lang), value=""),
gr.Image(label=_("Initial image", lang), type="filepath"),
gr.Radio(choices=["SD", "SD2", "SDXL"], label=_("Select model type", lang), value="SD"),
- gr.Dropdown(choices=inpaint_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["inpaint"], label=_("Select StableDiffusion model", lang), value=None),
gr.Textbox(label=_("Seed (optional)", lang), value="")
],
additional_inputs=[
@@ -11897,7 +11944,7 @@ def create_footer():
gr.Textbox(label=_("Enter GLIGEN phrases", lang), value=""),
gr.Textbox(label=_("Enter GLIGEN boxes", lang), value=""),
gr.Radio(choices=["SD", "SD2", "SDXL"], label=_("Select model type", lang), value="SD"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Textbox(label=_("Seed (optional)", lang), value="")
],
additional_inputs=[
@@ -11989,7 +12036,7 @@ def create_footer():
gr.Image(label=_("Initial GIF", lang), type="filepath"),
gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.01, label=_("Strength (Initial GIF)", lang)),
gr.Radio(choices=["sd", "sdxl"], label=_("Select model type", lang), value="sd"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Textbox(label=_("Seed (optional)", lang), value="")
],
additional_inputs=[
@@ -12119,15 +12166,15 @@ def create_footer():
gr.Textbox(label=_("Enter your negative prompt", lang), placeholder=_("(prompt:x.x) for Weighting", lang), value=""),
gr.Radio(choices=["Diffusers", "Safetensors"], label=_("Select model type", lang), value="Diffusers"),
gr.Radio(choices=["3-Medium", "3.5-Large", "3.5-Large-Turbo"], label=_("Select Diffusers model", lang), value="3-Medium"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Checkbox(label=_("Enable Quantize", lang), value=False),
gr.Radio(choices=["SD3", "SD3.5"], label=_("Select quantize model type", lang), value="SD3"),
gr.Textbox(label=_("Seed (optional)", lang), value=""),
gr.Button(value=_("Stop generation", lang), interactive=True, variant="stop")
],
additional_inputs=[
- gr.Dropdown(choices=vae_models_list, label=_("Select VAE model (optional)", lang), value=None),
- gr.Dropdown(choices=lora_models_list, label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
+ gr.Dropdown(choices=model_lists["vae"], label=_("Select VAE model (optional)", lang), value=None),
+ gr.Dropdown(choices=model_lists["lora"], label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
gr.Textbox(label=_("LoRA Scales", lang)),
gr.Dropdown(choices=["FlowMatchEulerDiscreteScheduler", "FlowMatchHeunDiscreteScheduler"], label=_("Select scheduler", lang), value="FlowMatchEulerDiscreteScheduler"),
gr.Slider(minimum=1, maximum=100, value=40, step=1, label=_("Steps", lang)),
@@ -12163,7 +12210,7 @@ def create_footer():
gr.Image(label=_("Initial image", lang), type="filepath"),
gr.Slider(minimum=0.0, maximum=1.0, value=0.8, step=0.01, label=_("Strength (Initial image)", lang)),
gr.Radio(choices=["3-Medium", "3.5-Large", "3.5-Large-Turbo"], label=_("Select Diffusers model", lang), value="3-Medium"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Checkbox(label=_("Enable Quantize", lang), value=False),
gr.Radio(choices=["SD3", "SD3.5"], label=_("Select quantize model type", lang), value="SD3"),
gr.Textbox(label=_("Seed (optional)", lang), value=""),
@@ -12314,7 +12361,7 @@ def create_footer():
gr.Textbox(label=_("Enter your negative prompt", lang), value=""),
gr.Image(label=_("IP-Adapter Image", lang), type="filepath"),
gr.Radio(choices=["SD", "SDXL"], label=_("Select model type", lang), value="SD"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
gr.Textbox(label=_("Seed (optional)", lang), value="")
],
additional_inputs=[
@@ -12347,7 +12394,7 @@ def create_footer():
gr.Image(label=_("Face image", lang), type="filepath"),
gr.Slider(minimum=0.1, maximum=2, value=1, step=0.1, label=_("Scale (Face image)", lang)),
gr.Radio(choices=["SD", "SDXL"], label=_("Select model type", lang), value="SD"),
- gr.Dropdown(choices=stable_diffusion_models_list, label=_("Select StableDiffusion model", lang), value=None),
+ gr.Dropdown(choices=model_lists["stable_diffusion"], label=_("Select StableDiffusion model", lang), value=None),
],
additional_inputs=[
gr.Slider(minimum=1, maximum=100, value=30, step=1, label=_("Steps", lang)),
@@ -12547,14 +12594,14 @@ def create_footer():
inputs=[
gr.Textbox(label=_("Enter your prompt", lang)),
gr.Radio(choices=["FLUX.1-schnell", "FLUX.1-dev"], label=_("Select model type", lang), value="FLUX.1-schnell"),
- gr.Dropdown(choices=quantized_flux_models_list, label=_("Select safetensors Flux model (GGUF if enabled quantize)", lang), value=None),
+ gr.Dropdown(choices=model_lists["quantized_flux"], label=_("Select safetensors Flux model (GGUF if enabled quantize)", lang), value=None),
gr.Checkbox(label=_("Enable Quantize", lang), value=False),
gr.Textbox(label=_("Seed (optional)", lang), value=""),
gr.Button(value=_("Stop generation", lang), interactive=True, variant="stop")
],
additional_inputs=[
- gr.Dropdown(choices=flux_vae_models_list, label=_("Select VAE model (optional)", lang), value=None),
- gr.Dropdown(choices=flux_lora_models_list, label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
+ gr.Dropdown(choices=model_lists["flux_vae"], label=_("Select VAE model (optional)", lang), value=None),
+ gr.Dropdown(choices=model_lists["flux_lora"], label=_("Select LORA models (optional)", lang), value=None, multiselect=True),
gr.Textbox(label=_("LoRA Scales", lang)),
gr.Slider(minimum=0.0, maximum=10.0, value=0.0, step=0.1, label=_("Guidance Scale", lang)),
gr.Slider(minimum=256, maximum=2048, value=768, step=64, label=_("Height", lang)),
@@ -12586,15 +12633,15 @@ def create_footer():
gr.Image(label=_("Initial image", lang), type="filepath"),
gr.Dropdown(choices=["FLUX.1-schnell", "FLUX.1-dev"], label=_("Select Flux model", lang),
value="FLUX.1-schnell"),
- gr.Dropdown(choices=quantized_flux_models_list,
+ gr.Dropdown(choices=model_lists["quantized_flux"],
label=_("Select quantized Flux model (optional if enabled quantize)", lang), value=None),
gr.Checkbox(label=_("Enable Quantize", lang), value=False),
gr.Textbox(label=_("Seed (optional)", lang), value=""),
gr.Button(value=_("Stop generation", lang), interactive=True, variant="stop")
],
additional_inputs=[
- gr.Dropdown(choices=vae_models_list, label=_("Select VAE model (optional)", lang), value=None),
- gr.Dropdown(choices=flux_lora_models_list, label=_("Select LORA models (optional)", lang), value=None,
+ gr.Dropdown(choices=model_lists["flux_vae"], label=_("Select VAE model (optional)", lang), value=None),
+ gr.Dropdown(choices=model_lists["flux_lora"], label=_("Select LORA models (optional)", lang), value=None,
multiselect=True),
gr.Textbox(label=_("LoRA Scales", lang)),
gr.Slider(minimum=1, maximum=100, value=4, step=1, label=_("Steps", lang)),
@@ -12798,7 +12845,7 @@ def create_footer():
gr.Textbox(label=_("Seed (optional)", lang), value="")
],
additional_inputs=[
- gr.Dropdown(choices=kolors_lora_models_list, label=_("Select LORA models (optional)", lang), value=None,
+ gr.Dropdown(choices=model_lists["kolors_lora"], label=_("Select LORA models (optional)", lang), value=None,
multiselect=True),
gr.Textbox(label=_("LoRA Scales", lang)),
gr.Slider(minimum=1.0, maximum=20.0, value=6.5, step=0.1, label=_("Guidance Scale", lang)),
@@ -12891,7 +12938,7 @@ def create_footer():
gr.Textbox(label=_("Seed (optional)", lang), value="")
],
additional_inputs=[
- gr.Dropdown(choices=auraflow_lora_models_list, label=_("Select LORA models (optional)", lang), value=None,
+ gr.Dropdown(choices=model_lists["auraflow_lora"], label=_("Select LORA models (optional)", lang), value=None,
multiselect=True),
gr.Textbox(label=_("LoRA Scales", lang)),
gr.Slider(minimum=1, maximum=100, value=25, step=1, label=_("Steps", lang)),
@@ -13597,7 +13644,7 @@ def create_footer():
fn=process_rvc,
inputs=[
gr.Audio(label=_("Input audio", lang), type="filepath"),
- gr.Dropdown(choices=rvc_models_list, label=_("Select RVC model", lang), value=None)
+ gr.Dropdown(choices=model_lists["rvc"], label=_("Select RVC model", lang), value=None)
],
additional_inputs=[
gr.Radio(choices=['harvest', "crepe", "rmvpe", 'pm'], label=_("RVC Method", lang), value="harvest", interactive=True),
@@ -14043,49 +14090,9 @@ def create_footer():
stableaudio_interface.input_components[3].click(stop_generation, [], [], queue=False)
audioldm2_interface.input_components[4].click(stop_generation, [], [], queue=False)
- reload_button = gr.Button(_("Reload interface", lang))
close_button = gr.Button(_("Close terminal", lang))
folder_button = gr.Button(_("Outputs", lang))
- dropdowns_to_update = [
- chat_interface.input_components[4],
- chat_interface.input_components[5],
- chat_interface.input_components[6],
- chat_interface.input_components[44],
- tts_stt_interface.input_components[3],
- txt2img_interface.input_components[3],
- txt2img_interface.input_components[5],
- txt2img_interface.input_components[6],
- txt2img_interface.input_components[8],
- img2img_interface.input_components[5],
- img2img_interface.input_components[7],
- img2img_interface.input_components[8],
- img2img_interface.input_components[10],
- controlnet_interface.input_components[4],
- inpaint_interface.input_components[6],
- inpaint_interface.input_components[7],
- outpaint_interface.input_components[4],
- gligen_interface.input_components[5],
- animatediff_interface.input_components[5],
- sd3_txt2img_interface.input_components[4],
- sd3_txt2img_interface.input_components[9],
- sd3_txt2img_interface.input_components[10],
- sd3_img2img_interface.input_components[6],
- t2i_ip_adapter_interface.input_components[4],
- ip_adapter_faceid_interface.input_components[5],
- flux_txt2img_interface.input_components[2],
- flux_txt2img_interface.input_components[6],
- flux_txt2img_interface.input_components[7],
- flux_img2img_interface.input_components[3],
- flux_img2img_interface.input_components[7],
- flux_img2img_interface.input_components[8],
- auraflow_interface.input_components[3],
- kolors_txt2img_interface.input_components[3],
- rvc_interface.input_components[1],
- gallery_interface.input_components[0]
- ]
-
- reload_button.click(reload_interface, outputs=dropdowns_to_update[:16])
close_button.click(close_terminal, [], [], queue=False)
folder_button.click(open_outputs_folder, [], [], queue=False)
diff --git "a/RequirementsFiles/requirements-\320\241PU.txt" "b/RequirementsFiles/requirements-\320\241PU.txt"
index 21e7d548..7b5bda6d 100644
--- "a/RequirementsFiles/requirements-\320\241PU.txt"
+++ "b/RequirementsFiles/requirements-\320\241PU.txt"
@@ -84,7 +84,7 @@ gitdb==4.0.11
GitPython==3.1.43
google-pasta==0.2.0
gpytoolbox==0.3.2
-gradio==5.4.0
+gradio==5.5.0
gradio_client==1.4.2
grpcio==1.62.2
gruut==2.2.3
diff --git a/RequirementsFiles/requirements.txt b/RequirementsFiles/requirements.txt
index 3043f0d6..44c81727 100644
--- a/RequirementsFiles/requirements.txt
+++ b/RequirementsFiles/requirements.txt
@@ -86,7 +86,7 @@ GitPython==3.1.43
google-pasta==0.2.0
GPUtil==1.4.0
gpytoolbox==0.3.2
-gradio==5.4.0
+gradio==5.5.0
gradio_client==1.4.2
grpcio==1.62.2
gruut==2.2.3