import os
import json
import copy
import time
import requests
import random
import logging
import numpy as np
import spaces
from typing import Any, Dict, Iterable, List, Optional, Union

from civitai_utils import get_civitai_safetensors, LORA_CHECKPOINTS_CACHE

import torch
from PIL import Image
import gradio as gr
from diffusers import (
    DiffusionPipeline,
    AutoencoderKL,
    ZImagePipeline,
)
from huggingface_hub import (
    hf_hub_download,
    HfFileSystem,
    ModelCard,
    snapshot_download,
)
from diffusers.utils import load_image

from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes

colors.orange_red = colors.Color(
    name="orange_red",
    c50="#FFF0E5",
    c100="#FFE0CC",
    c200="#FFC299",
    c300="#FFA366",
    c400="#FF8533",
    c500="#FF4500",
    c600="#E63E00",
    c700="#CC3700",
    c800="#B33000",
    c900="#992900",
    c950="#802200",
)

class OrangeRedTheme(Soft):
    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.gray,
        secondary_hue: colors.Color | str = colors.orange_red,  # Use the new color
        neutral_hue: colors.Color | str = colors.slate,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Outfit"),
            "Arial",
            "sans-serif",
        ),
        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"),
            "ui-monospace",
            "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        super().set(
            background_fill_primary="*primary_50",
            background_fill_primary_dark="*primary_900",
            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
            button_primary_text_color="white",
            button_primary_text_color_hover="white",
            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_secondary_text_color="black",
            button_secondary_text_color_hover="white",
            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
            slider_color="*secondary_500",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_primary_shadow="*shadow_drop_lg",
            button_large_padding="11px",
            color_accent_soft="*primary_100",
            block_label_background_fill="*primary_200",
        )

orange_red_theme = OrangeRedTheme()

# Load LoRAs as a list of dictionaries
loras = []
with open(os.path.join(os.getcwd(), "loras.json"), "r") as f:
    loras = json.load(f)

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "Tongyi-MAI/Z-Image-Turbo"

print(f"Loading {base_model} pipeline...")

# Initialize pipeline
pipe = ZImagePipeline.from_pretrained(
    base_model,
    torch_dtype=dtype,
    low_cpu_mem_usage=False,
).to(device)
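# ZeroGPU Spaces can load ahead-of-time (AoT) compiled transformer blocks from a
# Hub artifact; the "fa3" variant below additionally uses FlashAttention-3
# kernels. This is an optional speedup: if loading fails for any reason, the
# except branch keeps the standard eager pipeline.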
FA3...") pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"] spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3") print("Optimization applied successfully.") except Exception as e: print(f"Optimization warning: {e}. Continuing with standard pipeline.") MAX_SEED = np.iinfo(np.int32).max class calculateDuration: def __init__(self, activity_name=""): self.activity_name = activity_name def __enter__(self): self.start_time = time.time() return self def __exit__(self, exc_type, exc_value, traceback): self.end_time = time.time() self.elapsed_time = self.end_time - self.start_time if self.activity_name: print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds") else: print(f"Elapsed time: {self.elapsed_time:.6f} seconds") def update_selection(evt: gr.SelectData, width, height): selected_lora = loras[evt.index] new_placeholder = f"Type a prompt for {selected_lora['title']}" lora_repo = selected_lora["repo"] updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅" if "aspect" in selected_lora: if selected_lora["aspect"] == "portrait": width = 768 height = 1024 elif selected_lora["aspect"] == "landscape": width = 1024 height = 768 else: width = 1024 height = 1024 return ( gr.update(placeholder=new_placeholder), updated_text, evt.index, width, height, ) def load_lora_from_hub(lora: dict, lora_scale: float): """Load LoRA weights from huggingface hub""" with calculateDuration(f"Loading LoRA weights for {lora.get('title')}"): try: pipe.load_lora_weights( lora.get("repo", ""), weight_name=lora.get("weights", None), adapter_name="default", low_cpu_mem_usage=True ) # Set adapter scale pipe.set_adapters(["default"], adapter_weights=[lora_scale]) except Exception as e: print(f"Error loading LoRA: {e}") gr.Warning("Failed to load LoRA weights. Generating with base model.") def load_local_lora(lora: dict, lora_scale: float): """Load LoRA weights from local cache folder""" with calculateDuration(f"Loading LoRA weights for {lora.get('title')}"): try: pipe.load_lora_weights( LORA_CHECKPOINTS_CACHE, cache_dir=LORA_CHECKPOINTS_CACHE, adapter_name="local_lora", weight_name=lora.get("weights", None), local_files_only=True, low_cpu_mem_usage=True ) # Set adapter scale pipe.set_adapters(["local_lora"], adapter_weights=[lora_scale]) except Exception as e: print(f"Error loading LoRA: {e}") gr.Warning("Failed to load LoRA weights. Generating with base model.") @spaces.GPU def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)): # Clean up previous LoRAs in both cases with calculateDuration("Unloading LoRA"): pipe.unload_lora_weights() prompt_mash = prompt # Check if a LoRA is selected if selected_index is not None and selected_index < len(loras): selected_lora = loras[selected_index] trigger_word = selected_lora["trigger_word"] # Prepare Prompt with Trigger Word if len(trigger_word): if "trigger_position" in selected_lora: if selected_lora["trigger_position"] == "prepend": prompt_mash = f"{trigger_word} {prompt}" else: prompt_mash = f"{prompt} {trigger_word}" else: prompt_mash = f"{trigger_word} {prompt}" # Special handling of lora loading if there's a civitai key if selected_lora.get("src") == "civitai": load_local_lora(selected_lora, lora_scale) else: load_lora_from_hub(selected_lora, lora_scale) else: # Base Model Case print("No LoRA selected. 
@spaces.GPU
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index,
             randomize_seed, seed, width, height, lora_scale,
             progress=gr.Progress(track_tqdm=True)):
    # Clean up previously loaded LoRAs in both cases
    with calculateDuration("Unloading LoRA"):
        pipe.unload_lora_weights()

    prompt_mash = prompt

    # Check if a LoRA is selected
    if selected_index is not None and selected_index < len(loras):
        selected_lora = loras[selected_index]
        trigger_word = selected_lora["trigger_word"]

        # Prepare the prompt with the trigger word
        if len(trigger_word):
            if "trigger_position" in selected_lora:
                if selected_lora["trigger_position"] == "prepend":
                    prompt_mash = f"{trigger_word} {prompt}"
                else:
                    prompt_mash = f"{prompt} {trigger_word}"
            else:
                prompt_mash = f"{trigger_word} {prompt}"

        # Special handling of LoRA loading when the entry came from civitai
        if selected_lora.get("src") == "civitai":
            load_local_lora(selected_lora, lora_scale)
        else:
            load_lora_from_hub(selected_lora, lora_scale)
    else:
        # Base model case
        print("No LoRA selected. Running with Base Model.")
        prompt_mash = prompt

    with calculateDuration("Randomizing seed"):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device=device).manual_seed(seed)

    # Note: Z-Image-Turbo is strictly T2I in this reference implementation.
    # Img2Img via image_input is disabled/ignored for this pipeline update.
    with calculateDuration("Generating image"):
        # For Turbo models, guidance_scale is typically 0.0
        forced_guidance = 0.0  # Turbo mode
        final_image = pipe(
            prompt=prompt_mash,
            height=int(height),
            width=int(width),
            num_inference_steps=int(steps),
            guidance_scale=forced_guidance,
            generator=generator,
        ).images[0]

    yield final_image, seed, gr.update(visible=False)

def get_huggingface_safetensors(link) -> dict:
    split_link = link.split("/")
    if len(split_link) == 2:
        model_card = ModelCard.load(link)
        base_model_list = model_card.data.get("base_model")
        # Normalize: the card metadata may hold a single string or a list
        if isinstance(base_model_list, str):
            base_model_list = [base_model_list]
        # Relaxed check to allow Z-Image or FLUX or others, assuming the user
        # knows what they are doing; warn instead of erroring to allow experimentation
        if not base_model_list or base_model_list[0] not in ["Tongyi-MAI/Z-Image-Turbo", "black-forest-labs/FLUX.1-dev"]:
            print("Warning: Base model might not match.")

        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
        trigger_word = model_card.data.get("instance_prompt", "")
        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None

        fs = HfFileSystem()
        safetensors_name = None
        try:
            list_of_files = fs.ls(link, detail=False)
            for file in list_of_files:
                if file.endswith(".safetensors"):
                    safetensors_name = file.split("/")[-1]
                if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
                    image_elements = file.split("/")
                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
        except Exception as e:
            print(e)
            gr.Warning("You didn't include a link to a valid Hugging Face repository with a *.safetensors LoRA")
            raise Exception("You didn't include a link to a valid Hugging Face repository with a *.safetensors LoRA")

        lora_info = {
            "image": image_url,
            "title": split_link[1],
            "repo": link,
            "weights": safetensors_name,
            "trigger_word": trigger_word,
        }
        return lora_info

def check_custom_model(link) -> dict:
    if link.startswith("https://"):
        if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
            link_split = link.split("huggingface.co/")
            return get_huggingface_safetensors(link_split[1])
        elif "civitai" in link:
            return get_civitai_safetensors(link)
    # Always return a dict so callers can safely call .get() on the result
    return {}
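# check_custom_model expects a full URL. Illustrative inputs (hypothetical ids):
#   "https://huggingface.co/user/my-zimage-lora"  -> resolved via the Hub model card
#   "https://civitai.com/models/000000"           -> resolved via civitai_utils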
"+trigger_word+" as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}