import spaces import gradio as gr import torch import os import traceback from diffusers import ZImagePipeline from huggingface_hub import list_repo_files from PIL import Image # ============================================================ # CONFIG # ============================================================ MODEL_ID = "Tongyi-MAI/Z-Image-Turbo" DEFAULT_LORA_REPO = "rahul7star/ZImageLora" DTYPE = torch.bfloat16 DEVICE = "cuda" if torch.cuda.is_available() else "cpu" # ============================================================ # GLOBAL STATE # ============================================================ pipe = None CURRENT_LORA_REPO = None CURRENT_LORA_FILE = None # ============================================================ # LOGGING # ============================================================ def log(msg): print(msg) return msg # ============================================================ # PIPELINE BUILD (ONCE) # ============================================================ try: pipe = ZImagePipeline.from_pretrained( MODEL_ID, torch_dtype=DTYPE, ) pipe.to(DEVICE) log("β Pipeline built successfully") except Exception as e: log("β Pipeline build failed") log(traceback.format_exc()) pipe = None # ============================================================ # HELPERS # ============================================================ def list_loras_from_repo(repo_id: str): try: files = list_repo_files(repo_id) return [f for f in files if f.endswith(".safetensors")] except Exception as e: log(f"β Failed to list LoRAs: {e}") return [] # ============================================================ # IMAGE GENERATION (SAFE LORA LOGIC) # ============================================================ @spaces.GPU() def generate_image(prompt, height, width, steps, seed, guidance_scale): LOGS = [] print(prompt) if pipe is None: return None, [], "β Pipeline not initialized" generator = torch.Generator().manual_seed(int(seed)) placeholder = Image.new("RGB", (width, height), 
(255, 255, 255)) previews = [] # ---- Always start clean ---- try: pipe.unload_lora_weights() except Exception: pass # ---- Load LoRA for this run only ---- if CURRENT_LORA_FILE: try: pipe.load_lora_weights( CURRENT_LORA_REPO, weight_name=CURRENT_LORA_FILE ) LOGS.append(f"π§© LoRA loaded: {CURRENT_LORA_FILE}") except Exception as e: LOGS.append(f"β LoRA load failed: {e}") # ---- Preview steps (lightweight) ---- try: num_previews = min(5, steps) for i in range(num_previews): out = pipe( prompt=prompt, height=height // 4, width=width // 4, num_inference_steps=i + 1, guidance_scale=guidance_scale, generator=generator, ) img = out.images[0].resize((width, height)) previews.append(img) yield None, previews, "\n".join(LOGS) except Exception as e: LOGS.append(f"β οΈ Preview failed: {e}") # ---- Final image ---- try: out = pipe( prompt=prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=guidance_scale, generator=generator, ) final_img = out.images[0] previews.append(final_img) LOGS.append("β Image generated") yield final_img, previews, "\n".join(LOGS) except Exception as e: LOGS.append(f"β Generation failed: {e}") yield placeholder, previews, "\n".join(LOGS) finally: # ---- CRITICAL: unload after run ---- try: pipe.unload_lora_weights() LOGS.append("π§Ή LoRA unloaded") except Exception: pass # ============================================================ # GRADIO UI # ============================================================ css = """ .gradio-container { max-width: 100% !important; padding: 16px 32px !important; } .section { margin-bottom: 12px; } .generate-btn { background: linear-gradient(90deg, #4b6cb7, #182848) !important; color: white !important; font-weight: 600; height: 46px; border-radius: 10px; } .secondary-btn { height: 42px; border-radius: 10px; } textarea, input { border-radius: 10px !important; } """ with gr.Blocks( title="Z-Image-Turbo (Runtime LoRA)", css=css, ) as demo: gr.Markdown( """
Get support • Share results • Discuss LoRAs • Report bugs
👉 Join Discord