# Hugging Face Space — Z-Image Turbo (GGUF) demo app. Status at capture time: Running.
import gradio as gr
import torch
from diffusers import GGUFQuantizationConfig, ZImagePipeline, ZImageTransformer2DModel
from PIL import Image
# 1. LOAD THE GGUF MODEL
# This pulls the optimized ~5GB quantized transformer instead of the full-precision weights.
model_id = "Tongyi-MAI/Z-Image-Turbo"
gguf_path = "https://huggingface.co/unsloth/Z-Image-Turbo-GGUF/resolve/main/z-image-turbo-Q4_K_M.gguf"

print("Loading optimized GGUF model...")

# Diffusers loads GGUF checkpoints through `from_single_file` on the model class,
# with a GGUFQuantizationConfig supplying the compute dtype — `from_pretrained`
# has no `transformer_path` kwarg. float32 compute for CPU stability.
transformer = ZImageTransformer2DModel.from_single_file(
    gguf_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.float32),
    torch_dtype=torch.float32,
)

# Build the full pipeline from the hub repo, swapping in the quantized transformer.
pipe = ZImagePipeline.from_pretrained(
    model_id,
    transformer=transformer,
    torch_dtype=torch.float32,  # float32 for CPU stability
)
pipe.to("cpu")
| # --- FUNCTIONS --- | |
def generate_t2i(prompt, steps, guidance):
    """Run a text-to-image pass and return the first generated PIL image."""
    result = pipe(
        prompt=prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
    )
    return result.images[0]
def generate_i2i(image, prompt, strength, steps, guidance=0.0):
    """Run an image-to-image pass and return the first generated PIL image.

    Args:
        image: Source PIL image (or None, in which case nothing is generated).
        prompt: Text describing the desired edit.
        strength: How strongly the source image is altered (0.1–1.0).
        steps: Number of inference steps.
        guidance: Classifier-free guidance scale. Defaults to 0.0 — the
            turbo-model default used elsewhere in this app — so callers that
            wire up only the first four inputs still work.
    """
    if image is None:
        return None
    # Resize to 512x512 to keep CPU usage low.
    image = image.convert("RGB").resize((512, 512))
    return pipe(
        prompt=prompt,
        image=image,
        strength=strength,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
    ).images[0]
# --- UI LAYOUT ---
# NOTE: `theme` is an argument of gr.Blocks(), not of launch() — it was
# previously (incorrectly) passed to demo.launch().
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🚀 Z-Image Turbo (Unsloth GGUF Edition)")
    gr.Markdown("Using the Q4_K_M quantization to save your RAM.")
    with gr.Tabs():
        with gr.TabItem("Text to Image"):
            with gr.Row():
                with gr.Column():
                    t2i_prompt = gr.Textbox(label="Prompt", value="A futuristic city in a liminal space")
                    t2i_steps = gr.Slider(1, 12, value=8, step=1, label="Steps")
                    t2i_guidance = gr.Slider(0.0, 2.0, value=0.0, step=0.1, label="Guidance")
                    t2i_btn = gr.Button("Generate")
                with gr.Column():
                    t2i_output = gr.Image(label="Result")
            t2i_btn.click(generate_t2i, inputs=[t2i_prompt, t2i_steps, t2i_guidance], outputs=t2i_output)
        with gr.TabItem("Image to Image"):
            with gr.Row():
                with gr.Column():
                    i2i_input = gr.Image(type="pil", label="Source")
                    i2i_prompt = gr.Textbox(label="Edit Prompt")
                    i2i_strength = gr.Slider(0.1, 1.0, value=0.5, step=0.05, label="Edit Strength")
                    i2i_steps = gr.Slider(1, 12, value=10, step=1, label="Steps")
                    # Guidance control was missing: generate_i2i takes 5 inputs
                    # but only 4 were wired up, which breaks the click handler.
                    i2i_guidance = gr.Slider(0.0, 2.0, value=0.0, step=0.1, label="Guidance")
                    i2i_btn = gr.Button("Apply")
                with gr.Column():
                    i2i_output = gr.Image(label="Result")
            i2i_btn.click(
                generate_i2i,
                inputs=[i2i_input, i2i_prompt, i2i_strength, i2i_steps, i2i_guidance],
                outputs=i2i_output,
            )

demo.launch(ssr_mode=False)