import os
import gc
import time
import random
import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from diffusers import (
    ZImagePipeline,
    ZImageTransformer2DModel,
    GGUFQuantizationConfig,
    FlowMatchEulerDiscreteScheduler,
)
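# Note: ZImagePipeline / ZImageTransformer2DModel need a recent diffusers
# build with Z-Image and GGUF support; loading a .gguf checkpoint also
# assumes the `gguf` package is installed.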
# =========================
# HARD CPU MODE
# =========================
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
cpu_cores = os.cpu_count() or 1
torch.set_num_threads(cpu_cores)
torch.set_num_interop_threads(cpu_cores)
os.environ["OMP_NUM_THREADS"] = str(cpu_cores)
os.environ["MKL_NUM_THREADS"] = str(cpu_cores)
torch.backends.mkldnn.enabled = True
torch.backends.quantized.engine = "fbgemm"
device = torch.device("cpu")
# bfloat16 rather than float16: several CPU kernels have no Half
# implementation, while bfloat16 is broadly supported on modern x86.
dtype = torch.bfloat16
# =========================
# MODEL CONFIG
# =========================
BASE_MODEL_ID = "Tongyi-MAI/Z-Image-Turbo"
GGUF_REPO_ID = "unsloth/Z-Image-Turbo-GGUF"
GGUF_FILENAME = "z-image-turbo-Q2_K.gguf"
CACHE_DIR = "models"
os.makedirs(CACHE_DIR, exist_ok=True)
def download_gguf():
    # hf_hub_download's default cache uses a nested layout, so the flat-path
    # existence check only matches if we download with local_dir; this also
    # lets repeated launches skip the download entirely.
    local_path = os.path.join(CACHE_DIR, GGUF_FILENAME)
    if os.path.exists(local_path):
        return local_path
    return hf_hub_download(
        repo_id=GGUF_REPO_ID,
        filename=GGUF_FILENAME,
        local_dir=CACHE_DIR,
    )
# =========================
# LOAD PIPELINE ULTRA LEAN
# =========================
def load_pipeline():
    scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
        BASE_MODEL_ID,
        subfolder="scheduler",
        cache_dir=CACHE_DIR,
    )
    pipe = ZImagePipeline.from_pretrained(
        BASE_MODEL_ID,
        scheduler=scheduler,
        torch_dtype=dtype,
        cache_dir=CACHE_DIR,
        low_cpu_mem_usage=True,
    )
    # Swap the full-precision transformer for the Q2_K GGUF build, which is
    # the main RAM saving in this setup.
    gguf_path = download_gguf()
    transformer = ZImageTransformer2DModel.from_single_file(
        gguf_path,
        quantization_config=GGUFQuantizationConfig(compute_dtype=dtype),
        torch_dtype=dtype,
    ).to(device)
    pipe.transformer = transformer
    pipe.enable_attention_slicing()
    pipe.enable_vae_slicing()
    # enable_sequential_cpu_offload() is deliberately not called: it exists
    # to shuttle weights between an accelerator and the CPU, and errors out
    # when no accelerator is visible.
    pipe = pipe.to(device)
    return pipe
pipe = load_pipeline()
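# The pipeline's own tqdm bar just clutters the Space logs, since progress is
# surfaced through the Gradio callback below; set_progress_bar_config is the
# standard DiffusionPipeline hook for silencing it (assumed inherited here).
pipe.set_progress_bar_config(disable=True)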
# =========================
# GENERATION (MIN RAM)
# =========================
def generate(prompt, seed, progress=gr.Progress()):
    if not prompt:
        raise gr.Error("Prompt required")
    seed = int(seed)
    if seed < 0:
        seed = random.randint(0, 2**31 - 1)
    generator = torch.Generator(device=device).manual_seed(seed)
    # 4 steps at 256x256 keeps a Q2_K CPU run inside a small RAM and
    # latency budget.
    steps = 4
    width = 256
    height = 256
    start = time.time()

    # Recent diffusers removed the old callback/callback_steps pair; progress
    # is reported via callback_on_step_end, which receives the pipeline and a
    # kwargs dict and must return that dict.
    def on_step_end(pipeline, step, timestep, callback_kwargs):
        done = step + 1
        elapsed = time.time() - start
        eta = (elapsed / done) * (steps - done)
        progress(done / steps, desc=f"Step {done}/{steps} | ETA {eta:.1f}s")
        return callback_kwargs

    with torch.inference_mode():
        gc.collect()
        image = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=steps,
            guidance_scale=1.0,
            generator=generator,
            callback_on_step_end=on_step_end,
        ).images[0]
    gc.collect()
    return image, seed
# =========================
# UI
# =========================
with gr.Blocks(title="Z-Image Turbo Ultra Lean CPU") as demo:
    gr.Markdown("# Z-Image Turbo Q2_K — Ultra Lean 16GB CPU Mode")
    prompt = gr.Textbox(label="Prompt", lines=3)
    seed = gr.Number(label="Seed (-1 = random)", value=-1, precision=0)
    btn = gr.Button("Generate")
    image_out = gr.Image(label="Result")
    seed_out = gr.Number(label="Seed used", interactive=False)
    btn.click(generate, inputs=[prompt, seed], outputs=[image_out, seed_out])
# Gradio 4 renamed queue()'s concurrency_count; default_concurrency_limit=1
# keeps generations serialized so two jobs never compete for RAM.
demo.queue(max_size=5, default_concurrency_limit=1)
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)