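# Z-Image Turbo (Q2_K GGUF) text-to-image demo, CPU-only, served through a Gradio UI.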
import os
import time
import random
import gc

import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, AutoModel
from diffusers import (
    ZImagePipeline,
    ZImageTransformer2DModel,
    GGUFQuantizationConfig,
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
)
# =========================
# FORCE CPU ENV
# =========================
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["TOKENIZERS_PARALLELISM"] = "false"

cpu_cores = os.cpu_count() or 1
torch.set_num_threads(cpu_cores)
torch.set_num_interop_threads(cpu_cores)
os.environ["OMP_NUM_THREADS"] = str(cpu_cores)
os.environ["MKL_NUM_THREADS"] = str(cpu_cores)

torch.backends.mkldnn.enabled = True
torch.backends.quantized.engine = "fbgemm"
torch.backends.cudnn.enabled = False
torch.set_float32_matmul_precision("high")

dtype = torch.float32
device = torch.device("cpu")
# =========================
# MODEL CONFIG
# =========================
BASE_MODEL_ID = "Tongyi-MAI/Z-Image-Turbo"
TEXT_ENCODER_ID = "Qwen/Qwen3-4B"
GGUF_REPO_ID = "unsloth/Z-Image-Turbo-GGUF"
GGUF_FILENAME = "z-image-turbo-Q2_K.gguf"
CACHE_DIR = "models"

os.makedirs(CACHE_DIR, exist_ok=True)


def download_if_needed(repo_id, filename):
    # A file placed manually in CACHE_DIR is used as-is; otherwise
    # hf_hub_download manages its own cache layout under CACHE_DIR.
    local_path = os.path.join(CACHE_DIR, filename)
    if os.path.exists(local_path):
        print("Model cached locally.")
        return local_path
    print("Downloading model (first run)...")
    path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        cache_dir=CACHE_DIR,
        resume_download=True,
    )
    print("Download finished.")
    return path
# =========================
# LOAD PIPELINE CPU ONLY
# =========================
def load_pipeline():
    scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
        BASE_MODEL_ID,
        subfolder="scheduler",
        cache_dir=CACHE_DIR,
    )
    vae = AutoencoderKL.from_pretrained(
        BASE_MODEL_ID,
        subfolder="vae",
        torch_dtype=dtype,
        cache_dir=CACHE_DIR,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        TEXT_ENCODER_ID,
        cache_dir=CACHE_DIR,
    )
    text_encoder = AutoModel.from_pretrained(
        TEXT_ENCODER_ID,
        torch_dtype=dtype,
        cache_dir=CACHE_DIR,
    ).to(device)

    gguf_path = download_if_needed(GGUF_REPO_ID, GGUF_FILENAME)
    transformer = ZImageTransformer2DModel.from_single_file(
        gguf_path,
        quantization_config=GGUFQuantizationConfig(compute_dtype=dtype),
        torch_dtype=dtype,
    ).to(device)

    pipe = ZImagePipeline(
        vae=vae.to(device),
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        transformer=transformer,
        scheduler=scheduler,
    ).to(device)
    # Z-Image uses a transformer backbone, not a UNet, so the memory-format
    # and torch.compile tweaks go on pipe.transformer.
    pipe.transformer.to(memory_format=torch.channels_last)
    pipe.text_encoder.to(memory_format=torch.channels_last)
    pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
    pipe.text_encoder = torch.compile(pipe.text_encoder, mode="max-autotune", fullgraph=True)
    return pipe
pipe = load_pipeline()

# Warmup run: trigger torch.compile graph capture before the first real request
with torch.inference_mode():
    _ = pipe(
        prompt="warmup",
        width=256,
        height=256,
        num_inference_steps=1,
        guidance_scale=1.0,
    )
# =========================
# GENERATION WITH PROGRESS
# =========================
def generate(prompt, seed, progress=gr.Progress()):
    if not prompt:
        raise gr.Error("Prompt required")
    if seed < 0:
        seed = random.randint(0, 2**31 - 1)
    generator = torch.Generator(device=device).manual_seed(seed)
    total_steps = 4
    start_time = time.time()

    def step_callback(step, timestep, latents):
        elapsed = time.time() - start_time
        done = step + 1
        avg = elapsed / done
        eta = avg * (total_steps - done)
        progress(done / total_steps, desc=f"Step {done}/{total_steps} | ETA {eta:.1f}s")

    with torch.inference_mode():
        gc.disable()
        try:
            image = pipe(
                prompt=prompt,
                width=256,
                height=256,
                num_inference_steps=total_steps,
                guidance_scale=1.0,
                generator=generator,
                callback=step_callback,
                callback_steps=1,
            ).images[0]
        finally:
            gc.enable()
    return image, seed
# =========================
# UI + QUEUE
# =========================
with gr.Blocks(title="Z-Image Turbo Q2_K CPU MAX") as demo:
    gr.Markdown("# Z-Image Turbo Q2_K — FULL CPU MAX MODE")
    prompt = gr.Textbox(label="Prompt", lines=3)
    seed = gr.Number(label="Seed (-1 random)", value=-1, precision=0)
    btn = gr.Button("Generate")
    image_out = gr.Image()
    seed_out = gr.Number(interactive=False)
    btn.click(generate, inputs=[prompt, seed], outputs=[image_out, seed_out])

demo.queue(max_size=10, concurrency_count=1)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)