programmersd commited on
Commit
abf0f61
Β·
verified Β·
1 Parent(s): 47e9544

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -0
app.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ from diffusers import ZImagePipeline
4
+ import gradio as gr
5
+ import threading
6
+ import queue
7
+ import psutil
8
+
9
# =================== CPU GOD MODE SETTINGS ===================
# Use every available core for intra-op parallelism. (The original called
# torch.set_num_threads(torch.get_num_threads()), which is a no-op.)
torch.set_num_threads(os.cpu_count() or torch.get_num_threads())
# Globally disable autograd for inference. A bare `torch.inference_mode()`
# statement only builds a context manager and discards it — it had no effect.
torch.set_grad_enabled(False)

MODEL_ID = "Tongyi-MAI/Z-Image-Turbo"

# Load the text-to-image pipeline in bf16 and pin it to the CPU.
pipe = ZImagePipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
)
pipe.to("cpu")

# Optional memory/speed tweaks. Availability depends on the installed
# diffusers/accelerate versions, so every one is best-effort.
try:
    # NOTE(review): CPU offload hooks exist to shuttle weights to an
    # accelerator; on a pure-CPU run this is contradictory and can raise,
    # so it must not be a hard requirement (the original let it crash).
    pipe.enable_model_cpu_offload()
except Exception:
    pass

try:
    pipe.transformer.compile(fullgraph=True, dynamic=True)
except Exception:
    pass

try:
    # Single slicing call — the original enabled attention slicing twice.
    pipe.enable_attention_slicing(slice_size="auto")
except Exception:
    pass

# One worker thread per core, capped by torch's intra-op thread count.
MAX_THREADS = min(torch.get_num_threads(), os.cpu_count() or 4)
35
+
36
# =================== QUEUE & WORKERS ===================
# FIFO of pending generation jobs. Each item is the tuple
# (job_id, prompt, width, height, steps, seed, batch, out_folder);
# a None item is the shutdown sentinel for one worker thread.
job_queue = queue.Queue()
# job_id -> human-readable status string polled by the UI.
# NOTE(review): written from worker threads and the Gradio request thread
# with no lock; this relies on CPython dict operations being atomic.
status_dict = {}
39
+
40
def worker(worker_id):
    """Daemon loop: pull jobs from ``job_queue`` and render them with ``pipe``.

    Each job is ``(job_id, prompt, width, height, steps, seed, batch,
    out_folder)``; a ``None`` item is the shutdown sentinel. Progress is
    published to ``status_dict`` for the UI to poll.
    """
    while True:
        job = job_queue.get()
        if job is None:
            # Sentinel: mark it consumed so queue.join() cannot hang, then exit.
            job_queue.task_done()
            break
        job_id, prompt, width, height, steps, seed, batch, out_folder = job
        status_dict[job_id] = f"Worker {worker_id}: Processing..."
        try:
            for i in range(batch):
                # Deterministic, distinct seed per image in the batch.
                img_seed = seed + i
                image = pipe(
                    prompt=prompt,
                    height=height,
                    width=width,
                    num_inference_steps=steps,
                    guidance_scale=0.0,
                    generator=torch.Generator("cpu").manual_seed(img_seed),
                ).images[0]
                out_path = os.path.join(out_folder, f"{job_id}_{i}.png")
                image.save(out_path)
            status_dict[job_id] = f"Worker {worker_id}: Done ({batch} images)"
        except Exception as exc:
            # The original had no error handling: one failed generation killed
            # the worker thread permanently and task_done() was never called.
            # Record the failure and keep the worker alive for the next job.
            status_dict[job_id] = f"Worker {worker_id}: Failed ({exc})"
        finally:
            job_queue.task_done()
61
+
62
# Spawn one daemon worker per allotted thread; ids are 1-based for display.
workers = [
    threading.Thread(target=worker, args=(wid,), daemon=True)
    for wid in range(1, MAX_THREADS + 1)
]
for thread in workers:
    thread.start()
67
+
68
# =================== JOB MANAGEMENT ===================
job_counter = 0
# Guards job_counter: Gradio may service requests from several threads, and
# the bare `global` increment in the original was not atomic.
_job_counter_lock = threading.Lock()
OUTPUT_DIR = "outputs"
os.makedirs(OUTPUT_DIR, exist_ok=True)

def enqueue_job(prompt, width, height, steps, seed, batch):
    """Queue one generation job and return its string id.

    Arguments mirror the pipe() call made by the worker; ``batch`` is the
    number of images rendered with consecutive seeds starting at ``seed``.
    """
    global job_counter
    with _job_counter_lock:
        job_counter += 1
        job_id = f"job_{job_counter}"
    # Publish the status BEFORE the job becomes visible to workers: the
    # original queued first, so a fast worker could write "Processing" and
    # have it clobbered by this "Queued" entry.
    status_dict[job_id] = "Queued"
    job_queue.put((job_id, prompt, width, height, steps, seed, batch, OUTPUT_DIR))
    return job_id
80
+
81
# =================== GRADIO INTERFACE ===================
with gr.Blocks() as demo:
    gr.Markdown("# ⚑ CPU God Mode Z-Image + Gradio Ultimate")

    with gr.Row():
        prompt_input = gr.Textbox(label="Prompt", placeholder="Type prompt here...")
        seed_input = gr.Number(label="Seed", value=42, precision=0)

    with gr.Row():
        width_input = gr.Dropdown(["256", "512", "768", "1024"], value="512", label="Width")
        height_input = gr.Dropdown(["256", "512", "768", "1024"], value="512", label="Height")
        batch_input = gr.Slider(1, 5, value=1, step=1, label="Batch Size")

    steps_input = gr.Slider(1, 25, value=9, step=1, label="Inference Steps")
    # Component.style() was removed in Gradio 4.x; layout is passed directly.
    output_gallery = gr.Gallery(label="Generated Images", columns=2, height="auto")
    status_box = gr.Textbox(label="Queue Status", interactive=False)

    generate_btn = gr.Button("Generate")

    def on_generate(prompt, width, height, steps, seed, batch):
        """Convert UI values to ints and enqueue one job.

        Returns an empty gallery plus a confirmation message; images are
        written to disk by the workers, not streamed back here.
        """
        width = int(width)
        height = int(height)
        steps = int(steps)  # the original converted every input except steps
        seed = int(seed)
        batch = int(batch)
        job_id = enqueue_job(prompt, width, height, steps, seed, batch)
        return [], f"Job {job_id} queued ({batch} images)"

    def poll_status():
        """Render status_dict as one `job_id: status` line per job."""
        if status_dict:
            return "\n".join(f"{k}: {v}" for k, v in status_dict.items())
        return "No jobs in queue."

    generate_btn.click(
        on_generate,
        inputs=[prompt_input, width_input, height_input, steps_input, seed_input, batch_input],
        outputs=[output_gallery, status_box],
    )

    # The original wired status_box.change(...) back onto status_box, which
    # never fires for worker-side status_dict updates. Poll on a timer
    # instead while a client is connected.
    demo.load(poll_status, inputs=None, outputs=status_box, every=2)

demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)))