programmersd committed on
Commit
a04da6e
·
verified ·
1 Parent(s): 1e958eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -4
app.py CHANGED
@@ -7,6 +7,7 @@ import torch
7
  import gradio as gr
8
  from threading import Lock
9
  from contextlib import contextmanager
 
10
 
11
  # --- LOGGING FOR UI ---
12
  LOG_BUFFER = []
@@ -20,10 +21,10 @@ def log(message):
20
  LOG_BUFFER.pop(0)
21
  return "\n".join(LOG_BUFFER)
22
 
23
- # 🚀 Initialization log
24
  _initial_logs = log("🚀 Initializing Ultimate Z-Image Turbo CPU Edition...")
25
 
26
- # CPU THREAD OPTIMIZATION
27
  CPU_THREADS = min(8, os.cpu_count() or 1)
28
  os.environ["OMP_NUM_THREADS"] = str(CPU_THREADS)
29
  os.environ["MKL_NUM_THREADS"] = str(CPU_THREADS)
@@ -35,6 +36,9 @@ os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
35
  os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"
36
  os.environ["TRANSFORMERS_CACHE"] = "./hf_cache"
37
  os.environ["HF_DATASETS_CACHE"] = "./hf_cache"
 
 
 
38
 
39
  torch.set_num_threads(CPU_THREADS)
40
  torch.set_grad_enabled(False)
@@ -61,6 +65,23 @@ pipe = None
61
  _pipe_lock = Lock()
62
  _generation_lock = Lock()
63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  @contextmanager
65
  def managed_memory():
66
  try:
@@ -81,9 +102,9 @@ def load_pipeline():
81
  start_load = time.time()
82
 
83
  pipe = ZImagePipeline.from_pretrained(
84
- "Tongyi-MAI/Z-Image-Turbo",
85
  torch_dtype=DTYPE,
86
- cache_dir=CACHE_DIR,
87
  low_cpu_mem_usage=True
88
  )
89
 
@@ -163,6 +184,7 @@ def generate(prompt, quality_mode, seed, progress=gr.Progress()):
163
 
164
  return image, seed
165
 
 
166
  with gr.Blocks(title="🚀 Z-Image Turbo Pro Max + Live Logs") as demo:
167
  gr.Markdown("## GPU‑FREE CPU Turbo — Live Logs Below")
168
 
 
7
  import gradio as gr
8
  from threading import Lock
9
  from contextlib import contextmanager
10
+ from huggingface_hub import snapshot_download
11
 
12
  # --- LOGGING FOR UI ---
13
  LOG_BUFFER = []
 
21
  LOG_BUFFER.pop(0)
22
  return "\n".join(LOG_BUFFER)
23
 
24
+ # 🚀 Initialization
25
  _initial_logs = log("🚀 Initializing Ultimate Z-Image Turbo CPU Edition...")
26
 
27
+ # --- ENVIRONMENT SETUP ---
28
  CPU_THREADS = min(8, os.cpu_count() or 1)
29
  os.environ["OMP_NUM_THREADS"] = str(CPU_THREADS)
30
  os.environ["MKL_NUM_THREADS"] = str(CPU_THREADS)
 
36
  os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"
37
  os.environ["TRANSFORMERS_CACHE"] = "./hf_cache"
38
  os.environ["HF_DATASETS_CACHE"] = "./hf_cache"
39
+ os.environ["HF_HUB_OFFLINE"] = "1"
40
+ os.environ["TRANSFORMERS_OFFLINE"] = "1"
41
+ os.environ["HF_DATASETS_OFFLINE"] = "1"
42
 
43
  torch.set_num_threads(CPU_THREADS)
44
  torch.set_grad_enabled(False)
 
65
  _pipe_lock = Lock()
66
  _generation_lock = Lock()
67
 
68
+ # --- Pre-download full snapshot once ---
69
+ MODEL_ID = "Tongyi-MAI/Z-Image-Turbo"
70
+ MODEL_LOCAL = os.path.join(CACHE_DIR, "Z-Image-Turbo-snapshot")
71
+ os.makedirs(MODEL_LOCAL, exist_ok=True)
72
+
73
+ if not os.listdir(MODEL_LOCAL):
74
+ log("📥 Downloading full model snapshot, please wait...")
75
+ snapshot_download(
76
+ repo_id=MODEL_ID,
77
+ cache_dir=MODEL_LOCAL,
78
+ local_dir=MODEL_LOCAL,
79
+ local_dir_use_symlinks=False
80
+ )
81
+ log(f"📦 Model snapshot cached at: {MODEL_LOCAL}")
82
+ else:
83
+ log(f"📦 Model snapshot already exists at: {MODEL_LOCAL}")
84
+
85
  @contextmanager
86
  def managed_memory():
87
  try:
 
102
  start_load = time.time()
103
 
104
  pipe = ZImagePipeline.from_pretrained(
105
+ MODEL_LOCAL,
106
  torch_dtype=DTYPE,
107
+ local_files_only=True,
108
  low_cpu_mem_usage=True
109
  )
110
 
 
184
 
185
  return image, seed
186
 
187
+ # --- GRADIO UI ---
188
  with gr.Blocks(title="🚀 Z-Image Turbo Pro Max + Live Logs") as demo:
189
  gr.Markdown("## GPU‑FREE CPU Turbo — Live Logs Below")
190