更新 README.md 和 app.py,修改 HF_TOKEN 使用方式,增强模型加载的错误处理 (Update README.md and app.py: change how HF_TOKEN is used and strengthen error handling during model loading)
Browse files
README.md
CHANGED
|
@@ -12,7 +12,7 @@ Gradio Space using the official Z-Image pipeline (`Tongyi-MAI/Z-Image-Turbo`) wi
|
|
| 12 |
1) Create a Space (Python) and select a GPU hardware type.
|
| 13 |
2) Add/clone this repo into the Space.
|
| 14 |
3) Manually add the LoRA file from https://civitai.com/models/2206377/zit-mystic-xxx to `lora/zit-mystic-xxx.safetensors` (or set `LORA_PATH`). Network fetch of Civitai is not handled in the Space.
|
| 15 |
-
4)
|
| 16 |
5) (Optional) Toggle advanced envs below; then the Space will launch `app.py`. The header shows whether the LoRA was detected/loaded.
|
| 17 |
|
| 18 |
## Environment variables
|
|
|
|
| 12 |
1) Create a Space (Python) and select a GPU hardware type.
|
| 13 |
2) Add/clone this repo into the Space.
|
| 14 |
3) Manually add the LoRA file from https://civitai.com/models/2206377/zit-mystic-xxx to `lora/zit-mystic-xxx.safetensors` (or set `LORA_PATH`). Network fetch of Civitai is not handled in the Space.
|
| 15 |
+
4) If model download fails with a token error, set `HF_TOKEN` in the Space secrets (some repos require authentication).
|
| 16 |
5) (Optional) Toggle advanced envs below; then the Space will launch `app.py`. The header shows whether the LoRA was detected/loaded.
|
| 17 |
|
| 18 |
## Environment variables
|
app.py
CHANGED
|
@@ -135,7 +135,8 @@ def load_models() -> Tuple[ZImagePipeline, bool, str | None]:
|
|
| 135 |
if pipe is not None:
|
| 136 |
return pipe, lora_loaded, lora_error
|
| 137 |
|
| 138 |
-
use_auth_token = HF_TOKEN if HF_TOKEN else None
|
|
|
|
| 139 |
print(f"Loading Z-Image from {MODEL_PATH}...")
|
| 140 |
|
| 141 |
if not os.path.exists(MODEL_PATH):
|
|
@@ -143,15 +144,15 @@ def load_models() -> Tuple[ZImagePipeline, bool, str | None]:
|
|
| 143 |
MODEL_PATH,
|
| 144 |
subfolder="vae",
|
| 145 |
torch_dtype=torch.bfloat16,
|
| 146 |
-
|
| 147 |
)
|
| 148 |
text_encoder = AutoModelForCausalLM.from_pretrained(
|
| 149 |
MODEL_PATH,
|
| 150 |
subfolder="text_encoder",
|
| 151 |
torch_dtype=torch.bfloat16,
|
| 152 |
-
|
| 153 |
).eval()
|
| 154 |
-
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, subfolder="tokenizer", use_auth_token=use_auth_token)
|
| 155 |
else:
|
| 156 |
vae = AutoencoderKL.from_pretrained(os.path.join(MODEL_PATH, "vae"), torch_dtype=torch.bfloat16)
|
| 157 |
text_encoder = AutoModelForCausalLM.from_pretrained(
|
|
@@ -168,8 +169,8 @@ def load_models() -> Tuple[ZImagePipeline, bool, str | None]:
|
|
| 168 |
transformer = ZImageTransformer2DModel.from_pretrained(
|
| 169 |
MODEL_PATH,
|
| 170 |
subfolder="transformer",
|
| 171 |
-
use_auth_token=use_auth_token,
|
| 172 |
torch_dtype=torch.bfloat16,
|
|
|
|
| 173 |
)
|
| 174 |
else:
|
| 175 |
transformer = ZImageTransformer2DModel.from_pretrained(
|
|
@@ -303,17 +304,20 @@ def warmup_model(pipeline: ZImagePipeline, resolutions: List[str]) -> None:
|
|
| 303 |
|
| 304 |
|
| 305 |
def init_app() -> None:
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
|
|
|
|
|
|
|
|
|
| 317 |
|
| 318 |
|
| 319 |
@spaces.GPU
|
|
|
|
| 135 |
if pipe is not None:
|
| 136 |
return pipe, lora_loaded, lora_error
|
| 137 |
|
| 138 |
+
use_auth_token = HF_TOKEN if HF_TOKEN else None
|
| 139 |
+
hf_kwargs = {"use_auth_token": use_auth_token} if use_auth_token else {}
|
| 140 |
print(f"Loading Z-Image from {MODEL_PATH}...")
|
| 141 |
|
| 142 |
if not os.path.exists(MODEL_PATH):
|
|
|
|
| 144 |
MODEL_PATH,
|
| 145 |
subfolder="vae",
|
| 146 |
torch_dtype=torch.bfloat16,
|
| 147 |
+
**hf_kwargs,
|
| 148 |
)
|
| 149 |
text_encoder = AutoModelForCausalLM.from_pretrained(
|
| 150 |
MODEL_PATH,
|
| 151 |
subfolder="text_encoder",
|
| 152 |
torch_dtype=torch.bfloat16,
|
| 153 |
+
**hf_kwargs,
|
| 154 |
).eval()
|
| 155 |
+
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, subfolder="tokenizer", **hf_kwargs)
|
| 156 |
else:
|
| 157 |
vae = AutoencoderKL.from_pretrained(os.path.join(MODEL_PATH, "vae"), torch_dtype=torch.bfloat16)
|
| 158 |
text_encoder = AutoModelForCausalLM.from_pretrained(
|
|
|
|
| 169 |
transformer = ZImageTransformer2DModel.from_pretrained(
|
| 170 |
MODEL_PATH,
|
| 171 |
subfolder="transformer",
|
|
|
|
| 172 |
torch_dtype=torch.bfloat16,
|
| 173 |
+
**hf_kwargs,
|
| 174 |
)
|
| 175 |
else:
|
| 176 |
transformer = ZImageTransformer2DModel.from_pretrained(
|
|
|
|
| 304 |
|
| 305 |
|
| 306 |
def init_app() -> None:
|
| 307 |
+
try:
|
| 308 |
+
ensure_models_loaded()
|
| 309 |
+
if ENABLE_WARMUP and pipe is not None:
|
| 310 |
+
ensure_on_gpu()
|
| 311 |
+
try:
|
| 312 |
+
all_resolutions: List[str] = []
|
| 313 |
+
for cat in RES_CHOICES.values():
|
| 314 |
+
all_resolutions.extend(cat)
|
| 315 |
+
warmup_model(pipe, all_resolutions)
|
| 316 |
+
finally:
|
| 317 |
+
if OFFLOAD_TO_CPU_AFTER_RUN:
|
| 318 |
+
offload_to_cpu()
|
| 319 |
+
except Exception as exc: # noqa: BLE001
|
| 320 |
+
print(f"Model init failed: {exc}")
|
| 321 |
|
| 322 |
|
| 323 |
@spaces.GPU
|