cpuai commited on
Commit
d0aeb54
·
verified ·
1 Parent(s): 4a8de12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +480 -431
app.py CHANGED
@@ -1,525 +1,574 @@
1
  # app.py
2
- # HuggingFace Spaces (Gradio + ZeroGPU) 单文件示例:
3
- # - 自动下载 LongCat-Video GitHub 代码(zip)
4
- # - 自动下载 LongCat-Video / LongCat-Video-Avatar 权重(HF Hub)
5
- # - 通过 spaces.GPU ZeroGPU 环境下按需申请 GPU 执行推理
6
- # - 支持单人:AT2V / AI2V
7
- #
8
- # 说明:
9
- # 1) 官方示例使用 torchrun nproc=2(多进程/可能更快):
10
- # 这里默认改为 nproc=1 + context_parallel_size=1,更适合 Spaces。
11
- # 2) FlashAttention 默认在 config 开启,但在 Spaces 上未必能顺利安装;
12
- # 本示例会尝试把 config 里所有包含 "flash" 的 attention backend 字段递归替换为 "sdpa"。
13
- #
14
- # 参考:
15
- # - ZeroGPU 官方用法:@spaces.GPU(duration=...) :contentReference[oaicite:5]{index=5}(用户侧不需要引用,代码内不写引用)
16
- # - LongCat-Video-Avatar 模型卡:推理命令/参数/权重目录结构 :contentReference[oaicite:6]{index=6}
17
 
18
  import os
19
- import re
20
  import sys
21
  import json
22
  import time
23
  import shutil
24
- import zipfile
25
- import hashlib
26
  import subprocess
27
  from pathlib import Path
28
- from datetime import datetime
29
- from typing import Any, Dict, Tuple, Optional
30
-
31
- # ----------------------------
32
- # 运行时“尽量单文件”的依赖安装
33
- # ----------------------------
34
- def _pip_install(pkgs):
35
- """在 Spaces 里尽量避免反复安装:用一个标记文件 + 简单 import 探测。"""
36
- cmd = [sys.executable, "-m", "pip", "install", "-U"] + pkgs
37
- print("[pip]", " ".join(cmd))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  subprocess.check_call(cmd)
39
 
40
- def _ensure_imports():
41
  """
42
- 只安装本 app 直接需要的包。
43
- LongCat-Video 自身依赖很多(官方 requirements),这里不强制全量预装,
44
- 而是交给官方脚本在运行时 import;若缺包会在日志里体现,再按需加到下面列表。
45
  """
46
- try:
47
- import gradio as gr # noqa
48
- except Exception:
49
- _pip_install(["gradio>=4.0.0"])
50
-
51
- try:
52
- import requests # noqa
53
- except Exception:
54
- _pip_install(["requests>=2.31.0"])
55
-
56
- try:
57
- from huggingface_hub import snapshot_download # noqa
58
- except Exception:
59
- _pip_install(["huggingface_hub[cli]>=0.24.0"])
60
 
61
- # ZeroGPU 推荐的 spaces 包:多数 ZeroGPU 环境自带;没有就装
 
 
 
 
 
 
 
62
  try:
63
- import spaces # noqa
64
- except Exception:
65
- _pip_install(["spaces>=0.27.0"])
 
66
 
67
- _ensure_imports()
 
68
 
69
- import gradio as gr
70
- import requests
71
- from huggingface_hub import snapshot_download
 
 
 
 
 
 
 
 
 
 
 
72
 
73
- # spaces 在非 ZeroGPU 环境也应可安全使用;若导入失败已在上面安装
74
- import spaces
 
 
 
75
 
 
76
 
77
- # ----------------------------
78
- # 配置区(可按需改)
79
- # ----------------------------
80
- GITHUB_ZIP_URL = "https://github.com/meituan-longcat/LongCat-Video/archive/refs/heads/main.zip"
81
 
82
- # HF 权重(模型卡说明的目录) :contentReference[oaicite:7]{index=7}
83
- HF_MODEL_LONGCAT_VIDEO = "meituan-longcat/LongCat-Video"
84
- HF_MODEL_LONGCAT_AVATAR = "meituan-longcat/LongCat-Video-Avatar"
 
85
 
86
- # 本地缓存目录:Spaces 上建议放到 /home/user 或当前目录
87
- BASE_DIR = Path(__file__).parent.resolve()
88
- CACHE_DIR = BASE_DIR / "_cache"
89
- REPO_DIR = CACHE_DIR / "LongCat-Video-main" # zip 解压后的目录名
90
- WEIGHTS_DIR = CACHE_DIR / "weights"
91
- WEIGHTS_LONGCAT_VIDEO = WEIGHTS_DIR / "LongCat-Video"
92
- WEIGHTS_LONGCAT_AVATAR = WEIGHTS_DIR / "LongCat-Video-Avatar"
93
- OUTPUT_DIR = CACHE_DIR / "outputs"
94
- TMP_DIR = CACHE_DIR / "tmp"
95
 
96
- # 为了减少 torch CUDA 内存碎片(有时有用)
97
- os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")
 
 
 
98
 
 
 
 
 
 
 
99
 
100
- # ----------------------------
101
- # 工具函数
102
- # ----------------------------
103
- def _sha1(s: str) -> str:
104
- return hashlib.sha1(s.encode("utf-8")).hexdigest()[:10]
105
 
106
- def _run(cmd, cwd: Optional[Path] = None, env: Optional[Dict[str, str]] = None) -> Tuple[int, str]:
107
- """运行命令并返回 (code, stdout+stderr)。"""
108
- print("[run]", " ".join(cmd))
109
- p = subprocess.Popen(
110
- cmd,
111
- cwd=str(cwd) if cwd else None,
112
- env=env,
113
- stdout=subprocess.PIPE,
114
- stderr=subprocess.STDOUT,
115
- text=True,
116
- bufsize=1,
117
- universal_newlines=True,
118
  )
119
- out_lines = []
120
- while True:
121
- line = p.stdout.readline()
122
- if not line and p.poll() is not None:
123
- break
124
- if line:
125
- out_lines.append(line)
126
- code = p.wait()
127
- return code, "".join(out_lines)
128
-
129
- def _download_and_extract_repo():
130
- """下载并解压 GitHub zip 到 CACHE_DIR。"""
131
- CACHE_DIR.mkdir(parents=True, exist_ok=True)
132
- zip_path = CACHE_DIR / "LongCat-Video-main.zip"
133
-
134
- if REPO_DIR.exists() and (REPO_DIR / "run_demo_avatar_single_audio_to_video.py").exists():
135
- return
136
 
137
- # 清理旧目录
138
- if REPO_DIR.exists():
139
- shutil.rmtree(REPO_DIR, ignore_errors=True)
140
-
141
- # 下载 zip
142
- if not zip_path.exists():
143
- r = requests.get(GITHUB_ZIP_URL, stream=True, timeout=120)
144
- r.raise_for_status()
145
- with open(zip_path, "wb") as f:
146
- for chunk in r.iter_content(chunk_size=1024 * 1024):
147
- if chunk:
148
- f.write(chunk)
149
-
150
- # 解压
151
- with zipfile.ZipFile(zip_path, "r") as zf:
152
- zf.extractall(CACHE_DIR)
153
-
154
- # 基本校验
155
- if not (REPO_DIR / "run_demo_avatar_single_audio_to_video.py").exists():
156
- raise RuntimeError("仓库解压后未找到 run_demo_avatar_single_audio_to_video.py,可能 GitHub 结构变化。")
157
-
158
- def _download_weights():
159
- """下载 HF 权重到 WEIGHTS_DIR。"""
160
  WEIGHTS_DIR.mkdir(parents=True, exist_ok=True)
161
 
162
- # 使用 token(若你在 Space Secrets 里配置了 HF_TOKEN)
163
- token = os.environ.get("HF_TOKEN", None)
 
164
 
165
- if not WEIGHTS_LONGCAT_VIDEO.exists():
166
- snapshot_download(
167
- repo_id=HF_MODEL_LONGCAT_VIDEO,
168
- local_dir=str(WEIGHTS_LONGCAT_VIDEO),
169
- token=token,
170
- local_dir_use_symlinks=False,
171
- )
172
 
173
- if not WEIGHTS_LONGCAT_AVATAR.exists():
174
- snapshot_download(
175
- repo_id=HF_MODEL_LONGCAT_AVATAR,
176
- local_dir=str(WEIGHTS_LONGCAT_AVATAR),
177
- token=token,
178
- local_dir_use_symlinks=False,
 
 
 
 
 
 
 
 
 
 
 
179
  )
 
180
 
181
- def _recursive_patch_attention_backend(obj: Any) -> Any:
182
  """
183
- 递归把 config 里疑似 flash-attn backend 的字段替换为 sdpa。
184
- 不依赖具体 key 名,尽量“宽松匹配”:
185
- - key 或 value 里出现 flash / flashattn / flash_attn => 改成 "sdpa"
186
  """
 
187
  if isinstance(obj, dict):
188
- new = {}
189
  for k, v in obj.items():
190
- lk = str(k).lower()
191
- if any(x in lk for x in ["attn", "attention", "backend"]):
192
- # 先递归处理 value
193
- vv = _recursive_patch_attention_backend(v)
194
- # 再判断是否需要替换
195
- if isinstance(vv, str) and ("flash" in vv.lower() or "flash_attn" in vv.lower() or "flashattn" in vv.lower()):
196
- new[k] = "sdpa"
197
- else:
198
- new[k] = vv
199
- else:
200
- new[k] = _recursive_patch_attention_backend(v)
201
- return new
202
  elif isinstance(obj, list):
203
- return [_recursive_patch_attention_backend(x) for x in obj]
204
- else:
205
- # 普通标量
206
- if isinstance(obj, str):
207
- lo = obj.lower()
208
- if "flash_attn" in lo or "flashattn" in lo or lo.strip() == "flash" or "flash" == lo.strip():
209
- return "sdpa"
210
- return obj
211
-
212
- def _try_patch_avatar_configs():
213
- """
214
- 官方说明:avatar_single/config.json 和 avatar_multi/config.json 默认启用 FlashAttention-2 :contentReference[oaicite:8]{index=8}
215
- 这里尽量替换为 sdpa,避免必须安装 flash-attn。
216
- """
217
- cfgs = [
218
- WEIGHTS_LONGCAT_AVATAR / "avatar_single" / "config.json",
219
- WEIGHTS_LONGCAT_AVATAR / "avatar_multi" / "config.json",
220
- ]
221
- for cfg in cfgs:
222
- if not cfg.exists():
223
- continue
224
- try:
225
- raw = json.loads(cfg.read_text(encoding="utf-8"))
226
- patched = _recursive_patch_attention_backend(raw)
227
- if patched != raw:
228
- cfg.write_text(json.dumps(patched, ensure_ascii=False, indent=2), encoding="utf-8")
229
- except Exception as e:
230
- print(f"[warn] patch config failed: {cfg} -> {e}")
231
-
232
- def _load_template_json(template_path: Path) -> Dict[str, Any]:
233
- data = json.loads(template_path.read_text(encoding="utf-8"))
234
- if not isinstance(data, dict):
235
- raise ValueError("模板 JSON 不是 dict 结构,无法安全修改。")
236
- return data
237
 
238
- def _recursive_replace_first_match(data: Any, key_pred, value_pred, new_value) -> Tuple[Any, bool]:
239
  """
240
- 在任意 JSON 结构中,找到第一个满足条件的 (key, value) 并替换 value。
241
- 返回 (new_data, replaced?)
242
  """
243
- if isinstance(data, dict):
244
- out = {}
245
- replaced = False
246
- for k, v in data.items():
247
- if (not replaced) and key_pred(k) and value_pred(v):
248
- out[k] = new_value
249
- replaced = True
250
- else:
251
- nv, r = _recursive_replace_first_match(v, key_pred, value_pred, new_value)
252
- out[k] = nv
253
- replaced = replaced or r
254
- return out, replaced
255
- elif isinstance(data, list):
256
- out_list = []
257
- replaced = False
258
- for item in data:
259
- if replaced:
260
- out_list.append(item)
261
- continue
262
- nv, r = _recursive_replace_first_match(item, key_pred, value_pred, new_value)
263
- out_list.append(nv)
264
- replaced = replaced or r
265
- return out_list, replaced
 
 
 
 
266
  else:
267
- return data, False
268
 
269
- def _build_input_json_single(
270
- mode: str,
271
- audio_path: Path,
272
  prompt: str,
273
- ref_image_path: Optional[Path],
274
- seed: int,
275
- resolution: str
276
- ) -> Path:
277
  """
278
- 基于 assets/avatar/single_example_1.json 模板生成 input_json。
279
- 官方脚本以 --input_json 读取参数 :contentReference[oaicite:9]{index=9}
 
 
280
  """
281
- template = REPO_DIR / "assets" / "avatar" / "single_example_1.json"
282
- if not template.exists():
283
- raise RuntimeError("未找到模板 assets/avatar/single_example_1.json(仓库结构可能变化)。")
284
-
285
- data = _load_template_json(template)
286
-
287
- # 替换 prompt:优先找 key 包含 prompt/text 之类
288
- data, _ = _recursive_replace_first_match(
289
- data,
290
- key_pred=lambda k: "prompt" in str(k).lower() or "text" in str(k).lower(),
291
- value_pred=lambda v: isinstance(v, str),
292
- new_value=prompt.strip() if prompt else "A person is talking."
293
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
294
 
295
- # 替换 audio path:找 key 包含 audio 且 value 是字符串
296
- data, _ = _recursive_replace_first_match(
297
- data,
298
- key_pred=lambda k: "audio" in str(k).lower(),
299
- value_pred=lambda v: isinstance(v, str),
300
- new_value=str(audio_path)
 
 
 
 
 
 
 
301
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302
 
303
- # 替换 image path(仅 AI2V)
304
- if mode == "ai2v" and ref_image_path is not None:
305
- data, _ = _recursive_replace_first_match(
306
- data,
307
- key_pred=lambda k: ("image" in str(k).lower()) or ("ref" in str(k).lower()),
308
- value_pred=lambda v: isinstance(v, str),
309
- new_value=str(ref_image_path)
310
- )
311
 
312
- # seed(若模板里有)
313
- data, _ = _recursive_replace_first_match(
314
- data,
315
- key_pred=lambda k: "seed" in str(k).lower(),
316
- value_pred=lambda v: isinstance(v, (int, float, str)),
317
- new_value=int(seed)
318
- )
 
 
 
 
 
 
319
 
320
- # resolution(若模板里有)
321
- data, _ = _recursive_replace_first_match(
322
- data,
323
- key_pred=lambda k: "resolution" in str(k).lower(),
324
- value_pred=lambda v: isinstance(v, str),
325
- new_value=str(resolution)
326
- )
327
 
328
- TMP_DIR.mkdir(parents=True, exist_ok=True)
329
- out_path = TMP_DIR / f"single_{mode}_{_sha1(str(audio_path) + prompt + str(seed) + str(time.time()))}.json"
330
- out_path.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
331
- return out_path
332
 
333
- def _find_latest_mp4(since_ts: float) -> Optional[Path]:
334
- if not OUTPUT_DIR.exists():
335
- return None
336
- candidates = []
337
- for p in OUTPUT_DIR.rglob("*.mp4"):
338
- try:
339
- if p.stat().st_mtime >= since_ts - 2:
340
- candidates.append(p)
341
- except Exception:
342
- pass
343
- if not candidates:
344
- return None
345
- candidates.sort(key=lambda x: x.stat().st_mtime, reverse=True)
346
- return candidates[0]
347
 
348
- def _ensure_ready() -> str:
 
 
 
 
349
  """
350
- 准备:
351
- - 下载 repo
352
- - 下载权重
353
- - 尝试 patch attention backend
354
  """
355
- _download_and_extract_repo()
356
- _download_weights()
357
- _try_patch_avatar_configs()
358
-
359
- # 输出目录
360
- OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
361
 
362
- return "准备完成:代码与权重已就绪。"
 
 
363
 
364
 
365
- # ----------------------------
366
- # GPU 推理函数(ZeroGPU 核心)
367
- # ----------------------------
368
- @spaces.GPU(duration=900) # 生成视频通常 >60s,给足时间;你可视情况调小/调大 :contentReference[oaicite:10]{index=10}
369
  def generate_single(
370
- mode: str,
371
- audio_file: str,
372
  prompt: str,
373
- ref_image_file: Optional[str],
374
- seed: int,
375
  resolution: str,
376
  num_segments: int,
377
  ref_img_index: int,
378
  mask_frame_range: int,
 
 
379
  ) -> Tuple[Optional[str], str]:
380
  """
381
- 返回:(mp4路径 or None, 日志文本)
 
 
382
  """
383
- t0 = time.time()
384
-
385
- # 文件落盘路径(Gradio 传入的是本地临时文件路径字符串)
386
- audio_path = Path(audio_file).resolve()
387
- ref_image_path = Path(ref_image_file).resolve() if ref_image_file else None
388
-
389
- # 构造 input_json
390
- input_json = _build_input_json_single(
391
- mode=mode,
392
- audio_path=audio_path,
393
  prompt=prompt,
394
- ref_image_path=ref_image_path,
395
- seed=seed,
396
- resolution=resolution,
397
  )
398
 
399
- # 运行官方脚本(单进程 torchrun)
400
- # 官方示例:torchrun --nproc_per_node=2 ... --context_parallel_size=2 ... :contentReference[oaicite:11]{index=11}
401
- # 这里适配 Space:nproc=1, context_parallel_size=1
402
  cmd = [
403
- sys.executable, "-m", "torch.distributed.run",
404
- "--nproc_per_node=1",
405
  "run_demo_avatar_single_audio_to_video.py",
406
- "--context_parallel_size=1",
407
- f"--checkpoint_dir={WEIGHTS_LONGCAT_AVATAR}",
408
- f"--stage_1={mode}",
409
  f"--input_json={input_json}",
410
  f"--resolution={resolution}",
 
 
 
411
  ]
412
 
413
- # 续写参数(用户设置 >1 才启用)
414
- if num_segments and int(num_segments) > 1:
415
- cmd += [
416
- f"--num_segments={int(num_segments)}",
417
- f"--ref_img_index={int(ref_img_index)}",
418
- f"--mask_frame_range={int(mask_frame_range)}",
419
- ]
420
 
421
- # 环境变量:让脚本能找到模块
422
- env = dict(os.environ)
423
- env["PYTHONPATH"] = str(REPO_DIR) + (os.pathsep + env["PYTHONPATH"] if env.get("PYTHONPATH") else "")
424
- env["HF_HOME"] = str(CACHE_DIR / "hf_home")
425
- env["TORCH_HOME"] = str(CACHE_DIR / "torch_home")
 
 
 
426
 
427
- # 约定输出目录(若脚本支持/或脚本默认输出在当前目录下的 outputs)
428
- # 我们用 cwd + 输出扫描兜底
429
- env["OUTPUT_DIR"] = str(OUTPUT_DIR)
430
 
431
- code, log = _run(cmd, cwd=REPO_DIR, env=env)
432
 
433
- # 尝试找到最新 mp4
434
- mp4 = _find_latest_mp4(t0)
435
- if mp4 is None:
436
- # 兜底:在 repo 内也扫一下
437
- repo_candidates = list(REPO_DIR.rglob("*.mp4"))
438
- repo_candidates.sort(key=lambda x: x.stat().st_mtime, reverse=True)
439
- if repo_candidates and repo_candidates[0].stat().st_mtime >= t0 - 2:
440
- mp4 = repo_candidates[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
441
 
442
- if code != 0:
443
- return None, f"执行失败(exit={code})。日志如下:\n\n{log}"
 
 
 
 
 
 
 
 
 
 
 
444
 
445
- if mp4 is None or not mp4.exists():
446
- return None, f"执行完成,但未找到 mp4 输出文件。日志如下:\n\n{log}"
 
447
 
448
- return str(mp4), f"执行成功:{mp4}\n\n日志如下:\n\n{log}"
 
 
 
 
 
 
449
 
 
 
450
 
451
- # ----------------------------
452
- # Gradio UI
453
- # ----------------------------
454
- def ui_prepare() -> str:
455
- try:
456
- return _ensure_ready()
457
- except Exception as e:
458
- return f"准备失败:{e}"
459
 
460
- with gr.Blocks(title="LongCat-Video-Avatar (ZeroGPU) - Single File Space") as demo:
 
461
  gr.Markdown(
462
- """
463
- # LongCat-Video-Avatar(ZeroGPU / 单文件 Space)
464
-
465
- - 单人模式:**AT2V(音频+文本)** / **AI2V(音频+图片)**
466
- - 续写(Video Continuation):把 **num_segments** 设为 > 1 即可(官方参数:ref_img_index / mask_frame_range)
467
- - 提示:为了更自然的口型,prompt 里建议包含 talking/speaking 等动作词(模型卡建议)
468
- """
469
  )
470
 
471
- with gr.Row():
472
- btn_prepare = gr.Button("一键准备(下载代码+权重)", variant="primary")
473
- prep_status = gr.Textbox(label="准备状态", value="尚未准备。首次准备会下载较大权重。", lines=2)
474
-
475
- btn_prepare.click(fn=ui_prepare, outputs=prep_status)
476
-
477
- with gr.Row():
478
- mode = gr.Radio(
479
- choices=[("Audio-Text-to-Video (AT2V)", "at2v"), ("Audio-Image-to-Video (AI2V)", "ai2v")],
480
- value="at2v",
481
- label="模式"
482
- )
483
-
484
- with gr.Row():
485
- audio_in = gr.Audio(label="输入音频(wav/mp3等)", type="filepath")
486
- ref_img = gr.Image(label="参考图(仅 AI2V 需要)", type="filepath")
487
-
488
- prompt = gr.Textbox(
489
- label="Prompt(建议包含 talking/speaking 等动作词)",
490
- value="A young person is talking naturally, realistic style.",
491
- lines=2
492
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
493
 
494
- with gr.Row():
495
- seed = gr.Number(label="Seed", value=0, precision=0)
496
- resolution = gr.Dropdown(label="分辨率", choices=["480P", "720P"], value="480P")
497
-
498
- with gr.Accordion("高级参数(续写/一致性/防重复)", open=False):
499
- num_segments = gr.Slider(label="num_segments(>1 启用续写)", minimum=1, maximum=8, step=1, value=1)
500
- ref_img_index = gr.Slider(label="ref_img_index(默认 10)", minimum=-30, maximum=60, step=1, value=10)
501
- mask_frame_range = gr.Slider(label="mask_frame_range(默认 3)", minimum=1, maximum=12, step=1, value=3)
502
-
503
- btn = gr.Button("生成视频", variant="primary")
504
-
505
- out_video = gr.Video(label="输出视频(mp4)")
506
- out_log = gr.Textbox(label="运行日志", lines=18)
507
-
508
- def _validate(mode_v, audio_fp, img_fp):
509
- if not audio_fp:
510
- raise gr.Error("请先上传音频。")
511
- if mode_v == "ai2v" and not img_fp:
512
- raise gr.Error("AI2V 模式必须上传参考图。")
513
-
514
- def run(mode_v, audio_fp, prompt_v, img_fp, seed_v, res_v, seg_v, idx_v, mask_v):
515
- _validate(mode_v, audio_fp, img_fp)
516
- # seed=0 时也允许;如果想随机可自己改成 random
517
- return generate_single(mode_v, audio_fp, prompt_v, img_fp, int(seed_v), res_v, int(seg_v), int(idx_v), int(mask_v))
518
-
519
- btn.click(
520
- fn=run,
521
- inputs=[mode, audio_in, prompt, ref_img, seed, resolution, num_segments, ref_img_index, mask_frame_range],
522
- outputs=[out_video, out_log],
523
  )
524
 
525
- demo.queue(max_size=12).launch()
 
1
  # app.py
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ LongCat-Video-Avatar | Hugging Face Spaces (ZeroGPU) | 单文件 Gradio 应用
5
+ - 自动:clone 推理代码仓库 + 下载权重到 ./weights
6
+ - ZeroGPU:用 @spaces.GPU 在 fork 子进程里执行 CUDA 推理
7
+ - 输入:单人/双人(多音频)模板 JSON 自动填充
8
+ """
 
 
 
 
 
 
 
 
9
 
10
  import os
 
11
  import sys
12
  import json
13
  import time
14
  import shutil
15
+ import copy
16
+ import re
17
  import subprocess
18
  from pathlib import Path
19
+ from typing import Any, Dict, List, Tuple, Optional
20
+
21
+ # -------------------- 基础路径 --------------------
22
+ ROOT = Path(__file__).resolve().parent
23
+ REPO_DIR = ROOT / "LongCat-Video"
24
+ WEIGHTS_DIR = ROOT / "weights"
25
+ WEIGHTS_LONGCAT_VIDEO = WEIGHTS_DIR / "LongCat-Video"
26
+ WEIGHTS_AVATAR = WEIGHTS_DIR / "LongCat-Video-Avatar"
27
+
28
+ # Hugging Face 仓库(权重)
29
+ HF_REPO_LONGCAT_VIDEO = "meituan-longcat/LongCat-Video"
30
+ HF_REPO_AVATAR = "meituan-longcat/LongCat-Video-Avatar"
31
+
32
+ # GitHub 代码仓库(推理脚本/实现)
33
+ GIT_REPO_URL = "https://github.com/meituan-longcat/LongCat-Video.git"
34
+ GIT_BRANCH = "main"
35
+
36
+ # 自举标记:避免每次启动都 pip install
37
+ BOOTSTRAP_MARK = ROOT / ".bootstrap_done"
38
+
39
+ # -------------------- 依赖自举(单文件策略) --------------------
40
+ def _pip_install(args: List[str]) -> None:
41
+ cmd = [sys.executable, "-m", "pip", "install", "--no-cache-dir"] + args
42
+ print("[pip]", " ".join(cmd), flush=True)
43
  subprocess.check_call(cmd)
44
 
45
+ def _ensure_bootstrap() -> None:
46
  """
47
+ 为了“单文件”,这里做最小自举:
48
+ 1) 确保 gradio/spaces/huggingface_hub 等可用
49
+ 2) clone repo 后,安装官方 requirements(若首次启动)
50
  """
51
+ if BOOTSTRAP_MARK.exists():
52
+ return
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
+ # 先装运行必须的基础包
55
+ base_pkgs = [
56
+ "gradio>=5.0.0",
57
+ "huggingface_hub[cli]>=0.24.0",
58
+ "gitpython>=3.1.0",
59
+ "spaces>=0.33.0",
60
+ "imageio-ffmpeg>=0.5.0", # 提供 ffmpeg 可执行文件,避免系统缺 ffmpeg
61
+ ]
62
  try:
63
+ _pip_install(base_pkgs)
64
+ except Exception as e:
65
+ # 如果某些包已存在或网络抖动,仍继续尝试后续步骤
66
+ print("[bootstrap] base pip install warning:", repr(e), flush=True)
67
 
68
+ # clone 代码仓库(若未 clone)
69
+ _ensure_repo_cloned()
70
 
71
+ # 安装官方 requirements(可能很大;只在首次启动做)
72
+ # 注意:官方 README/Model Card 提到 requirements.txt + requirements_avatar.txt。:contentReference[oaicite:5]{index=5}
73
+ req_main = REPO_DIR / "requirements.txt"
74
+ req_avatar = REPO_DIR / "requirements_avatar.txt"
75
+ if req_main.exists():
76
+ try:
77
+ _pip_install(["-r", str(req_main)])
78
+ except Exception as e:
79
+ print("[bootstrap] install requirements.txt warning:", repr(e), flush=True)
80
+ if req_avatar.exists():
81
+ try:
82
+ _pip_install(["-r", str(req_avatar)])
83
+ except Exception as e:
84
+ print("[bootstrap] install requirements_avatar.txt warning:", repr(e), flush=True)
85
 
86
+ # librosa/ffmpeg 在官方说明里是 conda 安装。Space 没有 conda,这里用 pip + imageio-ffmpeg 兜底。:contentReference[oaicite:6]{index=6}
87
+ try:
88
+ _pip_install(["librosa>=0.10.0", "soundfile>=0.12.0"])
89
+ except Exception as e:
90
+ print("[bootstrap] install librosa/soundfile warning:", repr(e), flush=True)
91
 
92
+ BOOTSTRAP_MARK.write_text(f"ok {time.time()}\n", encoding="utf-8")
93
 
 
 
 
 
94
 
95
+ # -------------------- Repo/权重准备 --------------------
96
+ def _ensure_repo_cloned() -> None:
97
+ if REPO_DIR.exists() and (REPO_DIR / ".git").exists():
98
+ return
99
 
100
+ REPO_DIR.mkdir(parents=True, exist_ok=True)
101
+ # 如果目录非空,先清理,避免 git clone 失败
102
+ if any(REPO_DIR.iterdir()):
103
+ shutil.rmtree(REPO_DIR)
104
+ REPO_DIR.mkdir(parents=True, exist_ok=True)
 
 
 
 
105
 
106
+ print("[git] cloning repo...", flush=True)
107
+ subprocess.check_call([
108
+ "git", "clone", "--single-branch", "--branch", GIT_BRANCH, GIT_REPO_URL, str(REPO_DIR)
109
+ ])
110
+ print("[git] cloned:", REPO_DIR, flush=True)
111
 
112
+ def _hf_snapshot_download(repo_id: str, local_dir: Path) -> None:
113
+ """
114
+ 使用 huggingface_hub 下载权重到本地目录(会自动缓存并增量更新)。
115
+ 官方模型卡建议用 huggingface-cli download 到 ./weights/... :contentReference[oaicite:7]{index=7}
116
+ """
117
+ from huggingface_hub import snapshot_download
118
 
119
+ local_dir.mkdir(parents=True, exist_ok=True)
120
+ print(f"[hf] downloading {repo_id} -> {local_dir}", flush=True)
 
 
 
121
 
122
+ # local_dir_use_symlinks=False:在 Spaces 环境里更稳
123
+ snapshot_download(
124
+ repo_id=repo_id,
125
+ local_dir=str(local_dir),
126
+ local_dir_use_symlinks=False,
127
+ resume_download=True,
 
 
 
 
 
 
128
  )
129
+ print(f"[hf] done: {repo_id}", flush=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
+ def _ensure_weights_downloaded() -> None:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
  WEIGHTS_DIR.mkdir(parents=True, exist_ok=True)
133
 
134
+ # LongCat-Video
135
+ if not (WEIGHTS_LONGCAT_VIDEO.exists() and any(WEIGHTS_LONGCAT_VIDEO.iterdir())):
136
+ _hf_snapshot_download(HF_REPO_LONGCAT_VIDEO, WEIGHTS_LONGCAT_VIDEO)
137
 
138
+ # LongCat-Video-Avatar
139
+ if not (WEIGHTS_AVATAR.exists() and any(WEIGHTS_AVATAR.iterdir())):
140
+ _hf_snapshot_download(HF_REPO_AVATAR, WEIGHTS_AVATAR)
 
 
 
 
141
 
142
+
143
+ # -------------------- JSON 模板读取与“灌参” --------------------
144
+ def _load_json(p: Path) -> Any:
145
+ return json.loads(p.read_text(encoding="utf-8"))
146
+
147
+ def _find_avatar_templates() -> Tuple[Path, Path]:
148
+ """
149
+ 读取官方 repo 里自带的模板 JSON:
150
+ - assets/avatar/single_example_1.json
151
+ - assets/avatar/multi_example_1.json
152
+ """
153
+ single = REPO_DIR / "assets" / "avatar" / "single_example_1.json"
154
+ multi = REPO_DIR / "assets" / "avatar" / "multi_example_1.json"
155
+ if not single.exists() or not multi.exists():
156
+ raise FileNotFoundError(
157
+ "未找到 assets/avatar/single_example_1.json 或 multi_example_1.json。"
158
+ "请确认仓库结构与官方一致。"
159
  )
160
+ return single, multi
161
 
162
+ def _collect_string_nodes(obj: Any, path: str = "") -> List[Tuple[str, str]]:
163
  """
164
+ 收集所有字符串叶子节点:返回 (json_path, value)
 
 
165
  """
166
+ out = []
167
  if isinstance(obj, dict):
 
168
  for k, v in obj.items():
169
+ out.extend(_collect_string_nodes(v, f"{path}.{k}" if path else str(k)))
 
 
 
 
 
 
 
 
 
 
 
170
  elif isinstance(obj, list):
171
+ for i, v in enumerate(obj):
172
+ out.extend(_collect_string_nodes(v, f"{path}[{i}]"))
173
+ elif isinstance(obj, str):
174
+ out.append((path, obj))
175
+ return out
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
 
177
+ def _set_by_path(obj: Any, path: str, value: Any) -> None:
178
  """
179
+ 按类似 a.b[0].c 的路径写入值
 
180
  """
181
+ cur = obj
182
+ # 分割 tokens:key / [idx]
183
+ tokens = []
184
+ i = 0
185
+ while i < len(path):
186
+ if path[i] == "[":
187
+ j = path.index("]", i)
188
+ tokens.append(("idx", int(path[i+1:j])))
189
+ i = j + 1
190
+ elif path[i] == ".":
191
+ i += 1
192
+ else:
193
+ j = i
194
+ while j < len(path) and path[j] not in ".[":
195
+ j += 1
196
+ tokens.append(("key", path[i:j]))
197
+ i = j
198
+
199
+ for ttype, tval in tokens[:-1]:
200
+ if ttype == "key":
201
+ cur = cur[tval]
202
+ else:
203
+ cur = cur[tval]
204
+
205
+ last_type, last_val = tokens[-1]
206
+ if last_type == "key":
207
+ cur[last_val] = value
208
  else:
209
+ cur[last_val] = value
210
 
211
+ def _patch_template_with_inputs(
212
+ template: Any,
 
213
  prompt: str,
214
+ image_paths: List[str],
215
+ audio_paths: List[str],
216
+ ) -> Any:
 
217
  """
218
+ 不依赖 schema 的通用替换:
219
+ - 按出现顺序替换模板里第 N 个“像音频路径”的字符串为 audio_paths[N]
220
+ - 按出现顺序替换模板里第 N 个“像图片路径”的字符串为 image_paths[N]
221
+ - 尝试替换常见 prompt 字段
222
  """
223
+ patched = copy.deepcopy(template)
224
+ string_nodes = _collect_string_nodes(patched)
225
+
226
+ # 识别“可能是音频/图片路径”的节点(按出现顺序)
227
+ audio_like = []
228
+ image_like = []
229
+ prompt_like = []
230
+
231
+ for pth, val in string_nodes:
232
+ low = val.lower()
233
+ # 音频后缀或路径特征
234
+ if any(low.endswith(ext) for ext in [".wav", ".mp3", ".flac", ".m4a", ".aac", ".ogg"]):
235
+ audio_like.append((pth, val))
236
+ # 图片后缀
237
+ if any(low.endswith(ext) for ext in [".png", ".jpg", ".jpeg", ".webp", ".bmp"]):
238
+ image_like.append((pth, val))
239
+ # prompt 字段(按路径名判断更靠谱)
240
+ if re.search(r"(prompt|caption|text|instruction)$", pth, re.IGNORECASE):
241
+ prompt_like.append((pth, val))
242
+
243
+ # 替换音频(按顺序)
244
+ for idx, (pth, _) in enumerate(audio_like):
245
+ if idx < len(audio_paths):
246
+ _set_by_path(patched, pth, audio_paths[idx])
247
+
248
+ # 替换图片(按顺序)
249
+ for idx, (pth, _) in enumerate(image_like):
250
+ if idx < len(image_paths):
251
+ _set_by_path(patched, pth, image_paths[idx])
252
+
253
+ # 替换 prompt(如果模板里有多个 prompt 字段,就全写同一个)
254
+ if prompt.strip():
255
+ for pth, _ in prompt_like:
256
+ _set_by_path(patched, pth, prompt.strip())
257
+
258
+ return patched
259
+
260
+
261
+ # -------------------- 推理执行(调用官方脚本) --------------------
262
+ def _ensure_ffmpeg_in_path() -> None:
263
+ """
264
+ 使用 imageio-ffmpeg 提供的 ffmpeg,把它加入 PATH。
265
+ """
266
+ try:
267
+ import imageio_ffmpeg
268
+ ffmpeg_exe = imageio_ffmpeg.get_ffmpeg_exe()
269
+ ffmpeg_dir = str(Path(ffmpeg_exe).parent)
270
+ os.environ["PATH"] = ffmpeg_dir + os.pathsep + os.environ.get("PATH", "")
271
+ os.environ["IMAGEIO_FFMPEG_EXE"] = ffmpeg_exe
272
+ print("[ffmpeg] using:", ffmpeg_exe, flush=True)
273
+ except Exception as e:
274
+ print("[ffmpeg] warning:", repr(e), flush=True)
275
 
276
+ def _run_subprocess(cmd: List[str], cwd: Path) -> Tuple[int, str]:
277
+ """
278
+ 运行命令并收集输出(stdout+stderr)
279
+ """
280
+ print("[cmd]", " ".join(cmd), flush=True)
281
+ p = subprocess.Popen(
282
+ cmd,
283
+ cwd=str(cwd),
284
+ stdout=subprocess.PIPE,
285
+ stderr=subprocess.STDOUT,
286
+ text=True,
287
+ bufsize=1,
288
+ universal_newlines=True,
289
  )
290
+ lines = []
291
+ assert p.stdout is not None
292
+ for line in p.stdout:
293
+ lines.append(line)
294
+ p.wait()
295
+ out = "".join(lines)
296
+ return p.returncode, out
297
+
298
+ def _find_latest_video(search_dir: Path) -> Optional[str]:
299
+ mp4s = list(search_dir.rglob("*.mp4"))
300
+ if not mp4s:
301
+ return None
302
+ mp4s.sort(key=lambda p: p.stat().st_mtime, reverse=True)
303
+ return str(mp4s[0])
304
 
305
+ def _extract_video_path_from_log(log: str) -> Optional[str]:
306
+ # 从日志里提取类似 xxx.mp4 的路径
307
+ cand = re.findall(r"([^\s\"']+\.mp4)", log)
308
+ if not cand:
309
+ return None
310
+ # 取最后一个更可能是输出
311
+ return cand[-1]
 
312
 
313
+ def _prepare_runtime() -> None:
314
+ """
315
+ 启动阶段准备:
316
+ - 自举依赖
317
+ - clone repo
318
+ - 下载权重
319
+ - ffmpeg 兜底
320
+ - sys.path 加入 repo(以便脚本 import)
321
+ """
322
+ _ensure_bootstrap()
323
+ _ensure_repo_cloned()
324
+ _ensure_weights_downloaded()
325
+ _ensure_ffmpeg_in_path()
326
 
327
+ if str(REPO_DIR) not in sys.path:
328
+ sys.path.insert(0, str(REPO_DIR))
 
 
 
 
 
329
 
 
 
 
 
330
 
331
+ # -------------------- Gradio / ZeroGPU --------------------
332
+ _prepare_runtime()
 
 
 
 
 
 
 
 
 
 
 
 
333
 
334
+ import gradio as gr
335
+ import spaces
336
+
337
+
338
+ def _save_upload_to_dir(upload_path: str, dst_dir: Path, prefix: str) -> str:
339
  """
340
+ 将 gradio 上传的临时文件复制到工作目录,返回新路径
 
 
 
341
  """
342
+ dst_dir.mkdir(parents=True, exist_ok=True)
343
+ src = Path(upload_path)
344
+ ext = src.suffix
345
+ dst = dst_dir / f"{prefix}_{int(time.time()*1000)}{ext}"
346
+ shutil.copy2(src, dst)
347
+ return str(dst)
348
+
349
+ def _build_input_json_file(
350
+ mode: str,
351
+ prompt: str,
352
+ images: List[Optional[str]],
353
+ audios: List[Optional[str]],
354
+ ) -> str:
355
+ """
356
+ mode: "single" or "multi"
357
+ """
358
+ single_tpl_path, multi_tpl_path = _find_avatar_templates()
359
+ tpl = _load_json(single_tpl_path if mode == "single" else multi_tpl_path)
360
+
361
+ work_dir = REPO_DIR / "assets" / "avatar" / "custom_inputs"
362
+ img_paths = []
363
+ aud_paths = []
364
+
365
+ # 复制输入文件到 repo 内(避免脚本用相对路径时找不到)
366
+ for i, p in enumerate(images):
367
+ if p:
368
+ img_paths.append(_save_upload_to_dir(p, work_dir, f"img{i+1}"))
369
+ for i, p in enumerate(audios):
370
+ if p:
371
+ aud_paths.append(_save_upload_to_dir(p, work_dir, f"aud{i+1}"))
372
+
373
+ patched = _patch_template_with_inputs(
374
+ template=tpl,
375
+ prompt=prompt or "",
376
+ image_paths=img_paths,
377
+ audio_paths=aud_paths,
378
+ )
379
 
380
+ out_json = work_dir / f"input_{mode}_{int(time.time()*1000)}.json"
381
+ out_json.write_text(json.dumps(patched, ensure_ascii=False, indent=2), encoding="utf-8")
382
+ return str(out_json)
383
 
384
 
385
+ @spaces.GPU
 
 
 
386
  def generate_single(
387
+ stage_1: str,
 
388
  prompt: str,
389
+ image_path: Optional[str],
390
+ audio_path: str,
391
  resolution: str,
392
  num_segments: int,
393
  ref_img_index: int,
394
  mask_frame_range: int,
395
+ nproc: int,
396
+ context_parallel_size: int,
397
  ) -> Tuple[Optional[str], str]:
398
  """
399
+ 单人:Audio-Text-to-Video (at2v) Audio-Image-to-Video (ai2v)
400
+ 注意:官方示例使用 torchrun nproc=2 / context_parallel_size=2。:contentReference[oaicite:8]{index=8}
401
+ ZeroGPU 下默认先尝试 nproc=1。
402
  """
403
+ input_json = _build_input_json_file(
404
+ mode="single",
 
 
 
 
 
 
 
 
405
  prompt=prompt,
406
+ images=[image_path] if image_path else [],
407
+ audios=[audio_path],
 
408
  )
409
 
410
+ # 分辨率参数(官方说明可 480P/720P):contentReference[oaicite:9]{index=9}
411
+ # 这里不假设脚本参数名,直接透传 --resolution
 
412
  cmd = [
413
+ "torchrun",
414
+ f"--nproc_per_node={nproc}",
415
  "run_demo_avatar_single_audio_to_video.py",
416
+ f"--context_parallel_size={context_parallel_size}",
417
+ f"--checkpoint_dir={str(WEIGHTS_AVATAR)}",
418
+ f"--stage_1={stage_1}",
419
  f"--input_json={input_json}",
420
  f"--resolution={resolution}",
421
+ f"--num_segments={num_segments}",
422
+ f"--ref_img_index={ref_img_index}",
423
+ f"--mask_frame_range={mask_frame_range}",
424
  ]
425
 
426
+ rc, log = _run_subprocess(cmd, cwd=REPO_DIR)
427
+ if rc != 0:
428
+ return None, f"运行失败(exit={rc})。日志如下:\n{log}"
 
 
 
 
429
 
430
+ vid = _extract_video_path_from_log(log)
431
+ if vid:
432
+ # 相对路径转绝对
433
+ p = Path(vid)
434
+ if not p.is_absolute():
435
+ p = (REPO_DIR / p).resolve()
436
+ if p.exists():
437
+ return str(p), log
438
 
439
+ # 兜底:找最近生成的 mp4
440
+ fallback = _find_latest_video(REPO_DIR)
441
+ return fallback, log
442
 
 
443
 
444
@spaces.GPU
def generate_multi(
    prompt: str,
    image1: str,
    image2: str,
    audio1: str,
    audio2: str,
    audio_type: str,  # "para" = mixed (equal length) / "add" = concatenated
    resolution: str,
    num_segments: int,
    ref_img_index: int,
    mask_frame_range: int,
    nproc: int,
    context_parallel_size: int,
) -> Tuple[Optional[str], str]:
    """Two-person Audio-Image-to-Video.

    Builds the multi-speaker input JSON, launches the official demo script via
    ``torchrun``, and resolves the generated video path from the run log.

    Args:
        prompt: Optional text prompt.
        image1 / image2: Reference images for speaker 1 / 2 (both required).
        audio1 / audio2: Driving audio for speaker 1 / 2 (both required).
        audio_type: "para" or "add", forwarded to the demo script.
        Remaining parameters: see ``generate_single``.

    Returns:
        (absolute path to the generated video, or None on failure; log text)
    """
    # All four media inputs are mandatory for the two-person pipeline;
    # reject early with a readable message instead of a deep traceback.
    if not (image1 and image2):
        return None, "错误:双人模式必须提供两张参考图。"
    if not (audio1 and audio2):
        return None, "错误:双人模式必须提供两段音频。"

    input_json = _build_input_json_file(
        mode="multi",
        prompt=prompt,
        images=[image1, image2],
        audios=[audio1, audio2],
    )

    # int(...) guards against Gradio sliders delivering floats, which would
    # produce invalid CLI values like "--num_segments=1.0".
    cmd = [
        "torchrun",
        f"--nproc_per_node={int(nproc)}",
        "run_demo_avatar_multi_audio_to_video.py",
        f"--context_parallel_size={int(context_parallel_size)}",
        f"--checkpoint_dir={str(WEIGHTS_AVATAR)}",
        f"--input_json={input_json}",
        f"--audio_type={audio_type}",
        f"--resolution={resolution}",
        f"--num_segments={int(num_segments)}",
        f"--ref_img_index={int(ref_img_index)}",
        f"--mask_frame_range={int(mask_frame_range)}",
    ]

    rc, log = _run_subprocess(cmd, cwd=REPO_DIR)
    if rc != 0:
        return None, f"运行失败(exit={rc})。日志如下:\n{log}"

    # Prefer the path the script printed; make it absolute relative to the repo.
    vid = _extract_video_path_from_log(log)
    if vid:
        p = Path(vid)
        if not p.is_absolute():
            p = (REPO_DIR / p).resolve()
        if p.exists():
            return str(p), log

    # Fallback: newest mp4 found under the repo directory.
    fallback = _find_latest_video(REPO_DIR)
    return fallback, log
497
 
 
 
 
 
 
 
 
 
498
 
499
# -------------------- UI --------------------
# Top-level Gradio layout: one tab for single-person (at2v/ai2v), one for
# two-person generation; shared advanced parameters in an accordion.
with gr.Blocks(title="LongCat-Video-Avatar (ZeroGPU)", fill_height=True) as demo:
    gr.Markdown(
        "## LongCat-Video-Avatar (ZeroGPU)\n"
        "- 启动后会自动下载权重到 `./weights` 并准备环境。\n"
        "- ZeroGPU 会在点击生成时,按需分配 GPU 执行(@spaces.GPU)。\n"
        "- 如果你发现必须 2 卡才能跑通,可把 **nproc/context_parallel_size** 改为 2。"
    )

    with gr.Accordion("高级参数(默认先按 ZeroGPU 更稳的 1 卡尝试)", open=False):
        nproc = gr.Slider(1, 2, value=1, step=1, label="torchrun --nproc_per_node")
        cps = gr.Slider(1, 2, value=1, step=1, label="--context_parallel_size")
        resolution = gr.Radio(["480p", "720p"], value="480p", label="resolution")
        num_segments = gr.Slider(1, 8, value=1, step=1, label="num_segments(>1 启用续写/长视频段)")
        ref_img_index = gr.Slider(-24, 48, value=10, step=1, label="ref_img_index(减少重复动作/增强一致性)")
        mask_frame_range = gr.Slider(0, 12, value=3, step=1, label="mask_frame_range(过大可能出伪影)")

    with gr.Tabs():
        with gr.Tab("单人(AT2V / AI2V)"):
            stage_1 = gr.Radio(["at2v", "ai2v"], value="ai2v", label="stage_1")
            prompt = gr.Textbox(
                label="文本提示(建议包含 talking/speaking 等动词提示)",
                value="A realistic person is speaking naturally, talking to the camera.",
                lines=2,
            )
            img = gr.Image(type="filepath", label="参考图(ai2v 必填,at2v 可不传)")
            aud = gr.Audio(type="filepath", label="音频(必填)")
            btn = gr.Button("生成", variant="primary")
            out_v = gr.Video(label="输出视频")
            out_log = gr.Textbox(label="日志", lines=12)

            btn.click(
                fn=generate_single,
                inputs=[stage_1, prompt, img, aud, resolution, num_segments, ref_img_index, mask_frame_range, nproc, cps],
                outputs=[out_v, out_log],
                api_name="generate_single",
            )

        with gr.Tab("双人(Multi)"):
            prompt_m = gr.Textbox(
                label="文本提示(可选)",
                value="Two people are talking in turns naturally, facing the camera.",
                lines=2,
            )
            # One horizontal row per speaker (idiomatic `with gr.Row():`
            # instead of pre-building Row objects and entering them later).
            with gr.Row():
                img1 = gr.Image(type="filepath", label="人物1参考图(必填)")
                aud1 = gr.Audio(type="filepath", label="人物1音频(必填)")
            with gr.Row():
                img2 = gr.Image(type="filepath", label="人物2参考图(必填)")
                aud2 = gr.Audio(type="filepath", label="人物2音频(必填)")

            audio_type = gr.Radio(
                ["para", "add"],
                value="add",
                label="双音频模式:para=混合(等长) / add=拼接(可不等长)",
            )
            btn2 = gr.Button("生成(双人)", variant="primary")
            out_v2 = gr.Video(label="输出视频")
            out_log2 = gr.Textbox(label="日志", lines=12)

            btn2.click(
                fn=generate_multi,
                inputs=[prompt_m, img1, img2, aud1, aud2, audio_type, resolution, num_segments, ref_img_index, mask_frame_range, nproc, cps],
                outputs=[out_v2, out_log2],
                api_name="generate_multi",
            )

    gr.Markdown(
        "### 重要说明\n"
        "- 该模型当前 **没有 Inference Provider 托管**,因此 Space 必须本地跑推理代码与权重。\n"
        "- ZeroGPU 的 CUDA 任务会在 `@spaces.GPU` 的函数调用时 fork 执行并释放。\n"
        "- 官方示例的 Avatar 推理默认用 2 进程(nproc=2)。"
    )

# Gradio 4.x removed `queue(concurrency_count=...)`; the replacement keyword
# is `default_concurrency_limit` (HEAD pins gradio>=4.0.0, so the old name
# would raise TypeError at startup).
demo.queue(default_concurrency_limit=1).launch()