C4G-HKUST committed
Commit b7867cd · Parent: 7c629b9

Fix GPU detection and CPU fallback: remove ssr_mode, add runtime GPU check, support CPU mode for FaceInference

Files changed (2)
  1. app.py +39 -8
  2. utils/get_face_bbox.py +12 -3
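
Both files below apply the same pattern: probe CUDA through torch at runtime and map the result onto the InsightFace/ONNX Runtime ctx_id convention (-1 means CPU, a non-negative integer means that GPU index). As a minimal standalone sketch of that pattern, here is a hypothetical helper (pick_ctx_id is an illustrative name, not a function in this repo):

import torch

def pick_ctx_id(preferred_gpu: int = 0) -> int:
    """Return an InsightFace/ONNX Runtime ctx_id: -1 for CPU, else a GPU index."""
    if not torch.cuda.is_available():
        return -1  # no CUDA runtime at all -> CPU
    try:
        if torch.cuda.device_count() > preferred_gpu:
            return preferred_gpu  # the requested GPU exists
    except Exception:
        pass  # treat any CUDA probing error as "no usable GPU", as this commit does
    return -1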
app.py CHANGED
@@ -17,13 +17,22 @@ from PIL import Image
 from huggingface_hub import snapshot_download
 
 # Check GPU availability (following Meigen-MultiTalk)
-is_gpu_available = torch.cuda.is_available()
-if is_gpu_available:
-    # Initialize CUDA to make sure the device is usable
+# Reference: https://huggingface.co/spaces/fffiloni/Meigen-MultiTalk/blob/main/app.py
+is_shared_ui = True if os.environ.get('SPACE_ID', '').startswith('C4G-HKUST/AnyTalker') else False
+is_gpu_associated = torch.cuda.is_available()
+
+if is_gpu_associated:
     try:
-        _ = torch.cuda.current_device()
-    except RuntimeError:
-        is_gpu_available = False
+        num_gpus = torch.cuda.device_count()
+        print(f"GPU AVAILABLE: {num_gpus} GPU(s)")
+        if num_gpus > 0:
+            gpu_name = torch.cuda.get_device_name(0)
+            print(f"GPU Name: {gpu_name}")
+    except Exception as e:
+        print(f"GPU detection error: {e}")
+        is_gpu_associated = False
+else:
+    print("No CUDA-compatible GPU found. Will use CPU (slower).")
 
 # Import the AnyTalker modules
 import wan
@@ -374,6 +383,25 @@ def run_graio_demo(args):
 
     os.makedirs(args.audio_save_dir, exist_ok=True)
 
+    # Detect GPU availability at runtime (following Meigen-MultiTalk)
+    if torch.cuda.is_available():
+        try:
+            num_gpus = torch.cuda.device_count()
+            if num_gpus > 0:
+                gpu_name = torch.cuda.get_device_name(0)
+                logging.info(f"GPU AVAILABLE: {num_gpus} GPU(s), Name: {gpu_name}")
+                # Use the GPU
+                device = local_rank if world_size > 1 else 0
+            else:
+                logging.warning("CUDA is available but no GPU devices found. Using CPU.")
+                device = -1  # use the CPU
+        except Exception as e:
+            logging.warning(f"GPU detection error: {e}. Using CPU.")
+            device = -1  # use the CPU
+    else:
+        logging.warning("No CUDA-compatible GPU found. Using CPU (slower).")
+        device = -1  # use the CPU
+
     logging.info("Creating AnyTalker pipeline.")
     # Load the model
     wan_a2v = wan.WanAF2V(
@@ -390,7 +418,9 @@ def run_graio_demo(args):
     )
 
     # Create the InsightFace face detector
-    face_processor = FaceInference(det_thresh=args.det_thresh, ctx_id=local_rank)
+    # ctx_id=-1 means CPU; any other value selects that GPU
+    face_processor_ctx_id = -1 if device == -1 else local_rank
+    face_processor = FaceInference(det_thresh=args.det_thresh, ctx_id=face_processor_ctx_id)
     logging.info("Model and face processor loaded successfully.")
 
     def generate_video(img2vid_image, img2vid_prompt, n_prompt, img2vid_audio_1, img2vid_audio_2, img2vid_audio_3,
@@ -672,7 +702,8 @@ def run_graio_demo(args):
     )
     # Follow Meigen-MultiTalk's known-good configuration
     # On Hugging Face Spaces, Gradio handles port and server configuration automatically
-    demo.queue(max_size=4).launch(ssr_mode=False, show_error=True, show_api=False)
+    # Launch the same way Meigen-MultiTalk does
+    demo.queue(max_size=4).launch(show_error=True, show_api=False)
 
 
 
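
One point the app.py hunks leave implicit: the `device` integer follows the ONNX/InsightFace convention, while torch wants a torch.device. A hedged sketch of the mapping; the diff only shows `device` feeding FaceInference's ctx_id, and whether wan.WanAF2V takes a device argument is not shown, so the names below are illustrative assumptions:

import torch

device = -1  # as chosen by the fallback logic above
torch_device = torch.device('cpu') if device == -1 else torch.device(f'cuda:{device}')
# Assumption for illustration: half precision only makes sense on a GPU
dtype = torch.float16 if torch_device.type == 'cuda' else torch.float32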
 
utils/get_face_bbox.py CHANGED
@@ -17,12 +17,21 @@ class FaceInference:
         Args:
             det_thresh: detection threshold
            det_size: detection input size
-            ctx_id: GPU device id; if None, auto-detect the current process's local_rank
+            ctx_id: GPU device id; -1 selects the CPU, any other value selects that GPU
         """
+        # If ctx_id is -1, use the CPU; otherwise use the GPU
+        if ctx_id == -1:
+            providers = ['CPUExecutionProvider']
+            provider_options = [{}]
+            ctx_id = -1  # InsightFace uses -1 to mean CPU
+        else:
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+            provider_options = [{"device_id": str(ctx_id)}, {}]
+
         self.face_analysis = FaceAnalysis(
             allowed_modules=['detection'],
-            providers=['CUDAExecutionProvider'],
-            provider_options=[{"device_id": str(ctx_id)}],  # make sure to use provider_options to pin the GPU rank
+            providers=providers,
+            provider_options=provider_options,
         )
 
         self.face_analysis.prepare(ctx_id=ctx_id, det_thresh=det_thresh, det_size=det_size)
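
Listing CPUExecutionProvider after CUDAExecutionProvider in the GPU branch also gives ONNX Runtime a fallback if the CUDA provider fails to initialize. A hedged usage sketch of the new CPU path follows: the constructor arguments come from this diff, and detection goes through the wrapped insightface FaceAnalysis object, whose get() returns face objects carrying a bbox array; the det_thresh value and image path are illustrative assumptions:

import cv2
from utils.get_face_bbox import FaceInference

face_processor = FaceInference(det_thresh=0.5, ctx_id=-1)  # ctx_id=-1 -> CPUExecutionProvider
img = cv2.imread('reference.png')                          # BGR ndarray, as insightface expects
for face in face_processor.face_analysis.get(img):
    print(face.bbox)  # [x1, y1, x2, y2] detection box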