Hive-Dataset committed on
Commit
45fdacc
·
1 Parent(s): adbe9ab

Change CUDA to CPU

Browse files
Files changed (2) hide show
  1. app.py +13 -15
  2. look2hear/models/dolphin.py +7 -7
app.py CHANGED
@@ -23,8 +23,8 @@ from face_detection_utils import detect_faces
23
  # Use HF Space's temp directory
24
  TEMP_DIR = os.environ.get('TMPDIR', '/tmp')
25
 
26
- # Shared state for relaying GPU-side status back to the UI thread.
27
- GPU_PROGRESS_STATE = {"progress": 0.0, "status": "Processing on GPU..."}
28
  GPU_PROGRESS_LOCK = threading.Lock()
29
 
30
  class LogCollector:
@@ -184,10 +184,9 @@ def process_detected_faces(boxes, probs, frame_rgb, frame_pil):
184
  full_frame_annotated = np.array(full_frame_pil)
185
  return face_images, len(boxes), full_frame_annotated, None
186
 
187
- @spaces.GPU(duration=60, enable_queue=True)
188
  def detect_faces_gpu(frame_pil):
189
- """GPU-accelerated face detection"""
190
- print("Detecting faces with RetinaFace")
191
 
192
  frame_array = np.array(frame_pil)
193
 
@@ -271,16 +270,15 @@ def detect_and_extract_all_faces(video_path):
271
  traceback.print_exc()
272
  return [], 0, None, f"Failed to open video file. Error: {str(e)}"
273
 
274
- @spaces.GPU(duration=300, enable_queue=True)
275
  def process_video_gpu(video_file, temp_dir, num_speakers):
276
- """GPU-accelerated video processing"""
277
  try:
278
  from Inference_with_status import process_video_with_status
279
 
280
  # Define status callback inside GPU function
281
  def gpu_status_callback(message):
282
  status_text = message.get('status', 'Processing...')
283
- print(f"GPU Processing: {status_text}")
284
  progress_value = message.get('progress')
285
  with GPU_PROGRESS_LOCK:
286
  GPU_PROGRESS_STATE["status"] = status_text
@@ -391,13 +389,13 @@ def process_video_auto(video_file, progress=gr.Progress()):
391
  yield snapshot()
392
 
393
  try:
394
- print("Starting GPU-accelerated video processing...")
395
  with GPU_PROGRESS_LOCK:
396
  GPU_PROGRESS_STATE["progress"] = 0.0
397
- GPU_PROGRESS_STATE["status"] = "Processing on GPU..."
398
 
399
- progress(0.4, desc="Processing on GPU...")
400
- status_value = "Processing on GPU..."
401
  yield snapshot()
402
 
403
  gpu_result = {"output_files": None, "exception": None}
@@ -416,12 +414,12 @@ def process_video_auto(video_file, progress=gr.Progress()):
416
  gpu_thread.start()
417
 
418
  last_reported_progress = 0.4
419
- last_status_message = "Processing on GPU..."
420
 
421
  while gpu_thread.is_alive():
422
  time.sleep(0.5)
423
  with GPU_PROGRESS_LOCK:
424
- gpu_status = GPU_PROGRESS_STATE.get("status", "Processing on GPU...")
425
  gpu_progress_value = GPU_PROGRESS_STATE.get("progress", 0.0)
426
 
427
  mapped_progress = 0.4 + 0.5 * gpu_progress_value
@@ -632,7 +630,7 @@ with gr.Blocks(
632
  ### 🚀 Powered by:
633
  - RetinaFace for face detection
634
  - Dolphin model for audio-visual separation
635
- - GPU acceleration when available
636
  <footer style="display:none;">
637
  <a href='https://clustrmaps.com/site/1c828' title='Visit tracker'>
638
  <img src='//clustrmaps.com/map_v2.png?cl=080808&w=300&t=tt&d=XYmTC4S_SxuX7G06iJ16lU43VCNkCBFRLXMfEM5zvmo&co=ffffff&ct=808080'/>
 
23
  # Use HF Space's temp directory
24
  TEMP_DIR = os.environ.get('TMPDIR', '/tmp')
25
 
26
+ # Shared state for relaying CPU-side status back to the UI thread.
27
+ GPU_PROGRESS_STATE = {"progress": 0.0, "status": "Processing on CPU..."}
28
  GPU_PROGRESS_LOCK = threading.Lock()
29
 
30
  class LogCollector:
 
184
  full_frame_annotated = np.array(full_frame_pil)
185
  return face_images, len(boxes), full_frame_annotated, None
186
 
 
187
  def detect_faces_gpu(frame_pil):
188
+ """CPU face detection"""
189
+ print("Detecting faces with RetinaFace (CPU)")
190
 
191
  frame_array = np.array(frame_pil)
192
 
 
270
  traceback.print_exc()
271
  return [], 0, None, f"Failed to open video file. Error: {str(e)}"
272
 
 
273
  def process_video_gpu(video_file, temp_dir, num_speakers):
274
+ """CPU video processing"""
275
  try:
276
  from Inference_with_status import process_video_with_status
277
 
278
  # Define status callback inside GPU function
279
  def gpu_status_callback(message):
280
  status_text = message.get('status', 'Processing...')
281
+ print(f"CPU Processing: {status_text}")
282
  progress_value = message.get('progress')
283
  with GPU_PROGRESS_LOCK:
284
  GPU_PROGRESS_STATE["status"] = status_text
 
389
  yield snapshot()
390
 
391
  try:
392
+ print("Starting CPU video processing...")
393
  with GPU_PROGRESS_LOCK:
394
  GPU_PROGRESS_STATE["progress"] = 0.0
395
+ GPU_PROGRESS_STATE["status"] = "Processing on CPU..."
396
 
397
+ progress(0.4, desc="Processing on CPU...")
398
+ status_value = "Processing on CPU..."
399
  yield snapshot()
400
 
401
  gpu_result = {"output_files": None, "exception": None}
 
414
  gpu_thread.start()
415
 
416
  last_reported_progress = 0.4
417
+ last_status_message = "Processing on CPU..."
418
 
419
  while gpu_thread.is_alive():
420
  time.sleep(0.5)
421
  with GPU_PROGRESS_LOCK:
422
+ gpu_status = GPU_PROGRESS_STATE.get("status", "Processing on CPU...")
423
  gpu_progress_value = GPU_PROGRESS_STATE.get("progress", 0.0)
424
 
425
  mapped_progress = 0.4 + 0.5 * gpu_progress_value
 
630
  ### 🚀 Powered by:
631
  - RetinaFace for face detection
632
  - Dolphin model for audio-visual separation
633
+ - CPU processing
634
  <footer style="display:none;">
635
  <a href='https://clustrmaps.com/site/1c828' title='Visit tracker'>
636
  <img src='//clustrmaps.com/map_v2.png?cl=080808&w=300&t=tt&d=XYmTC4S_SxuX7G06iJ16lU43VCNkCBFRLXMfEM5zvmo&co=ffffff&ct=808080'/>
look2hear/models/dolphin.py CHANGED
@@ -1256,13 +1256,13 @@ class Dolphin(nn.Module, PyTorchModelHubMixin):
1256
  cls,
1257
  *,
1258
  model_id: str,
1259
- revision: str,
1260
- cache_dir: str,
1261
- force_download: bool,
1262
- proxies: dict,
1263
- resume_download: bool,
1264
- local_files_only: bool,
1265
- token: str,
1266
  map_location: str = "cpu",
1267
  strict: bool = False,
1268
  **model_kwargs,
 
1256
  cls,
1257
  *,
1258
  model_id: str,
1259
+ revision: str = None,
1260
+ cache_dir: str = None,
1261
+ force_download: bool = False,
1262
+ proxies: dict = None,
1263
+ resume_download: bool = False,
1264
+ local_files_only: bool = False,
1265
+ token: str = None,
1266
  map_location: str = "cpu",
1267
  strict: bool = False,
1268
  **model_kwargs,