crash10155 commited on
Commit
863c627
Β·
verified Β·
1 Parent(s): 6ae913c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +540 -412
app.py CHANGED
@@ -26,13 +26,19 @@ from pathlib import Path
26
  # Set up environment for HuggingFace Spaces
27
  os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
28
  os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "TRUE"
 
29
 
30
  # Core imports
31
  import gradio as gr
32
  import torch
33
- import onnxruntime as ort
34
 
35
  # Optional imports with graceful degradation
 
 
 
 
 
 
36
  try:
37
  from moviepy.editor import VideoFileClip
38
  MOVIEPY_AVAILABLE = True
@@ -41,29 +47,41 @@ except ImportError as e:
41
  print(f"⚠️ MoviePy not available: {e}")
42
  MOVIEPY_AVAILABLE = False
43
 
44
- # Try to import enhancement modules
 
45
  try:
46
- sys.path.append("./SwitcherAI/processors/frame/modules")
47
- import face_enhancer
48
- import frame_enhancer
49
- ENHANCEMENT_AVAILABLE = True
50
- print("βœ… Enhancement modules loaded successfully")
51
- except ImportError as e:
 
 
 
 
 
 
 
 
 
 
52
  print(f"⚠️ Enhancement modules not available: {e}")
53
- ENHANCEMENT_AVAILABLE = False
54
 
55
  # Directory setup for HuggingFace Spaces
56
- BASE_DIR = Path(__file__).parent
57
- TEMP_DIR = Path(tempfile.mkdtemp(prefix="facefusion_"))
58
  OUTPUT_DIR = BASE_DIR / "outputs"
59
  CONVERT_DIR = BASE_DIR / "Convert"
60
  ASSETS_DIR = BASE_DIR / ".assets" / "models"
61
 
62
- # Create directories
63
- TEMP_DIR.mkdir(exist_ok=True)
64
- OUTPUT_DIR.mkdir(exist_ok=True)
65
- CONVERT_DIR.mkdir(exist_ok=True)
66
- ASSETS_DIR.mkdir(parents=True, exist_ok=True)
 
 
67
 
68
  print(f"πŸ“ Base directory: {BASE_DIR}")
69
  print(f"πŸ“‚ Temp directory: {TEMP_DIR}")
@@ -71,28 +89,25 @@ print(f"πŸ“€ Output directory: {OUTPUT_DIR}")
71
  print(f"🎯 Assets directory: {ASSETS_DIR}")
72
  print(f"πŸ“ Convert directory: {CONVERT_DIR}")
73
 
 
74
  try:
75
- from SwitcherAI.utilities import set_temp_directory, get_temp_directory_info
 
 
 
 
 
76
 
77
- # Set up proper temp directory for HuggingFace Spaces
78
- temp_base_dir = BASE_DIR / "temp_switcher"
79
- temp_base_dir.mkdir(exist_ok=True)
80
- set_temp_directory(str(temp_base_dir))
81
 
82
- # Print temp directory info for debugging
83
- temp_info = get_temp_directory_info()
84
- print("πŸ”§ SwitcherAI Temp Directory Setup:")
85
- print(f" πŸ“ Base temp: {temp_info['base_temp']}")
86
- print(f" πŸ“ Platform: {temp_info['platform']}")
87
- print(f" πŸ“ HF Spaces: {temp_info['is_hf_spaces']}")
88
- print(f" πŸ“ Exists: {temp_info['temp_exists']}")
89
- print(f" πŸ“ Writable: {temp_info['temp_writable']}")
90
 
91
  except ImportError as e:
92
  print(f"⚠️ Could not import SwitcherAI utilities: {e}")
93
  print("πŸ”„ Using default temp directory behavior")
94
 
95
- # Download required model files
96
  def download_required_models():
97
  """Download required model files if not present"""
98
  import urllib.request
@@ -101,7 +116,7 @@ def download_required_models():
101
  models_to_download = [
102
  {
103
  'name': 'GFPGANv1.4.pth',
104
- 'url': 'https://huggingface.co/talhaty/GFPGANv1.4/resolve/main/GFPGANv1.4.pth',
105
  'path': ASSETS_DIR / 'GFPGANv1.4.pth',
106
  'description': 'GFPGAN face enhancement model'
107
  }
@@ -112,33 +127,42 @@ def download_required_models():
112
  model_url = model['url']
113
  model_name = model['name']
114
 
115
- if model_path.exists():
116
- file_size = model_path.stat().st_size / (1024 * 1024) # MB
117
- print(f"βœ… {model_name} already exists ({file_size:.1f}MB)")
118
- continue
 
 
 
119
 
120
  try:
121
  print(f"πŸ“₯ Downloading {model_name}...")
122
  print(f" URL: {model_url}")
123
  print(f" Path: {model_path}")
124
 
 
 
 
125
  # Create a progress callback
126
  def progress_callback(block_num, block_size, total_size):
127
  if total_size > 0:
128
  percent = min(100, (block_num * block_size * 100) / total_size)
129
- if block_num % 50 == 0: # Update every 50 blocks to avoid spam
130
  print(f" Progress: {percent:.1f}%")
131
 
132
  # Download with progress
133
  urllib.request.urlretrieve(model_url, str(model_path), progress_callback)
134
 
135
  # Verify download
136
- if model_path.exists() and model_path.stat().st_size > 0:
137
  file_size = model_path.stat().st_size / (1024 * 1024) # MB
138
  print(f"βœ… {model_name} downloaded successfully ({file_size:.1f}MB)")
139
  else:
140
- print(f"❌ {model_name} download failed - file not created or empty")
141
-
 
 
 
142
  except urllib.error.URLError as e:
143
  print(f"❌ Network error downloading {model_name}: {e}")
144
  except Exception as e:
@@ -146,8 +170,11 @@ def download_required_models():
146
 
147
  # Download models at startup
148
  print("\nπŸ”„ Checking required model files...")
149
- download_required_models()
150
- print("βœ… Model check complete\n")
 
 
 
151
 
152
  # Global variables
153
  current_process = None
@@ -163,61 +190,83 @@ def get_available_gpus():
163
  print("❌ CUDA not available")
164
  return ["CPU Only"]
165
 
166
- device_count = torch.cuda.device_count()
167
- print(f"πŸ”’ CUDA devices detected: {device_count}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
- for i in range(device_count):
170
- try:
171
- props = torch.cuda.get_device_properties(i)
172
- gpu_name = props.name
173
- gpu_memory = props.total_memory / (1024**3) # GB
174
-
175
- # Test device accessibility
176
- torch.cuda.set_device(i)
177
- test_tensor = torch.tensor([1.0], device=f'cuda:{i}')
178
-
179
- gpu_entry = f"GPU {i}: {gpu_name} ({gpu_memory:.1f}GB)"
180
- available_gpus.append(gpu_entry)
181
- print(f"βœ… {gpu_entry}")
182
-
183
- del test_tensor
184
-
185
- except Exception as e:
186
- print(f"❌ Error with GPU {i}: {e}")
187
- available_gpus.append(f"GPU {i}: Error")
188
 
189
  available_gpus.append("CPU Only")
190
  return available_gpus
191
 
192
  def set_gpu_device(gpu_selection):
193
  """Set CUDA device based on selection"""
194
- if gpu_selection.startswith("GPU"):
195
- try:
196
  gpu_id = gpu_selection.split(":")[0].split(" ")[1]
197
  os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
198
  print(f"πŸ–₯️ Using GPU {gpu_id}")
199
  return gpu_id
200
- except:
201
  os.environ["CUDA_VISIBLE_DEVICES"] = ""
202
- print("πŸ–₯️ Falling back to CPU")
203
  return "cpu"
204
- else:
 
205
  os.environ["CUDA_VISIBLE_DEVICES"] = ""
206
- print("πŸ–₯️ Using CPU mode")
207
  return "cpu"
208
 
209
  def safe_copy_file(source, destination):
210
  """Safely copy file with verification"""
211
  try:
 
 
 
 
 
212
  destination.parent.mkdir(parents=True, exist_ok=True)
 
 
 
 
 
 
 
 
 
 
213
  shutil.copy2(source, destination)
214
 
 
215
  if destination.exists() and destination.stat().st_size > 0:
216
  print(f"βœ… File copied: {destination.name}")
217
  return True
218
  else:
219
  print(f"❌ Copy verification failed: {destination.name}")
220
  return False
 
221
  except Exception as e:
222
  print(f"❌ Copy error: {e}")
223
  return False
@@ -228,9 +277,12 @@ def handle_batch_file_upload(files):
228
  return "πŸ“ No files uploaded"
229
 
230
  # Clear existing files in convert directory
231
- for existing_file in CONVERT_DIR.glob("*"):
232
- if existing_file.is_file():
233
- existing_file.unlink()
 
 
 
234
 
235
  uploaded_count = 0
236
  failed_count = 0
@@ -245,9 +297,8 @@ def handle_batch_file_upload(files):
245
 
246
  # Copy file to convert directory
247
  dest_path = CONVERT_DIR / original_name
248
- shutil.copy2(file, dest_path)
249
 
250
- if dest_path.exists() and dest_path.stat().st_size > 0:
251
  file_size = dest_path.stat().st_size / (1024 * 1024) # MB
252
  print(f"βœ… Uploaded: {original_name} ({file_size:.1f}MB)")
253
  uploaded_count += 1
@@ -264,38 +315,48 @@ def handle_batch_file_upload(files):
264
  status_msg += f"❌ Failed: {failed_count} files\n"
265
 
266
  # List uploaded files
267
- uploaded_files = [f.name for f in CONVERT_DIR.glob("*.mp4")] + [f.name for f in CONVERT_DIR.glob("*.avi")] + [f.name for f in CONVERT_DIR.glob("*.mov")]
268
- if uploaded_files:
269
- status_msg += f"πŸ“ Files ready for processing:\n" + "\n".join([f" β€’ {f}" for f in uploaded_files[:10]])
270
- if len(uploaded_files) > 10:
271
- status_msg += f"\n ... and {len(uploaded_files) - 10} more"
 
 
 
272
 
273
  return status_msg
274
 
275
  def resize_video(input_path, output_path, fps=30):
276
  """Resize/process video with fallback"""
277
- if not MOVIEPY_AVAILABLE:
278
- print("⚠️ MoviePy not available - copying video directly")
279
- shutil.copy2(input_path, output_path)
280
- return
281
-
282
  try:
 
 
 
 
 
283
  print(f"🎬 Processing video: {input_path.name}")
284
  clip = VideoFileClip(str(input_path))
285
  clip.write_videofile(str(output_path), fps=fps, audio_codec='aac', verbose=False, logger=None)
286
  clip.close()
287
  print("βœ… Video processed successfully")
 
 
288
  except Exception as e:
289
  print(f"❌ Video processing failed: {e}")
290
- shutil.copy2(input_path, output_path)
 
 
 
 
 
291
 
292
  def extract_audio(video_path, audio_path):
293
  """Extract audio from video"""
294
- if not MOVIEPY_AVAILABLE:
295
- print("⚠️ MoviePy not available - cannot extract audio")
296
- return False
297
-
298
  try:
 
 
 
 
299
  clip = VideoFileClip(str(video_path))
300
  if clip.audio is not None:
301
  clip.audio.write_audiofile(str(audio_path), logger=None, verbose=False)
@@ -304,6 +365,7 @@ def extract_audio(video_path, audio_path):
304
  else:
305
  clip.close()
306
  return False
 
307
  except Exception as e:
308
  print(f"❌ Audio extraction failed: {e}")
309
  return False
@@ -344,6 +406,7 @@ def create_batch_zip():
344
 
345
  print(f"βœ… Batch zip created: {zip_path.name}")
346
  return zip_path
 
347
  except Exception as e:
348
  print(f"❌ Zip creation failed: {e}")
349
  return None
@@ -359,6 +422,7 @@ def get_download_file():
359
  file_size = latest_file.stat().st_size / (1024 * 1024) # MB
360
 
361
  return str(latest_file), f"πŸ“₯ Ready: {latest_file.name} ({file_size:.1f}MB)"
 
362
  except Exception as e:
363
  return None, f"❌ Error: {e}"
364
 
@@ -369,73 +433,79 @@ def run_single_video(source_image, target_video, frame_processor, face_analyser_
369
  global last_output_path, last_batch_mode, current_process
370
  last_batch_mode = False
371
 
372
- set_gpu_device(gpu_selection)
373
-
374
- # Setup temp files
375
- temp_source = TEMP_DIR / 'source-image.jpg'
376
- temp_target = TEMP_DIR / 'resize-vid.mp4'
377
-
378
- # Copy and process files
379
- if not safe_copy_file(Path(source_image), temp_source):
380
- return "❌ Failed to copy source image", ""
381
-
382
- try:
383
- resize_video(Path(target_video), temp_target)
384
- except Exception as e:
385
- return f"❌ Video processing failed: {e}", ""
386
-
387
- # Generate output filename
388
- source_name = Path(source_image).stem
389
- target_name = Path(target_video).stem
390
- suffix = "_lipsynced" if enable_lip_sync else ""
391
- output_filename = f"{source_name}_{target_name}{suffix}.mp4"
392
- output_path = OUTPUT_DIR / output_filename
393
-
394
- # Handle lip sync
395
- audio_path = None
396
- if enable_lip_sync:
397
- audio_path = TEMP_DIR / 'target-audio.wav'
398
- if not extract_audio(temp_target, audio_path):
399
- print("⚠️ Lip sync disabled - audio extraction failed")
400
- enable_lip_sync = False
401
-
402
- # Build command
403
- execution_provider = "cuda" if gpu_selection.startswith("GPU") else "cpu"
404
-
405
- cmd = [
406
- "python", "run.py",
407
- "--execution-providers", execution_provider,
408
- "--execution-thread-count", "8",
409
- "--reference-face-distance", "1.5",
410
- "-s", str(temp_source),
411
- "-t", str(temp_target),
412
- "-o", str(output_path),
413
- "--frame-processors"] + frame_processor + [
414
- "--face-analyser-direction", face_analyser_direction,
415
- "--face-analyser-age", face_analyser_age
416
- ]
417
-
418
- if enable_lip_sync and audio_path:
419
- cmd.extend(["--source-paths", str(audio_path)])
420
- cmd.extend(["--lip-syncer-model", lip_syncer_model])
421
- if 'lip_syncer' not in frame_processor:
422
- idx = cmd.index("--frame-processors") + 1
423
- cmd[idx:idx] = ['lip_syncer']
424
-
425
- if face_recognition != 'none':
426
- cmd.extend(["--face-recognition", face_recognition])
427
- if face_analyser_gender != 'none':
428
- cmd.extend(["--face-analyser-gender", face_analyser_gender])
429
- if skip_audio and not enable_lip_sync:
430
- cmd.append("--skip-audio")
431
- if keep_fps:
432
- cmd.append("--keep-fps")
433
-
434
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
435
  print("πŸš€ Starting face swap processing...")
 
436
  start_time = time.time()
437
 
438
- current_process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, text=True, bufsize=1)
 
 
 
 
 
 
 
439
 
440
  cli_output = ""
441
  while True:
@@ -461,19 +531,22 @@ def run_single_video(source_image, target_video, frame_processor, face_analyser_
461
  return "❌ Processing failed", cli_output + f"\n\n⏱️ Time: {execution_time:.2f}s"
462
 
463
  # Cleanup
464
- if torch.cuda.is_available():
465
- torch.cuda.empty_cache()
466
- gc.collect()
467
-
468
- if audio_path and audio_path.exists():
469
- audio_path.unlink()
 
 
 
470
 
471
  last_output_path = str(output_path)
472
 
473
  return str(output_path), cli_output + f"\n\nβœ… Completed in {execution_time:.2f}s"
474
 
475
  except Exception as e:
476
- return f"❌ Error: {e}", cli_output
477
 
478
  def run_batch_processing(source_image, frame_processor, face_analyser_direction, face_recognition,
479
  face_analyser_gender, skip_audio, keep_fps, lip_syncer_model, enable_lip_sync, gpu_selection):
@@ -481,241 +554,288 @@ def run_batch_processing(source_image, frame_processor, face_analyser_direction,
481
  global last_output_path, last_batch_mode, current_process
482
  last_batch_mode = True
483
 
484
- set_gpu_device(gpu_selection)
485
-
486
- video_files = list(CONVERT_DIR.glob("*.mp4")) + list(CONVERT_DIR.glob("*.avi")) + list(CONVERT_DIR.glob("*.mov"))
487
-
488
- if not video_files:
489
- yield None, f"πŸ“ No video files found in Convert folder.\nPlease upload videos using the file input above."
490
- return
491
-
492
- temp_source = TEMP_DIR / 'source-image.jpg'
493
- if not safe_copy_file(Path(source_image), temp_source):
494
- yield None, "❌ Failed to copy source image"
495
- return
496
-
497
- source_name = Path(source_image).stem
498
- cli_output = f"πŸ“Š Processing {len(video_files)} videos in batch mode\n🎯 Source: {source_name}\n\n"
499
- yield None, cli_output
500
-
501
- successful = 0
502
- failed = 0
503
-
504
- for i, video_file in enumerate(video_files, 1):
505
- current_output = f"[{i}/{len(video_files)}] 🎬 {video_file.name}\n"
506
- cli_output += current_output
507
- yield None, cli_output
508
-
509
- temp_target = TEMP_DIR / 'resize-vid.mp4'
510
 
511
- try:
512
- resize_video(video_file, temp_target)
513
- except Exception as e:
514
- error_msg = f"❌ Video resize failed: {e}\n"
515
- cli_output += error_msg
516
- failed += 1
517
- yield None, cli_output
518
- continue
519
 
520
- suffix = "_lipsynced" if enable_lip_sync else ""
521
- output_filename = f"{source_name}_{video_file.stem}{suffix}.mp4"
522
- output_path = OUTPUT_DIR / output_filename
523
-
524
- # Handle lip sync
525
- audio_path = None
526
- if enable_lip_sync:
527
- audio_path = TEMP_DIR / 'target-audio.wav'
528
- if not extract_audio(temp_target, audio_path):
529
- enable_lip_sync = False
530
 
531
- # Build command
532
- execution_provider = "cuda" if gpu_selection.startswith("GPU") else "cpu"
 
 
533
 
534
- cmd = [
535
- "python", "run.py",
536
- "--execution-providers", execution_provider,
537
- "--execution-thread-count", "24",
538
- "--reference-face-distance", "1.5",
539
- "-s", str(temp_source),
540
- "-t", str(temp_target),
541
- "-o", str(output_path),
542
- "--frame-processors"] + frame_processor + [
543
- "--face-analyser-direction", face_analyser_direction
544
- ]
545
 
546
- if enable_lip_sync and audio_path:
547
- cmd.extend(["--source-paths", str(audio_path)])
548
- cmd.extend(["--lip-syncer-model", lip_syncer_model])
549
- if 'lip_syncer' not in frame_processor:
550
- idx = cmd.index("--frame-processors") + 1
551
- cmd[idx:idx] = ['lip_syncer']
552
 
553
- if face_recognition != 'none':
554
- cmd.extend(["--face-recognition", face_recognition])
555
- if face_analyser_gender != 'none':
556
- cmd.extend(["--face-analyser-gender", face_analyser_gender])
557
- if skip_audio and not enable_lip_sync:
558
- cmd.append("--skip-audio")
559
- if keep_fps:
560
- cmd.append("--keep-fps")
561
-
562
- try:
563
- start_time = time.time()
564
- current_process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, text=True, bufsize=1)
565
-
566
- while True:
567
- output = current_process.stdout.readline()
568
- if output == '' and current_process.poll() is not None:
569
- break
570
- if output:
571
- line = output.strip()
572
- print(line)
573
 
574
- rc = current_process.poll()
575
- execution_time = time.time() - start_time
576
 
577
- if rc == 0:
578
- success_msg = f"βœ… Completed in {execution_time:.2f}s\n\n"
579
- cli_output += success_msg
580
- successful += 1
581
- else:
582
- error_msg = f"❌ Processing failed\n\n"
583
  cli_output += error_msg
584
  failed += 1
 
 
585
 
586
- yield None, cli_output
 
 
587
 
588
- # Cleanup
589
- if torch.cuda.is_available():
590
- torch.cuda.empty_cache()
591
- gc.collect()
 
 
592
 
593
- if audio_path and audio_path.exists():
594
- audio_path.unlink()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
595
 
596
- except Exception as e:
597
- error_msg = f"❌ Error: {e}\n\n"
598
- cli_output += error_msg
599
- failed += 1
600
- yield None, cli_output
601
-
602
- # Final summary
603
- final_msg = f"\n=== BATCH COMPLETE ===\nβœ… Successful: {successful}\n❌ Failed: {failed}\n"
604
- cli_output += final_msg
605
-
606
- if successful > 0:
607
- last_output_path = str(create_batch_zip())
608
-
609
- yield None, cli_output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
610
 
611
  def handle_processing(source_image, target_video, frame_processor, face_analyser_direction, face_recognition,
612
  face_analyser_gender, face_analyser_age, skip_audio, keep_fps,
613
  lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection):
614
  """Main processing handler"""
615
 
616
- if use_folder_mode:
617
- for _, cli_output in run_batch_processing(
618
- source_image, frame_processor, face_analyser_direction, face_recognition,
619
- face_analyser_gender, skip_audio, keep_fps, lip_syncer_model, enable_lip_sync, gpu_selection
620
- ):
621
- yield cli_output, "⏹️ CANCEL"
622
- yield cli_output + "\nπŸŽ‰ Batch processing complete!", "πŸ“₯ DOWNLOAD"
623
- else:
624
- for video_result, cli_output in run_single_video(
625
- source_image, target_video, frame_processor, face_analyser_direction, face_recognition,
626
- face_analyser_gender, face_analyser_age, skip_audio, keep_fps,
627
- lip_syncer_model, enable_lip_sync, gpu_selection
628
- ):
629
- yield cli_output, "⏹️ CANCEL"
630
-
631
- if video_result and not video_result.startswith("❌"):
632
- yield cli_output + "\nπŸŽ‰ Processing complete!", "πŸ“₯ DOWNLOAD"
633
  else:
634
- yield cli_output, "πŸ”„ RESET"
 
 
 
 
 
 
 
 
 
 
 
 
 
635
 
636
  def cancel_processing():
637
  """Cancel current processing"""
638
  global current_process
639
- if current_process and current_process.poll() is None:
640
- try:
641
  current_process.terminate()
642
  current_process.wait(timeout=10)
 
 
 
 
 
 
 
 
 
643
  except:
644
- current_process.kill()
645
- current_process.wait()
646
- return "⏹️ Processing cancelled"
647
- return "⚠️ No active processing"
648
 
649
  def reset_interface():
650
  """Reset interface to defaults"""
651
- cleanup_temp_files()
652
- cleanup_convert_files()
653
- return (
654
- None, # source_image
655
- None, # target_video
656
- ['face_swapper'] + (['face_enhancer'] if ENHANCEMENT_AVAILABLE else []), # frame_processor
657
- 'top-bottom', # face_analyser_direction
658
- 'reference', # face_recognition
659
- 'female', # face_analyser_gender
660
- 'adult', # face_analyser_age
661
- False, # skip_audio
662
- True, # keep_fps
663
- 'wav2lip_gan_96', # lip_syncer_model
664
- False, # enable_lip_sync
665
- False, # use_folder_mode
666
- AVAILABLE_GPUS[0] if AVAILABLE_GPUS else "CPU Only", # gpu_selection
667
- "πŸ”§ Interface reset. Ready for new session!", # cli_output
668
- "πŸš€ START PROCESSING" # button text
669
- )
 
 
 
 
 
 
670
 
671
  def handle_download():
672
  """Handle download button click"""
673
- download_path, status = get_download_file()
674
- if download_path:
675
- return download_path, status, gr.update(visible=True), gr.update(visible=False)
676
- else:
677
- return None, status, gr.update(visible=False), gr.update(visible=True)
 
 
 
678
 
679
  def handle_action_button(button_text, *inputs):
680
  """Handle multi-purpose action button"""
681
- if "RESET" in button_text:
682
- return reset_interface()
683
- elif "CANCEL" in button_text:
684
- cancel_msg = cancel_processing()
685
- return inputs + (cancel_msg, "πŸ”„ RESET")
686
- else:
687
- return inputs + ("", button_text)
 
 
 
688
 
689
  def toggle_batch_mode(use_folder_mode):
690
  """Handle batch mode toggle"""
691
- if use_folder_mode:
692
- return gr.update(
693
- label="πŸ“ Target Videos (Drag multiple files here)",
694
- file_count="multiple",
695
- file_types=["video"]
696
- )
697
- else:
698
- return gr.update(
699
- label="Target Video (Video to modify)",
700
- file_count="single",
701
- file_types=["video"]
702
- )
 
 
 
 
703
 
704
  def handle_file_upload(files, use_folder_mode):
705
  """Handle file uploads - single or multiple"""
706
- if use_folder_mode and files:
707
- # Handle batch upload
708
- status = handle_batch_file_upload(files)
709
- return status
710
- elif not use_folder_mode and files:
711
- # Single file mode - just return status
712
- return f"βœ… Single video uploaded: {Path(files.name).name if hasattr(files, 'name') else 'video file'}"
713
- else:
714
- return "πŸ“ No files uploaded"
 
 
715
 
716
  # Initialize GPU detection
717
- AVAILABLE_GPUS = get_available_gpus()
718
- print(f"πŸ–₯️ Available GPUs: {AVAILABLE_GPUS}")
 
 
 
 
719
 
720
  # Gradio Interface
721
  def create_interface():
@@ -873,61 +993,65 @@ def create_interface():
873
  with gr.Column():
874
  keep_fps = gr.Checkbox(label="🎬 Keep Original FPS", value=True)
875
 
876
- # Event handlers
877
- enable_lip_sync.change(
878
- lambda x: gr.update(visible=x),
879
- inputs=[enable_lip_sync],
880
- outputs=[lip_syncer_model]
881
- )
882
-
883
- use_folder_mode.change(
884
- toggle_batch_mode,
885
- inputs=[use_folder_mode],
886
- outputs=[target_video]
887
- )
888
-
889
- target_video.upload(
890
- handle_file_upload,
891
- inputs=[target_video, use_folder_mode],
892
- outputs=[upload_status]
893
- )
894
-
895
- start_button.click(
896
- handle_processing,
897
- inputs=[
898
- source_image, target_video, frame_processor, face_analyser_direction,
899
- face_recognition, face_analyser_gender, face_analyser_age,
900
- skip_audio, keep_fps, lip_syncer_model, enable_lip_sync,
901
- use_folder_mode, gpu_selection
902
- ],
903
- outputs=[cli_output, action_button]
904
- )
905
-
906
- action_button.click(
907
- handle_action_button,
908
- inputs=[
909
- action_button, source_image, target_video, frame_processor,
910
- face_analyser_direction, face_recognition, face_analyser_gender,
911
- face_analyser_age, skip_audio, keep_fps, lip_syncer_model,
912
- enable_lip_sync, use_folder_mode, gpu_selection
913
- ],
914
- outputs=[
915
- source_image, target_video, frame_processor, face_analyser_direction,
916
- face_recognition, face_analyser_gender, face_analyser_age,
917
- skip_audio, keep_fps, lip_syncer_model, enable_lip_sync,
918
- use_folder_mode, gpu_selection, cli_output, action_button
919
- ]
920
- )
921
-
922
- download_button.click(
923
- handle_download,
924
- outputs=[download_file, download_status, download_file, download_button]
925
- )
926
-
927
- download_file.change(
928
- lambda: (gr.update(visible=False), gr.update(visible=True), "Ready for next download"),
929
- outputs=[download_file, download_button, download_status]
930
- )
 
 
 
 
931
 
932
  return interface
933
 
@@ -950,12 +1074,16 @@ if __name__ == "__main__":
950
  cleanup_temp_files()
951
 
952
  # Create and launch interface
953
- app = create_interface()
954
- app.launch(
955
- server_name="0.0.0.0",
956
- server_port=7860,
957
- share=False,
958
- debug=False,
959
- show_error=True,
960
- max_file_size="1500mb"
961
- )
 
 
 
 
 
26
  # Set up environment for HuggingFace Spaces
27
  os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
28
  os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "TRUE"
29
+ os.environ["PYTHONPATH"] = "."
30
 
31
  # Core imports
32
  import gradio as gr
33
  import torch
 
34
 
35
  # Optional imports with graceful degradation
36
+ try:
37
+ import onnxruntime as ort
38
+ print("βœ… ONNX Runtime loaded successfully")
39
+ except ImportError as e:
40
+ print(f"⚠️ ONNX Runtime not available: {e}")
41
+
42
  try:
43
  from moviepy.editor import VideoFileClip
44
  MOVIEPY_AVAILABLE = True
 
47
  print(f"⚠️ MoviePy not available: {e}")
48
  MOVIEPY_AVAILABLE = False
49
 
50
+ # Try to import enhancement modules - make this more robust
51
+ ENHANCEMENT_AVAILABLE = False
52
  try:
53
+ import importlib.util
54
+
55
+ # Check if the modules exist
56
+ face_enhancer_path = Path("SwitcherAI/processors/frame/modules/face_enhancer.py")
57
+ frame_enhancer_path = Path("SwitcherAI/processors/frame/modules/frame_enhancer.py")
58
+
59
+ if face_enhancer_path.exists() and frame_enhancer_path.exists():
60
+ sys.path.insert(0, str(Path("SwitcherAI/processors/frame/modules").resolve()))
61
+ import face_enhancer
62
+ import frame_enhancer
63
+ ENHANCEMENT_AVAILABLE = True
64
+ print("βœ… Enhancement modules loaded successfully")
65
+ else:
66
+ print("⚠️ Enhancement module files not found")
67
+
68
+ except Exception as e:
69
  print(f"⚠️ Enhancement modules not available: {e}")
 
70
 
71
  # Directory setup for HuggingFace Spaces
72
+ BASE_DIR = Path(__file__).parent.resolve()
73
+ TEMP_DIR = BASE_DIR / "temp_workspace"
74
  OUTPUT_DIR = BASE_DIR / "outputs"
75
  CONVERT_DIR = BASE_DIR / "Convert"
76
  ASSETS_DIR = BASE_DIR / ".assets" / "models"
77
 
78
+ # Create directories with better error handling
79
+ for directory in [TEMP_DIR, OUTPUT_DIR, CONVERT_DIR, ASSETS_DIR]:
80
+ try:
81
+ directory.mkdir(parents=True, exist_ok=True)
82
+ print(f"πŸ“ Directory ready: {directory}")
83
+ except Exception as e:
84
+ print(f"⚠️ Failed to create directory {directory}: {e}")
85
 
86
  print(f"πŸ“ Base directory: {BASE_DIR}")
87
  print(f"πŸ“‚ Temp directory: {TEMP_DIR}")
 
89
  print(f"🎯 Assets directory: {ASSETS_DIR}")
90
  print(f"πŸ“ Convert directory: {CONVERT_DIR}")
91
 
92
+ # Try to set up SwitcherAI temp directory
93
  try:
94
+ sys.path.insert(0, str(BASE_DIR))
95
+ from SwitcherAI.utilities import conditional_download
96
+
97
+ # Set up temp directory for SwitcherAI
98
+ temp_switcher_dir = TEMP_DIR / "switcher_temp"
99
+ temp_switcher_dir.mkdir(exist_ok=True)
100
 
101
+ # Set environment variable for temp directory
102
+ os.environ['SWITCHER_TEMP_DIR'] = str(temp_switcher_dir)
 
 
103
 
104
+ print("πŸ”§ SwitcherAI utilities loaded successfully")
 
 
 
 
 
 
 
105
 
106
  except ImportError as e:
107
  print(f"⚠️ Could not import SwitcherAI utilities: {e}")
108
  print("πŸ”„ Using default temp directory behavior")
109
 
110
+ # Download required model files with better error handling
111
  def download_required_models():
112
  """Download required model files if not present"""
113
  import urllib.request
 
116
  models_to_download = [
117
  {
118
  'name': 'GFPGANv1.4.pth',
119
+ 'url': 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
120
  'path': ASSETS_DIR / 'GFPGANv1.4.pth',
121
  'description': 'GFPGAN face enhancement model'
122
  }
 
127
  model_url = model['url']
128
  model_name = model['name']
129
 
130
+ try:
131
+ if model_path.exists() and model_path.stat().st_size > 1024: # Check if file exists and is > 1KB
132
+ file_size = model_path.stat().st_size / (1024 * 1024) # MB
133
+ print(f"βœ… {model_name} already exists ({file_size:.1f}MB)")
134
+ continue
135
+ except Exception as e:
136
+ print(f"⚠️ Error checking {model_name}: {e}")
137
 
138
  try:
139
  print(f"πŸ“₯ Downloading {model_name}...")
140
  print(f" URL: {model_url}")
141
  print(f" Path: {model_path}")
142
 
143
+ # Ensure parent directory exists
144
+ model_path.parent.mkdir(parents=True, exist_ok=True)
145
+
146
  # Create a progress callback
147
  def progress_callback(block_num, block_size, total_size):
148
  if total_size > 0:
149
  percent = min(100, (block_num * block_size * 100) / total_size)
150
+ if block_num % 100 == 0: # Update every 100 blocks to avoid spam
151
  print(f" Progress: {percent:.1f}%")
152
 
153
  # Download with progress
154
  urllib.request.urlretrieve(model_url, str(model_path), progress_callback)
155
 
156
  # Verify download
157
+ if model_path.exists() and model_path.stat().st_size > 1024:
158
  file_size = model_path.stat().st_size / (1024 * 1024) # MB
159
  print(f"βœ… {model_name} downloaded successfully ({file_size:.1f}MB)")
160
  else:
161
+ print(f"❌ {model_name} download failed - file not created or too small")
162
+ # Clean up failed download
163
+ if model_path.exists():
164
+ model_path.unlink()
165
+
166
  except urllib.error.URLError as e:
167
  print(f"❌ Network error downloading {model_name}: {e}")
168
  except Exception as e:
 
170
 
171
  # Download models at startup
172
  print("\nπŸ”„ Checking required model files...")
173
+ try:
174
+ download_required_models()
175
+ print("βœ… Model check complete\n")
176
+ except Exception as e:
177
+ print(f"⚠️ Model download failed: {e}\n")
178
 
179
  # Global variables
180
  current_process = None
 
190
  print("❌ CUDA not available")
191
  return ["CPU Only"]
192
 
193
+ try:
194
+ device_count = torch.cuda.device_count()
195
+ print(f"πŸ”’ CUDA devices detected: {device_count}")
196
+
197
+ for i in range(device_count):
198
+ try:
199
+ props = torch.cuda.get_device_properties(i)
200
+ gpu_name = props.name
201
+ gpu_memory = props.total_memory / (1024**3) # GB
202
+
203
+ # Test device accessibility
204
+ torch.cuda.set_device(i)
205
+ test_tensor = torch.tensor([1.0], device=f'cuda:{i}')
206
+
207
+ gpu_entry = f"GPU {i}: {gpu_name} ({gpu_memory:.1f}GB)"
208
+ available_gpus.append(gpu_entry)
209
+ print(f"βœ… {gpu_entry}")
210
+
211
+ del test_tensor
212
+ torch.cuda.empty_cache()
213
+
214
+ except Exception as e:
215
+ print(f"❌ Error with GPU {i}: {e}")
216
+ available_gpus.append(f"GPU {i}: Error")
217
 
218
+ except Exception as e:
219
+ print(f"❌ GPU detection failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220
 
221
  available_gpus.append("CPU Only")
222
  return available_gpus
223
 
224
  def set_gpu_device(gpu_selection):
225
  """Set CUDA device based on selection"""
226
+ try:
227
+ if gpu_selection.startswith("GPU") and "Error" not in gpu_selection:
228
  gpu_id = gpu_selection.split(":")[0].split(" ")[1]
229
  os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
230
  print(f"πŸ–₯️ Using GPU {gpu_id}")
231
  return gpu_id
232
+ else:
233
  os.environ["CUDA_VISIBLE_DEVICES"] = ""
234
+ print("πŸ–₯️ Using CPU mode")
235
  return "cpu"
236
+ except Exception as e:
237
+ print(f"⚠️ Error setting GPU device: {e}")
238
  os.environ["CUDA_VISIBLE_DEVICES"] = ""
 
239
  return "cpu"
240
 
241
def safe_copy_file(source, destination):
    """Copy *source* to *destination* with sanity checks on both ends.

    Accepts str or Path arguments. Refuses missing or empty sources,
    creates the destination's parent directories, and verifies the copy
    produced a non-empty file. Returns True on success, False on any
    failure (never raises).
    """
    try:
        src = Path(source) if isinstance(source, str) else source
        dst = Path(destination) if isinstance(destination, str) else destination

        dst.parent.mkdir(parents=True, exist_ok=True)

        # Reject sources we could never copy meaningfully.
        if not src.exists():
            print(f"❌ Source file does not exist: {src}")
            return False
        if src.stat().st_size == 0:
            print(f"❌ Source file is empty: {src}")
            return False

        shutil.copy2(src, dst)

        # Confirm the destination actually landed with content.
        copied_ok = dst.exists() and dst.stat().st_size > 0
        if copied_ok:
            print(f"βœ… File copied: {dst.name}")
        else:
            print(f"❌ Copy verification failed: {dst.name}")
        return copied_ok
    except Exception as e:
        print(f"❌ Copy error: {e}")
        return False
 
277
  return "πŸ“ No files uploaded"
278
 
279
  # Clear existing files in convert directory
280
+ try:
281
+ for existing_file in CONVERT_DIR.glob("*"):
282
+ if existing_file.is_file():
283
+ existing_file.unlink()
284
+ except Exception as e:
285
+ print(f"⚠️ Error cleaning convert directory: {e}")
286
 
287
  uploaded_count = 0
288
  failed_count = 0
 
297
 
298
  # Copy file to convert directory
299
  dest_path = CONVERT_DIR / original_name
 
300
 
301
+ if safe_copy_file(file, dest_path):
302
  file_size = dest_path.stat().st_size / (1024 * 1024) # MB
303
  print(f"βœ… Uploaded: {original_name} ({file_size:.1f}MB)")
304
  uploaded_count += 1
 
315
  status_msg += f"❌ Failed: {failed_count} files\n"
316
 
317
  # List uploaded files
318
+ try:
319
+ uploaded_files = [f.name for f in CONVERT_DIR.glob("*.mp4")] + [f.name for f in CONVERT_DIR.glob("*.avi")] + [f.name for f in CONVERT_DIR.glob("*.mov")]
320
+ if uploaded_files:
321
+ status_msg += f"πŸ“ Files ready for processing:\n" + "\n".join([f" β€’ {f}" for f in uploaded_files[:10]])
322
+ if len(uploaded_files) > 10:
323
+ status_msg += f"\n ... and {len(uploaded_files) - 10} more"
324
+ except Exception as e:
325
+ print(f"⚠️ Error listing files: {e}")
326
 
327
  return status_msg
328
 
329
def resize_video(input_path, output_path, fps=30):
    """Re-encode *input_path* into *output_path* at the given frame rate.

    Falls back to a plain file copy when MoviePy is unavailable or the
    re-encode fails for any reason.

    Args:
        input_path: Path to the source video.
        output_path: Path where the processed video is written.
        fps: Target frame rate for the re-encode (default 30).

    Returns:
        True if a usable output file was produced, False otherwise.
    """
    try:
        if not MOVIEPY_AVAILABLE:
            print("⚠️ MoviePy not available - copying video directly")
            shutil.copy2(input_path, output_path)
            return True

        print(f"🎬 Processing video: {input_path.name}")
        clip = VideoFileClip(str(input_path))
        try:
            clip.write_videofile(str(output_path), fps=fps, audio_codec='aac', verbose=False, logger=None)
        finally:
            # BUGFIX: always release the ffmpeg readers, even when encoding
            # fails, so repeated failures don't leak file handles/subprocesses.
            clip.close()
        print("βœ… Video processed successfully")
        return True

    except Exception as e:
        print(f"❌ Video processing failed: {e}")
        # Best-effort fallback: hand the original file through unmodified.
        try:
            shutil.copy2(input_path, output_path)
            return True
        except Exception as e2:
            print(f"❌ Fallback copy failed: {e2}")
            return False
352
 
353
def extract_audio(video_path, audio_path):
    """Extract the audio track of *video_path* into *audio_path*.

    Returns True when an audio file was written; False when MoviePy is
    missing, the video has no audio track, or extraction fails.
    """
    try:
        if not MOVIEPY_AVAILABLE:
            print("⚠️ MoviePy not available - cannot extract audio")
            return False

        clip = VideoFileClip(str(video_path))
        try:
            if clip.audio is None:
                return False
            clip.audio.write_audiofile(str(audio_path), logger=None, verbose=False)
            return True
        finally:
            # BUGFIX: close in every path (no-audio, success, or a write
            # failure) so ffmpeg readers are never leaked.
            clip.close()

    except Exception as e:
        print(f"❌ Audio extraction failed: {e}")
        return False
 
406
 
407
  print(f"βœ… Batch zip created: {zip_path.name}")
408
  return zip_path
409
+
410
  except Exception as e:
411
  print(f"❌ Zip creation failed: {e}")
412
  return None
 
422
  file_size = latest_file.stat().st_size / (1024 * 1024) # MB
423
 
424
  return str(latest_file), f"πŸ“₯ Ready: {latest_file.name} ({file_size:.1f}MB)"
425
+
426
  except Exception as e:
427
  return None, f"❌ Error: {e}"
428
 
 
433
  global last_output_path, last_batch_mode, current_process
434
  last_batch_mode = False
435
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
  try:
437
+ set_gpu_device(gpu_selection)
438
+
439
+ # Setup temp files
440
+ temp_source = TEMP_DIR / 'source-image.jpg'
441
+ temp_target = TEMP_DIR / 'resize-vid.mp4'
442
+
443
+ # Copy and process files
444
+ if not safe_copy_file(Path(source_image), temp_source):
445
+ return "❌ Failed to copy source image", ""
446
+
447
+ if not resize_video(Path(target_video), temp_target):
448
+ return "❌ Video processing failed", ""
449
+
450
+ # Generate output filename
451
+ source_name = Path(source_image).stem
452
+ target_name = Path(target_video).stem
453
+ suffix = "_lipsynced" if enable_lip_sync else ""
454
+ output_filename = f"{source_name}_{target_name}{suffix}.mp4"
455
+ output_path = OUTPUT_DIR / output_filename
456
+
457
+ # Handle lip sync
458
+ audio_path = None
459
+ if enable_lip_sync:
460
+ audio_path = TEMP_DIR / 'target-audio.wav'
461
+ if not extract_audio(temp_target, audio_path):
462
+ print("⚠️ Lip sync disabled - audio extraction failed")
463
+ enable_lip_sync = False
464
+
465
+ # Build command
466
+ execution_provider = "cuda" if gpu_selection.startswith("GPU") and "Error" not in gpu_selection else "cpu"
467
+
468
+ cmd = [
469
+ sys.executable, "run.py",
470
+ "--execution-providers", execution_provider,
471
+ "--execution-thread-count", "8",
472
+ "--reference-face-distance", "1.5",
473
+ "-s", str(temp_source),
474
+ "-t", str(temp_target),
475
+ "-o", str(output_path),
476
+ "--frame-processors"] + frame_processor + [
477
+ "--face-analyser-direction", face_analyser_direction,
478
+ "--face-analyser-age", face_analyser_age
479
+ ]
480
+
481
+ if enable_lip_sync and audio_path:
482
+ cmd.extend(["--source-paths", str(audio_path)])
483
+ cmd.extend(["--lip-syncer-model", lip_syncer_model])
484
+ if 'lip_syncer' not in frame_processor:
485
+ idx = cmd.index("--frame-processors") + 1
486
+ cmd[idx:idx] = ['lip_syncer']
487
+
488
+ if face_recognition != 'none':
489
+ cmd.extend(["--face-recognition", face_recognition])
490
+ if face_analyser_gender != 'none':
491
+ cmd.extend(["--face-analyser-gender", face_analyser_gender])
492
+ if skip_audio and not enable_lip_sync:
493
+ cmd.append("--skip-audio")
494
+ if keep_fps:
495
+ cmd.append("--keep-fps")
496
+
497
  print("πŸš€ Starting face swap processing...")
498
+ print(f"πŸ“‹ Command: {' '.join(cmd)}")
499
  start_time = time.time()
500
 
501
+ current_process = sp.Popen(
502
+ cmd,
503
+ stdout=sp.PIPE,
504
+ stderr=sp.STDOUT,
505
+ text=True,
506
+ bufsize=1,
507
+ cwd=str(BASE_DIR)
508
+ )
509
 
510
  cli_output = ""
511
  while True:
 
531
  return "❌ Processing failed", cli_output + f"\n\n⏱️ Time: {execution_time:.2f}s"
532
 
533
  # Cleanup
534
+ try:
535
+ if torch.cuda.is_available():
536
+ torch.cuda.empty_cache()
537
+ gc.collect()
538
+
539
+ if audio_path and audio_path.exists():
540
+ audio_path.unlink()
541
+ except Exception as e:
542
+ print(f"⚠️ Cleanup error: {e}")
543
 
544
  last_output_path = str(output_path)
545
 
546
  return str(output_path), cli_output + f"\n\nβœ… Completed in {execution_time:.2f}s"
547
 
548
  except Exception as e:
549
+ return f"❌ Error: {e}", ""
550
 
551
def run_batch_processing(source_image, frame_processor, face_analyser_direction, face_recognition,
                         face_analyser_gender, skip_audio, keep_fps, lip_syncer_model, enable_lip_sync, gpu_selection):
    """Run the face swap over every video in the Convert folder (generator).

    Yields ``(None, cli_output)`` tuples so the Gradio UI can stream progress
    text. On completion with at least one success, records a zip of all
    outputs in the module-global ``last_output_path``.

    Args mirror ``run_single_video`` minus the single target video; videos
    are discovered in CONVERT_DIR by extension.
    """
    global last_output_path, last_batch_mode, current_process
    last_batch_mode = True

    try:
        set_gpu_device(gpu_selection)

        # Collect candidate videos from the staging folder.
        video_extensions = ['*.mp4', '*.avi', '*.mov', '*.mkv']
        video_files = []
        for ext in video_extensions:
            video_files.extend(CONVERT_DIR.glob(ext))

        if not video_files:
            yield None, f"πŸ“ No video files found in Convert folder.\nPlease upload videos using the file input above."
            return

        temp_source = TEMP_DIR / 'source-image.jpg'
        if not safe_copy_file(Path(source_image), temp_source):
            yield None, "❌ Failed to copy source image"
            return

        source_name = Path(source_image).stem
        cli_output = f"πŸ“Š Processing {len(video_files)} videos in batch mode\n🎯 Source: {source_name}\n\n"
        yield None, cli_output

        successful = 0
        failed = 0

        for i, video_file in enumerate(video_files, 1):
            cli_output += f"[{i}/{len(video_files)}] 🎬 {video_file.name}\n"
            yield None, cli_output

            temp_target = TEMP_DIR / 'resize-vid.mp4'

            if not resize_video(video_file, temp_target):
                cli_output += f"❌ Video resize failed\n"
                failed += 1
                yield None, cli_output
                continue

            # BUGFIX: use a per-video flag instead of mutating the
            # enable_lip_sync parameter -- previously a single failed audio
            # extraction disabled lip sync for all remaining videos.
            lip_sync_this_video = enable_lip_sync

            suffix = "_lipsynced" if lip_sync_this_video else ""
            output_filename = f"{source_name}_{video_file.stem}{suffix}.mp4"
            output_path = OUTPUT_DIR / output_filename

            # Handle lip sync: needs the target's audio track on disk.
            audio_path = None
            if lip_sync_this_video:
                audio_path = TEMP_DIR / 'target-audio.wav'
                if not extract_audio(temp_target, audio_path):
                    lip_sync_this_video = False

            # Build the run.py command line.
            execution_provider = "cuda" if gpu_selection.startswith("GPU") and "Error" not in gpu_selection else "cpu"

            cmd = [
                sys.executable, "run.py",
                "--execution-providers", execution_provider,
                "--execution-thread-count", "8",
                "--reference-face-distance", "1.5",
                "-s", str(temp_source),
                "-t", str(temp_target),
                "-o", str(output_path),
                "--frame-processors"] + frame_processor + [
                "--face-analyser-direction", face_analyser_direction
            ]

            if lip_sync_this_video and audio_path:
                cmd.extend(["--source-paths", str(audio_path)])
                cmd.extend(["--lip-syncer-model", lip_syncer_model])
                if 'lip_syncer' not in frame_processor:
                    # Splice lip_syncer in right after the flag so it runs too.
                    idx = cmd.index("--frame-processors") + 1
                    cmd[idx:idx] = ['lip_syncer']

            if face_recognition != 'none':
                cmd.extend(["--face-recognition", face_recognition])
            if face_analyser_gender != 'none':
                cmd.extend(["--face-analyser-gender", face_analyser_gender])
            if skip_audio and not lip_sync_this_video:
                cmd.append("--skip-audio")
            if keep_fps:
                cmd.append("--keep-fps")

            try:
                start_time = time.time()
                current_process = sp.Popen(
                    cmd,
                    stdout=sp.PIPE,
                    stderr=sp.STDOUT,
                    text=True,
                    bufsize=1,
                    cwd=str(BASE_DIR)
                )

                # Stream subprocess output to the server log until it exits.
                while True:
                    output = current_process.stdout.readline()
                    if output == '' and current_process.poll() is not None:
                        break
                    if output:
                        print(output.strip())

                rc = current_process.poll()
                execution_time = time.time() - start_time

                if rc == 0:
                    cli_output += f"βœ… Completed in {execution_time:.2f}s\n\n"
                    successful += 1
                else:
                    cli_output += f"❌ Processing failed\n\n"
                    failed += 1

                yield None, cli_output

                # Cleanup between videos: free CUDA cache and the temp audio.
                try:
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()
                    gc.collect()
                    if audio_path and audio_path.exists():
                        audio_path.unlink()
                except Exception as e:
                    print(f"⚠️ Cleanup error: {e}")

            except Exception as e:
                cli_output += f"❌ Error: {e}\n\n"
                failed += 1
                yield None, cli_output

        # Final summary
        cli_output += f"\n=== BATCH COMPLETE ===\nβœ… Successful: {successful}\n❌ Failed: {failed}\n"

        if successful > 0:
            # BUGFIX: create_batch_zip() can return None on failure; don't
            # record the literal string "None" as the download path.
            batch_zip = create_batch_zip()
            if batch_zip:
                last_output_path = str(batch_zip)

        yield None, cli_output

    except Exception as e:
        yield None, f"❌ Batch processing error: {e}"
698
 
699
def handle_processing(source_image, target_video, frame_processor, face_analyser_direction, face_recognition,
                      face_analyser_gender, face_analyser_age, skip_audio, keep_fps,
                      lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection):
    """Dispatch to batch or single-video processing and stream progress.

    Yields ``(cli_text, action_button_label)`` pairs for the Gradio UI:
    CANCEL while running, then DOWNLOAD on success or RESET on failure.
    """
    try:
        if use_folder_mode:
            batch_args = (source_image, frame_processor, face_analyser_direction, face_recognition,
                          face_analyser_gender, skip_audio, keep_fps, lip_syncer_model,
                          enable_lip_sync, gpu_selection)
            for _, progress_text in run_batch_processing(*batch_args):
                yield progress_text, "⏹️ CANCEL"
            yield progress_text + "\nπŸŽ‰ Batch processing complete!", "πŸ“₯ DOWNLOAD"
        else:
            single_args = (source_image, target_video, frame_processor, face_analyser_direction,
                           face_recognition, face_analyser_gender, face_analyser_age, skip_audio,
                           keep_fps, lip_syncer_model, enable_lip_sync, gpu_selection)
            for result, progress_text in run_single_video(*single_args):
                yield progress_text, "⏹️ CANCEL"

            # `result`/`progress_text` carry the last yielded values here;
            # an empty generator raises NameError, caught below.
            if result and not result.startswith("❌"):
                yield progress_text + "\nπŸŽ‰ Processing complete!", "πŸ“₯ DOWNLOAD"
            else:
                yield progress_text, "πŸ”„ RESET"

    except Exception as e:
        yield f"❌ Processing error: {e}", "πŸ”„ RESET"
727
 
728
def cancel_processing():
    """Terminate the currently running swap subprocess, if any.

    Tries a graceful terminate first (10s grace), then falls back to a
    hard kill. Returns a human-readable status string; never raises.
    """
    global current_process
    try:
        if current_process and current_process.poll() is None:
            current_process.terminate()
            current_process.wait(timeout=10)
            return "⏹️ Processing cancelled"
        else:
            return "⚠️ No active processing"
    except Exception as e:
        # Graceful terminate failed (e.g. wait timed out) -- force kill.
        try:
            if current_process:
                current_process.kill()
                current_process.wait()
            return f"⏹️ Processing force-cancelled: {e}"
        # BUGFIX: narrowed from bare `except:` which would also swallow
        # SystemExit/KeyboardInterrupt.
        except Exception:
            return f"❌ Cancel failed: {e}"
 
 
 
746
 
747
def reset_interface():
    """Clear temp/convert folders and return default values for every widget.

    The returned tuple matches the action button's output list:
    (source_image, target_video, frame_processor, face_analyser_direction,
    face_recognition, face_analyser_gender, face_analyser_age, skip_audio,
    keep_fps, lip_syncer_model, enable_lip_sync, use_folder_mode,
    gpu_selection, cli_output, button_text).
    """
    try:
        cleanup_temp_files()
        cleanup_convert_files()

        default_processors = ['face_swapper'] + (['face_enhancer'] if ENHANCEMENT_AVAILABLE else [])
        default_gpu = AVAILABLE_GPUS[0] if AVAILABLE_GPUS else "CPU Only"

        return (
            None,                 # source_image
            None,                 # target_video
            default_processors,   # frame_processor
            'top-bottom',         # face_analyser_direction
            'reference',          # face_recognition
            'female',             # face_analyser_gender
            'adult',              # face_analyser_age
            False,                # skip_audio
            True,                 # keep_fps
            'wav2lip_gan_96',     # lip_syncer_model
            False,                # enable_lip_sync
            False,                # use_folder_mode
            default_gpu,          # gpu_selection
            "πŸ”§ Interface reset. Ready for new session!",  # cli_output
            "πŸš€ START PROCESSING"  # button text
        )
    except Exception as e:
        # Cleanup blew up: fall back to safe hard-coded defaults.
        return (None, None, ['face_swapper'], 'top-bottom', 'reference', 'female', 'adult',
                False, True, 'wav2lip_gan_96', False, False, "CPU Only",
                f"⚠️ Reset error: {e}", "πŸš€ START PROCESSING")
774
 
775
def handle_download():
    """Resolve the latest output file and toggle download widget visibility.

    Returns (file_path_or_None, status_text, file_widget_update,
    button_widget_update) for the download click handler.
    """
    try:
        download_path, status = get_download_file()
    except Exception as e:
        return None, f"❌ Download error: {e}", gr.update(visible=False), gr.update(visible=True)

    if download_path:
        # Show the file component, hide the button.
        return download_path, status, gr.update(visible=True), gr.update(visible=False)
    return None, status, gr.update(visible=False), gr.update(visible=True)
785
 
786
def handle_action_button(button_text, *inputs):
    """Route the multi-purpose action button by its current label.

    RESET labels rebuild the interface defaults, CANCEL labels stop the
    running job, and any other label is a passthrough no-op.
    """
    try:
        if "RESET" in button_text:
            return reset_interface()
        if "CANCEL" in button_text:
            return inputs + (cancel_processing(), "πŸ”„ RESET")
        return inputs + ("", button_text)
    except Exception as e:
        return inputs + (f"❌ Action error: {e}", "πŸ”„ RESET")
798
 
799
def toggle_batch_mode(use_folder_mode):
    """Reconfigure the target-video input for single vs. multi-file uploads."""
    try:
        if use_folder_mode:
            label = "πŸ“ Target Videos (Drag multiple files here)"
            count = "multiple"
        else:
            label = "Target Video (Video to modify)"
            count = "single"
        return gr.update(label=label, file_count=count, file_types=["video"])
    except Exception as e:
        print(f"⚠️ Toggle batch mode error: {e}")
        return gr.update(label="Target Video")
817
 
818
def handle_file_upload(files, use_folder_mode):
    """Report upload status for the target-video input.

    In folder (batch) mode the files are staged via handle_batch_file_upload;
    in single mode we simply echo the uploaded file's name.

    Args:
        files: A single upload (file object with ``.name`` or a plain str
            path) or a list of uploads in batch mode; may be None.
        use_folder_mode: True when the UI is in batch mode.

    Returns:
        A status string for the upload_status textbox.
    """
    try:
        if not files:
            return "πŸ“ No files uploaded"
        if use_folder_mode:
            return handle_batch_file_upload(files)

        # GENERALIZED: Gradio may hand back either an object with .name or a
        # plain path string depending on the component's `type`; show the
        # basename in both cases instead of the generic 'video file' label.
        if hasattr(files, 'name'):
            display = Path(files.name).name
        elif isinstance(files, str):
            display = Path(files).name
        else:
            display = 'video file'
        return f"βœ… Single video uploaded: {display}"
    except Exception as e:
        return f"❌ Upload error: {e}"
831
 
832
# Initialize GPU detection once at import time; any failure degrades to CPU.
try:
    AVAILABLE_GPUS = get_available_gpus()
except Exception as e:
    print(f"⚠️ GPU detection failed: {e}")
    AVAILABLE_GPUS = ["CPU Only"]
else:
    print(f"πŸ–₯️ Available GPUs: {AVAILABLE_GPUS}")
839
 
840
  # Gradio Interface
841
  def create_interface():
 
993
  with gr.Column():
994
  keep_fps = gr.Checkbox(label="🎬 Keep Original FPS", value=True)
995
 
996
+ # Event handlers with error handling
997
+ try:
998
+ enable_lip_sync.change(
999
+ lambda x: gr.update(visible=x),
1000
+ inputs=[enable_lip_sync],
1001
+ outputs=[lip_syncer_model]
1002
+ )
1003
+
1004
+ use_folder_mode.change(
1005
+ toggle_batch_mode,
1006
+ inputs=[use_folder_mode],
1007
+ outputs=[target_video]
1008
+ )
1009
+
1010
+ target_video.upload(
1011
+ handle_file_upload,
1012
+ inputs=[target_video, use_folder_mode],
1013
+ outputs=[upload_status]
1014
+ )
1015
+
1016
+ start_button.click(
1017
+ handle_processing,
1018
+ inputs=[
1019
+ source_image, target_video, frame_processor, face_analyser_direction,
1020
+ face_recognition, face_analyser_gender, face_analyser_age,
1021
+ skip_audio, keep_fps, lip_syncer_model, enable_lip_sync,
1022
+ use_folder_mode, gpu_selection
1023
+ ],
1024
+ outputs=[cli_output, action_button]
1025
+ )
1026
+
1027
+ action_button.click(
1028
+ handle_action_button,
1029
+ inputs=[
1030
+ action_button, source_image, target_video, frame_processor,
1031
+ face_analyser_direction, face_recognition, face_analyser_gender,
1032
+ face_analyser_age, skip_audio, keep_fps, lip_syncer_model,
1033
+ enable_lip_sync, use_folder_mode, gpu_selection
1034
+ ],
1035
+ outputs=[
1036
+ source_image, target_video, frame_processor, face_analyser_direction,
1037
+ face_recognition, face_analyser_gender, face_analyser_age,
1038
+ skip_audio, keep_fps, lip_syncer_model, enable_lip_sync,
1039
+ use_folder_mode, gpu_selection, cli_output, action_button
1040
+ ]
1041
+ )
1042
+
1043
+ download_button.click(
1044
+ handle_download,
1045
+ outputs=[download_file, download_status, download_file, download_button]
1046
+ )
1047
+
1048
+ download_file.change(
1049
+ lambda: (gr.update(visible=False), gr.update(visible=True), "Ready for next download"),
1050
+ outputs=[download_file, download_button, download_status]
1051
+ )
1052
+
1053
+ except Exception as e:
1054
+ print(f"⚠️ Error setting up event handlers: {e}")
1055
 
1056
  return interface
1057
 
 
1074
  cleanup_temp_files()
1075
 
1076
  # Create and launch interface
1077
+ try:
1078
+ app = create_interface()
1079
+ app.launch(
1080
+ server_name="0.0.0.0",
1081
+ server_port=7860,
1082
+ share=False,
1083
+ debug=False,
1084
+ show_error=True,
1085
+ max_file_size="1500mb"
1086
+ )
1087
+ except Exception as e:
1088
+ print(f"❌ Failed to launch application: {e}")
1089
+ print("πŸ”„ Please check your dependencies and try again")