Spaces:
Runtime error
Runtime error
| import gradio as gr | |
| import cv2 | |
| import numpy as np | |
| from PIL import Image | |
| import os | |
| import tempfile | |
| from pathlib import Path | |
| import spaces | |
| from video_processor import VideoCharacterReplacer | |
| from utils import save_uploaded_file, cleanup_temp_files | |
# Module-level VideoCharacterReplacer instance: created once at import time
# and shared by every process_video() call below.
character_replacer = VideoCharacterReplacer()
def process_video(reference_image, input_video, replacement_strength, detection_sensitivity, tracking_stability, preserve_background):
    """
    Process video to replace character with reference image.

    Args:
        reference_image (PIL.Image): Reference image of the character to replace with
        input_video (str): Path to input video file
        replacement_strength (float): Strength of character replacement (0-1)
        detection_sensitivity (float): Face detection sensitivity (0-1)
        tracking_stability (float): Tracking stability for temporal consistency (0-1)
        preserve_background (bool): Whether to preserve background lighting and colors

    Returns:
        tuple: (processed_video_path, info_message); the path is None on failure.
    """
    if reference_image is None or input_video is None:
        return None, "Please provide both a reference image and input video."
    # Pre-initialize so cleanup never hits an UnboundLocalError: previously the
    # except handler referenced ref_path/video_path, which are unbound when
    # save_uploaded_file itself raises on the first call.
    ref_path = None
    video_path = None
    try:
        # Save uploaded files to temporary locations
        ref_path = save_uploaded_file(reference_image, ".jpg")
        video_path = save_uploaded_file(input_video, ".mp4")
        # Run the actual character replacement pipeline.
        output_path = character_replacer.replace_character(
            ref_image_path=ref_path,
            input_video_path=video_path,
            replacement_strength=replacement_strength,
            detection_sensitivity=detection_sensitivity,
            tracking_stability=tracking_stability,
            preserve_background=preserve_background
        )
        if output_path and os.path.exists(output_path):
            return output_path, f"Character replacement completed successfully! Output saved to: {output_path}"
        return None, "Error: Failed to process video."
    except Exception as e:
        # Surface the failure as an info message instead of crashing the UI.
        return None, f"Error processing video: {str(e)}"
    finally:
        # Remove whichever temp files were actually created, on every path
        # (success, pipeline failure, or upload failure).
        cleanup_temp_files([p for p in (ref_path, video_path) if p is not None])
def extract_preview_frames(video_path, num_frames=4):
    """Extract up to *num_frames* evenly spaced preview frames from a video.

    Args:
        video_path (str | None): Path to the video file; None yields None
            (matches the Gradio "no video" state).
        num_frames (int): Number of frames to sample across the video.

    Returns:
        list[PIL.Image] | None: RGB preview frames; [] when the video cannot
        be opened or an error occurs, None when no path was given.
    """
    if video_path is None:
        return None
    cap = None
    try:
        cap = cv2.VideoCapture(video_path)
        # Guard against unreadable files / zero frame counts before sampling.
        if not cap.isOpened():
            return []
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total_frames <= 0:
            return []
        # Select frame indices evenly distributed across the video.
        frame_indices = np.linspace(0, total_frames - 1, num_frames, dtype=int)
        frames = []
        for frame_idx in frame_indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_idx))
            ret, frame = cap.read()
            if ret:
                # OpenCV decodes to BGR; convert for PIL display.
                frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
        return frames
    except Exception as e:
        print(f"Error extracting preview frames: {e}")
        return []
    finally:
        # Release the capture handle on every path; previously an exception
        # between VideoCapture() and cap.release() leaked the handle.
        if cap is not None:
            cap.release()
# Create the Gradio interface. Layout: two columns (inputs/settings on the
# left, results/preview on the right), plus a collapsible usage guide.
with gr.Blocks(title="Video Character Replacement", theme=gr.themes.Base()) as demo:
    # Header banner (static HTML with a gradient background and credit link).
    gr.HTML("""
    <div style='text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px; margin-bottom: 20px;'>
        <h1>π¬ Video Character Replacement</h1>
        <p style='font-size: 18px; margin: 10px 0;'>
            Replace characters in videos using AI-powered face detection and replacement
        </p>
        <p style='margin: 5px 0;'>
            <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style='color: #FFD700; text-decoration: none; font-weight: bold;'>β‘ Built with anycoder</a>
        </p>
    </div>
    """)
    with gr.Row():
        # Left column: user inputs and tuning parameters.
        with gr.Column(scale=1):
            gr.Markdown("### πΈ Reference Image")
            reference_input = gr.Image(
                label="Character to replace with",
                type="pil",  # delivered to process_video as a PIL.Image
                height=300
            )
            gr.Markdown("### π₯ Input Video")
            video_input = gr.Video(
                label="Video with character to replace",
                height=300
            )
            gr.Markdown("### βοΈ Settings")
            # The three sliders map one-to-one onto the keyword arguments of
            # character_replacer.replace_character (see process_video).
            strength_slider = gr.Slider(
                label="Replacement Strength",
                minimum=0.1,
                maximum=1.0,
                value=0.8,
                step=0.1,
                info="Higher values produce more aggressive replacement"
            )
            sensitivity_slider = gr.Slider(
                label="Detection Sensitivity",
                minimum=0.1,
                maximum=1.0,
                value=0.6,
                step=0.1,
                info="Higher values detect more faces but may cause false positives"
            )
            stability_slider = gr.Slider(
                label="Tracking Stability",
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                info="Higher values improve temporal consistency"
            )
            preserve_bg = gr.Checkbox(
                label="Preserve Background",
                value=True,
                info="Maintain original background lighting and colors"
            )
            process_btn = gr.Button(
                "π Replace Character",
                variant="primary",
                size="lg"
            )
        # Right column: processed output, status text, and frame previews.
        with gr.Column(scale=1):
            gr.Markdown("### π― Results")
            output_video = gr.Video(
                label="Processed Video",
                height=400
            )
            result_info = gr.Textbox(
                label="Processing Info",
                lines=3,
                max_lines=5,
                interactive=False  # status display only, not user-editable
            )
            gr.Markdown("### π Preview Frames")
            preview_gallery = gr.Gallery(
                label="Original Video Frames",
                columns=4,
                height=200,
                object_fit="cover"
            )
    # Preview video frames when video is uploaded.
    def update_preview(video_path):
        """Return preview frames for the gallery, or [] when cleared."""
        if video_path:
            frames = extract_preview_frames(video_path)
            return frames
        return []
    video_input.change(
        update_preview,
        inputs=video_input,
        outputs=preview_gallery
    )
    # Process video when the button is clicked; input order must match the
    # process_video signature.
    process_btn.click(
        process_video,
        inputs=[
            reference_input,
            video_input,
            strength_slider,
            sensitivity_slider,
            stability_slider,
            preserve_bg
        ],
        outputs=[output_video, result_info]
    )
    # Collapsible usage guide (static Markdown, closed by default).
    with gr.Accordion("π How to Use", open=False):
        gr.Markdown("""
        ### Instructions:
        1. **Upload Reference Image**: Choose a clear image of the character you want to replace with
        2. **Upload Video**: Select the video containing the character you want to replace
        3. **Adjust Settings**: Fine-tune the replacement parameters according to your needs
        4. **Process**: Click "Replace Character" to start the AI processing
        5. **Download**: Save the processed video when complete
        ### Tips:
        - Use high-quality reference images with clear facial features
        - Videos with good lighting produce better results
        - Adjust replacement strength based on how subtle or obvious you want the replacement
        - Higher tracking stability helps maintain consistency across frames
        """)
# Launch the Gradio server only when run as a script (not on import);
# debug=True enables error traces in the UI and verbose logging.
if __name__ == "__main__":
    demo.launch(debug=True)