Spaces:
Running
Running
| """ | |
| Director's Cut - HuggingFace Space Frontend | |
| ============================================ | |
| This version uses Modal backend for all video processing. | |
| Preserves the exact UI and structure from GitHub, only replacing local processing with Modal API calls. | |
| Modal backend handles: | |
| - YouTube downloads (with Webshare residential proxies) | |
| - Video processing (Scout, Verifier, Director, Hands, Showrunner) | |
| - All heavy compute operations | |
| Frontend (this file) handles: | |
| - Gradio UI | |
| - MCP server | |
| - User interactions | |
| - Display/download of results | |
| """ | |
| # Copy exact imports from GitHub version | |
| import gradio as gr | |
| import os | |
| import tempfile | |
| import shutil | |
| import logging | |
| import json | |
| import time | |
| import re | |
| import base64 | |
| import requests | |
| from typing import List, Dict, Any, Tuple, Optional | |
| from dotenv import load_dotenv | |
| # Load environment variables | |
| load_dotenv() | |
| # Configure Logging | |
| logging.basicConfig(level=logging.INFO, | |
| format='%(asctime)s - %(levelname)s - %(message)s') | |
| logger = logging.getLogger(__name__) | |
# ==============================================================================
# MODAL BACKEND CONFIGURATION
# ==============================================================================
# Modal backend URL - deployed as "directors-cut" app
# CORRECT username is tayyabkhn343 (not tayyab415)
MODAL_BASE_URL = os.getenv(
    "MODAL_BASE_URL", "https://tayyabkhn343--directors-cut")
# Available Modal endpoints (8 max for free tier):
# - health (GET) - Health check
# - video_info (POST) - Get video metadata using Webshare proxies
# - transcript (POST) - Get transcript via Supadata
# - process (POST) - Download video/audio with proxies
# - outputs (GET) - List output files
# - state (GET) - Get workflow state
# - step (POST) - Full pipeline: steps 1-6
# - download (GET) - Download rendered video by job_id
def get_modal_endpoint(name: str) -> str:
    """Return the full URL for a named Modal web endpoint.

    Modal serves each web function at ``<base>-<slug>.modal.run`` where the
    slug is the function name with underscores converted to hyphens.
    """
    slug = name.replace('_', '-')
    return f"{MODAL_BASE_URL}-{slug}.modal.run"
# Modal API helper
def call_modal(endpoint: str, method: str = "POST", data: dict = None, timeout: int = 1800) -> dict:
    """Call Modal backend endpoint.

    Returns the decoded JSON body on success. On any failure (timeout, HTTP
    error, or anything else) returns a dict with an ``error`` key instead of
    raising, so callers only ever have to check ``response.get("error")``.
    """
    url = get_modal_endpoint(endpoint)
    logger.info(f"Calling Modal: {method} {url}")
    try:
        if method == "GET":
            response = requests.get(url, timeout=timeout)
        else:
            # Non-GET always posts the payload as JSON.
            response = requests.post(url, json=data, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.Timeout:
        logger.error(f"Modal timeout: {endpoint}")
        return {"error": "Request timed out"}
    except requests.exceptions.HTTPError as e:
        logger.error(
            f"Modal HTTP error: {e.response.status_code} - {e.response.text}")
        return {"error": f"{e.response.status_code} {e.response.reason} for url: {url}"}
    except Exception as e:
        # Catch-all boundary: network/JSON failures become an error dict.
        logger.error(f"Modal error: {e}")
        return {"error": str(e)}
# Output directory (created eagerly so later downloads can write into it)
OUTPUT_DIR = os.environ.get("OUTPUT_DIR", "/tmp/directors-cut/output")
os.makedirs(OUTPUT_DIR, exist_ok=True)
# ==============================================================================
# HELPER FUNCTIONS (Preserved from GitHub version)
# ==============================================================================
def classify_video(video_info: Dict) -> str:
    """Classify video as 'podcast' or 'generic' - preserved from #file:app.py

    Check order:
      1. Known podcast channels (word-boundary match on uploader/channel).
      2. Explicit generic keywords in the title (tutorial, review, ...).
      3. Podcast keyword scoring, optionally combined with duration.
    Falls back to 'generic'.
    """
    title = video_info.get('title', '').lower()
    uploader = video_info.get('uploader', '').lower()
    channel = video_info.get('channel', '').lower()
    duration = video_info.get('duration', 0)
    # Known podcast channels
    podcast_channels = [
        'joe rogan', 'powerfuljre', 'jre clips',
        'lex fridman', 'lex clips',
        'huberman lab', 'andrew huberman',
        'all-in podcast', 'all-in pod',
        'diary of a ceo', 'impact theory',
        'tim ferriss', 'smartless',
        'ted talks', 'ted',
        'flagrant', 'flagrant 2'
    ]
    for pc in podcast_channels:
        # Fix: the previous plain substring test made short names like 'ted'
        # match unrelated channels ("twisted", "united", ...). Require a
        # whole-word match instead.
        pattern = r'\b' + re.escape(pc) + r'\b'
        if re.search(pattern, uploader) or re.search(pattern, channel):
            logger.info(f"Classified as PODCAST via channel: {uploader}")
            return "podcast"
    # Podcast keywords
    podcast_keywords = ['podcast', 'interview',
                        'talk show', 'conversation', 'episode']
    generic_keywords = ['tutorial', 'how to', 'guide', 'demo', 'review']
    # An explicit "tutorial"/"review"-style title wins outright over the
    # podcast scoring below.
    for keyword in generic_keywords:
        if keyword in title:
            logger.info(f"Classified as GENERIC via keyword: {keyword}")
            return "generic"
    podcast_score = sum(1 for kw in podcast_keywords if kw in title)
    # Long videos (>15 min) need fewer podcast signals than short ones.
    if duration > 900 and podcast_score >= 2:
        logger.info("Classified as PODCAST via long duration + keywords")
        return "podcast"
    if podcast_score >= 3:
        logger.info("Classified as PODCAST via strong signals")
        return "podcast"
    logger.info("Classified as GENERIC (default)")
    return "generic"
# ==============================================================================
# GLOBAL STATE (Preserved from GitHub version)
# ==============================================================================
# Mutable module-level state for the automatic, step-by-step workflow.
workflow_state = dict(
    video_url=None,
    video_info=None,
    category=None,
    temp_dir=None,
    hotspots=[],
    verified_hotspots=[],
    clips_metadata=[],
    edit_plan=[],
    final_plan=[],
    final_video_path=None,
    num_hotspots=5,
    job_id=None,
)
# Mutable module-level state for the manual, topic-picking workflow.
manual_state = dict(
    video_url=None,
    video_info=None,
    temp_dir=None,
    transcript_text=None,
    topics=[],
    selected_indices=[],
    clips_metadata=[],
    verified_clips=[],
    final_video_path=None,
)
# ==============================================================================
# STEP-BY-STEP WORKFLOW (Modal Backend Integration)
# ==============================================================================
def step1_analyze_video(url: str):
    """Step 1: Analyze video via Modal step endpoint - creates job_id and syncs state.

    Returns a (markdown_status, gr.update) pair; the update enables the
    next-step button only on success.
    """
    try:
        workflow_state['video_url'] = url
        workflow_state['temp_dir'] = tempfile.mkdtemp()
        logger.info(f"Step 1: Analyzing video via Modal step endpoint: {url}")
        # Call Modal step endpoint (creates job_id, uses Webshare proxies)
        response = call_modal(
            "step", data={"step": 1, "url": url}, timeout=180)
        if response.get("error"):
            return f"❌ Error: {response['error']}", gr.update(interactive=False)
        # Modal returns job_id on success (no "success" field, just check for job_id)
        if not response.get("job_id"):
            return f"❌ Failed to analyze video: {response.get('error', 'No job_id returned')}", gr.update(interactive=False)
        # Store job_id for later steps
        workflow_state['job_id'] = response.get('job_id')
        # Store results; uploader/channel share the same backend field.
        workflow_state['video_info'] = {
            'title': response.get('title', 'Unknown'),
            'duration': response.get('duration', 0),
            'uploader': response.get('channel', 'Unknown'),
            'channel': response.get('channel', 'Unknown'),
            'description': '',
            'thumbnail': '',
        }
        # Classify video - use category from Modal or classify locally
        workflow_state['category'] = response.get('category') or classify_video(
            workflow_state['video_info'])
        video_info = workflow_state['video_info']
        category = workflow_state['category']
        # `or 0` guards against a null duration coming back from the backend.
        duration = video_info.get('duration', 0) or 0
        job_id = workflow_state.get('job_id', 'unknown')
        has_transcript = response.get('has_transcript', False)
        info_text = f"""
## ✅ Video Analyzed
**Job ID:** `{job_id}`
**Video Info:**
- **Title:** {video_info.get('title')}
- **Duration:** {duration:.0f}s ({duration/60:.1f} min)
- **Channel:** {video_info.get('channel')}
- **Classification:** **{category.upper()}**
- **Transcript:** {'✅ Available' if has_transcript else '❌ Not available'}
**Pipeline:** {'🎙️ Podcast Mode' if category == 'podcast' else '🎬 Generic Mode'}
✅ Ready for Step 2: Scout Hotspots
"""
        return info_text, gr.update(interactive=True)
    except Exception as e:
        logger.error(f"Step 1 failed: {e}")
        return f"❌ Error: {e}", gr.update(interactive=False)
def step2_scout_hotspots(url: str, num_hotspots: int = 5):
    """Step 2: Scout via Modal - preserved structure from #file:app.py"""
    try:
        if not workflow_state['video_info']:
            return "❌ Run Step 1 first!", gr.update(interactive=False)
        workflow_state['num_hotspots'] = int(num_hotspots)
        logger.info(f"Step 2: Scouting {num_hotspots} hotspots via Modal")
        response = call_modal(
            "step", data={"step": 2, "num_hotspots": num_hotspots}, timeout=600)
        if response.get("error"):
            return f"❌ Error: {response['error']}", gr.update(interactive=False)
        hotspots = response.get('hotspots', [])
        workflow_state['hotspots'] = hotspots
        header = f"""## 🎯 Hotspots Found
**Total:** {response.get('total_found', len(hotspots))}
**Top {num_hotspots}:**
"""
        # Render one mm:ss line per hotspot, scores to two decimals.
        rows = []
        for idx, spot in enumerate(hotspots[:num_hotspots], 1):
            start_fmt = f"{int(spot['start'] // 60)}:{int(spot['start'] % 60):02d}"
            end_fmt = f"{int(spot['end'] // 60)}:{int(spot['end'] % 60):02d}"
            rows.append(
                f"\n{idx}. **{start_fmt}-{end_fmt}** | Score: {spot.get('score', 0):.2f}")
        return header + "".join(rows), gr.update(interactive=True)
    except Exception as e:
        logger.error(f"Step 2 failed: {e}")
        return f"❌ Error: {e}", gr.update(interactive=False)
def step3_verify_hotspots(url: str):
    """Step 3: Verify via Modal - preserved structure from #file:app.py"""
    try:
        if not workflow_state['hotspots']:
            return "❌ Run Step 2 first!", gr.update(interactive=False)
        logger.info("Step 3: Verifying via Modal")
        response = call_modal("step", data={"step": 3}, timeout=900)
        if response.get("error"):
            return f"❌ Error: {response['error']}", gr.update(interactive=False)
        verified_clips = response.get('clips', [])
        # Keep only the hotspots whose verification actually passed.
        workflow_state['verified_hotspots'] = [
            clip['hotspot'] for clip in verified_clips
            if clip.get('verification', {}).get('verified')
        ]
        workflow_state['clips_metadata'] = verified_clips
        parts = [f"""## 🔍 Verification Results
**Downloaded:** {response.get('downloaded', 0)}
**Verified:** {response.get('verified', 0)}
"""]
        for clip in verified_clips:
            verification = clip.get('verification', {})
            score = verification.get('score', 5)
            # Missing 'verified' flag falls back to a score threshold of 5.
            passed = verification.get('verified', score >= 5)
            marker = "✅" if passed else "❌"
            parts.append(f"\n{marker} Score: {score}/10")
        result_text = "".join(parts)
        if not workflow_state['verified_hotspots']:
            return result_text + "\n\n⚠️ No clips passed!", gr.update(interactive=False)
        return result_text, gr.update(interactive=True)
    except Exception as e:
        logger.error(f"Step 3 failed: {e}")
        return f"❌ Error: {e}", gr.update(interactive=False)
def step4_create_plan():
    """Step 4: Plan via Modal - preserved structure from #file:app.py"""
    try:
        if not workflow_state.get('verified_hotspots'):
            return "❌ Run Step 3 first!", gr.update(interactive=False), ""
        logger.info("Step 4: Creating plan via Modal")
        response = call_modal("step", data={"step": 4}, timeout=300)
        if response.get("error"):
            return f"❌ Error: {response['error']}", gr.update(interactive=False), ""
        final_plan = response.get('plan', [])
        workflow_state['final_plan'] = final_plan
        # Summarize each planned clip and accumulate the running total.
        result_text = f"**Edit Plan ({len(final_plan)} clips):**\n\n"
        total_duration = 0
        for idx, item in enumerate(final_plan, 1):
            clip_len = item.get('end', 0) - item.get('start', 0)
            total_duration += clip_len
            result_text += f"{idx}. {item.get('start', 0):.1f}s-{item.get('end', 0):.1f}s ({clip_len:.1f}s)\n"
        result_text += f"\n**Total: {total_duration:.1f}s**"
        return result_text, gr.update(interactive=True), json.dumps(final_plan, indent=2)
    except Exception as e:
        logger.error(f"Step 4 failed: {e}")
        return f"❌ Error: {e}", gr.update(interactive=False), ""
def step5_render_video():
    """Step 5: Render via Modal - preserved structure from #file:app.py"""
    try:
        if not workflow_state.get('final_plan'):
            return "❌ Run Step 4 first!", None
        logger.info("Step 5: Rendering via Modal")
        response = call_modal("step", data={"step": 5}, timeout=600)
        if response.get("error"):
            return f"❌ Error: {response['error']}", None
        # Download video from Modal; prefer the job_id the render returned.
        job_id = response.get('job_id') or workflow_state.get('job_id')
        if not job_id:
            return "❌ No job_id returned", None
        download_url = get_modal_endpoint(
            "download") + f"?job_id={job_id}&type=render"
        video_path = os.path.join(OUTPUT_DIR, f"render_{job_id}.mp4")
        try:
            resp = requests.get(download_url, stream=True, timeout=300)
            resp.raise_for_status()
            # Stream to disk in 8 KiB chunks to keep memory flat.
            with open(video_path, 'wb') as out:
                for chunk in resp.iter_content(chunk_size=8192):
                    out.write(chunk)
        except Exception as e:
            logger.error(f"Download failed: {e}")
            return f"❌ Download failed: {e}", None
        workflow_state['final_video_path'] = video_path
        return f"✅ Success! Video: `{video_path}`", video_path
    except Exception as e:
        logger.error(f"Step 5 failed: {e}")
        return f"❌ Error: {e}", None
def reset_workflow():
    """Reset workflow - clears both local and Modal backend state.

    Returns the tuple of blank strings / gr.update objects that clears all
    step outputs and disables the downstream step buttons.
    """
    # Clear Modal backend state first (best-effort; local reset proceeds
    # regardless).
    try:
        logger.info("Resetting Modal backend state...")
        response = call_modal("reset", method="POST", data={}, timeout=30)
        if response.get("success"):
            logger.info("Modal backend reset successful")
        else:
            logger.warning(
                f"Modal reset warning: {response.get('error', 'Unknown')}")
    except Exception as e:
        logger.warning(f"Modal reset failed (continuing): {e}")
    # Clear local temp directory. ignore_errors=True replaces the old bare
    # `except: pass`, which also swallowed KeyboardInterrupt/SystemExit.
    temp_dir = workflow_state.get('temp_dir')
    if temp_dir and os.path.exists(temp_dir):
        shutil.rmtree(temp_dir, ignore_errors=True)
    # Clear local state: lists become empty lists, everything else None.
    # NOTE(review): this resets 'num_hotspots' to None rather than its
    # initial default of 5; step 2 re-populates it, so it appears
    # intentional -- confirm before changing.
    for key in workflow_state:
        workflow_state[key] = [] if isinstance(workflow_state[key], list) else None
    return (
        "", "", "", "", "", "", None,
        gr.update(interactive=False),
        gr.update(interactive=False),
        gr.update(interactive=False),
        gr.update(interactive=False),
    )
# ==============================================================================
# PRODUCTION STUDIO (Modal Backend Integration)
# ==============================================================================
def add_production_wrapper(video_file, mood_override, enable_smart_crop, add_intro_image, add_subtitles, progress=gr.Progress()):
    """Production via Modal - uploads video and processes with fresh job_id

    Generator: yields (status_markdown, video_path_or_None) pairs so Gradio
    can stream progress updates; the final yield carries the output path.
    """
    # Debug: Log what we received from Gradio
    logger.info(f"video_file type: {type(video_file)}")
    logger.info(f"video_file value: {video_file}")
    actual_video_path = None
    # Handle different Gradio 6 input formats
    if video_file is None:
        # No upload: fall back to the last rendered video if one exists.
        if workflow_state.get('final_video_path'):
            actual_video_path = workflow_state['final_video_path']
            yield "🔄 Using last render", None
        else:
            yield "❌ No video uploaded. Please upload a video file.", None
            return
    elif isinstance(video_file, str):
        # Direct string path
        actual_video_path = video_file
    elif isinstance(video_file, dict):
        # Gradio 6 may return dict with 'path' or 'name' key
        actual_video_path = video_file.get('path') or video_file.get(
            'name') or video_file.get('video')
        logger.info(f"Extracted path from dict: {actual_video_path}")
    elif hasattr(video_file, 'name'):
        # File-like object
        actual_video_path = video_file.name
    else:
        yield f"❌ Unexpected video input type: {type(video_file)}", None
        return
    if not actual_video_path:
        yield "❌ Could not determine video file path", None
        return
    # Check if file exists
    if not os.path.exists(actual_video_path):
        logger.error(f"File not found: {actual_video_path}")
        # Try to list the directory to debug
        parent_dir = os.path.dirname(actual_video_path)
        if os.path.exists(parent_dir):
            contents = os.listdir(parent_dir)
            logger.info(f"Directory {parent_dir} contents: {contents[:10]}")
        yield f"❌ File not found: {actual_video_path}\n\nThe uploaded file may have been cleaned up. Please try uploading again.", None
        return
    try:
        progress(0.1, desc="Preparing video...")
        yield "📤 Preparing video for processing...", None
        # Copy file to our temp directory to prevent Gradio cleanup issues
        import shutil
        import uuid
        temp_dir = os.path.join(os.getcwd(), "temp")
        os.makedirs(temp_dir, exist_ok=True)
        # Generate a unique filename
        temp_filename = f"upload_{uuid.uuid4().hex[:8]}_{os.path.basename(actual_video_path)}"
        local_video_path = os.path.join(temp_dir, temp_filename)
        logger.info(f"Copying uploaded file to: {local_video_path}")
        shutil.copy2(actual_video_path, local_video_path)
        # Get the file size for logging
        file_size = os.path.getsize(local_video_path) / 1024 / 1024
        logger.info(
            f"Processing video: {local_video_path} ({file_size:.1f} MB)")
        # Read video as base64 for transfer to Modal
        progress(0.2, desc="Reading video file...")
        yield "📦 Reading video file...", None
        with open(local_video_path, 'rb') as f:
            video_bytes = f.read()
        video_base64 = base64.b64encode(video_bytes).decode('utf-8')
        logger.info(
            f"Video encoded: {len(video_base64) / 1024 / 1024:.1f} MB base64")
        progress(0.3, desc="Processing on Modal...")
        yield "🎬 Processing on Modal (this may take a few minutes)...", None
        # Send video data directly to Modal for processing
        response = call_modal("step", data={
            "step": 6,
            "video_base64": video_base64,
            "video_filename": os.path.basename(local_video_path),
            "enable_smart_crop": enable_smart_crop,
            "add_intro": add_intro_image,
            "add_subtitles": add_subtitles,
            "mood": mood_override
        }, timeout=1800)  # 30 min timeout for large videos
        if response.get("error"):
            yield f"❌ Error: {response['error']}", None
            return
        progress(0.8, desc="Downloading result...")
        yield "📥 Downloading polished video...", None
        # Get job_id from response
        job_id = response.get('job_id')
        if not job_id:
            yield "❌ Error: No job_id in response", None
            return
        logger.info(f"Downloading production video for job_id: {job_id}")
        download_url = get_modal_endpoint(
            "download") + f"?job_id={job_id}&type=production"
        output_path = os.path.join(OUTPUT_DIR, f"production_{job_id}.mp4")
        logger.info(f"Download URL: {download_url}")
        resp = requests.get(download_url, stream=True, timeout=300)
        # Check if response is an error JSON (the download endpoint reports
        # failures as JSON instead of an HTTP error status)
        content_type = resp.headers.get('content-type', '')
        if 'application/json' in content_type:
            error_data = resp.json()
            yield f"❌ Download error: {error_data.get('error', 'Unknown error')}", None
            return
        resp.raise_for_status()
        with open(output_path, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)
        # Verify file was downloaded (>1000 bytes as a sanity floor)
        if os.path.exists(output_path) and os.path.getsize(output_path) > 1000:
            workflow_state['final_video_path'] = output_path
            progress(1.0, desc="Complete!")
            yield f"✅ Complete! Saved to: {output_path}", output_path
        else:
            yield f"❌ Download failed: File empty or not found", None
    except Exception as e:
        logger.error(f"Production error: {e}")
        yield f"❌ Error: {e}", None
def load_last_render_into_production():
    """Load last render - preserved from #file:app.py"""
    path = workflow_state.get('final_video_path')
    if path:
        return gr.update(value=path), f"✅ Loaded: {os.path.basename(path)}"
    return gr.update(value=None), "❌ No video available"
# ==============================================================================
# MCP TOOLS (Preserved from GitHub version)
# ==============================================================================
def process_video(url: str) -> str:
    """Process video via Modal - preserved from #file:app.py"""
    try:
        logger.info(f"Processing via Modal: {url}")
        payload = {
            "url": url,
            "num_hotspots": 5,
            "enable_smart_crop": True,
            "add_intro": True,
            "add_subtitles": True,
            "mood": "auto",
        }
        response = call_modal("process", data=payload, timeout=1800)
        if response.get("error"):
            return f"Error: {response['error']}"
        if not response.get("success"):
            return f"Failed: {response.get('error', 'Unknown error')}"
        job_id = response.get('job_id')
        stats = response.get('stats', {})
        return f"Success!\n\nJob ID: {job_id}\nCategory: {stats.get('video_category')}\nMood: {stats.get('mood')}"
    except Exception as e:
        return f"Error: {str(e)}"
def step1_analyze_video_mcp(youtube_url: str) -> str:
    """Step 1 MCP tool - preserved from #file:app.py"""
    try:
        response = call_modal("step", data={"step": 1, "url": youtube_url})
        if response.get("error"):
            return f"Error: {response['error']}"
        # Mirror the backend's result into the shared workflow state.
        workflow_state['job_id'] = response.get('job_id')
        workflow_state['video_info'] = {
            'title': response.get('title'),
            'duration': response.get('duration'),
        }
        workflow_state['category'] = response.get('category')
        return (
            f"Step 1 Complete!\n\nVideo: {response.get('title')}\n"
            f"Category: {response.get('category')}\nJob ID: {response.get('job_id')}"
        )
    except Exception as e:
        return f"Error: {str(e)}"
def get_workflow_state_mcp() -> str:
    """Get workflow state - preserved from #file:app.py"""
    try:
        # The backend is the source of truth; just pretty-print its reply.
        return json.dumps(call_modal("state", method="GET"), indent=2)
    except Exception as e:
        return f"Error: {str(e)}"
# ==============================================================================
# CHATGPT APPS SDK - MCP TOOLS & WIDGETS
# ==============================================================================
def add_production_to_video(
    video_url: str,
    mood: str = "auto",
    enable_smart_crop: bool = True,
    add_intro: bool = True,
    add_subtitles: bool = True
) -> str:
    """
    🎬 MAIN VIDEO PROCESSING TOOL - Transform any video into viral-ready content!
    ⚠️ IMPORTANT: This tool requires a WEB URL (http:// or https://), NOT a local file path!
    - ✅ YouTube URLs work: https://youtube.com/watch?v=...
    - ✅ Direct video URLs work: https://example.com/video.mp4
    - ❌ Local paths do NOT work: /mnt/data/file.mp4
    If the user uploads a file, tell them to:
    1. Upload the video to YouTube (unlisted) and provide the URL, OR
    2. Use a cloud storage link (Google Drive public link, Dropbox, etc.)
    This is the PRIMARY tool for video editing. Use this tool when the user wants to:
    - Process a YouTube video
    - Add professional production value to a video
    - Create short-form vertical content for TikTok/Reels/Shorts
    - Add intros, subtitles, smart crop, or background music
    The tool automatically:
    1. Downloads the video from the URL
    2. Applies AI-powered 9:16 smart crop for mobile viewing
    3. Generates a custom AI intro with voiceover and title card
    4. Adds auto-generated subtitles using Whisper
    5. Adds mood-matched background music
    6. Returns a download link for the finished video
    Parameters:
        video_url: The WEB URL of the video (must start with http:// or https://)
            - YouTube URL: https://youtube.com/watch?v=VIDEO_ID
            - Direct video URL: https://example.com/video.mp4
        mood: Video mood/style - 'hype' (energetic), 'chill' (relaxed), 'suspense' (dramatic), or 'auto' (AI detects)
        enable_smart_crop: If True, crops video to 9:16 vertical format for mobile
        add_intro: If True, generates AI intro with voiceover and title card
        add_subtitles: If True, adds auto-generated subtitles
    Returns:
        Processing result with a download URL for the produced video
    """
    try:
        # Validate URL - must be a web URL, not a local path
        if not video_url:
            return "❌ Error: No video URL provided"
        # Check for local file paths (reject these)
        if video_url.startswith('/mnt/data/') or video_url.startswith('/tmp/') or video_url.startswith('C:\\'):
            return f"❌ Error: Local file path detected.\n\n⚠️ This tool requires a web URL, not a local file.\n\n**Options:**\n1. Use the Video Studio widget to upload files directly\n2. Upload to YouTube (unlisted) and share the URL\n3. Use a cloud storage link (Google Drive, Dropbox)"
        if not video_url.startswith(('http://', 'https://')):
            return f"❌ Error: Invalid URL '{video_url[:50]}...'\n\n⚠️ This tool requires a web URL (http:// or https://), not a local file path.\n\n**Please provide:**\n- A YouTube URL: https://youtube.com/watch?v=...\n- Or a direct video URL: https://example.com/video.mp4\n\nIf you uploaded a file, please upload it to YouTube (unlisted) first and share that URL."
        logger.info(
            f"🎬 Production pipeline starting for: {video_url[:100]}...")
        # Call Modal Step 6 with video_url for standalone mode
        response = call_modal("step", data={
            "step": 6,
            "video_url": video_url,
            "mood": mood,
            "enable_smart_crop": enable_smart_crop,
            "add_intro": add_intro,
            "add_subtitles": add_subtitles,
        }, timeout=2400)  # 40 min timeout for large videos
        if response.get("error"):
            return f"❌ Error: {response['error']}"
        if response.get("success"):
            job_id = response.get('job_id')
            duration = response.get('duration', 0)
            detected_mood = response.get('mood', mood)
            # Build download URL via the shared helper instead of a hardcoded
            # host string, so a MODAL_BASE_URL override is honored here too.
            download_url = get_modal_endpoint("download") + f"?job_id={job_id}"
            result = f"""✅ Video produced successfully!
📊 **Details:**
- Job ID: {job_id}
- Duration: {duration:.1f}s
- Mood: {detected_mood}
- Smart Crop: {'✓' if enable_smart_crop else '✗'}
- AI Intro: {'✓' if response.get('has_intro') else '✗'}
- Subtitles: {'✓' if response.get('has_subtitles') else '✗'}
- Background Music: {'✓' if response.get('has_music') else '✗'}
🔗 **Download:** {download_url}"""
            return result
        else:
            return f"❌ Processing failed: {response.get('error', 'Unknown error')}"
    except Exception as e:
        logger.error(f"Production pipeline error: {e}")
        return f"❌ Error: {str(e)}"
def production_widget_html():
    """ChatGPT widget for displaying video production results with download button.

    Returns a static HTML/CSS/JS snippet. The embedded script reads
    `window.openai.toolOutput`, extracts the *.modal.run download link from
    the tool's text output, and renders a success, error, or pending card.
    """
    # NOTE: the returned string is served to the ChatGPT widget verbatim;
    # the doubled backslashes below are JS regex escapes inside a Python
    # string, not typos.
    return """
<div id="production-result-container"></div>
<style>
#production-result-container {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
padding: 20px;
max-width: 500px;
margin: 0 auto;
}
.production-card {
background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%);
border-radius: 16px;
padding: 24px;
color: white;
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3);
}
.production-card h3 {
margin: 0 0 16px 0;
font-size: 20px;
display: flex;
align-items: center;
gap: 8px;
}
.production-card .details {
font-size: 14px;
line-height: 1.6;
opacity: 0.9;
white-space: pre-line;
margin-bottom: 20px;
}
.download-btn {
display: inline-block;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 14px 28px;
border-radius: 12px;
text-decoration: none;
font-weight: 600;
font-size: 16px;
transition: transform 0.2s, box-shadow 0.2s;
cursor: pointer;
border: none;
}
.download-btn:hover {
transform: translateY(-2px);
box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4);
}
.error-card {
background: linear-gradient(135deg, #c0392b 0%, #e74c3c 100%);
}
</style>
<script>
const container = document.getElementById('production-result-container');
function extractDownloadUrl(text) {
const match = text?.match(/https:\\/\\/[^\\s]+download\\.modal\\.run[^\\s]*/);
return match ? match[0] : null;
}
function render() {
const output = window.openai?.toolOutput;
let text = '';
// Extract text from various output formats
if (typeof output === 'string') {
text = output;
} else if (output?.text) {
text = output.text;
} else if (output?.content) {
for (const item of output.content) {
if (item.type === 'text') {
text = item.text;
break;
}
}
}
const isSuccess = text.includes('✅');
const downloadUrl = extractDownloadUrl(text);
if (isSuccess && downloadUrl) {
container.innerHTML = `
<div class="production-card">
<h3>🎬 Video Ready!</h3>
<div class="details">${text.replace(/🔗 \\*\\*Download:\\*\\*.*/s, '').trim()}</div>
<a href="${downloadUrl}" target="_blank" class="download-btn">
⬇️ Download Video
</a>
</div>
`;
} else if (text.includes('❌')) {
container.innerHTML = `
<div class="production-card error-card">
<h3>❌ Processing Failed</h3>
<div class="details">${text}</div>
</div>
`;
} else {
container.innerHTML = `
<div class="production-card">
<h3>⏳ Processing...</h3>
<div class="details">Your video is being processed. This may take a few minutes.</div>
</div>
`;
}
}
render();
window.addEventListener("openai:set_globals", (event) => {
if (event.detail?.globals?.toolOutput) {
render();
}
}, { passive: true });
</script>
"""
def video_studio_widget_html() -> str:
    """Interactive Video Studio widget for ChatGPT - upload and process videos directly.

    Builds a self-contained HTML snippet (markup + inline <style> + inline
    <script>) rendering an upload-and-process studio card. The embedded script:

    1. Lets the user pick or drag-and-drop a video file and choose a mood plus
       smart-crop / intro / subtitle toggles.
    2. Uploads the file to this HF Space via the Gradio 6 ``/gradio_api/upload``
       endpoint and builds a ``/gradio_api/file=`` URL for it.
    3. Calls the ``add_production_to_video`` Gradio API endpoint, then fetches
       its SSE result stream (20-minute timeout) and shows status/download UI.

    Returns:
        str: The widget HTML, ready to be served as an MCP resource.
    """
    # NOTE(review): the widget is upload-only. A leftover reference to a removed
    # 'url-input' element (always null, never used) and a duplicated
    # `.process-btn:disabled` CSS rule were dropped from the snippet below.
    return """
<div id="video-studio-root"></div>
<style>
  * { box-sizing: border-box; }
  #video-studio-root {
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
    padding: 20px;
    max-width: 480px;
    margin: 0 auto;
  }
  .studio-card {
    background: linear-gradient(135deg, #0f0f23 0%, #1a1a3e 100%);
    border-radius: 20px;
    padding: 28px;
    color: white;
    box-shadow: 0 12px 40px rgba(0, 0, 0, 0.4);
  }
  .studio-card h2 {
    margin: 0 0 8px 0;
    font-size: 24px;
    background: linear-gradient(135deg, #667eea, #764ba2);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
  }
  .studio-card .subtitle {
    color: #8892b0;
    font-size: 14px;
    margin-bottom: 24px;
  }
  .upload-zone {
    border: 2px dashed #4a5568;
    border-radius: 16px;
    padding: 32px 20px;
    text-align: center;
    cursor: pointer;
    transition: all 0.3s;
    margin-bottom: 20px;
    background: rgba(255,255,255,0.02);
  }
  .upload-zone:hover, .upload-zone.dragover {
    border-color: #667eea;
    background: rgba(102, 126, 234, 0.1);
  }
  .upload-zone.has-file {
    border-color: #48bb78;
    background: rgba(72, 187, 120, 0.1);
  }
  .upload-icon { font-size: 48px; margin-bottom: 12px; }
  .upload-text { color: #a0aec0; font-size: 14px; }
  .file-name { color: #48bb78; font-weight: 600; margin-top: 8px; }
  .options-grid {
    display: grid;
    grid-template-columns: 1fr 1fr;
    gap: 12px;
    margin-bottom: 20px;
  }
  .option-item {
    background: rgba(255,255,255,0.05);
    border-radius: 12px;
    padding: 12px;
    display: flex;
    align-items: center;
    gap: 10px;
  }
  .option-item input[type="checkbox"] {
    width: 18px;
    height: 18px;
    accent-color: #667eea;
  }
  .option-item label {
    font-size: 13px;
    color: #e2e8f0;
    cursor: pointer;
  }
  .mood-select {
    width: 100%;
    padding: 12px 16px;
    border-radius: 12px;
    border: 1px solid #4a5568;
    background: rgba(255,255,255,0.05);
    color: white;
    font-size: 14px;
    margin-bottom: 20px;
    cursor: pointer;
  }
  .mood-select option { background: #1a1a3e; }
  .process-btn {
    width: 100%;
    padding: 16px;
    border: none;
    border-radius: 14px;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    font-size: 16px;
    font-weight: 600;
    cursor: pointer;
    transition: all 0.3s;
  }
  .process-btn:hover:not(:disabled) {
    transform: translateY(-2px);
    box-shadow: 0 8px 24px rgba(102, 126, 234, 0.4);
  }
  .process-btn:disabled {
    opacity: 0.5;
    cursor: not-allowed;
  }
  .status-box {
    margin-top: 20px;
    padding: 16px;
    border-radius: 12px;
    font-size: 14px;
  }
  .status-processing {
    background: rgba(102, 126, 234, 0.2);
    border: 1px solid #667eea;
  }
  .status-success {
    background: rgba(72, 187, 120, 0.2);
    border: 1px solid #48bb78;
  }
  .status-error {
    background: rgba(245, 101, 101, 0.2);
    border: 1px solid #f56565;
  }
  .download-link {
    display: inline-block;
    margin-top: 12px;
    padding: 12px 24px;
    background: #48bb78;
    color: white;
    text-decoration: none;
    border-radius: 10px;
    font-weight: 600;
  }
  .hidden { display: none; }
</style>
<script>
const root = document.getElementById('video-studio-root');
const HF_SPACE_URL = 'https://tyb343-directors-cut.hf.space';
let selectedFile = null;
function render() {
  root.innerHTML = `
    <div class="studio-card">
      <h2>🎬 Director's Cut Studio</h2>
      <p class="subtitle">Transform your video into viral content</p>
      <div id="upload-zone" class="upload-zone ${selectedFile ? 'has-file' : ''}">
        <div class="upload-icon">${selectedFile ? '✅' : '📁'}</div>
        <div class="upload-text">
          ${selectedFile ? 'File selected!' : 'Click or drag video here'}
        </div>
        ${selectedFile ? `<div class="file-name">${selectedFile.name}</div>` : ''}
        <input type="file" id="file-input" accept="video/*" style="display:none">
      </div>
      <select id="mood-select" class="mood-select">
        <option value="auto">🎯 Auto-detect mood</option>
        <option value="hype">🔥 Hype (energetic)</option>
        <option value="chill">😌 Chill (relaxed)</option>
        <option value="suspense">😰 Suspense (dramatic)</option>
      </select>
      <div class="options-grid">
        <div class="option-item">
          <input type="checkbox" id="opt-crop" checked>
          <label for="opt-crop">🎯 Smart Crop</label>
        </div>
        <div class="option-item">
          <input type="checkbox" id="opt-intro" checked>
          <label for="opt-intro">🎬 AI Intro</label>
        </div>
        <div class="option-item">
          <input type="checkbox" id="opt-subs" checked>
          <label for="opt-subs">📝 Subtitles</label>
        </div>
        <div class="option-item">
          <input type="checkbox" id="opt-music" checked disabled>
          <label for="opt-music">🎵 Music</label>
        </div>
      </div>
      <button id="process-btn" class="process-btn" ${!selectedFile ? 'disabled' : ''}>
        ✨ Process Video
      </button>
      <div id="status-box"></div>
    </div>
  `;
  attachEvents();
}
function attachEvents() {
  const uploadZone = document.getElementById('upload-zone');
  const fileInput = document.getElementById('file-input');
  const processBtn = document.getElementById('process-btn');
  uploadZone.addEventListener('click', () => fileInput.click());
  uploadZone.addEventListener('dragover', (e) => {
    e.preventDefault();
    uploadZone.classList.add('dragover');
  });
  uploadZone.addEventListener('dragleave', () => {
    uploadZone.classList.remove('dragover');
  });
  uploadZone.addEventListener('drop', (e) => {
    e.preventDefault();
    uploadZone.classList.remove('dragover');
    if (e.dataTransfer.files.length) {
      selectedFile = e.dataTransfer.files[0];
      render();
    }
  });
  fileInput.addEventListener('change', (e) => {
    if (e.target.files.length) {
      selectedFile = e.target.files[0];
      render();
    }
  });
  processBtn.addEventListener('click', processVideo);
}
async function processVideo() {
  const statusBox = document.getElementById('status-box');
  const processBtn = document.getElementById('process-btn');
  if (!selectedFile) {
    statusBox.innerHTML = '<div class="status-box status-error">Please upload a video file first</div>';
    return;
  }
  processBtn.disabled = true;
  processBtn.textContent = '⏳ Processing...';
  statusBox.innerHTML = '<div class="status-box status-processing">🔄 Starting video processing...</div>';
  const mood = document.getElementById('mood-select').value;
  const enableCrop = document.getElementById('opt-crop').checked;
  const addIntro = document.getElementById('opt-intro').checked;
  const addSubs = document.getElementById('opt-subs').checked;
  try {
    let finalUrl = '';
    // Upload file to HF Space first using Gradio 6 API
    statusBox.innerHTML = '<div class="status-box status-processing">📤 Uploading video to server...</div>';
    const formData = new FormData();
    formData.append('files', selectedFile);
    // Gradio 6 uses /gradio_api/upload endpoint
    const uploadResp = await fetch(HF_SPACE_URL + '/gradio_api/upload', {
      method: 'POST',
      body: formData
    });
    if (!uploadResp.ok) {
      const errText = await uploadResp.text();
      throw new Error('Upload failed: ' + errText);
    }
    const uploadData = await uploadResp.json();
    // Gradio 6 returns array of file paths like ["/tmp/gradio/xxx/filename"]
    if (uploadData && uploadData.length > 0) {
      // Use /gradio_api/file= to access uploaded files
      finalUrl = HF_SPACE_URL + '/gradio_api/file=' + uploadData[0];
    } else {
      throw new Error('No file URL returned');
    }
    statusBox.innerHTML = '<div class="status-box status-processing">✅ Upload complete! Processing video with AI...</div>';
    // Call the processing tool via MCP - use direct API call for longer timeout
    statusBox.innerHTML = '<div class="status-box status-processing">🎬 Processing video... This takes 5-15 minutes.<br><small>Do not close this window.</small></div>';
    // Direct call to the Gradio API endpoint with longer timeout
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), 20 * 60 * 1000); // 20 min timeout
    try {
      const apiResp = await fetch(HF_SPACE_URL + '/gradio_api/call/add_production_to_video', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          data: [finalUrl, mood, enableCrop, addIntro, addSubs]
        }),
        signal: controller.signal
      });
      clearTimeout(timeoutId);
      if (!apiResp.ok) {
        throw new Error('API call failed: ' + await apiResp.text());
      }
      const eventId = (await apiResp.json()).event_id;
      // Poll for result with SSE
      statusBox.innerHTML = '<div class="status-box status-processing">🎬 Processing started! Waiting for result...</div>';
      const resultResp = await fetch(HF_SPACE_URL + '/gradio_api/call/add_production_to_video/' + eventId, {
        signal: AbortSignal.timeout(20 * 60 * 1000)
      });
      // Parse SSE response
      const text = await resultResp.text();
      const lines = text.split('\\n');
      let resultText = '';
      for (const line of lines) {
        if (line.startsWith('data: ')) {
          try {
            const data = JSON.parse(line.slice(6));
            if (data && data[0]) {
              resultText = data[0];
            }
          } catch (e) {}
        }
      }
      if (resultText.includes('✅')) {
        const downloadMatch = resultText.match(/https:\\/\\/[^\\s]+download[^\\s]*/);
        statusBox.innerHTML = `
          <div class="status-box status-success">
            ✅ Video processed successfully!
            ${downloadMatch ? `<br><a href="${downloadMatch[0]}" target="_blank" class="download-link">⬇️ Download Video</a>` : ''}
          </div>
        `;
      } else if (resultText.includes('❌')) {
        statusBox.innerHTML = `<div class="status-box status-error">${resultText}</div>`;
      } else {
        statusBox.innerHTML = `<div class="status-box status-success">Processing complete!<br>${resultText}</div>`;
      }
    } catch (fetchErr) {
      clearTimeout(timeoutId);
      if (fetchErr.name === 'AbortError') {
        statusBox.innerHTML = '<div class="status-box status-processing">⏳ Still processing... Check back later or visit the HF Space directly.</div>';
      } else {
        throw fetchErr;
      }
    }
  } catch (err) {
    statusBox.innerHTML = `<div class="status-box status-error">❌ Error: ${err.message}</div>`;
  }
  processBtn.disabled = false;
  processBtn.textContent = '✨ Process Video';
}
render();
</script>
"""
def open_video_studio() -> str:
    """
    🎬 Open the Director's Cut Video Studio interface.

    Use this tool when the user wants to:
    - Process or edit a video
    - Upload a video file
    - Add production value to any video
    - Create viral short-form content

    This opens an interactive studio where users can:
    - Upload a video file directly (the widget has no URL field; YouTube URLs
      are handled by the add_production_to_video tool instead)
    - Choose mood (hype, chill, suspense, or auto-detect)
    - Enable/disable smart crop, AI intro, and subtitles
    - Process the video and download the result

    Returns:
        str: Confirmation that the studio is ready
    """
    # NOTE(review): the old message told users they could "paste a YouTube URL",
    # but the studio widget only accepts file uploads - keep the message accurate.
    return "🎬 Director's Cut Video Studio is ready! Upload a video file, then click 'Process Video' to transform it into viral content."
| # ============================================================================== | |
| # GRADIO INTERFACE (Preserved exact structure from GitHub) | |
| # ============================================================================== | |
| with gr.Blocks(title="Director's Cut") as app: | |
| gr.Markdown("# 🎬 Director's Cut - Autonomous Video Editor") | |
| gr.Markdown("**Powered by Modal Backend** with Webshare proxies") | |
| with gr.Tabs(): | |
| # ==================== README TAB ==================== | |
| with gr.Tab("📖 About"): | |
| # Read and display README content | |
| readme_path = os.path.join(os.path.dirname(__file__), "README.md") | |
| if os.path.exists(readme_path): | |
| with open(readme_path, "r") as f: | |
| readme_content = f.read() | |
| # Remove YAML frontmatter | |
| if readme_content.startswith("---"): | |
| end_idx = readme_content.find("---", 3) | |
| if end_idx != -1: | |
| readme_content = readme_content[end_idx + 3:].strip() | |
| # Convert relative image paths to absolute HuggingFace URLs | |
| readme_content = readme_content.replace( | |
| "./resources/", | |
| "https://huggingface.co/spaces/tyb343/directors-cut/resolve/main/resources/" | |
| ) | |
| gr.Markdown(readme_content) | |
| else: | |
| gr.Markdown("README not found") | |
| # ==================== AUTO MODE TAB ==================== | |
| with gr.Tab("📹 Create Clip"): | |
| gr.Markdown("**Step-by-Step Editor** - Processing on Modal") | |
| with gr.Row(): | |
| url_input = gr.Textbox( | |
| label="YouTube URL", placeholder="https://youtube.com/watch?v=...", scale=4) | |
| reset_btn = gr.Button("🔄 Reset", scale=1, variant="secondary") | |
| # Step 1 | |
| with gr.Group(): | |
| gr.Markdown("### Step 1: Analyze Video") | |
| gr.Markdown( | |
| "*Downloads video & extracts transcript (~3-4 mins)*", elem_classes=["step-hint"]) | |
| step1_btn = gr.Button( | |
| "1️⃣ Analyze & Classify", variant="primary") | |
| step1_output = gr.Markdown() | |
| # Step 2 | |
| with gr.Group(): | |
| gr.Markdown("### Step 2: Scout Hotspots") | |
| num_hotspots_slider = gr.Slider( | |
| minimum=3, maximum=10, value=5, step=1, label="Number of Hotspots") | |
| step2_btn = gr.Button("2️⃣ Scout", interactive=False) | |
| step2_output = gr.Markdown() | |
| # Step 3 | |
| with gr.Group(): | |
| gr.Markdown("### Step 3: Verify") | |
| step3_btn = gr.Button("3️⃣ Verify Clips", interactive=False) | |
| step3_output = gr.Markdown() | |
| # Step 4 | |
| with gr.Group(): | |
| gr.Markdown("### Step 4: Create Plan") | |
| step4_btn = gr.Button("4️⃣ Generate Plan", interactive=False) | |
| step4_output = gr.Markdown() | |
| plan_json = gr.Code(label="Edit Plan (JSON)", language="json") | |
| # Step 5 | |
| with gr.Group(): | |
| gr.Markdown("### Step 5: Render") | |
| step5_btn = gr.Button("5️⃣ Render Video", | |
| interactive=False, variant="primary") | |
| step5_output = gr.Markdown() | |
| video_output = gr.Video(label="Final Edit") | |
| # Event handlers | |
| step1_btn.click(fn=step1_analyze_video, inputs=[ | |
| url_input], outputs=[step1_output, step2_btn]) | |
| step2_btn.click(fn=step2_scout_hotspots, inputs=[ | |
| url_input, num_hotspots_slider], outputs=[step2_output, step3_btn]) | |
| step3_btn.click(fn=step3_verify_hotspots, inputs=[ | |
| url_input], outputs=[step3_output, step4_btn]) | |
| step4_btn.click(fn=step4_create_plan, inputs=[], outputs=[ | |
| step4_output, step5_btn, plan_json]) | |
| step5_btn.click(fn=step5_render_video, inputs=[], | |
| outputs=[step5_output, video_output]) | |
| reset_btn.click(fn=reset_workflow, inputs=[], outputs=[step1_output, step2_output, step3_output, | |
| step4_output, plan_json, step5_output, video_output, step2_btn, step3_btn, step4_btn, step5_btn]) | |
| # ==================== PRODUCTION STUDIO TAB ==================== | |
| with gr.Tab("🎙️ Production Studio"): | |
| gr.Markdown( | |
| "## Professional Video Production\n**Processing on Modal backend**") | |
| video_input_2 = gr.Video(label="Upload Video") | |
| with gr.Row(): | |
| load_render_btn = gr.Button( | |
| "⬇️ Load Last Render", variant="secondary") | |
| load_render_status = gr.Markdown() | |
| with gr.Row(): | |
| mood_override = gr.Dropdown( | |
| choices=["auto", "hype", "suspense", "chill"], value="auto", label="Mood") | |
| with gr.Row(): | |
| enable_smart_crop = gr.Checkbox( | |
| label="🎯 Smart Crop", value=True) | |
| add_intro_image = gr.Checkbox( | |
| label="🖼️ Intro Image", value=True) | |
| add_subtitles = gr.Checkbox(label="📝 Subtitles", value=True) | |
| produce_btn = gr.Button( | |
| "✨ Add Production Value", variant="primary", size="lg") | |
| progress_2 = gr.Textbox( | |
| label="Progress", lines=8, interactive=False) | |
| video_output_2 = gr.Video(label="Polished Video", height=500) | |
| produce_btn.click(fn=add_production_wrapper, inputs=[ | |
| video_input_2, mood_override, enable_smart_crop, add_intro_image, add_subtitles], outputs=[progress_2, video_output_2]) | |
| load_render_btn.click(fn=load_last_render_into_production, inputs=[ | |
| ], outputs=[video_input_2, load_render_status]) | |
| # ==================== CHATGPT MCP TAB (for MCP tool/resource binding) ==================== | |
| with gr.Tab("🤖 ChatGPT Integration", visible=False): | |
| # This tab binds MCP tools and resources to Gradio events | |
| # Required by Gradio MCP to expose them to ChatGPT | |
| # Bind the add_production_to_video tool | |
| chatgpt_url_input = gr.Textbox(label="Video URL") | |
| chatgpt_mood = gr.Dropdown( | |
| choices=["auto", "hype", "suspense", "chill"], value="auto") | |
| chatgpt_crop = gr.Checkbox(value=True) | |
| chatgpt_intro = gr.Checkbox(value=True) | |
| chatgpt_subs = gr.Checkbox(value=True) | |
| chatgpt_output = gr.Textbox(label="Result") | |
| chatgpt_btn = gr.Button("Process Video") | |
| chatgpt_btn.click( | |
| add_production_to_video, | |
| inputs=[chatgpt_url_input, chatgpt_mood, | |
| chatgpt_crop, chatgpt_intro, chatgpt_subs], | |
| outputs=chatgpt_output | |
| ) | |
| # Bind the production widget resource | |
| widget_code = gr.Code(label="Widget HTML", | |
| language="html", max_lines=5) | |
| widget_btn = gr.Button("Show Widget") | |
| widget_btn.click(production_widget_html, outputs=widget_code) | |
| # Bind the Video Studio tool | |
| studio_output = gr.Textbox(label="Studio Status") | |
| studio_btn = gr.Button("Open Video Studio") | |
| studio_btn.click(open_video_studio, outputs=studio_output) | |
| # Bind the Video Studio widget resource | |
| studio_widget_code = gr.Code( | |
| label="Video Studio Widget", language="html", max_lines=5) | |
| studio_widget_btn = gr.Button("Get Video Studio Widget") | |
| studio_widget_btn.click( | |
| video_studio_widget_html, outputs=studio_widget_code) | |
if __name__ == "__main__":
    # Announce startup and the configured Modal backend before serving.
    print("🚀 Starting Director's Cut HuggingFace Space...")
    print(f"📡 Modal Backend: {MODAL_BASE_URL}")
    launch_options = {
        "server_name": "0.0.0.0",  # bind all interfaces (needed inside the HF container)
        "server_port": 7860,       # the port HF Spaces expects
        "mcp_server": True,        # expose Gradio tools over MCP
        "share": False,            # HF already provides a public URL; no Gradio tunnel
    }
    app.launch(**launch_options)