Spaces:
Running
Running
| # ============================================================================== | |
| # engine.py - [FINAL FIXED VERSION] | |
| # FIX: Removed 'zoompan' which was pausing videos. Restored natural video motion. | |
| # ============================================================================== | |
| import os | |
| import time | |
| import json | |
| import uuid | |
| import threading | |
| import subprocess | |
| import requests | |
| import sqlite3 | |
| import random | |
| import shutil | |
| import re | |
| from gtts import gTTS | |
| from werkzeug.utils import secure_filename | |
| # स्थानीय परीक्षण के लिए dotenv लोड करें (अगर उपलब्ध हो) | |
| try: | |
| from dotenv import load_dotenv | |
| load_dotenv() | |
| except ImportError: | |
| pass | |
# ==============================================================================
# 1. Global Setup and Database Functions
# ==============================================================================
# Project root directory (hard-coded for the container deployment at /code).
APP_ROOT = '/code'
# Absolute paths for all required working folders.
DATA_FOLDER = os.path.join(APP_ROOT, 'data')      # SQLite database lives here
UPLOAD_FOLDER = os.path.join(APP_ROOT, 'uploads') # per-task temp dirs / uploaded audio
OUTPUT_FOLDER = os.path.join(APP_ROOT, 'outputs') # rendered videos and reports
# Full absolute path of the task-tracking SQLite database file.
DATABASE_FILE = os.path.join(DATA_FOLDER, 'tasks.db')
def get_db_connection():
    """Open a connection to the task database.

    ``check_same_thread=False`` permits use from worker threads; the row
    factory is set to :class:`sqlite3.Row` so columns are addressable by name.
    """
    connection = sqlite3.connect(DATABASE_FILE, check_same_thread=False)
    connection.row_factory = sqlite3.Row
    return connection
def init_db():
    """Create the ``tasks`` table if it does not already exist."""
    schema = 'CREATE TABLE IF NOT EXISTS tasks (id TEXT PRIMARY KEY, status TEXT NOT NULL, progress INTEGER NOT NULL, log TEXT, output_filename TEXT)'
    connection = get_db_connection()
    connection.execute(schema)
    connection.commit()
    connection.close()
def create_task(task_id):
    """Insert a fresh task row in the 'processing' state with an initial log line."""
    initial_log = "मिशन शुरू हो रहा है...\n"
    connection = get_db_connection()
    connection.execute(
        'INSERT INTO tasks (id, status, progress, log) VALUES (?, ?, ?, ?)',
        (task_id, 'processing', 0, initial_log),
    )
    connection.commit()
    connection.close()
def get_task(task_id):
    """Return the task row for *task_id*, or None when no such task exists."""
    connection = get_db_connection()
    row = connection.execute('SELECT * FROM tasks WHERE id = ?', (task_id,)).fetchone()
    connection.close()
    return row
def update_task_log(task_id, message, progress):
    """Append *message* (plus a newline) to a task's log and set its progress.

    Robustness fix: the original unconditionally subscripted the result of
    ``fetchone()``, which raised ``TypeError`` whenever the task row was
    missing (or its log column was NULL). We now fall back to an empty log;
    the UPDATE is then a harmless no-op for a nonexistent row.
    """
    conn = get_db_connection()
    row = conn.execute('SELECT log FROM tasks WHERE id = ?', (task_id,)).fetchone()
    current_log = (row['log'] if row is not None else None) or ""
    new_log = current_log + message + "\n"
    conn.execute('UPDATE tasks SET log = ?, progress = ? WHERE id = ?', (new_log, progress, task_id))
    conn.commit()
    conn.close()
def update_task_final_status(task_id, status, error_message=None, output_filename=None):
    """Finalize a task as 'error' or 'complete', appending a closing log line.

    - 'error': appends a FATAL ERROR banner with *error_message*.
    - 'complete': sets progress to 100 and records *output_filename*.
    Any other *status* value leaves the row untouched (only commit runs).

    Robustness fix: the original subscripted ``fetchone()`` directly, which
    raised ``TypeError`` when the task row was missing; we now default the
    existing log to an empty string.
    """
    conn = get_db_connection()
    row = conn.execute('SELECT log FROM tasks WHERE id = ?', (task_id,)).fetchone()
    current_log = (row['log'] if row is not None else None) or ""
    if status == 'error':
        final_log = current_log + f"\n\n🚨 FATAL ERROR: {error_message}"
        conn.execute('UPDATE tasks SET status = ?, log = ? WHERE id = ?', (status, final_log, task_id))
    elif status == 'complete':
        final_log = current_log + "🎉 मिशन पूरा हुआ!"
        conn.execute('UPDATE tasks SET status = ?, progress = ?, output_filename = ?, log = ? WHERE id = ?', (status, 100, output_filename, final_log, task_id))
    conn.commit()
    conn.close()
def load_api_keys(prefix):
    """Collect every non-empty environment variable whose name starts with *prefix*.

    Matching is case-insensitive; values are stripped of surrounding
    whitespace. Returns a (possibly empty) list of key strings.
    """
    try:
        wanted = prefix.lower()
        found = [
            value.strip()
            for name, value in os.environ.items()
            if name.lower().startswith(wanted) and value.strip()
        ]
        if found:
            print(f"✅ API Key Check: '{prefix}' के लिए {len(found)} कीज़ मिलीं।")
        else:
            print(f"❌ API Key Check: '{prefix}' के लिए कोई कीज़ नहीं मिलीं!")
        return found
    except Exception as e:
        print(f"🚨 एनवायरनमेंट वेरिएबल्स लोड करते समय त्रुटि: {e}")
        return []
| # ============================================================================== | |
| # 2. All API Classes | |
| # ============================================================================== | |
class GroqAPI:
    """Minimal client for Groq's Whisper transcription endpoint.

    Rotates round-robin through the supplied API keys, one key per call.
    """

    def __init__(self, api_keys):
        self.api_keys = api_keys
        self.api_url = "https://api.groq.com/openai/v1/audio/transcriptions"
        self.model = "whisper-large-v3"
        self._key_index = 0

    def transcribe_audio(self, audio_path):
        """Upload *audio_path* and return word-level timestamp dicts.

        Raises Exception when no key is configured or the request fails.
        """
        if not self.api_keys:
            raise Exception("Groq API key not found.")
        api_key = self.api_keys[self._key_index % len(self.api_keys)]
        self._key_index += 1
        data = {'model': self.model, 'response_format': 'verbose_json', 'timestamp_granularities[]': 'word'}
        headers = {'Authorization': f'Bearer {api_key}'}
        try:
            with open(audio_path, 'rb') as audio_file:
                files = {'file': (os.path.basename(audio_path), audio_file, 'audio/mpeg')}
                print(f"-> Groq API को शब्द-स्तर पर टाइमस्टैम्प के लिए भेजा जा रहा है...")
                response = requests.post(self.api_url, headers=headers, data=data, files=files, timeout=120)
                response.raise_for_status()
                words_data = response.json().get('words', [])
                print(f"-> ट्रांसक्रिप्शन सफल: {len(words_data)} शब्दों के टाइमस्टैम्प मिले।")
                return words_data
        except Exception as e:
            raise Exception(f"Groq API Error: {e}")
class PexelsAPI:
    """Client for the Pexels video search API (uses the first provided key)."""

    def __init__(self, api_keys):
        if not api_keys:
            raise Exception("Pexels API key not found.")
        self.api_key = api_keys[0]
        self.api_url = "https://api.pexels.com/videos/search"

    def search_and_download(self, query, download_path, orientation, search_page=1):
        """Search for *query* and stream the best matching video to *download_path*.

        Prefers an 'hd' rendition, falling back to the first available file.
        Returns the download path on success, or None on any failure.
        """
        print(f"-> Pexels पर खोजा जा रहा है (Direct API): '{query}' (Page: {search_page}, Orientation: {orientation})")
        headers = {'Authorization': self.api_key}
        params = {'query': query, 'page': search_page, 'per_page': 1, 'orientation': orientation}
        try:
            response = requests.get(self.api_url, headers=headers, params=params, timeout=60)
            response.raise_for_status()
            data = response.json()
            if not data.get('videos'):
                print(f"-> Pexels पर '{query}' के लिए कोई परिणाम नहीं मिला।")
                return None
            video_files = data['videos'][0].get('video_files', [])
            # First 'hd' rendition wins; otherwise take the first file at all.
            best_link = next((vf.get('link') for vf in video_files if vf.get('quality') == 'hd'), None)
            if not best_link and video_files:
                best_link = video_files[0].get('link')
            if not best_link:
                print(f"-> Pexels परिणाम में कोई डाउनलोड करने योग्य लिंक नहीं मिला।")
                return None
            print(f"-> Pexels से वीडियो डाउनलोड किया जा रहा है...")
            download_response = requests.get(best_link, stream=True, timeout=60)
            download_response.raise_for_status()
            with open(download_path, 'wb') as f:
                for chunk in download_response.iter_content(chunk_size=8192):
                    f.write(chunk)
            print(f"-> सफलतापूर्वक सहेजा गया: {download_path}")
            return download_path
        except requests.exceptions.RequestException as e:
            print(f"🚨 Pexels API में त्रुटि: {e}")
            return None
        except Exception as e:
            print(f"🚨 Pexels वीडियो डाउनलोड करने में अज्ञात त्रुटि: {e}")
            return None
class PixabayAPI:
    """Client for the Pixabay video search API (uses the first provided key)."""

    def __init__(self, api_keys):
        if not api_keys:
            raise Exception("Pixabay API key not found.")
        self.api_key = api_keys[0]
        self.api_url = "https://pixabay.com/api/videos/"

    def search_and_download(self, query, download_path, orientation, max_clip_length, search_index=0):
        """Search for *query* and download the medium rendition of hit *search_index*.

        Returns the download path on success, or None on any failure.
        """
        print(f"-> Pixabay पर खोजा जा रहा है: '{query}' (Index: {search_index})")
        params = {
            'key': self.api_key,
            'q': query,
            'per_page': 5,
            'orientation': orientation,
            'max_duration': int(max_clip_length),
        }
        try:
            search_response = requests.get(self.api_url, params=params, timeout=60)
            search_response.raise_for_status()
            results = search_response.json()
            hits = results['hits']
            if not hits or len(hits) <= search_index:
                print(f"-> Pixabay पर '{query}' के लिए index {search_index} पर कोई परिणाम नहीं मिला।")
                return None
            video_url = hits[search_index]['videos']['medium']['url']
            print(f"-> Pixabay से वीडियो डाउनलोड किया जा रहा है...")
            download_response = requests.get(video_url, stream=True, timeout=60)
            download_response.raise_for_status()
            with open(download_path, 'wb') as f:
                for chunk in download_response.iter_content(chunk_size=8192):
                    f.write(chunk)
            print(f"-> सफलतापूर्वक सहेजा गया: {download_path}")
            return download_path
        except Exception as e:
            print(f"🚨 Pixabay API में त्रुटि: {e}")
            return None
class GeminiTeam:
    """Resilient Gemini client.

    On construction it probes the model-list endpoint with each key to pick
    the best non-experimental 'flash' model, then every generateContent call
    rotates through all keys until one succeeds.
    """

    MODELS_LIST_URL = "https://generativelanguage.googleapis.com/v1beta/models"

    def __init__(self, api_keys):
        self.api_keys = api_keys
        if not self.api_keys:
            raise Exception("Gemini API key not found.")
        self.model_name = self._find_best_model()
        if not self.model_name:
            raise Exception("Could not dynamically find a suitable Gemini 'flash' model from any of the provided keys.")
        self.api_url = f"https://generativelanguage.googleapis.com/v1beta/{self.model_name}:generateContent"
        print(f"✅ स्मार्ट मॉडल हंटर सफल: '{self.model_name}' का उपयोग किया जाएगा।")

    def _find_best_model(self):
        """Return the best available 'gemini-*-flash' model name, or None.

        Tries each API key in turn; 'exp' (experimental) models are skipped,
        and reverse-sorting picks the lexicographically newest flash model.
        """
        print("-> स्मार्ट मॉडल हंटर: सबसे अच्छे 'gemini-*-flash' मॉडल को खोजा जा रहा है...")
        for api_key in self.api_keys:
            try:
                print(f"-> API Key के अंतिम 4 अक्षरों से कोशिश की जा रही है: ...{api_key[-4:]}")
                response = requests.get(f"{self.MODELS_LIST_URL}?key={api_key}", timeout=20)
                response.raise_for_status()
                data = response.json()
                available_models = [m['name'] for m in data.get('models', []) if 'flash' in m['name'] and 'generateContent' in m.get('supportedGenerationMethods', []) and 'exp' not in m['name']]
                if not available_models:
                    continue
                available_models.sort(reverse=True)
                print(f"-> उपलब्ध 'flash' मॉडल मिले: {available_models}")
                return available_models[0]
            except requests.exceptions.RequestException as e:
                print(f"🚨 API Key ...{api_key[-4:]} के साथ त्रुटि: {e}. अगली की आजमाई जा रही है...")
                continue
        print("🚨 स्मार्ट मॉडल हंटर में गंभीर त्रुटि: कोई भी Gemini API Key काम नहीं कर रही है।")
        return None

    def _make_resilient_api_call(self, prompt, timeout=120):
        """POST *prompt* to Gemini, trying each API key in turn.

        BUGFIX: the original raised inside the `except` block, so the very
        first request failure aborted the rotation (despite printing a
        "trying next key" message), and exhausting every key silently
        returned None. Failures now `continue` to the next key, and the
        exception is raised only after all keys have been tried.
        """
        headers = {'Content-Type': 'application/json'}
        payload = {'contents': [{'parts': [{'text': prompt}]}]}
        for api_key in self.api_keys:
            try:
                print(f"-> Gemini को अनुरोध भेजा जा रहा है (Key: ...{api_key[-4:]}, Model: {self.model_name.split('/')[-1]})")
                response = requests.post(f"{self.api_url}?key={api_key}", headers=headers, json=payload, timeout=timeout)
                response.raise_for_status()
                result = response.json()
                if 'candidates' not in result or not result['candidates']:
                    # Empty candidate list usually means a safety block.
                    print(f"🚨 चेतावनी: Key ...{api_key[-4:]} से कोई कैंडिडेट नहीं मिला (संभवतः सुरक्षा ब्लॉक)। अगली की आजमाई जा रही है...")
                    continue
                return result
            except requests.exceptions.RequestException as e:
                print(f"🚨 API कॉल में त्रुटि (Key: ...{api_key[-4:]}): {e}. अगली की आजमाई जा रही है...")
                continue
        raise Exception("Gemini API Error: All available API keys failed. Please check your keys and quotas.")

    def extract_keywords(self, script_text):
        """Ask Gemini for per-scene primary/fallback stock-footage queries.

        Returns a list of dicts with 'scene_description', 'primary_query',
        and 'fallback_query'. The parsed result is also dumped to
        gemini_analysis_log.json (best-effort).
        """
        prompt = f"""You are a search query expert. Analyze the script below and for each scene, create a JSON object. Each object must contain: 1. "scene_description": A brief description of the scene. 2. "primary_query": A highly creative, emotional, and cinematic search query in English. This is the main attempt. 3. "fallback_query": A simple, literal, and direct search query in English. Use this if the primary query fails. RULES: - Your response MUST be ONLY a JSON list of objects. - All queries must be in English. Script: "{script_text}" Example: [ {{"scene_description": "A person looking at a mountain.", "primary_query": "inspirational mountain peak cinematic hope", "fallback_query": "man looking at mountain"}} ] Generate the JSON:"""
        result = self._make_resilient_api_call(prompt)
        json_str = result['candidates'][0]['content']['parts'][0]['text']
        # Trim any markdown fencing / prose around the JSON list before parsing.
        clean_str = json_str[json_str.find('['):json_str.rfind(']') + 1]
        scenes = json.loads(clean_str)
        try:
            log_file_path = os.path.join(OUTPUT_FOLDER, 'gemini_analysis_log.json')
            with open(log_file_path, 'w', encoding='utf-8') as f:
                json.dump(scenes, f, ensure_ascii=False, indent=4)
        except Exception as e:
            print(f"🚨 चेतावनी: Gemini विश्लेषण लॉग करने में विफल: {e}")
        print(f"-> Gemini ने सफलतापूर्वक {len(scenes)} प्राथमिक/फ़ॉलबैक दृश्य निकाले।")
        return scenes

    def create_master_timeline(self, word_timestamps, enriched_scenes_with_paths):
        """Ask Gemini to build a gapless clip timeline covering the full audio.

        Returns a list of dicts with 'start', 'end', 'matched_clip', and
        'start_offset_seconds'.
        """
        full_script_text = " ".join([word['word'] for word in word_timestamps])
        total_duration = word_timestamps[-1]['end'] if word_timestamps else 0
        # NOTE: prompt explicitly tells the model to avoid an overlong final clip.
        prompt = f"""You are an expert AI video editor. Create a frame-perfect timeline JSON. Assets: 1. **Full Script:** "{full_script_text}" 2. **Total Audio Duration:** {total_duration:.2f} seconds. 3. **Available Scene Clips:** {json.dumps(enriched_scenes_with_paths, indent=2)} 4. **Word-Level Timestamps (with Pauses):** {json.dumps(word_timestamps, indent=2)}. RULES: 1. Your response MUST be ONLY a list of JSON objects. 2. Each object must have "start", "end", "matched_clip", and "start_offset_seconds". 3. **CRITICAL:** The timeline MUST cover the entire audio duration from 0 to {total_duration:.2f} seconds. There should be NO GAPS. 4. **CRITICAL:** You MUST use each video from the 'Available Scene Clips' list only once. Do not repeat clips. 5. **PACING RULE:** Avoid making the last clip excessively long. Try to distribute the duration evenly if possible. 6. **PAUSE RULE:** Use '[PAUSE]' moments for transitions. Create the final timeline JSON:"""
        result = self._make_resilient_api_call(prompt, timeout=180)
        json_str = result['candidates'][0]['content']['parts'][0]['text']
        clean_str = json_str[json_str.find('['):json_str.rfind(']') + 1]
        final_timeline = json.loads(clean_str)
        print(f"-> Gemini Master Editor ने सफलतापूर्वक {len(final_timeline)} क्लिप्स की टाइमलाइन और ऑफसेट बना दी है।")
        return final_timeline

    def generate_script(self, topic, video_length):
        """Generate a Hindi narration-only script for *topic*.

        *video_length* is 'short'/'medium'/'long' (defaults to medium pacing).
        """
        word_count_map = {"short": "~75 शब्द", "medium": "~150 शब्द", "long": "~300 शब्द"}
        target_word_count = word_count_map.get(video_length, "~150 शब्द")
        prompt = f"""आप 'स्पार्कलिंग ज्ञान' के लिए एक विशेषज्ञ हिंदी स्क्रिप्ट राइटर हैं। विषय: "{topic}". निर्देश: 1. इस विषय पर एक आकर्षक, {target_word_count} की स्क्रिप्ट लिखें। 2. भाषा सरल और बोलचाल वाली हो। 3. हर 2-3 लाइनों के बाद एक नया विज़ुअल या सीन दिखाया जा सके, इस तरह से लिखें। 4. **CRITICAL RULE:** आपका आउटपुट सिर्फ और सिर्फ बोले जाने वाले डायलॉग्स (narration) होने चाहिए। किसी भी तरह के विज़ुअल निर्देश, सीन डिस्क्रिप्शन या ब्रैकेट () [] में लिखी कोई भी जानकारी आउटपुट में नहीं होनी चाहिए। सिर्फ वो टेक्स्ट दें जो ऑडियो में बोला जाएगा। अब, स्क्रिप्ट लिखें:"""
        result = self._make_resilient_api_call(prompt)
        generated_script = result['candidates'][0]['content']['parts'][0]['text']
        print("-> Gemini ने सफलतापूर्वक स्क्रिप्ट जेनरेट कर दी है।")
        return generated_script.strip()
| # ============================================================================== | |
| # 3. Enhanced Video Assembler (FIXED: NO FREEZE) | |
| # ============================================================================== | |
class VideoAssembler:
    """Renders the final video with ffmpeg in three stages:

    1. Trim/scale/crop each timeline clip to the target resolution at 30 fps.
    2. Chain the clips pairwise with randomly chosen xfade transitions.
    3. Mux the narration audio (or strip audio entirely) into the output.
    """

    # Cinematic transition settings shared by all instances.
    TRANSITION_DURATION = 0.75
    TRANSITION_TYPES = ['fade', 'wipeleft', 'wiperight', 'slideup', 'slidedown', 'circleopen', 'rectcrop']

    def __init__(self, timeline, narration_audio, output_path, width, height, mute_audio, temp_dir):
        self.timeline = timeline                  # list of {'start','end','matched_clip','start_offset_seconds'}
        self.narration_audio = narration_audio    # path to the narration mp3
        self.output_path = output_path            # final rendered video path
        self.width = width
        self.height = height
        self.mute_audio = mute_audio              # True -> final video has no audio track
        self.temp_dir = temp_dir                  # scratch dir for intermediate clips

    def _run_ffmpeg_command(self, command, suppress_errors=False):
        """Run an ffmpeg/ffprobe command, raising with stderr on failure."""
        process = subprocess.run(command, capture_output=True, text=True)
        if not suppress_errors and process.returncode != 0:
            error_details = f"Return Code {process.returncode}"
            # -9 means the OS killed the process (typically container limits).
            if process.returncode == -9:
                error_details += " (SIGKILL): Process killed (Memory/CPU limit)."
            raise Exception(f"FFmpeg Error ({error_details}):\nSTDERR:\n{process.stderr}")
        return process

    def assemble_video(self, log_callback):
        """Run the full three-stage render; *log_callback(msg, progress)* reports status."""
        if not self.timeline:
            return
        # --- STAGE 1: Clip Preparation (Motion Restored) ---
        log_callback("-> Stage 1/3: क्लिप्स तैयार की जा रही हैं (Normal Motion)...", 91)
        prepared_clips = []
        for i, item in enumerate(self.timeline):
            input_clip_path = item['matched_clip']
            # Probe the source clip's duration so offsets can be validated.
            try:
                ffprobe_cmd = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', input_clip_path]
                duration_proc = self._run_ffmpeg_command(ffprobe_cmd)
                actual_clip_duration = float(duration_proc.stdout.strip())
            except Exception as e:
                log_callback(f"🚨 Skipping clip due to error: {e}", 91)
                continue
            start_offset = float(item.get('start_offset_seconds', 0.0))
            if start_offset >= actual_clip_duration:
                start_offset = 0.0
            # All but the last clip get extra footage to feed the xfade overlap.
            is_last_clip = (i == len(self.timeline) - 1)
            overlap = 0 if is_last_clip else self.TRANSITION_DURATION
            duration = (float(item['end']) - float(item['start'])) + overlap
            if duration <= 0:
                continue
            output_clip_path = os.path.join(self.temp_dir, f"prepared_{i:03d}.mp4")
            # No 'zoompan' (it froze videos): scale-to-cover + center crop keeps
            # the source's natural motion at the target resolution.
            command = [
                'ffmpeg', '-y', '-ss', str(start_offset), '-i', input_clip_path, '-t', str(duration),
                '-vf', f"scale='w={self.width}:h={self.height}:force_original_aspect_ratio=increase',crop={self.width}:{self.height},setsar=1,fps=30",
                '-c:v', 'libx264', '-preset', 'medium', '-crf', '23', '-an', '-threads', '2',
                output_clip_path
            ]
            self._run_ffmpeg_command(command)
            prepared_clips.append(output_clip_path)
        # --- STAGE 2: Merging with Random Transitions ---
        log_callback("-> Stage 2/3: रैंडम ट्रांजिशन्स (Random Transitions) के साथ जोड़ा जा रहा है...", 94)
        if not prepared_clips:
            raise Exception("No clips prepared.")
        if len(prepared_clips) == 1:
            # BUGFIX: the original copied the lone clip straight to
            # self.output_path, so Stage 3 then ran ffmpeg with the SAME file
            # as both input and output, truncating/corrupting it. Stage the
            # clip in temp_dir instead and let Stage 3 write the real output.
            transitioned_video_path = os.path.join(self.temp_dir, "transition_base.mp4")
            shutil.copy(prepared_clips[0], transitioned_video_path)
        else:
            current_video = prepared_clips[0]
            for i in range(len(prepared_clips) - 1):
                next_video = prepared_clips[i + 1]
                transition_out = os.path.join(self.temp_dir, f"transition_{i:03d}.mp4")
                # Pick a random transition per cut for variety.
                trans_type = random.choice(self.TRANSITION_TYPES)
                log_callback(f"  -> Adding '{trans_type}' between clip {i+1} & {i+2}", 95)
                ffprobe_cmd = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', current_video]
                duration_proc = self._run_ffmpeg_command(ffprobe_cmd)
                curr_duration = float(duration_proc.stdout.strip())
                # xfade starts TRANSITION_DURATION before the current video ends.
                offset = curr_duration - self.TRANSITION_DURATION
                command = [
                    'ffmpeg', '-y', '-i', current_video, '-i', next_video,
                    '-filter_complex', f"[0:v][1:v]xfade=transition={trans_type}:duration={self.TRANSITION_DURATION}:offset={offset},format=yuv420p",
                    '-c:v', 'libx264', '-preset', 'medium', '-crf', '23', transition_out
                ]
                self._run_ffmpeg_command(command)
                current_video = transition_out
            transitioned_video_path = current_video
        # --- STAGE 3: Final Audio Mixing ---
        log_callback("-> Stage 3/3: फाइनल ऑडियो मिक्सिंग और रेंडरिंग...", 98)
        audio_input = [] if self.mute_audio else ['-i', self.narration_audio]
        audio_map = ['-an'] if self.mute_audio else ['-map', '0:v:0', '-map', '1:a:0']
        # Video is stream-copied (already encoded in Stage 2); -shortest trims
        # the output to the shorter of video/narration.
        command = [
            'ffmpeg', '-y', '-i', transitioned_video_path,
        ] + audio_input + [
            '-c:v', 'copy',
        ] + audio_map + [
            '-c:a', 'aac', '-shortest', self.output_path
        ]
        self._run_ffmpeg_command(command)
| # ============================================================================== | |
| # 4. Main Worker Logic | |
| # ============================================================================== | |
def run_ai_engine_worker(task_id, script_text, script_file_path, orientation, max_clip_length, mute_final_video):
    """End-to-end worker for one video-generation task.

    Pipeline: verify API keys -> obtain narration audio (uploaded file or
    gTTS) -> word-level transcription via Groq -> pause detection -> stock
    footage search (Pexels with Pixabay fallback) -> Gemini master timeline ->
    ffmpeg assembly -> JSON report. Status/progress are persisted to the
    task row in SQLite; any exception marks the task as 'error'.
    """
    # Small shorthand so every step can append to this task's DB log.
    log = lambda message, progress: update_task_log(task_id, message, progress)
    temp_dir = os.path.join(UPLOAD_FOLDER, task_id)
    try:
        # --- Step 0: verify all four provider key sets are configured. ---
        log("Step 0: API Keys की पुष्टि...", 2)
        gemini_keys = load_api_keys("Gemini_Key")
        pexels_keys = load_api_keys("Pexels_Key")
        pixabay_keys = load_api_keys("Pixabay_Key")
        groq_keys = load_api_keys("Groq_Key")
        missing = []
        if not gemini_keys: missing.append("Gemini_Key")
        if not pexels_keys: missing.append("Pexels_Key")
        if not pixabay_keys: missing.append("Pixabay_Key")
        if not groq_keys: missing.append("Groq_Key")
        if missing:
            raise Exception(f"API Key Error: ये कीज़ नहीं मिलीं: {', '.join(missing)}")
        gemini = GeminiTeam(api_keys=gemini_keys); log("-> सभी जरूरी API कीज मौजूद हैं।", 5)
        # --- Step 1: obtain narration audio (uploaded file wins over TTS). ---
        log("Step 1: स्क्रिप्ट तैयार की जा रही है...", 8)
        os.makedirs(temp_dir, exist_ok=True); narration_audio_path = ""
        if script_file_path:
            narration_audio_path = script_file_path; log("-> ऑडियो फ़ाइल प्राप्त हुई।", 12)
        else:
            # Strip [stage directions] and (asides) so TTS speaks only narration.
            cleaned_script_for_tts = re.sub(r'\[.*?\]|\(.*?\)', '', script_text)
            full_script_text_for_tts = cleaned_script_for_tts.strip()
            narration_audio_path = os.path.join(temp_dir, "narration.mp3")
            gTTS(full_script_text_for_tts, lang='hi').save(narration_audio_path)
            log(f"-> TTS ke liye saaf script bheji gayi: '{full_script_text_for_tts}'", 12)
            log("-> टेक्स्ट से ऑडियो सफलतापूर्वक बनाया गया।", 12)
        # --- Step 2: word-level transcription via Groq Whisper. ---
        log("Step 2: ऑडियो का सटीक विश्लेषण (Groq)...", 15)
        groq_api = GroqAPI(api_keys=groq_keys); word_timestamps = groq_api.transcribe_audio(narration_audio_path)
        if not word_timestamps: raise Exception("Transcription failed or returned no words.")
        # Insert synthetic '[PAUSE]' tokens where the inter-word gap exceeds
        # pause_threshold seconds — the timeline AI uses these as cut points.
        log("-> Smart Pause Detector: ऑडियो में लंबी chuppi खोजी जा रही है...", 20)
        timestamps_with_pauses = []; pause_threshold = 1.5; pause_count = 0
        if word_timestamps:
            timestamps_with_pauses.append(word_timestamps[0])
            for i in range(len(word_timestamps) - 1):
                current_word_end = float(word_timestamps[i]['end']); next_word_start = float(word_timestamps[i+1]['start'])
                gap = next_word_start - current_word_end
                if gap > pause_threshold:
                    pause_event = {'word': '[PAUSE]', 'start': current_word_end, 'end': next_word_start}
                    timestamps_with_pauses.append(pause_event); pause_count += 1
                timestamps_with_pauses.append(word_timestamps[i+1])
        if pause_count > 0: log(f"-> Pause Detector ne सफलतापूर्वक {pause_count} pauses jode.", 22)
        else: log("-> Pause Detector ko koi lamba pause nahi mila. Sab theek hai.", 22)
        full_script_text = " ".join([word['word'] for word in timestamps_with_pauses]); log(f"-> पूर्ण स्क्रिप्ट: '{full_script_text}'", 25)
        # --- Step 3: per-scene footage search (Pexels first, Pixabay fallback;
        # two attempts each for the primary query, then the fallback query). ---
        log("Step 3: Contextual वीडियो खोजे जा रहे हैं (Smart Search)...", 30)
        scenes_from_gemini = gemini.extract_keywords(full_script_text)
        pexels = PexelsAPI(api_keys=pexels_keys); pixabay = PixabayAPI(api_keys=pixabay_keys)
        for i, scene in enumerate(scenes_from_gemini):
            scene['downloaded_path'] = None; primary_query = scene.get('primary_query'); fallback_query = scene.get('fallback_query')
            log(f"-> Scene {i+1} ('{scene['scene_description'][:20]}...') के लिए वीडियो खोजा जा रहा है...", 30 + i * 5)
            filename = f"scene_{i+1}_{secure_filename(primary_query)[:20]}.mp4"; download_path = os.path.join(temp_dir, filename)
            log(f" -> प्राथमिक कोशिश (Primary): '{primary_query}'...", 30 + i * 5)
            for attempt in range(2):
                path = pexels.search_and_download(primary_query, download_path, orientation, search_page=attempt + 1)
                if path: scene['downloaded_path'] = path; break
                path = pixabay.search_and_download(primary_query, download_path, orientation, max_clip_length, search_index=attempt)
                if path: scene['downloaded_path'] = path; break
            if not scene.get('downloaded_path'):
                log(f" -> प्राथमिक कोशिश विफल। फ़ॉलबैक कोशिश (Fallback): '{fallback_query}'...", 30 + i * 5)
                for attempt in range(2):
                    path = pexels.search_and_download(fallback_query, download_path, orientation, search_page=attempt + 1)
                    if path: scene['downloaded_path'] = path; break
                    path = pixabay.search_and_download(fallback_query, download_path, orientation, max_clip_length, search_index=attempt)
                    if path: scene['downloaded_path'] = path; break
            if scene['downloaded_path']: log(f"-> Scene {i+1} के लिए वीडियो मिला: {os.path.basename(scene['downloaded_path'])}", 30 + i * 5)
            else: log(f"🚨 चेतावनी: Scene {i+1} के लिए कोई भी वीडियो नहीं मिला।", 30 + i * 5)
        successful_scenes = [scene for scene in scenes_from_gemini if scene.get('downloaded_path')]
        if not successful_scenes: raise Exception("Could not download any videos for the given script.")
        log(f"-> {len(successful_scenes)} वीडियो क्लिप्स सफलतापूर्वक डाउनलोड हुए।", 60)
        # --- Step 4: AI timeline, then local validation. ---
        log("Step 4: मास्टर टाइमलाइन और इंटेलिजेंट क्लिप ट्रिमिंग...", 75)
        final_timeline = gemini.create_master_timeline(timestamps_with_pauses, successful_scenes); log(f"-> मास्टर टाइमलाइन AI से प्राप्त हुई।", 85)
        # Normalize 'matched_clip' (the model may return a path string or a
        # whole scene dict), drop entries whose file is missing, and enforce
        # the use-each-clip-once rule locally.
        validated_timeline = []
        used_clips = set()
        for clip in final_timeline:
            path_value = clip.get('matched_clip'); actual_path = None
            if isinstance(path_value, str): actual_path = path_value
            elif isinstance(path_value, dict): actual_path = path_value.get('downloaded_path')
            if actual_path and isinstance(actual_path, str) and os.path.exists(actual_path) and actual_path not in used_clips:
                clip['matched_clip'] = actual_path; validated_timeline.append(clip); used_clips.add(actual_path)
            else: log(f"🚨 चेतावनी: टाइमलाइन में अमान्य क्लिप: {os.path.basename(actual_path or 'Invalid')}", 87)
        if not validated_timeline: raise Exception("Timeline verification failed.")
        # Close gaps: extend each clip's end to the next clip's start, and
        # stretch the last clip to the end of the narration audio.
        log("-> टाइमलाइन में गैप्स की जाँच...", 88)
        final_gapless_timeline = []; total_duration = word_timestamps[-1]['end'] if word_timestamps else 0
        validated_timeline.sort(key=lambda x: float(x['start']))
        for i, clip in enumerate(validated_timeline):
            if i < len(validated_timeline) - 1:
                current_clip_end = float(clip['end']); next_clip_start = float(validated_timeline[i+1]['start'])
                if current_clip_end < next_clip_start: clip['end'] = next_clip_start
            elif i == len(validated_timeline) - 1:
                last_clip_end = float(clip['end'])
                if last_clip_end < total_duration: clip['end'] = total_duration
            final_gapless_timeline.append(clip)
        # --- Step 5: render with ffmpeg. ---
        log("Step 5: फाइनल वीडियो को रेंडर किया जा रहा है (Natural Motion, No Freeze)...", 90)
        width, height = (1080, 1920) if orientation == 'vertical' else (1920, 1080)
        output_filename = f"{task_id}_final_video.mp4"; output_path = os.path.join(OUTPUT_FOLDER, output_filename)
        assembler = VideoAssembler(final_gapless_timeline, narration_audio_path, output_path, width, height, mute_final_video, temp_dir)
        assembler.assemble_video(log)
        # Best-effort debug report: dump every intermediate artifact as JSON.
        log("-> अंतिम विस्तृत रिपोर्ट बनाई जा रही है...", 99)
        try:
            report_data = {
                "full_transcribed_script": full_script_text,
                "groq_word_timestamps": word_timestamps,
                "timestamps_with_pauses_added": timestamps_with_pauses,
                "gemini_scene_analysis_and_downloads": successful_scenes,
                "processed_gapless_timeline": final_gapless_timeline,
            }
            report_file_path = os.path.join(OUTPUT_FOLDER, f'{task_id}_report.json')
            with open(report_file_path, 'w', encoding='utf-8') as f: json.dump(report_data, f, ensure_ascii=False, indent=4)
        except Exception as e: log(f"🚨 चेतावनी: विस्तृत रिपोर्ट सहेजने में विफल: {e}", 99)
        update_task_final_status(task_id, 'complete', output_filename=output_filename)
    except Exception as e:
        # Any failure anywhere in the pipeline marks the task as 'error'.
        import traceback; traceback.print_exc()
        update_task_final_status(task_id, 'error', error_message=str(e))
    finally:
        # Always remove the per-task scratch directory, success or failure.
        if os.path.exists(temp_dir):
            try: shutil.rmtree(temp_dir); log(f"-> Temp files cleaned.", 100)
            except Exception as e: print(f"Cleanup Error: {e}")
def generate_script_with_ai(topic, video_length):
    """Generate a Hindi narration script for *topic* via Gemini.

    *video_length* is 'short' / 'medium' / 'long' (passed through to
    GeminiTeam.generate_script). Raises Exception when no Gemini key is
    configured or the API call fails.

    Fix: removed the pointless ``try: ... except Exception as e: raise e``
    wrapper — it handled nothing and only obscured the control flow;
    exceptions now propagate naturally with their original tracebacks.
    """
    print(f"-> AI स्क्रिप्ट जेनरेटर शुरू हुआ: Topic='{topic}', Length='{video_length}'")
    gemini_keys = load_api_keys("Gemini_Key")
    if not gemini_keys:
        raise Exception("Gemini API key not found for script generation.")
    gemini_agent = GeminiTeam(api_keys=gemini_keys)
    return gemini_agent.generate_script(topic, video_length)