Spaces:
Running
Running
Update engine.py
Browse files
engine.py
CHANGED
|
@@ -1,44 +1,39 @@
|
|
| 1 |
# ==============================================================================
|
| 2 |
-
# engine.py - [
|
| 3 |
-
#
|
|
|
|
| 4 |
# ==============================================================================
|
| 5 |
-
|
| 6 |
import os
|
| 7 |
import time
|
| 8 |
import json
|
| 9 |
import uuid
|
|
|
|
| 10 |
import subprocess
|
| 11 |
import requests
|
| 12 |
import sqlite3
|
| 13 |
import random
|
| 14 |
import shutil
|
| 15 |
import re
|
| 16 |
-
import
|
| 17 |
-
import edge_tts # NEW: For Human-like voice
|
| 18 |
from werkzeug.utils import secure_filename
|
| 19 |
|
| 20 |
# ==============================================================================
|
| 21 |
-
# 1. Global Setup and Database Functions
|
| 22 |
# ==============================================================================
|
| 23 |
-
|
| 24 |
-
# प्रोजेक्ट की रूट डायरेक्टरी
|
| 25 |
APP_ROOT = '/code'
|
| 26 |
-
|
| 27 |
-
# सभी ज़रूरी फोल्डरों के लिए एब्सोल्यूट पाथ
|
| 28 |
DATA_FOLDER = os.path.join(APP_ROOT, 'data')
|
| 29 |
UPLOAD_FOLDER = os.path.join(APP_ROOT, 'uploads')
|
| 30 |
OUTPUT_FOLDER = os.path.join(APP_ROOT, 'outputs')
|
| 31 |
-
|
| 32 |
-
MUSIC_FOLDER = os.path.join(ASSETS_FOLDER, 'music') # NEW: Background Music folder
|
| 33 |
-
|
| 34 |
-
# डेटाबेस फाइल का पाथ
|
| 35 |
DATABASE_FILE = os.path.join(DATA_FOLDER, 'tasks.db')
|
| 36 |
-
|
| 37 |
-
# सुनिश्चित करें कि सभी फोल्डर मौजूद हैं
|
| 38 |
-
for folder in [DATA_FOLDER, UPLOAD_FOLDER, OUTPUT_FOLDER, ASSETS_FOLDER, MUSIC_FOLDER]:
|
| 39 |
-
os.makedirs(folder, exist_ok=True)
|
| 40 |
|
| 41 |
def get_db_connection():
|
|
|
|
| 42 |
conn = sqlite3.connect(DATABASE_FILE, check_same_thread=False)
|
| 43 |
conn.row_factory = sqlite3.Row
|
| 44 |
return conn
|
|
@@ -50,7 +45,7 @@ def init_db():
|
|
| 50 |
conn.close()
|
| 51 |
|
| 52 |
def create_task(task_id):
|
| 53 |
-
log_message = "मिशन शुरू हो रहा
|
| 54 |
conn = get_db_connection()
|
| 55 |
conn.execute('INSERT INTO tasks (id, status, progress, log) VALUES (?, ?, ?, ?)', (task_id, 'processing', 0, log_message))
|
| 56 |
conn.commit()
|
|
@@ -64,33 +59,28 @@ def get_task(task_id):
|
|
| 64 |
|
| 65 |
def update_task_log(task_id, message, progress):
|
| 66 |
conn = get_db_connection()
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
except Exception as e:
|
| 73 |
-
print(f"Database Log Error: {e}")
|
| 74 |
-
finally:
|
| 75 |
-
conn.close()
|
| 76 |
|
| 77 |
def update_task_final_status(task_id, status, error_message=None, output_filename=None):
|
| 78 |
conn = get_db_connection()
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
except Exception as e:
|
| 89 |
-
print(f"Database Status Error: {e}")
|
| 90 |
-
finally:
|
| 91 |
-
conn.close()
|
| 92 |
|
| 93 |
def load_api_keys(prefix):
|
|
|
|
|
|
|
|
|
|
| 94 |
try:
|
| 95 |
prefix_lower = prefix.lower()
|
| 96 |
keys = [v for k, v in os.environ.items() if k.lower().startswith(prefix_lower)]
|
|
@@ -102,33 +92,9 @@ def load_api_keys(prefix):
|
|
| 102 |
return []
|
| 103 |
|
| 104 |
# ==============================================================================
|
| 105 |
-
# 2.
|
|
|
|
| 106 |
# ==============================================================================
|
| 107 |
-
|
| 108 |
-
async def generate_edge_tts_audio(text, output_file, voice="hi-IN-SwaraNeural"):
|
| 109 |
-
"""
|
| 110 |
-
Microsoft Edge TTS का उपयोग करके इंसानी जैसी आवाज़ बनाता है।
|
| 111 |
-
Voices: hi-IN-SwaraNeural (Female), hi-IN-MadhurNeural (Male)
|
| 112 |
-
"""
|
| 113 |
-
communicate = edge_tts.Communicate(text, voice)
|
| 114 |
-
await communicate.save(output_file)
|
| 115 |
-
|
| 116 |
-
def get_random_background_music():
|
| 117 |
-
"""
|
| 118 |
-
assets/music फोल्डर से एक रैंडम म्यूजिक फाइल चुनता है।
|
| 119 |
-
"""
|
| 120 |
-
if not os.path.exists(MUSIC_FOLDER):
|
| 121 |
-
return None
|
| 122 |
-
music_files = [f for f in os.listdir(MUSIC_FOLDER) if f.endswith(('.mp3', '.wav'))]
|
| 123 |
-
if not music_files:
|
| 124 |
-
return None
|
| 125 |
-
selected = random.choice(music_files)
|
| 126 |
-
return os.path.join(MUSIC_FOLDER, selected)
|
| 127 |
-
|
| 128 |
-
# ==============================================================================
|
| 129 |
-
# 3. All API Classes
|
| 130 |
-
# ==============================================================================
|
| 131 |
-
|
| 132 |
class GroqAPI:
|
| 133 |
def __init__(self, api_keys): self.api_keys, self.api_url, self.model, self._key_index = api_keys, "https://api.groq.com/openai/v1/audio/transcriptions", "whisper-large-v3", 0
|
| 134 |
def transcribe_audio(self, audio_path):
|
|
@@ -138,11 +104,9 @@ class GroqAPI:
|
|
| 138 |
headers = {'Authorization': f'Bearer {api_key}'}
|
| 139 |
try:
|
| 140 |
with open(audio_path, 'rb') as audio_file:
|
| 141 |
-
files = {'file': (os.path.basename(audio_path), audio_file, 'audio/mpeg')}
|
| 142 |
-
print(f"-> Groq API: Transcribing audio for timestamps...")
|
| 143 |
response = requests.post(self.api_url, headers=headers, data=data, files=files, timeout=120); response.raise_for_status()
|
| 144 |
-
words_data = response.json().get('words', [])
|
| 145 |
-
return words_data
|
| 146 |
except Exception as e: raise Exception(f"Groq API Error: {e}")
|
| 147 |
|
| 148 |
class PexelsAPI:
|
|
@@ -150,36 +114,39 @@ class PexelsAPI:
|
|
| 150 |
if not api_keys: raise Exception("Pexels API key not found.")
|
| 151 |
self.api_key = api_keys[0]; self.api_url = "https://api.pexels.com/videos/search"
|
| 152 |
def search_and_download(self, query, download_path, orientation, search_page=1):
|
|
|
|
| 153 |
headers = {'Authorization': self.api_key}; params = {'query': query, 'page': search_page, 'per_page': 1, 'orientation': orientation}
|
| 154 |
try:
|
| 155 |
response = requests.get(self.api_url, headers=headers, params=params, timeout=60); response.raise_for_status(); data = response.json()
|
| 156 |
-
if not data.get('videos'): return None
|
| 157 |
video_data = data['videos'][0]; video_files = video_data.get('video_files', []); best_link = None
|
| 158 |
for video_file in video_files:
|
| 159 |
if video_file.get('quality') == 'hd': best_link = video_file.get('link'); break
|
| 160 |
if not best_link and video_files: best_link = video_files[0].get('link')
|
| 161 |
-
if not best_link: return None
|
| 162 |
-
download_response = requests.get(best_link, stream=True, timeout=60); download_response.raise_for_status()
|
| 163 |
with open(download_path, 'wb') as f:
|
| 164 |
for chunk in download_response.iter_content(chunk_size=8192): f.write(chunk)
|
| 165 |
-
return download_path
|
| 166 |
-
except
|
|
|
|
| 167 |
|
| 168 |
class PixabayAPI:
|
| 169 |
def __init__(self, api_keys):
|
| 170 |
if not api_keys: raise Exception("Pixabay API key not found.")
|
| 171 |
self.api_key = api_keys[0]; self.api_url = "https://pixabay.com/api/videos/"
|
| 172 |
def search_and_download(self, query, download_path, orientation, max_clip_length, search_index=0):
|
|
|
|
| 173 |
params = {'key': self.api_key, 'q': query, 'per_page': 5, 'orientation': orientation, 'max_duration': int(max_clip_length)}
|
| 174 |
try:
|
| 175 |
response = requests.get(self.api_url, params=params, timeout=60); response.raise_for_status(); results = response.json()
|
| 176 |
-
if not results['hits'] or len(results['hits']) <= search_index: return None
|
| 177 |
-
video_url = results['hits'][search_index]['videos']['medium']['url']
|
| 178 |
response = requests.get(video_url, stream=True, timeout=60); response.raise_for_status()
|
| 179 |
with open(download_path, 'wb') as f:
|
| 180 |
for chunk in response.iter_content(chunk_size=8192): f.write(chunk)
|
| 181 |
-
return download_path
|
| 182 |
-
except Exception as e: print(f"Pixabay
|
| 183 |
|
| 184 |
class GeminiTeam:
|
| 185 |
MODELS_LIST_URL = "https://generativelanguage.googleapis.com/v1beta/models"
|
|
@@ -187,256 +154,137 @@ class GeminiTeam:
|
|
| 187 |
self.api_keys = api_keys
|
| 188 |
if not self.api_keys: raise Exception("Gemini API key not found.")
|
| 189 |
self.model_name = self._find_best_model()
|
|
|
|
| 190 |
self.api_url = f"https://generativelanguage.googleapis.com/v1beta/{self.model_name}:generateContent"
|
| 191 |
-
|
| 192 |
def _find_best_model(self):
|
| 193 |
-
print("->
|
| 194 |
for api_key in self.api_keys:
|
| 195 |
try:
|
|
|
|
| 196 |
response = requests.get(f"{self.MODELS_LIST_URL}?key={api_key}", timeout=20); response.raise_for_status(); data = response.json()
|
| 197 |
-
available_models = [m['name'] for m in data.get('models', []) if 'flash' in m['name'] and 'generateContent' in m.get('supportedGenerationMethods', [])]
|
| 198 |
-
if available_models:
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
except: continue
|
| 203 |
-
return "models/gemini-1.5-flash" # Fallback
|
| 204 |
-
|
| 205 |
def _make_resilient_api_call(self, prompt, timeout=120):
|
| 206 |
headers = {'Content-Type': 'application/json'}; payload = {'contents': [{'parts': [{'text': prompt}]}]}
|
| 207 |
for api_key in self.api_keys:
|
| 208 |
try:
|
|
|
|
| 209 |
response = requests.post(f"{self.api_url}?key={api_key}", headers=headers, json=payload, timeout=timeout); response.raise_for_status(); result = response.json()
|
| 210 |
-
if 'candidates' in result
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
def extract_keywords(self, script_text):
|
| 215 |
-
prompt = f"""Analyze
|
| 216 |
result = self._make_resilient_api_call(prompt)
|
|
|
|
|
|
|
| 217 |
try:
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
except:
|
| 222 |
-
|
| 223 |
def create_master_timeline(self, word_timestamps, enriched_scenes_with_paths):
|
| 224 |
-
full_script_text = " ".join([
|
| 225 |
-
prompt = f"""
|
| 226 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 227 |
result = self._make_resilient_api_call(prompt, timeout=180)
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
return json.loads(clean_str)
|
| 232 |
-
except: return []
|
| 233 |
-
|
| 234 |
def generate_script(self, topic, video_length):
|
| 235 |
-
|
| 236 |
-
prompt = f"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
result = self._make_resilient_api_call(prompt)
|
| 238 |
-
|
|
|
|
| 239 |
|
| 240 |
-
# ==============================================================================
|
| 241 |
-
# 4. Video Assembler (The Director)
|
| 242 |
-
# ==============================================================================
|
| 243 |
class VideoAssembler:
|
| 244 |
TRANSITION_DURATION = 0.5
|
| 245 |
-
|
| 246 |
def __init__(self, timeline, narration_audio, output_path, width, height, mute_audio, temp_dir):
|
| 247 |
-
self.timeline = timeline
|
| 248 |
-
self.narration_audio = narration_audio
|
| 249 |
-
self.output_path = output_path
|
| 250 |
-
self.width = width
|
| 251 |
-
self.height = height
|
| 252 |
-
self.mute_audio = mute_audio
|
| 253 |
self.temp_dir = temp_dir
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
|
|
|
|
|
|
| 259 |
def assemble_video(self, log_callback):
|
| 260 |
if not self.timeline: return
|
| 261 |
-
|
| 262 |
-
# --- Stage 1: Prepare Individual Clips ---
|
| 263 |
-
log_callback("-> Stage 1/3: क्लिप्स तैयार की जा रही हैं...", 91)
|
| 264 |
prepared_clips = []
|
| 265 |
for i, item in enumerate(self.timeline):
|
| 266 |
-
|
| 267 |
-
duration = float(item['end']) - float(item['start'])
|
| 268 |
-
if i < len(self.timeline) - 1: duration += self.TRANSITION_DURATION
|
| 269 |
-
|
| 270 |
-
cmd = [
|
| 271 |
-
'ffmpeg', '-y', '-ss', str(item.get('start_offset_seconds', 0)), '-i', item['matched_clip'],
|
| 272 |
-
'-t', str(duration),
|
| 273 |
-
'-vf', f"scale='w={self.width}:h={self.height}:force_original_aspect_ratio=increase',crop={self.width}:{self.height},setsar=1,fps=30",
|
| 274 |
-
'-c:v', 'libx264', '-preset', 'ultrafast', '-an', output_clip
|
| 275 |
-
]
|
| 276 |
try:
|
| 277 |
-
|
| 278 |
-
|
|
|
|
| 279 |
except Exception as e:
|
| 280 |
-
log_callback(f"
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
cmd = [
|
| 297 |
-
'ffmpeg', '-y', '-i', current_video, '-i', next_video,
|
| 298 |
-
'-filter_complex', f"[0:v][1:v]xfade=transition=fade:duration={self.TRANSITION_DURATION}:offset={offset},format=yuv420p",
|
| 299 |
-
'-c:v', 'libx264', '-preset', 'ultrafast', out_trans
|
| 300 |
]
|
| 301 |
-
self.
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
input_cmds = ['-i', visual_only_video]
|
| 310 |
-
if not self.mute_audio:
|
| 311 |
-
input_cmds.extend(['-i', self.narration_audio])
|
| 312 |
-
|
| 313 |
-
filter_complex = "[1:a]volume=1.0[voice]" # Base voice volume
|
| 314 |
-
map_cmd = ["-map", "0:v", "-map", "[a_out]"]
|
| 315 |
-
|
| 316 |
-
if self.bg_music:
|
| 317 |
-
log_callback(f" 🎵 Background Music Found: {os.path.basename(self.bg_music)}", 98)
|
| 318 |
-
input_cmds.extend(['-stream_loop', '-1', '-i', self.bg_music]) # Loop music
|
| 319 |
-
# Mix logic: Voice normal, Music 15% volume, mix them, cut when shortest input ends
|
| 320 |
-
filter_complex += ";[2:a]volume=0.15[bg];[voice][bg]amix=inputs=2:duration=first[a_out]"
|
| 321 |
-
else:
|
| 322 |
-
log_callback(" ℹ️ No Background Music found in assets/music.", 98)
|
| 323 |
-
filter_complex = "[1:a]volume=1.0[a_out]"
|
| 324 |
-
|
| 325 |
-
cmd = ['ffmpeg', '-y'] + input_cmds + ['-filter_complex', filter_complex] + map_cmd + ['-c:v', 'copy', '-c:a', 'aac', '-shortest', self.output_path]
|
| 326 |
-
else:
|
| 327 |
-
# Muted output
|
| 328 |
-
cmd = ['ffmpeg', '-y', '-i', visual_only_video, '-c:v', 'copy', '-an', self.output_path]
|
| 329 |
-
|
| 330 |
-
self._run_ffmpeg(cmd)
|
| 331 |
-
|
| 332 |
-
# ==============================================================================
|
| 333 |
-
# 5. Worker Function
|
| 334 |
-
# ==============================================================================
|
| 335 |
-
def run_ai_engine_worker(task_id, script_text, script_file_path, orientation, max_clip_length, mute_final_video):
|
| 336 |
-
log = lambda message, progress: update_task_log(task_id, message, progress)
|
| 337 |
-
temp_dir = os.path.join(UPLOAD_FOLDER, task_id)
|
| 338 |
-
|
| 339 |
-
try:
|
| 340 |
-
# --- Step 0: Init ---
|
| 341 |
-
log("Step 0: सिस्टम चेक और चाबियाँ (Keys) लोड की जा रही हैं...", 2)
|
| 342 |
-
gemini_keys = load_api_keys("Gemini_Key")
|
| 343 |
-
if not gemini_keys: raise Exception("Gemini Keys missing.")
|
| 344 |
-
gemini = GeminiTeam(gemini_keys)
|
| 345 |
-
|
| 346 |
-
# --- Step 1: Audio Generation (Advanced) ---
|
| 347 |
-
log("Step 1: ऑडियो तैयार किया जा रहा है...", 10)
|
| 348 |
-
os.makedirs(temp_dir, exist_ok=True)
|
| 349 |
-
|
| 350 |
-
if script_file_path:
|
| 351 |
-
narration_audio_path = script_file_path
|
| 352 |
-
log("-> यूजर द्वारा अपलोड की गई ऑडियो फाइल का उपयोग किया जा रहा है।", 12)
|
| 353 |
else:
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
for i in range(len(word_timestamps) - 1):
|
| 371 |
-
gap = float(word_timestamps[i+1]['start']) - float(word_timestamps[i]['end'])
|
| 372 |
-
if gap > 1.5: timestamps_with_pauses.append({'word': '[PAUSE]', 'start': word_timestamps[i]['end'], 'end': word_timestamps[i+1]['start']})
|
| 373 |
-
timestamps_with_pauses.append(word_timestamps[i+1])
|
| 374 |
-
|
| 375 |
-
# --- Step 3: Visual Search ---
|
| 376 |
-
log("Step 3: विज़ुअल्स खोजे जा रहे हैं...", 30)
|
| 377 |
-
full_text = " ".join([w['word'] for w in timestamps_with_pauses])
|
| 378 |
-
scenes = gemini.extract_keywords(full_text)
|
| 379 |
-
|
| 380 |
-
pexels = PexelsAPI(load_api_keys("Pexels_Key"))
|
| 381 |
-
pixabay = PixabayAPI(load_api_keys("Pixabay_Key"))
|
| 382 |
-
|
| 383 |
-
successful_scenes = []
|
| 384 |
-
for i, scene in enumerate(scenes):
|
| 385 |
-
d_path = os.path.join(temp_dir, f"scene_{i}.mp4")
|
| 386 |
-
log(f" -> Scene {i+1}: {scene['primary_query']}", 30+i)
|
| 387 |
-
|
| 388 |
-
found = pexels.search_and_download(scene['primary_query'], d_path, orientation)
|
| 389 |
-
if not found: found = pixabay.search_and_download(scene['primary_query'], d_path, orientation, max_clip_length)
|
| 390 |
-
if not found: found = pexels.search_and_download(scene['fallback_query'], d_path, orientation) # Fallback
|
| 391 |
-
|
| 392 |
-
if found:
|
| 393 |
-
scene['downloaded_path'] = found
|
| 394 |
-
successful_scenes.append(scene)
|
| 395 |
-
|
| 396 |
-
if not successful_scenes: raise Exception("No videos found.")
|
| 397 |
-
|
| 398 |
-
# --- Step 4: Timeline & Assembly ---
|
| 399 |
-
log("Step 4: मास्टर टाइमलाइन बनाई जा रही है...", 70)
|
| 400 |
-
timeline = gemini.create_master_timeline(timestamps_with_pauses, successful_scenes)
|
| 401 |
-
|
| 402 |
-
# Gap filling logic (simplified for brevity, keeping integrity)
|
| 403 |
-
final_timeline = []
|
| 404 |
-
if timeline:
|
| 405 |
-
timeline.sort(key=lambda x: float(x['start']))
|
| 406 |
-
for i, clip in enumerate(timeline):
|
| 407 |
-
# Ensure path is valid
|
| 408 |
-
real_path = clip.get('matched_clip')
|
| 409 |
-
if isinstance(real_path, dict): real_path = real_path.get('downloaded_path')
|
| 410 |
-
if not real_path or not os.path.exists(real_path): continue
|
| 411 |
-
|
| 412 |
-
clip['matched_clip'] = real_path
|
| 413 |
-
# Extend to close gaps
|
| 414 |
-
if i < len(timeline)-1:
|
| 415 |
-
next_start = float(timeline[i+1]['start'])
|
| 416 |
-
if float(clip['end']) < next_start: clip['end'] = next_start
|
| 417 |
-
else:
|
| 418 |
-
# Extend last clip
|
| 419 |
-
if word_timestamps: clip['end'] = word_timestamps[-1]['end']
|
| 420 |
-
final_timeline.append(clip)
|
| 421 |
-
|
| 422 |
-
# --- Step 5: Final Render ---
|
| 423 |
-
log("Step 5: फाइनल रेंडरिंग (Magic happening)...", 90)
|
| 424 |
-
w, h = (1080, 1920) if orientation == 'vertical' else (1920, 1080)
|
| 425 |
-
out_file = f"{task_id}_final.mp4"
|
| 426 |
-
out_path = os.path.join(OUTPUT_FOLDER, out_file)
|
| 427 |
-
|
| 428 |
-
assembler = VideoAssembler(final_timeline, narration_audio_path, out_path, w, h, mute_final_video, temp_dir)
|
| 429 |
-
assembler.assemble_video(log)
|
| 430 |
-
|
| 431 |
-
update_task_final_status(task_id, 'complete', output_filename=out_file)
|
| 432 |
-
|
| 433 |
-
except Exception as e:
|
| 434 |
-
import traceback; traceback.print_exc()
|
| 435 |
-
update_task_final_status(task_id, 'error', error_message=str(e))
|
| 436 |
-
finally:
|
| 437 |
-
if os.path.exists(temp_dir): shutil.rmtree(temp_dir)
|
| 438 |
-
|
| 439 |
-
def generate_script_with_ai(topic, video_length):
|
| 440 |
-
try:
|
| 441 |
-
return GeminiTeam(load_api_keys("Gemini_Key")).generate_script(topic, video_length)
|
| 442 |
-
except Exception as e: raise e
|
|
|
|
| 1 |
# ==============================================================================
|
| 2 |
+
# engine.py - [FINAL CORRECTED VERSION FOR HUGGING FACE]
|
| 3 |
+
# CHANGE 1: API Keys environment variables se load hongi.
|
| 4 |
+
# CHANGE 2: Sabhi folder paths (data, uploads, outputs) absolute hain.
|
| 5 |
# ==============================================================================
|
| 6 |
+
# engine.py के शुरुआत में
|
| 7 |
import os
|
| 8 |
import time
|
| 9 |
import json
|
| 10 |
import uuid
|
| 11 |
+
import threading
|
| 12 |
import subprocess
|
| 13 |
import requests
|
| 14 |
import sqlite3
|
| 15 |
import random
|
| 16 |
import shutil
|
| 17 |
import re
|
| 18 |
+
from gtts import gTTS
|
|
|
|
| 19 |
from werkzeug.utils import secure_filename
|
| 20 |
|
| 21 |
# ==============================================================================
|
| 22 |
+
# 1. Global Setup and Database Functions (FINAL CORRECTED VERSION)
|
| 23 |
# ==============================================================================
|
| 24 |
+
# <<<--- YAHAN SE BADLAV SHURU HAI --- >>>
|
| 25 |
+
# प्रोजेक्ट की रूट डायरेक्टरी को एब्सोल्यूट पाथ के रूप में सेट करें
|
| 26 |
APP_ROOT = '/code'
|
| 27 |
+
# सभी ज़रूरी फोल्डरों के लिए एब्सोल्यूट पाथ बनाएँ
|
|
|
|
| 28 |
DATA_FOLDER = os.path.join(APP_ROOT, 'data')
|
| 29 |
UPLOAD_FOLDER = os.path.join(APP_ROOT, 'uploads')
|
| 30 |
OUTPUT_FOLDER = os.path.join(APP_ROOT, 'outputs')
|
| 31 |
+
# डेटाबेस फाइल का पूरा एब्सोल्यूट पाथ सेट करें
|
|
|
|
|
|
|
|
|
|
| 32 |
DATABASE_FILE = os.path.join(DATA_FOLDER, 'tasks.db')
|
| 33 |
+
# <<<--- BADLAV YAHAN KHATM HOTA HAI --- >>>
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
def get_db_connection():
|
| 36 |
+
# अब यह सही पाथ का उपयोग करेगा: /code/data/tasks.db
|
| 37 |
conn = sqlite3.connect(DATABASE_FILE, check_same_thread=False)
|
| 38 |
conn.row_factory = sqlite3.Row
|
| 39 |
return conn
|
|
|
|
| 45 |
conn.close()
|
| 46 |
|
| 47 |
def create_task(task_id):
|
| 48 |
+
log_message = "मिशन शुरू हो रहा है...\n"
|
| 49 |
conn = get_db_connection()
|
| 50 |
conn.execute('INSERT INTO tasks (id, status, progress, log) VALUES (?, ?, ?, ?)', (task_id, 'processing', 0, log_message))
|
| 51 |
conn.commit()
|
|
|
|
| 59 |
|
| 60 |
def update_task_log(task_id, message, progress):
|
| 61 |
conn = get_db_connection()
|
| 62 |
+
current_log = conn.execute('SELECT log FROM tasks WHERE id = ?', (task_id,)).fetchone()['log']
|
| 63 |
+
new_log = current_log + message + "\n"
|
| 64 |
+
conn.execute('UPDATE tasks SET log = ?, progress = ? WHERE id = ?', (new_log, progress, task_id))
|
| 65 |
+
conn.commit()
|
| 66 |
+
conn.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
def update_task_final_status(task_id, status, error_message=None, output_filename=None):
|
| 69 |
conn = get_db_connection()
|
| 70 |
+
current_log = conn.execute('SELECT log FROM tasks WHERE id = ?', (task_id,)).fetchone()['log']
|
| 71 |
+
if status == 'error':
|
| 72 |
+
final_log = current_log + f"\n\n��� FATAL ERROR: {error_message}"
|
| 73 |
+
conn.execute('UPDATE tasks SET status = ?, log = ? WHERE id = ?', (status, final_log, task_id))
|
| 74 |
+
elif status == 'complete':
|
| 75 |
+
final_log = current_log + "🎉 मिशन पूरा हुआ!"
|
| 76 |
+
conn.execute('UPDATE tasks SET status = ?, progress = ?, output_filename = ?, log = ? WHERE id = ?', (status, 100, output_filename, final_log, task_id))
|
| 77 |
+
conn.commit()
|
| 78 |
+
conn.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
|
| 80 |
def load_api_keys(prefix):
|
| 81 |
+
"""
|
| 82 |
+
सिस्टम के एनवायरनमेंट वेरिएबल्स से API कीज़ लोड करता है।
|
| 83 |
+
"""
|
| 84 |
try:
|
| 85 |
prefix_lower = prefix.lower()
|
| 86 |
keys = [v for k, v in os.environ.items() if k.lower().startswith(prefix_lower)]
|
|
|
|
| 92 |
return []
|
| 93 |
|
| 94 |
# ==============================================================================
|
| 95 |
+
# 2. All API and Worker Classes
|
| 96 |
+
# (Is section mein koi badlav nahi hai)
|
| 97 |
# ==============================================================================
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 98 |
class GroqAPI:
|
| 99 |
def __init__(self, api_keys): self.api_keys, self.api_url, self.model, self._key_index = api_keys, "https://api.groq.com/openai/v1/audio/transcriptions", "whisper-large-v3", 0
|
| 100 |
def transcribe_audio(self, audio_path):
|
|
|
|
| 104 |
headers = {'Authorization': f'Bearer {api_key}'}
|
| 105 |
try:
|
| 106 |
with open(audio_path, 'rb') as audio_file:
|
| 107 |
+
files = {'file': (os.path.basename(audio_path), audio_file, 'audio/mpeg')}; print(f"-> Groq API को शब्द-स्तर पर टाइमस्टैम्प के लिए भेजा जा रहा है...")
|
|
|
|
| 108 |
response = requests.post(self.api_url, headers=headers, data=data, files=files, timeout=120); response.raise_for_status()
|
| 109 |
+
words_data = response.json().get('words', []); print(f"-> ट्रांसक्रिप्शन सफल: {len(words_data)} शब्दों के टाइमस्टैम्प मिले।"); return words_data
|
|
|
|
| 110 |
except Exception as e: raise Exception(f"Groq API Error: {e}")
|
| 111 |
|
| 112 |
class PexelsAPI:
|
|
|
|
| 114 |
if not api_keys: raise Exception("Pexels API key not found.")
|
| 115 |
self.api_key = api_keys[0]; self.api_url = "https://api.pexels.com/videos/search"
|
| 116 |
def search_and_download(self, query, download_path, orientation, search_page=1):
|
| 117 |
+
print(f"-> Pexels पर खोजा जा रहा है (Direct API): '{query}' (Page: {search_page}, Orientation: {orientation})")
|
| 118 |
headers = {'Authorization': self.api_key}; params = {'query': query, 'page': search_page, 'per_page': 1, 'orientation': orientation}
|
| 119 |
try:
|
| 120 |
response = requests.get(self.api_url, headers=headers, params=params, timeout=60); response.raise_for_status(); data = response.json()
|
| 121 |
+
if not data.get('videos'): print(f"-> Pexels पर '{query}' के लिए कोई परिणाम नहीं मिला।"); return None
|
| 122 |
video_data = data['videos'][0]; video_files = video_data.get('video_files', []); best_link = None
|
| 123 |
for video_file in video_files:
|
| 124 |
if video_file.get('quality') == 'hd': best_link = video_file.get('link'); break
|
| 125 |
if not best_link and video_files: best_link = video_files[0].get('link')
|
| 126 |
+
if not best_link: print(f"-> Pexels परिणाम में क���ई डाउनलोड करने योग्य लिंक नहीं मिला।"); return None
|
| 127 |
+
print(f"-> Pexels से वीडियो डाउनलोड किया जा रहा है..."); download_response = requests.get(best_link, stream=True, timeout=60); download_response.raise_for_status()
|
| 128 |
with open(download_path, 'wb') as f:
|
| 129 |
for chunk in download_response.iter_content(chunk_size=8192): f.write(chunk)
|
| 130 |
+
print(f"-> सफलतापूर्वक सहेजा गया: {download_path}"); return download_path
|
| 131 |
+
except requests.exceptions.RequestException as e: print(f"🚨 Pexels API में त्रुटि: {e}"); return None
|
| 132 |
+
except Exception as e: print(f"🚨 Pexels वीडियो डाउनलोड करने में अज्ञात त्रुटि: {e}"); return None
|
| 133 |
|
| 134 |
class PixabayAPI:
|
| 135 |
def __init__(self, api_keys):
|
| 136 |
if not api_keys: raise Exception("Pixabay API key not found.")
|
| 137 |
self.api_key = api_keys[0]; self.api_url = "https://pixabay.com/api/videos/"
|
| 138 |
def search_and_download(self, query, download_path, orientation, max_clip_length, search_index=0):
|
| 139 |
+
print(f"-> Pixabay पर खोजा जा रहा है: '{query}' (Index: {search_index})")
|
| 140 |
params = {'key': self.api_key, 'q': query, 'per_page': 5, 'orientation': orientation, 'max_duration': int(max_clip_length)}
|
| 141 |
try:
|
| 142 |
response = requests.get(self.api_url, params=params, timeout=60); response.raise_for_status(); results = response.json()
|
| 143 |
+
if not results['hits'] or len(results['hits']) <= search_index: print(f"-> Pixabay पर '{query}' के लिए index {search_index} पर कोई परिणाम नहीं मिला।"); return None
|
| 144 |
+
video_url = results['hits'][search_index]['videos']['medium']['url']; print(f"-> Pixabay से वीडियो डाउनलोड किया जा रहा है...")
|
| 145 |
response = requests.get(video_url, stream=True, timeout=60); response.raise_for_status()
|
| 146 |
with open(download_path, 'wb') as f:
|
| 147 |
for chunk in response.iter_content(chunk_size=8192): f.write(chunk)
|
| 148 |
+
print(f"-> सफलतापूर्वक सहेजा गया: {download_path}"); return download_path
|
| 149 |
+
except Exception as e: print(f"🚨 Pixabay API में त्रुटि: {e}"); return None
|
| 150 |
|
| 151 |
class GeminiTeam:
|
| 152 |
MODELS_LIST_URL = "https://generativelanguage.googleapis.com/v1beta/models"
|
|
|
|
| 154 |
self.api_keys = api_keys
|
| 155 |
if not self.api_keys: raise Exception("Gemini API key not found.")
|
| 156 |
self.model_name = self._find_best_model()
|
| 157 |
+
if not self.model_name: raise Exception("Could not dynamically find a suitable Gemini 'flash' model from any of the provided keys.")
|
| 158 |
self.api_url = f"https://generativelanguage.googleapis.com/v1beta/{self.model_name}:generateContent"
|
| 159 |
+
print(f"✅ स्मार्ट मॉडल हंटर सफल: '{self.model_name}' का उपयोग किया जाएगा।")
|
| 160 |
def _find_best_model(self):
|
| 161 |
+
print("-> स्मार्ट मॉडल हंटर: सबसे अच्छे 'gemini-*-flash' मॉडल को खोजा जा रहा है...")
|
| 162 |
for api_key in self.api_keys:
|
| 163 |
try:
|
| 164 |
+
print(f"-> API Key के अंतिम 4 अक्षरों से कोशिश की जा रही है: ...{api_key[-4:]}")
|
| 165 |
response = requests.get(f"{self.MODELS_LIST_URL}?key={api_key}", timeout=20); response.raise_for_status(); data = response.json()
|
| 166 |
+
available_models = [m['name'] for m in data.get('models', []) if 'flash' in m['name'] and 'generateContent' in m.get('supportedGenerationMethods', []) and 'exp' not in m['name']]
|
| 167 |
+
if not available_models: continue
|
| 168 |
+
available_models.sort(reverse=True); print(f"-> उपलब्ध 'flash' मॉडल मिले: {available_models}"); return available_models[0]
|
| 169 |
+
except requests.exceptions.RequestException as e: print(f"🚨 API Key ...{api_key[-4:]} के साथ त्रुटि: {e}. अगली की आजमाई जा रही है..."); continue
|
| 170 |
+
print("🚨 स्मार्ट मॉडल हंटर में गंभीर त्रुटि: कोई भी Gemini API Key काम नहीं कर रही है।"); return None
|
|
|
|
|
|
|
|
|
|
| 171 |
def _make_resilient_api_call(self, prompt, timeout=120):
|
| 172 |
headers = {'Content-Type': 'application/json'}; payload = {'contents': [{'parts': [{'text': prompt}]}]}
|
| 173 |
for api_key in self.api_keys:
|
| 174 |
try:
|
| 175 |
+
print(f"-> Gemini को अनुरोध भेजा जा रहा है (Key: ...{api_key[-4:]}, Model: {self.model_name.split('/')[-1]})")
|
| 176 |
response = requests.post(f"{self.api_url}?key={api_key}", headers=headers, json=payload, timeout=timeout); response.raise_for_status(); result = response.json()
|
| 177 |
+
if 'candidates' not in result or not result['candidates']: print(f"🚨 चेतावनी: Key ...{api_key[-4:]} से कोई कैंडिडेट नहीं मिला (संभवतः सुरक्षा ब्लॉक)। अगली की आजमाई जा रही है..."); continue
|
| 178 |
+
return result
|
| 179 |
+
except requests.exceptions.RequestException as e: print(f"🚨 API कॉल में त्रुटि (Key: ...{api_key[-4:]}): {e}. अगली की आजमाई जा रही है...");
|
| 180 |
+
raise Exception("Gemini API Error: All available API keys failed. Please check your keys and quotas.")
|
| 181 |
def extract_keywords(self, script_text):
|
| 182 |
+
prompt = f"""You are a search query expert. Analyze the script below and for each scene, create a JSON object. Each object must contain: 1. "scene_description": A brief description of the scene. 2. "primary_query": A highly creative, emotional, and cinematic search query in English. This is the main attempt. 3. "fallback_query": A simple, literal, and direct search query in English. Use this if the primary query fails. RULES: - Your response MUST be ONLY a JSON list of objects. - All queries must be in English. Script: "{script_text}" Example: [ {{"scene_description": "A person looking at a mountain.", "primary_query": "inspirational mountain peak cinematic hope", "fallback_query": "man looking at mountain"}} ] Generate the JSON:"""
|
| 183 |
result = self._make_resilient_api_call(prompt)
|
| 184 |
+
json_str = result['candidates'][0]['content']['parts'][0]['text']
|
| 185 |
+
clean_str = json_str[json_str.find('['):json_str.rfind(']') + 1]; scenes = json.loads(clean_str)
|
| 186 |
try:
|
| 187 |
+
log_file_path = os.path.join(OUTPUT_FOLDER, 'gemini_analysis_log.json')
|
| 188 |
+
with open(log_file_path, 'w', encoding='utf-8') as f: json.dump(scenes, f, ensure_ascii=False, indent=4)
|
| 189 |
+
print(f"-> Gemini का विश्लेषण सफलतापूर्वक '{log_file_path}' में सहेजा गया।")
|
| 190 |
+
except Exception as e: print(f"🚨 चेतावनी: Gemini विश्लेषण लॉग करने में विफल: {e}")
|
| 191 |
+
print(f"-> Gemini ने सफलतापूर्वक {len(scenes)} प्राथमिक/फ़ॉलबैक दृश्य निकाले।"); return scenes
|
| 192 |
def create_master_timeline(self, word_timestamps, enriched_scenes_with_paths):
    """Ask Gemini to lay the downloaded clips onto the narration timeline.

    Args:
        word_timestamps: Word-level narration timings; each dict has at least
            'word', 'start' and 'end' (may include '[PAUSE]' marker words).
        enriched_scenes_with_paths: Scene descriptors including the local
            clip paths chosen for each scene.

    Returns:
        list[dict]: Timeline entries with "start", "end", "matched_clip" and
        "start_offset_seconds" covering the full audio duration.

    Raises:
        Exception: If every API key fails upstream, or the model response
            contains no JSON list at all.
    """
    full_script_text = " ".join(word['word'] for word in word_timestamps)
    total_duration = word_timestamps[-1]['end'] if word_timestamps else 0
    prompt = f"""You are an expert AI video editor. Create a frame-perfect timeline JSON.
Assets:
1. **Full Script:** "{full_script_text}"
2. **Total Audio Duration:** {total_duration:.2f} seconds.
3. **Available Scene Clips:** {json.dumps(enriched_scenes_with_paths, indent=2)}
4. **Word-Level Timestamps (with Pauses):** {json.dumps(word_timestamps, indent=2)}.

RULES:
1. Your response MUST be ONLY a list of JSON objects.
2. Each object must have "start", "end", "matched_clip", and "start_offset_seconds".
3. **CRITICAL:** The timeline MUST cover the entire audio duration from 0 to {total_duration:.2f} seconds. There should be NO GAPS.
4. **CRITICAL:** You MUST use each video from the 'Available Scene Clips' list only once. Do not repeat clips.
5. **NEW CRITICAL RULE:** In the 'Word-Level Timestamps', you will find special words like '[PAUSE]'. This represents a deliberate silence in the narration. Treat this as a creative opportunity! It is the perfect moment for a beautiful transition between two clips or to let a cinematic shot play out for its full emotional impact. DO NOT repeat the previous clip to fill a pause. Use the pause to enhance the video's pacing.

Create the final timeline JSON:"""
    # Timeline assembly is a heavier task than the other calls, so allow more time.
    result = self._make_resilient_api_call(prompt, timeout=180)
    json_str = result['candidates'][0]['content']['parts'][0]['text']
    # BUGFIX: slice the [...] span only after confirming it exists; -1 from
    # find()/rfind() used to produce a nonsense slice and an opaque JSON error.
    list_start = json_str.find('[')
    list_end = json_str.rfind(']')
    if list_start == -1 or list_end == -1:
        raise Exception(f"Gemini timeline response contained no JSON list:\n{json_str}")
    final_timeline = json.loads(json_str[list_start:list_end + 1])
    print(f"-> Gemini Master Editor ने सफलतापूर्वक {len(final_timeline)} क्लिप्स की टाइमलाइन और ऑफसेट बना दी है।")
    return final_timeline
|
|
|
|
|
|
|
|
|
|
| 213 |
def generate_script(self, topic, video_length):
    """Generate a spoken-only Hindi narration script for *topic* via Gemini.

    Args:
        topic: Subject the script should cover.
        video_length: One of "short", "medium" or "long"; anything else
            falls back to the medium (~150 word) target.

    Returns:
        str: The generated narration text, stripped of surrounding whitespace.
    """
    length_targets = {"short": "~75 शब्द", "medium": "~150 शब्द", "long": "~300 शब्द"}
    target_word_count = length_targets.get(video_length, "~150 शब्द")
    prompt = f"""आप 'स्पार्कलिंग ज्ञान' के लिए एक विशेषज्ञ हिंदी स्क्रिप्ट राइटर हैं।
विषय: "{topic}".
निर्देश:
1. इस विषय पर एक आकर्षक, {target_word_count} की स्क्रिप्ट लिखें।
2. भाषा सरल और बोलचाल वाली हो।
3. हर 2-3 लाइनों के बाद एक नया विज़ुअल या सीन दिखाया जा सके, इस तरह से लिखें।
4. **CRITICAL RULE:** आपका आउटपुट सिर्फ और सिर्फ बोले जाने वाले डायलॉग्स (narration) होने चाहिए। किसी भी तरह के विज़ुअल निर्देश, सीन डिस्क्रिप्शन या ब्रैकेट () [] में लिखी कोई भी जानकारी आउटपुट में नहीं होनी चाहिए। सिर्फ वो टेक्स्ट दें जो ऑडियो में बोला जाएगा।

अब, स्क्रिप्ट लिखें:"""
    response = self._make_resilient_api_call(prompt)
    script_text = response['candidates'][0]['content']['parts'][0]['text']
    print("-> Gemini ने सफलतापूर्वक स्क्रिप्ट जेनरेट कर दी है।")
    return script_text.strip()
|
| 227 |
|
|
|
|
|
|
|
|
|
|
| 228 |
class VideoAssembler:
    """Renders the final video with ffmpeg: each timeline entry is prepared as
    an individual clip, then consecutive clips are joined with crossfades."""

    # Length in seconds of the xfade crossfade between consecutive clips;
    # every prepared clip except the last is extended by this overlap.
    TRANSITION_DURATION = 0.5
|
|
|
|
| 230 |
def __init__(self, timeline, narration_audio, output_path, width, height, mute_audio, temp_dir):
    """Store the parameters needed to assemble the final video.

    Args:
        timeline: Ordered clip entries ("start", "end", "matched_clip",
            "start_offset_seconds") describing what to render.
        narration_audio: Path to the narration audio track.
        output_path: Where the finished video file is written.
        width: Output frame width in pixels.
        height: Output frame height in pixels.
        mute_audio: Whether the final video should carry no audio.
        temp_dir: Directory for intermediate ffmpeg artifacts.
    """
    self.timeline = timeline
    self.narration_audio = narration_audio
    self.output_path = output_path
    self.width = width
    self.height = height
    self.mute_audio = mute_audio
    self.temp_dir = temp_dir
|
| 233 |
+
def _run_ffmpeg_command(self, command, suppress_errors=False):
    """Run an ffmpeg/ffprobe command and return the completed process.

    Args:
        command: Argument list passed straight to subprocess.run.
        suppress_errors: When True, a non-zero exit code is returned to the
            caller instead of raising.

    Returns:
        subprocess.CompletedProcess: Result with captured stdout/stderr.

    Raises:
        Exception: On a non-zero exit code (unless suppressed), including
            the command's stderr in the message.
    """
    completed = subprocess.run(command, capture_output=True, text=True)
    # Guard clause: success, or the caller explicitly wants failures back.
    if suppress_errors or completed.returncode == 0:
        return completed
    detail = f"Return Code {completed.returncode}"
    # -9 means the process received SIGKILL, typically the OOM killer.
    if completed.returncode == -9:
        detail += " (SIGKILL): Process was killed, likely due to excessive memory usage."
    raise Exception(f"FFmpeg Error ({detail}):\nSTDERR:\n{completed.stderr}")
|
| 240 |
def assemble_video(self, log_callback):
|
| 241 |
if not self.timeline: return
|
| 242 |
+
log_callback("-> Stage 1/3: सभी क्लिप्स को व्यक्तिगत रूप से तैयार किया जा रहा है...", 91)
|
|
|
|
|
|
|
| 243 |
prepared_clips = []
|
| 244 |
for i, item in enumerate(self.timeline):
|
| 245 |
+
input_clip_path = item['matched_clip']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 246 |
try:
|
| 247 |
+
ffprobe_command = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', input_clip_path]
|
| 248 |
+
duration_proc = self._run_ffmpeg_command(ffprobe_command)
|
| 249 |
+
actual_clip_duration = float(duration_proc.stdout.strip())
|
| 250 |
except Exception as e:
|
| 251 |
+
log_callback(f"🚨 चेतावनी: क्लिप {os.path.basename(input_clip_path)} की अवधि का पता नहीं लगाया जा सका, इसे छोड़ दिया जाएगा। त्रुटि: {e}", 91)
|
| 252 |
+
continue
|
| 253 |
+
start_offset = float(item.get('start_offset_seconds', 0.0))
|
| 254 |
+
if start_offset >= actual_clip_duration:
|
| 255 |
+
log_callback(f" -> 🚨 चेतावनी: AI द्वारा दिया गया स्टार्ट ऑफसेट ({start_offset}s) क्लिप की वास्तविक लंबाई ({actual_clip_duration:.2f}s) से अधिक है। ऑफसेट को 0 पर रीसेट किया जा रहा है।", 91)
|
| 256 |
+
start_offset = 0.0
|
| 257 |
+
is_last_clip = (i == len(self.timeline) - 1)
|
| 258 |
+
overlap = 0 if is_last_clip else self.TRANSITION_DURATION
|
| 259 |
+
duration = (float(item['end']) - float(item['start'])) + overlap
|
| 260 |
+
if duration <= 0: continue
|
| 261 |
+
output_clip_path = os.path.join(self.temp_dir, f"prepared_{i:03d}.mp4")
|
| 262 |
+
command = [
|
| 263 |
+
'ffmpeg', '-y', '-ss', str(start_offset), '-i', input_clip_path, '-t', str(duration),
|
| 264 |
+
'-vf', f"scale='w={self.width}:h={self.height}:force_original_aspect_ratio=increase',crop={self.width}:{self.height},setsar=1,fps=30",
|
| 265 |
+
'-c:v', 'libx264', '-preset', 'ultrafast', '-an', '-threads', '1', output_clip_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 266 |
]
|
| 267 |
+
self._run_ffmpeg_command(command)
|
| 268 |
+
prepared_clips.append(output_clip_path)
|
| 269 |
+
log_callback("-> Stage 2/3: क्लिप्स को ट्रांजीशन के साथ जोड़ा जा रहा है...", 94)
|
| 270 |
+
if not prepared_clips: raise Exception("कोई भी क्लिप सफलतापूर्वक तैयार नहीं हो सकी।")
|
| 271 |
+
if len(prepared_clips) == 1:
|
| 272 |
+
shutil.copy(prepared_clips[0], self.output_path)
|
| 273 |
+
transitioned_video_path = self.output_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 274 |
else:
|
| 275 |
+
current_video = prepared_clips[0]
|
| 276 |
+
for i in range(len(prepared_clips) - 1):
|
| 277 |
+
next_video = prepared_clips[i+1]
|
| 278 |
+
output_path = os.path.join(self.temp_dir, f"transition_{i:03d}.mp4")
|
| 279 |
+
total_transitions = len(prepared_clips) - 1
|
| 280 |
+
progress = 94 + int((i / total_transitions) * 4) if total_transitions > 0 else 94
|
| 281 |
+
log_callback(f" -> ट्रांजीशन बनाया जा रहा है: क्लिप {i+1} और {i+2}", progress)
|
| 282 |
+
ffprobe_command = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', current_video]
|
| 283 |
+
duration_proc = self._run_ffmpeg_command(ffprobe_command)
|
| 284 |
+
transition_offset = float(duration_proc.stdout.strip()) - self.TRANSITION_DURATION
|
| 285 |
+
command = [
|
| 286 |
+
'ffmpeg', '-y', '-i', current_video, '-i', next_video,
|
| 287 |
+
'-filter_complex', f"[0:v][1:v]xfade=transition=fade:duration={self.TRANSITION_DURATION}:offset={transition_offset},format=yuv420p",
|
| 288 |
+
'-c:v', 'libx264', '-preset', 'ultrafast', output_path
|
| 289 |
+
]
|
| 290 |
+
se
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|