Spaces:
Sleeping
Sleeping
| from moviepy.editor import * | |
| from PIL import Image | |
| import pytesseract | |
| import numpy as np | |
| from gtts import gTTS | |
| from mutagen.mp3 import MP3 | |
| import uuid | |
| import os | |
| from pathlib import Path | |
# Directory layout: base lives in /app/data (created with proper permissions),
# OCR source images are staged under /tmp.
BASE_DIR = "/app/data"
IMAGE_DIR = "/tmp/images"
AUDIO_DIR = os.path.join(BASE_DIR, "sound")
CLIPS_DIR = os.path.join(BASE_DIR, "video")

# Create every working directory up front; no chmod is needed.
for _dir in (IMAGE_DIR, BASE_DIR, AUDIO_DIR, CLIPS_DIR):
    Path(_dir).mkdir(parents=True, exist_ok=True)
# Generate audio
def audio_func(id, lines):
    """Synthesize speech for ``lines[id]`` with gTTS and save it as an MP3.

    Returns:
        (duration, audio_path): length of the clip in seconds (float) and
        the path of the saved MP3 under AUDIO_DIR.

    Raises:
        RuntimeError: if the MP3 file was not written to disk.
    """
    tts = gTTS(text=lines[id], lang='en', slow=False)
    audio_path = os.path.join(AUDIO_DIR, "audio" + str(id) + ".mp3")
    tts.save(audio_path)
    # Fail loudly here. The original silently fell through and returned
    # None when the file was missing, which crashed the caller at the
    # tuple unpack with an unrelated-looking TypeError.
    if not os.path.exists(audio_path):
        raise RuntimeError(f"gTTS failed to write audio file: {audio_path}")
    audio = MP3(audio_path)
    duration = audio.info.length
    return duration, audio_path
# --- CONFIGURATION ---
def video_func(id, lines):
    """Render one narrated slide clip and write it to CLIPS_DIR/clip{id}.mp4.

    Pipeline: synthesize TTS audio for lines[id], OCR slide{id}.png to find
    word bounding boxes, then overlay a semi-transparent yellow highlight on
    each word in reading order, evenly paced across the audio duration.
    """
    print(id, lines[id])
    duration, audio_path = audio_func(id, lines)
    IMAGE_PATH = os.path.join(IMAGE_DIR, f"slide{id}.png")  # Ensure this path is correct
    VIDEO_DURATION = duration          # seconds
    HIGHLIGHT_COLOR = (255, 255, 0)    # Yellow highlight
    HIGHLIGHT_OPACITY = 0.5            # Semi-transparent

    # --- OCR STEP: locate word bounding boxes on the slide image ---
    img = Image.open(IMAGE_PATH)
    data = pytesseract.image_to_data(img, output_type=pytesseract.Output.DICT)
    words = []
    for i in range(len(data['text'])):
        word = data['text'][i].strip()
        # Keep only non-empty words Tesseract is reasonably confident about.
        if word and int(data['conf'][i]) > 60:
            x, y, w, h = data['left'][i], data['top'][i], data['width'][i], data['height'][i]
            words.append({'text': word, 'box': (x, y, w, h)})

    # --- BASE IMAGE CLIP ---
    image_clip = ImageClip(IMAGE_PATH).set_duration(VIDEO_DURATION)

    # --- HIGHLIGHT WORDS ONE BY ONE ---
    # Guard against OCR finding nothing: the original divided by len(words)
    # unconditionally and raised ZeroDivisionError on a blank/low-confidence
    # slide. With no words we simply emit the un-highlighted slide.
    highlight_clips = []
    if words:
        highlight_duration = VIDEO_DURATION / len(words)
        for i, word in enumerate(words):
            x, y, w, h = word['box']
            start = i * highlight_duration
            end = start + highlight_duration
            # Semi-transparent rectangle positioned over the word's box,
            # visible only during its time slice.
            rect = ColorClip(size=(w, h), color=HIGHLIGHT_COLOR)
            rect = rect.set_opacity(HIGHLIGHT_OPACITY).set_position((x, y)).set_start(start).set_end(end)
            highlight_clips.append(rect)

    # --- FINAL VIDEO ---
    final_clip = CompositeVideoClip([image_clip] + highlight_clips)
    audio = AudioFileClip(audio_path)
    final_clip = final_clip.set_audio(audio)
    video_path = os.path.join(CLIPS_DIR, "clip" + str(id) + ".mp4")
    final_clip.write_videofile(video_path, fps=24, audio=True)
    # Release the file handles moviepy keeps open (the original leaked them,
    # which can exhaust descriptors when many slides are rendered).
    audio.close()
    final_clip.close()
def video_com(lines):
    """Render one clip per line of narration and concatenate them.

    Args:
        lines: sequence of narration strings; line i is paired with
               slide image slide{i}.png in IMAGE_DIR.

    Returns:
        Path of the final concatenated MP4 under /tmp.
    """
    video_path = f"/tmp/video_{uuid.uuid4().hex}.mp4"
    for id in range(len(lines)):
        video_func(id, lines)
        print(id, lines[id])
    clips = []
    for id in range(len(lines)):
        # Use CLIPS_DIR (where video_func writes) instead of a duplicated
        # hard-coded path, so the two functions cannot drift apart.
        clip = VideoFileClip(os.path.join(CLIPS_DIR, f"clip{id}.mp4"))
        clips.append(clip)
    final_video = concatenate_videoclips(clips, method="compose")
    final_video.write_videofile(video_path, fps=24)
    # Close all readers to release file handles; the original leaked one
    # open file per clip for the life of the process.
    for clip in clips:
        clip.close()
    final_video.close()
    return video_path