"""Hackathon / urdu_tts_video.py: Urdu Text-to-Speech & AI Video Generator."""
import os

import cv2
import ffmpeg
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from gtts import gTTS
# Ensure required folders exist
os.makedirs("generated_images", exist_ok=True)
os.makedirs("output", exist_ok=True)
# Load Stable Diffusion for image generation
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
pipe.safety_checker = None # Disable safety checker
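# Note: the pipeline runs on CPU (float32) as written; if a CUDA GPU is
# available, pipe.to("cuda") makes generation far faster, and float16 weights
# can be loaded with torch_dtype=torch.float16 instead.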
# Global variable to store generated TTS audio path
global_audio_path = None

### 🗣️ TEXT-TO-SPEECH FUNCTION ###
def text_to_speech(script_file):
    global global_audio_path
    if script_file is None:
        return None, "⚠️ Please upload an Urdu script file!"
    # gr.File(type="filepath") passes the upload as a plain string path
    with open(script_file, "r", encoding="utf-8") as f:
        urdu_text = f.read().strip()
    audio_path = "output/urdu_audio.mp3"
    tts = gTTS(text=urdu_text, lang="ur")  # lang="ur" selects the Urdu voice
    tts.save(audio_path)
    global_audio_path = audio_path  # remembered for the audio-video merge step
    return audio_path, "✅ Audio generated successfully!"
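
# Note: gTTS synthesizes speech via Google's online TTS service, so this step
# needs network access; each run overwrites output/urdu_audio.mp3.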

### 🏞️ IMAGE GENERATION FUNCTION ###
def generate_images(script_file, num_images):
    if script_file is None:
        return None, "⚠️ Please upload a script file!"
    num_images = int(num_images)
    with open(script_file, "r", encoding="utf-8") as f:
        text_lines = f.read().split("\n\n")  # scenes are separated by blank lines
    image_paths = []
    for i, scene in enumerate(text_lines[:num_images]):
        prompt = f"Scene {i+1}: {scene.strip()}"
        image = pipe(prompt).images[0]
        image_path = f"generated_images/image_{i+1}.png"
        image.save(image_path)
        image_paths.append(image_path)
    return image_paths, "✅ Images generated successfully!"
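
# Note: Stable Diffusion v1.5 was trained mostly on English captions, so raw
# Urdu scene text as the prompt may match the scene only loosely; translating
# or summarizing each scene into an English prompt would likely help.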

### 🎥 VIDEO CREATION FUNCTION ###
def images_to_video(image_paths, fps=1):
    if not image_paths:
        return None
    # Use the first frame's dimensions for the whole video
    frame = cv2.imread(image_paths[0])
    height, width, _ = frame.shape
    video_path = "output/generated_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
    for image in image_paths:
        video.write(cv2.imread(image))
    video.release()
    return video_path
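
# Note: OpenCV's "mp4v" stream is not playable in most browsers, but that is
# fine here because the merge step below re-encodes the video to H.264.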

### 🔊 AUDIO-VIDEO MERGE FUNCTION ###
def merge_audio_video(video_path):
    if global_audio_path is None:
        return None, "⚠️ No audio found! Please generate Urdu TTS first."
    final_video_path = "output/final_video.mp4"
    video = ffmpeg.input(video_path)
    audio = ffmpeg.input(global_audio_path)
    # Re-encode to H.264/AAC so the result plays in browsers
    ffmpeg.output(video, audio, final_video_path, vcodec="libx264", acodec="aac").run(overwrite_output=True)
    return final_video_path, "✅ Video with Urdu voice-over generated successfully!"
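
# Note: at fps=1 the image track lasts num_images seconds while the narration
# is usually longer; by default ffmpeg keeps the longer stream, so players
# typically hold the last frame while the audio finishes. ffmpeg's -shortest
# flag (passing shortest=None to ffmpeg.output should map to it) would trim to
# the shorter stream instead.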

### 🎬 FINAL VIDEO GENERATION PIPELINE ###
def generate_final_video(script_file, num_images):
    if script_file is None:
        return None, "⚠️ Please upload a script file for image generation!"
    image_paths, img_msg = generate_images(script_file, num_images)
    if not image_paths:
        return None, img_msg
    video_path = images_to_video(image_paths, fps=1)
    final_video_path, vid_msg = merge_audio_video(video_path)
    return final_video_path, vid_msg
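
# Note: this pipeline reads the narration from global_audio_path, so the
# "Urdu Text-to-Speech" tab must be run first; otherwise merge_audio_video
# returns the "No audio found" message.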

### 🚀 GRADIO UI ###
with gr.Blocks() as demo:
    gr.Markdown("## 🎤 Urdu Text-to-Speech & AI Video Generator")

    # TTS Section
    with gr.Tab("🗣️ Urdu Text-to-Speech"):
        script_file_tts = gr.File(label="📂 Upload Urdu Script for Audio", type="filepath")
        generate_audio_btn = gr.Button("🎙️ Generate Audio", variant="primary")
        audio_output = gr.Audio(label="🔊 Urdu Speech Output", interactive=False)
        audio_status = gr.Textbox(label="ℹ️ Status", interactive=False)
        generate_audio_btn.click(text_to_speech, inputs=[script_file_tts], outputs=[audio_output, audio_status])

    # Video Generation Section
    with gr.Tab("🎥 AI Video Generator"):
        script_file_video = gr.File(label="📂 Upload Urdu Script for Images", type="filepath")
        num_images = gr.Number(label="📸 Number of Scenes", value=3, minimum=1, maximum=10, step=1)
        generate_video_btn = gr.Button("🎬 Generate Video", variant="primary")
        video_output = gr.Video(label="🎞️ Generated Video")
        video_status = gr.Textbox(label="ℹ️ Status", interactive=False)
        generate_video_btn.click(generate_final_video, inputs=[script_file_video, num_images], outputs=[video_output, video_status])

demo.launch()  # pass share=True here for a temporary public link