# Hugging Face Space app (last deploy ended in "Runtime error").
# Environment setup before launch: chmod +x setup.sh && ./setup.sh
| import sys | |
| import os | |
| sys.path.append(os.path.abspath(".")) | |
| import gradio as gr | |
| from pathlib import Path | |
| import argparse | |
| # استيراد بقية الأشياء | |
| from STT.sst import speech_to_text | |
| from LLM.llm import generate_reply | |
| from TTS_X.tts import generate_voice | |
| from FantasyTalking.infer import load_models, main | |
# Default inference settings shared by every video-generation request.
# The per-request fields (image_path / audio_path / prompt / output_dir)
# start empty here and are overridden per call in generate_video().
_DEFAULT_ARGS = {
    "wan_model_dir": "./models/Wan2.1-I2V-14B-720P",
    "fantasytalking_model_path": "./models/fantasytalking_model.ckpt",
    "wav2vec_model_dir": "./models/wav2vec2-base-960h",
    "image_path": "",
    "audio_path": "",
    "prompt": "",
    "output_dir": "./output",
    "image_size": 512,
    "audio_scale": 1.0,
    "prompt_cfg_scale": 5.0,
    "audio_cfg_scale": 5.0,
    "max_num_frames": 81,
    "inference_steps": 20,
    "fps": 23,
    "num_persistent_param_in_dit": None,
    "seed": 1111,
}
args_template = argparse.Namespace(**_DEFAULT_ARGS)

# Load the heavy models exactly once at import time; every request reuses them.
pipe, fantasytalking, wav2vec_processor, wav2vec = load_models(args_template)
def generate_video(image_path, audio_path, prompt, output_dir="./output"):
    """Render a talking-head video from one image and one audio file.

    Clones the module-level ``args_template`` and overrides the per-request
    fields, then delegates to FantasyTalking's ``main`` with the models that
    were loaded once at import time.

    BUG FIX: the original passed the overrides as keyword arguments next to
    ``**vars(args_template)`` — but the template already defines
    ``image_path``/``audio_path``/``prompt``/``output_dir``, so Python raised
    ``TypeError: ... got multiple values for keyword argument 'image_path'``
    on every call. Merge into a plain dict first, then build the Namespace.

    Returns whatever ``main`` returns (the output video path).
    """
    call_args = dict(vars(args_template))
    call_args.update(
        image_path=image_path,
        audio_path=audio_path,
        prompt=prompt,
        output_dir=output_dir,
    )
    args = argparse.Namespace(**call_args)
    return main(args, pipe, fantasytalking, wav2vec_processor, wav2vec)
def full_pipeline(user_audio, user_image):
    """Turn a user's voice clip and a portrait into a spoken video reply.

    Pipeline: transcribe the audio, generate an LLM reply, synthesize that
    reply as speech, then animate the portrait with the synthesized speech.

    Returns a 4-tuple: (transcript, reply text, reply audio path, video path),
    matching the four Gradio output components.
    """
    transcript = speech_to_text(user_audio)          # 1. speech -> text
    assistant_reply = generate_reply(transcript)     # 2. text -> LLM reply
    spoken_reply = generate_voice(assistant_reply)   # 3. reply -> speech file

    # 4. speech + portrait -> talking video (ensure the output dir exists first)
    Path("./output").mkdir(parents=True, exist_ok=True)
    rendered_video = generate_video(
        image_path=user_image,
        audio_path=spoken_reply,
        prompt=assistant_reply,
    )
    return transcript, assistant_reply, spoken_reply, rendered_video
# Gradio UI: the user uploads a voice clip and a speaker photo; the app shows
# the transcript, the assistant's reply (as text and speech), and the
# generated talking video.
with gr.Blocks(title="🧠 صوتك يحرك صورة!") as demo:
    gr.Markdown("## 🎤➡️💬➡️🔊➡️📽️ من صوتك إلى فيديو متكلم!")
    with gr.Row():
        with gr.Column():
            # Inputs — type="filepath" hands the handler on-disk paths,
            # which is what the pipeline functions consume.
            audio_input = gr.Audio(label="🎙️ ارفع صوتك", type="filepath")
            image_input = gr.Image(label="🖼️ صورة المتحدث", type="filepath")
            btn = gr.Button("🎬 شغل")
        with gr.Column():
            # Outputs — listed in the order full_pipeline returns them.
            user_text = gr.Textbox(label="📝 النص المسموع")
            reply_text = gr.Textbox(label="🤖 رد المساعد")
            reply_audio = gr.Audio(label="🔊 الرد المنطوق")
            video_output = gr.Video(label="📽️ الفيديو الناتج")
    btn.click(fn=full_pipeline,
              inputs=[audio_input, image_input],
              outputs=[user_text, reply_text, reply_audio, video_output])

# share=True publishes a temporary public link; inbrowser is a no-op on Spaces.
demo.launch(inbrowser=True, share=True)