Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,27 +7,31 @@ from moviepy import VideoFileClip, TextClip, CompositeVideoClip
|
|
| 7 |
from arabic_reshaper import reshape
|
| 8 |
|
| 9 |
# --- الإعدادات ---
|
| 10 |
-
FONT_PATH = "arialbd.ttf"
|
| 11 |
model = WhisperModel("large-v3", device="cpu", compute_type="int8")
|
| 12 |
|
| 13 |
def process_arabic_text(text):
|
| 14 |
return reshape(text) + "\n "
|
| 15 |
|
| 16 |
-
|
|
|
|
| 17 |
if not video_path:
|
| 18 |
return None, "الرجاء رفع فيديو أولاً."
|
| 19 |
|
| 20 |
-
|
| 21 |
segments, _ = model.transcribe(video_path, word_timestamps=True, language="ar")
|
|
|
|
| 22 |
words_data = []
|
|
|
|
|
|
|
| 23 |
for segment in segments:
|
| 24 |
for word in segment.words:
|
| 25 |
words_data.append([word.word.strip(), round(word.start, 2), round(word.end, 2)])
|
| 26 |
|
| 27 |
df = pd.DataFrame(words_data, columns=["الكلمة", "البداية", "النهاية"])
|
| 28 |
-
return df, "تم الاستخراج بنجاح!
|
| 29 |
|
| 30 |
-
def step_2_render_video(video_path, df_edited):
|
| 31 |
if video_path is None or df_edited is None or df_edited.empty:
|
| 32 |
return None, "بيانات ناقصة."
|
| 33 |
|
|
@@ -36,11 +40,10 @@ def step_2_render_video(video_path, df_edited):
|
|
| 36 |
w, h = int(video.w), int(video.h)
|
| 37 |
clips = [video]
|
| 38 |
words_list = df_edited.values.tolist()
|
| 39 |
-
|
| 40 |
chunk_size = 3
|
| 41 |
|
| 42 |
-
|
| 43 |
-
|
| 44 |
for i in range(0, len(words_list), chunk_size):
|
| 45 |
current_chunk = words_list[i : i + chunk_size]
|
| 46 |
sentence = " ".join([str(r[0]) for r in current_chunk])
|
|
@@ -50,10 +53,9 @@ def step_2_render_video(video_path, df_edited):
|
|
| 50 |
c_end = float(current_chunk[-1][2])
|
| 51 |
duration = max(0.1, c_end - c_start)
|
| 52 |
|
| 53 |
-
# إنشاء نص الكابشن ثابت بدون أي أنيميشن
|
| 54 |
txt_clip = TextClip(
|
| 55 |
text=clean_sentence,
|
| 56 |
-
font_size=
|
| 57 |
color='yellow',
|
| 58 |
stroke_color='black',
|
| 59 |
stroke_width=2,
|
|
@@ -61,18 +63,27 @@ def step_2_render_video(video_path, df_edited):
|
|
| 61 |
font=FONT_PATH,
|
| 62 |
size=(int(w * 0.9), None),
|
| 63 |
text_align='center'
|
| 64 |
-
).with_start(c_start).with_duration(duration).with_position(('center', int(h * 0.
|
| 65 |
|
| 66 |
clips.append(txt_clip)
|
| 67 |
|
| 68 |
final_video = CompositeVideoClip(clips, size=(w, h))
|
| 69 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |
return output_path, "تم إنتاج الفيديو بنجاح!"
|
| 72 |
|
| 73 |
# --- بناء الواجهة ---
|
| 74 |
-
with gr.Blocks(title="Caption Pro
|
| 75 |
-
gr.Markdown("# 🎬 Caption Pro -
|
| 76 |
|
| 77 |
with gr.Row():
|
| 78 |
video_in = gr.Video(label="فيديو المدخلات")
|
|
@@ -84,6 +95,7 @@ with gr.Blocks(title="Caption Pro Clean") as app:
|
|
| 84 |
btn_ex = gr.Button("1. استخراج الكلمات", variant="primary")
|
| 85 |
btn_re = gr.Button("2. إنتاج الفيديو", variant="secondary")
|
| 86 |
|
|
|
|
| 87 |
btn_ex.click(step_1_extract_words, inputs=[video_in], outputs=[table, status])
|
| 88 |
btn_re.click(step_2_render_video, inputs=[video_in, table], outputs=[video_out, status])
|
| 89 |
|
|
|
|
| 7 |
from arabic_reshaper import reshape
|
| 8 |
|
| 9 |
# --- الإعدادات ---
|
| 10 |
+
FONT_PATH = "arialbd.ttf"
|
| 11 |
model = WhisperModel("large-v3", device="cpu", compute_type="int8")
|
| 12 |
|
| 13 |
def process_arabic_text(text):
    """Reshape Arabic text so glyphs join correctly when rendered.

    Returns the reshaped string with a trailing newline-plus-space
    appended (padding consumed by the caption renderer).
    """
    shaped = reshape(text)
    # NOTE(review): the "\n " tail presumably prevents the renderer from
    # cropping the bottom of the last text line — confirm against TextClip.
    return shaped + "\n "
|
| 15 |
|
| 16 |
+
# إضافة gr.Progress لتتبع التقدم في الواجهة
|
| 17 |
+
def step_1_extract_words(video_path, progress=gr.Progress()):
    """Transcribe the uploaded video and return per-word timestamps.

    Runs Whisper (Arabic, word-level timestamps) over *video_path* and
    builds a DataFrame with one row per word. Returns a
    ``(DataFrame, status_message)`` pair for the Gradio outputs; the
    DataFrame columns carry the Arabic labels the editable table expects.
    The ``gr.Progress()`` default is the standard Gradio injection hook
    for the UI progress bar.
    """
    # Guard clause: no video uploaded yet.
    if not video_path:
        return None, "الرجاء رفع فيديو أولاً."

    progress(0, desc="جاري تحميل النموذج...")
    # word_timestamps=True attaches start/end times to every word.
    segments, _ = model.transcribe(video_path, word_timestamps=True, language="ar")

    progress(0.5, desc="جاري تحليل الصوت واستخراج الكلمات...")
    # transcribe() yields segments lazily, so the heavy decoding work
    # actually happens while this comprehension iterates them.
    rows = [
        [w.word.strip(), round(w.start, 2), round(w.end, 2)]
        for seg in segments
        for w in seg.words
    ]

    df = pd.DataFrame(rows, columns=["الكلمة", "البداية", "النهاية"])
    return df, "تم الاستخراج بنجاح!"
|
| 33 |
|
| 34 |
+
def step_2_render_video(video_path, df_edited, progress=gr.Progress()):
|
| 35 |
if video_path is None or df_edited is None or df_edited.empty:
|
| 36 |
return None, "بيانات ناقصة."
|
| 37 |
|
|
|
|
| 40 |
w, h = int(video.w), int(video.h)
|
| 41 |
clips = [video]
|
| 42 |
words_list = df_edited.values.tolist()
|
|
|
|
| 43 |
chunk_size = 3
|
| 44 |
|
| 45 |
+
progress(0.1, desc="جاري تحضير النصوص...")
|
| 46 |
+
|
| 47 |
for i in range(0, len(words_list), chunk_size):
|
| 48 |
current_chunk = words_list[i : i + chunk_size]
|
| 49 |
sentence = " ".join([str(r[0]) for r in current_chunk])
|
|
|
|
| 53 |
c_end = float(current_chunk[-1][2])
|
| 54 |
duration = max(0.1, c_end - c_start)
|
| 55 |
|
|
|
|
| 56 |
txt_clip = TextClip(
|
| 57 |
text=clean_sentence,
|
| 58 |
+
font_size=65,
|
| 59 |
color='yellow',
|
| 60 |
stroke_color='black',
|
| 61 |
stroke_width=2,
|
|
|
|
| 63 |
font=FONT_PATH,
|
| 64 |
size=(int(w * 0.9), None),
|
| 65 |
text_align='center'
|
| 66 |
+
).with_start(c_start).with_duration(duration).with_position(('center', int(h * 0.65)))
|
| 67 |
|
| 68 |
clips.append(txt_clip)
|
| 69 |
|
| 70 |
final_video = CompositeVideoClip(clips, size=(w, h))
|
| 71 |
+
|
| 72 |
+
# هنا أعدنا الـ logger لكي تظهر النسبة المئوية % في الـ Log
|
| 73 |
+
print("بدء عملية دمج الفيديو والترميز...")
|
| 74 |
+
final_video.write_videofile(
|
| 75 |
+
output_path,
|
| 76 |
+
codec="libx264",
|
| 77 |
+
audio_codec="aac",
|
| 78 |
+
fps=video.fps,
|
| 79 |
+
logger='bar' # 'bar' هي التي تظهر النسبة المئوية % في الـ Logs
|
| 80 |
+
)
|
| 81 |
|
| 82 |
return output_path, "تم إنتاج الفيديو بنجاح!"
|
| 83 |
|
| 84 |
# --- بناء الواجهة ---
|
| 85 |
+
with gr.Blocks(title="Caption Pro Progress") as app:
|
| 86 |
+
gr.Markdown("# 🎬 Caption Pro - With Progress Bar")
|
| 87 |
|
| 88 |
with gr.Row():
|
| 89 |
video_in = gr.Video(label="فيديو المدخلات")
|
|
|
|
| 95 |
btn_ex = gr.Button("1. استخراج الكلمات", variant="primary")
|
| 96 |
btn_re = gr.Button("2. إنتاج الفيديو", variant="secondary")
|
| 97 |
|
| 98 |
+
# نلاحظ إضافة progress=True في الربط
|
| 99 |
btn_ex.click(step_1_extract_words, inputs=[video_in], outputs=[table, status])
|
| 100 |
btn_re.click(step_2_render_video, inputs=[video_in, table], outputs=[video_out, status])
|
| 101 |
|