Update app.py
app.py (CHANGED)
@@ -3,38 +3,72 @@ import tempfile
from pathlib import Path
import uuid
import subprocess

allowed_medias = [".png", ".jpg", ".jpeg", ".bmp", ".gif", ".tiff"]
-
-
    if not images:
        return None, "❌ Keine Bilder ausgewählt"
-
-    y_pos = min(max(0.0, y_pos), 0.9)
    temp_dir = tempfile.mkdtemp()
    clips = []

-
        texts = [""] * len(images)

    for i, img_path in enumerate(images):
        clip_path = Path(temp_dir) / f"clip_{i}.mp4"
        text = texts[i] if i < len(texts) else ""

-        # Scale + pad for a proportional fit into 1280x720
        vf_filters = (
            "scale=w=1280:h=720:force_original_aspect_ratio=decrease,"
            "pad=1280:720:(ow-iw)/2:(oh-ih)/2:color=black,"
            "fps=25,format=yuv420p"
        )

-        # Text overlay filter
        if text:
            safe_text = text.replace(":", "\\:").replace("'", "\\'")
            drawtext_filter = (
                f",drawtext=text='{safe_text}':fontcolor=white:fontsize={font_size}:borderw=2:"
                f"x=(w-text_w)/2:y=(h-text_h)*{y_pos}:"
-                f"alpha='if(lt(t,{fade_duration}), t/{fade_duration}, if(lt(t,{duration}-{fade_duration}), 1, ({duration}-t)/{fade_duration}))'"
            )
            vf_filters += drawtext_filter

@@ -43,7 +77,7 @@ def generate_slideshow(images, duration, texts=None, y_pos=0.5, fade_duration=0.
            "-y",
            "-loop", "1",
            "-i", str(img_path),
-            "-t", str(duration),
            "-vf", vf_filters,
            str(clip_path)
        ]
@@ -54,7 +88,7 @@ def generate_slideshow(images, duration, texts=None, y_pos=0.5, fade_duration=0.

        clips.append(clip_path)

-    #
    filelist_path = Path(temp_dir) / "filelist.txt"
    with open(filelist_path, "w") as f:
        for clip in clips:
@@ -76,32 +110,44 @@ def generate_slideshow(images, duration, texts=None, y_pos=0.5, fade_duration=0.
    except subprocess.CalledProcessError as e:
        return None, f"❌ FFmpeg Concat Fehler:\n{e.stderr}"

-
-
-
-

# Gradio UI
with gr.Blocks() as demo:
-    gr.Markdown("# Slideshow mit

    img_input = gr.Files(label="Bilder auswählen (mehrere)", file_types=allowed_medias)
    duration_input = gr.Number(value=3, label="Dauer pro Bild in Sekunden", precision=1)
    fade_input = gr.Number(value=0.7, label="Fade Dauer in Sekunden", precision=1)
-    text_input = gr.Textbox(label="Texte pro Bild (mit Komma trennen)", placeholder="Text1, Text2, Text3 ...")
    ypos_input = gr.Slider(minimum=0.0, maximum=0.9, step=0.01, value=0.5, label="Y-Position für alle Texte (0=oben, 0.5=mitte, 0.9=unten)")
    font_size_input = gr.Number(value=60, label="Textgröße (px)")
    out_video = gr.Video(interactive=False, label="Generiertes Video")
    status = gr.Textbox(interactive=False, label="Status")

-    def wrapper(images, duration, fade_duration, text_str, y_pos, font_size):
-        texts = [t.strip() for t in text_str.split(",")] if text_str else None
-        return generate_slideshow(images, duration, texts, y_pos, fade_duration, font_size)
-
    btn = gr.Button("Video erstellen")
    btn.click(
-        fn=wrapper,
-        inputs=[img_input, duration_input, fade_input, text_input, ypos_input, font_size_input],
        outputs=[out_video, status]
    )

@@ -3,38 +3,72 @@ import tempfile
from pathlib import Path
import uuid
import subprocess
+import requests
+import base64
+import math

allowed_medias = [".png", ".jpg", ".jpeg", ".bmp", ".gif", ".tiff"]
+API_URL = "https://text.pollinations.ai/openai"
+
+def transcribe_audio(audio_path):
+    with open(audio_path, "rb") as f:
+        audio_data = base64.b64encode(f.read()).decode()
+    payload = {
+        "model": "openai-audio",
+        "messages": [{
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "Transcribe this audio:"},
+                {"type": "input_audio", "input_audio": {"data": audio_data, "format": "wav"}}
+            ]
+        }]
+    }
+    response = requests.post(API_URL, json=payload)
+    response.raise_for_status()
+    result = response.json()
+    # The API returns the transcript in choices[0].message.content
+    text = result['choices'][0]['message']['content']
+    return text
+
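For context on the helper added here: transcribe_audio posts the whole WAV file, base64-encoded, to the Pollinations chat endpoint and assumes the reply always carries choices[0].message.content. A self-contained sketch of a slightly more defensive variant follows; the timeout value and the empty-string fallback are assumptions added for illustration, not part of the commit:

import base64
import requests

API_URL = "https://text.pollinations.ai/openai"

def transcribe_audio_safe(audio_path, timeout=120):
    # Encode the WAV file as base64, exactly as the helper in the diff does.
    with open(audio_path, "rb") as f:
        audio_data = base64.b64encode(f.read()).decode()
    payload = {
        "model": "openai-audio",
        "messages": [{
            "role": "user",
            "content": [
                {"type": "text", "text": "Transcribe this audio:"},
                {"type": "input_audio", "input_audio": {"data": audio_data, "format": "wav"}},
            ],
        }],
    }
    try:
        response = requests.post(API_URL, json=payload, timeout=timeout)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except (requests.RequestException, KeyError, IndexError, ValueError):
        # Fall back to an empty transcript so the slideshow can still be rendered.
        return ""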
+def generate_slideshow_with_audio(images, audio_file, duration_per_image=3, y_pos=0.5, fade_duration=0.7, font_size=60):
    if not images:
        return None, "❌ Keine Bilder ausgewählt"
+
+    y_pos = min(max(0.0, y_pos), 0.9)
    temp_dir = tempfile.mkdtemp()
    clips = []

+    # Transcribe if an audio file was provided
+    if audio_file:
+        transcript = transcribe_audio(audio_file.name)
+        # Simply split the transcript into as many equal chunks as there are images
+        words = transcript.split()
+        total_words = len(words)
+        segments_per_image = math.ceil(total_words / len(images))
+        texts = []
+        for i in range(len(images)):
+            start = i*segments_per_image
+            end = min((i+1)*segments_per_image, total_words)
+            texts.append(" ".join(words[start:end]))
+    else:
        texts = [""] * len(images)

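To make the even split above concrete: a 10-word transcript over 3 images gives segments_per_image = ceil(10/3) = 4, so the captions are words 0-3, 4-7 and 8-9. The same logic as a standalone sketch (the sample sentence is made up):

import math

def split_words_evenly(transcript, n_images):
    # Mirrors the caption splitting used in generate_slideshow_with_audio.
    words = transcript.split()
    per_image = math.ceil(len(words) / n_images)
    return [" ".join(words[i * per_image:(i + 1) * per_image]) for i in range(n_images)]

print(split_words_evenly("one two three four five six seven eight nine ten", 3))
# -> ['one two three four', 'five six seven eight', 'nine ten']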
    for i, img_path in enumerate(images):
        clip_path = Path(temp_dir) / f"clip_{i}.mp4"
        text = texts[i] if i < len(texts) else ""

        vf_filters = (
            "scale=w=1280:h=720:force_original_aspect_ratio=decrease,"
            "pad=1280:720:(ow-iw)/2:(oh-ih)/2:color=black,"
            "fps=25,format=yuv420p"
        )

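The scale/pad pair fits every image into 1280x720 without distortion: force_original_aspect_ratio=decrease shrinks the picture until both sides fit, and pad centres it on a black 1280x720 canvas. A 4000x3000 photo, for instance, is scaled by min(1280/4000, 720/3000) = 0.24 to 960x720 and receives 160-pixel black bars left and right. A quick sketch of that arithmetic:

def letterbox_size(w, h, target_w=1280, target_h=720):
    # Same geometry as scale=...:force_original_aspect_ratio=decrease followed by pad.
    scale = min(target_w / w, target_h / h)
    new_w, new_h = round(w * scale), round(h * scale)
    pad_x = (target_w - new_w) // 2
    pad_y = (target_h - new_h) // 2
    return new_w, new_h, pad_x, pad_y

print(letterbox_size(4000, 3000))  # -> (960, 720, 160, 0)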
        if text:
            safe_text = text.replace(":", "\\:").replace("'", "\\'")
            drawtext_filter = (
                f",drawtext=text='{safe_text}':fontcolor=white:fontsize={font_size}:borderw=2:"
                f"x=(w-text_w)/2:y=(h-text_h)*{y_pos}:"
+                f"alpha='if(lt(t,{fade_duration}), t/{fade_duration}, if(lt(t,{duration_per_image}-{fade_duration}), 1, ({duration_per_image}-t)/{fade_duration}))'"
            )
            vf_filters += drawtext_filter

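The new alpha expression fades each caption in over fade_duration seconds, holds it fully opaque, and fades it out towards the end of the clip: with the defaults (fade_duration=0.7, duration_per_image=3) it evaluates to t/0.7 for t < 0.7, 1 up to t = 2.3, and (3 - t)/0.7 afterwards. A small sketch that evaluates the same piecewise ramp, handy for sanity-checking values before rendering:

def caption_alpha(t, duration_per_image=3.0, fade_duration=0.7):
    # Python version of the ffmpeg drawtext alpha expression used above.
    if t < fade_duration:
        return t / fade_duration
    if t < duration_per_image - fade_duration:
        return 1.0
    return max(0.0, (duration_per_image - t) / fade_duration)

for t in (0.0, 0.35, 0.7, 1.5, 2.3, 3.0):
    print(f"t={t:.2f}s alpha={caption_alpha(t):.2f}")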
@@ -43,7 +77,7 @@ def generate_slideshow(images, duration, texts=None, y_pos=0.5, fade_duration=0.
            "-y",
            "-loop", "1",
            "-i", str(img_path),
+            "-t", str(duration_per_image),
            "-vf", vf_filters,
            str(clip_path)
        ]
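Each loop iteration turns one still image into a fixed-length clip: -loop 1 repeats the single input frame, -t caps the clip at duration_per_image seconds, and the -vf chain applies the scaling, padding and caption. The first elements of the cmd list (presumably starting with "ffmpeg") sit outside this hunk, so the standalone sketch below is an assumed reconstruction; the file names are hypothetical:

import subprocess

def render_clip(img_path, clip_path, vf_filters, duration_per_image=3):
    # Assumed reconstruction: only "-y", "-loop", "-i", "-t" and "-vf" are visible in the hunk.
    cmd = [
        "ffmpeg",
        "-y",
        "-loop", "1",
        "-i", str(img_path),
        "-t", str(duration_per_image),
        "-vf", vf_filters,
        str(clip_path),
    ]
    subprocess.run(cmd, check=True, capture_output=True, text=True)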
@@ -54,7 +88,7 @@ def generate_slideshow(images, duration, texts=None, y_pos=0.5, fade_duration=0.

        clips.append(clip_path)

+    # Concatenate the clips
    filelist_path = Path(temp_dir) / "filelist.txt"
    with open(filelist_path, "w") as f:
        for clip in clips:
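The loop body that writes filelist.txt, and the concat invocation that produces output_file, fall outside the lines shown in this diff. The usual pattern with ffmpeg's concat demuxer looks roughly like the following sketch; the exact flags used by the Space are not visible here, so the -c copy choice is an assumption:

from pathlib import Path
import subprocess

def concat_clips(clips, temp_dir, output_file):
    # filelist.txt lists one clip per line in concat-demuxer syntax: file 'clip_0.mp4'
    filelist_path = Path(temp_dir) / "filelist.txt"
    with open(filelist_path, "w") as f:
        for clip in clips:
            f.write(f"file '{clip}'\n")
    subprocess.run(
        ["ffmpeg", "-y", "-f", "concat", "-safe", "0",
         "-i", str(filelist_path), "-c", "copy", str(output_file)],
        check=True, capture_output=True, text=True,
    )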
@@ -76,32 +110,44 @@ def generate_slideshow(images, duration, texts=None, y_pos=0.5, fade_duration=0.
    except subprocess.CalledProcessError as e:
        return None, f"❌ FFmpeg Concat Fehler:\n{e.stderr}"

+    # Add the audio track if one was provided
+    if audio_file:
+        final_output = Path(temp_dir) / f"slideshow_audio_{uuid.uuid4().hex}.mp4"
+        cmd_audio = [
+            "ffmpeg",
+            "-y",
+            "-i", str(output_file),
+            "-i", audio_file.name,
+            "-c:v", "copy",
+            "-c:a", "aac",
+            "-shortest",
+            str(final_output)
+        ]
+        try:
+            subprocess.run(cmd_audio, check=True, capture_output=True, text=True)
+            return str(final_output), "✅ Slideshow mit Audio und automatischen Untertiteln erstellt"
+        except subprocess.CalledProcessError as e:
+            return None, f"❌ FFmpeg Audio Merge Fehler:\n{e.stderr}"
+
+    return str(output_file), "✅ Slideshow erstellt (ohne Audio)"

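Because the mux step stream-copies the video and passes -shortest, the result ends with whichever stream is shorter: five images at the default 3 s make a 15 s video, so a 20 s narration is cut at 15 s, while a 10 s narration ends the file at 10 s. If the two should line up exactly, one option (not part of this commit) is to derive duration_per_image from the audio length with ffprobe, as sketched here:

import subprocess

def audio_duration_seconds(audio_path):
    # Query the container duration with ffprobe.
    out = subprocess.run(
        ["ffprobe", "-v", "error", "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1", audio_path],
        check=True, capture_output=True, text=True,
    )
    return float(out.stdout.strip())

# Hypothetical usage: spread the narration evenly over the selected images.
# duration_per_image = audio_duration_seconds(audio_file.name) / len(images)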
# Gradio UI
with gr.Blocks() as demo:
+    gr.Markdown("# Slideshow mit Audio & automatischen Untertiteln")

    img_input = gr.Files(label="Bilder auswählen (mehrere)", file_types=allowed_medias)
+    audio_input = gr.File(label="Audio hinzufügen (optional, WAV)", file_types=[".wav"])
    duration_input = gr.Number(value=3, label="Dauer pro Bild in Sekunden", precision=1)
    fade_input = gr.Number(value=0.7, label="Fade Dauer in Sekunden", precision=1)
    ypos_input = gr.Slider(minimum=0.0, maximum=0.9, step=0.01, value=0.5, label="Y-Position für alle Texte (0=oben, 0.5=mitte, 0.9=unten)")
    font_size_input = gr.Number(value=60, label="Textgröße (px)")
    out_video = gr.Video(interactive=False, label="Generiertes Video")
    status = gr.Textbox(interactive=False, label="Status")

    btn = gr.Button("Video erstellen")
    btn.click(
+        fn=generate_slideshow_with_audio,
+        inputs=[img_input, audio_input, duration_input, ypos_input, fade_input, font_size_input],
        outputs=[out_video, status]
    )

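The diff stops inside the Blocks context, so the call that actually serves the app is not shown. A typical closing line for a Gradio Space, given purely as an assumed sketch:

if __name__ == "__main__":
    demo.launch()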