Vishwas1 commited on
Commit
d7dbd51
·
verified ·
1 Parent(s): 43229da

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +562 -0
app.py ADDED
@@ -0,0 +1,562 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py — Slideshow with per-image audio + multi-voice TTS (HF Coqui)
2
+ # Works with MoviePy v2.x; falls back to v1 where possible.
3
+
4
+ import os
5
+ import tempfile
6
+ import random
7
+ from typing import Optional, List, Dict, Tuple
8
+
9
+ import numpy as np
10
+ from PIL import Image
11
+ import gradio as gr
12
+
13
+ # ---- MoviePy imports with v2/v1 compatibility ----
14
+ MPY_V2 = False
15
+ afx = None # audio effects (v2)
16
+
17
+ try:
18
+ # v2.x preferred
19
+ from moviepy import ImageSequenceClip, AudioFileClip, ImageClip, concatenate_videoclips # type: ignore
20
+ try:
21
+ from moviepy import afx as _afx # type: ignore
22
+ afx = _afx
23
+ except Exception:
24
+ afx = None
25
+ MPY_V2 = True
26
+ except Exception:
27
+ # v1.x fallback
28
+ from moviepy.editor import ImageSequenceClip, AudioFileClip, ImageClip, concatenate_videoclips # type: ignore
29
+ MPY_V2 = False
30
+
31
+
32
+ # ---------- Small compatibility helpers ----------
33
+
34
def clip_with_duration(clip, duration: float):
    """Set a clip's duration, preferring the MoviePy v2 API over the v1 one."""
    # v2 exposes `with_duration`; v1 only has `set_duration`.
    setter = clip.with_duration if hasattr(clip, "with_duration") else clip.set_duration
    return setter(duration)
38
+
39
+
40
def clip_with_audio(clip, audio):
    """Attach an audio track to a clip, preferring the MoviePy v2 API over v1."""
    if not hasattr(clip, "with_audio"):
        return clip.set_audio(audio)  # v1
    return clip.with_audio(audio)  # v2
44
+
45
+
46
def apply_linear_gain(audio_clip, gain_linear: float):
    """
    Apply a linear gain factor to an audio clip.

    Tries the MoviePy v2 effect API first (``with_effects`` +
    ``afx.MultiplyVolume``), then the v1 ``fx``/``volumex`` API.
    If neither works, the original clip is returned unchanged (no-op).
    """
    # v2 path: requires both the method and the afx effects module.
    if hasattr(audio_clip, "with_effects") and afx is not None:
        try:
            return audio_clip.with_effects([afx.MultiplyVolume(gain_linear)])
        except Exception:
            pass  # fall through to the v1 attempt

    # v1 path: clip.fx(afx.volumex, gain)
    if hasattr(audio_clip, "fx"):
        try:
            if afx is not None and hasattr(afx, "volumex"):
                return audio_clip.fx(afx.volumex, gain_linear)
        except Exception:
            pass

    # Effects unavailable: best-effort no-op.
    return audio_clip
63
+
64
+
65
+ # ---------- Image utilities ----------
66
+
67
def load_and_fit_image(path: str, width: int, height: int, fit: str = "contain", bg: str = "#000000") -> np.ndarray:
    """
    Load an image from *path* and fit it into a (width, height) RGB frame.

    fit:
        "stretch" — resize ignoring aspect ratio.
        "cover"   — scale to fill the frame, center-cropping any overflow.
        "contain" — letterbox onto a *bg*-colored canvas (default).

    Returns an HxWx3 uint8 numpy array.
    """
    img = Image.open(path).convert("RGB")

    if fit == "stretch":
        img = img.resize((width, height), Image.LANCZOS)
        return np.array(img)

    iw, ih = img.size
    target_aspect = float(width) / float(height)
    src_aspect = float(iw) / float(ih)

    if fit == "cover":
        # BUGFIX: the original used `target_asect := target_aspect` (a
        # SyntaxError) and the undefined name `src_asect`; this branch could
        # never run. Scale so BOTH dimensions cover the frame, then crop.
        if src_aspect > target_aspect:
            # Source is wider: match height, let width overflow.
            new_h = height
            new_w = int(round(src_aspect * new_h))
        else:
            # Source is taller: match width, let height overflow.
            new_w = width
            new_h = int(round(new_w / src_aspect))
        img = img.resize((new_w, new_h), Image.LANCZOS)
        left = (new_w - width) // 2
        top = (new_h - height) // 2
        img = img.crop((left, top, left + width, top + height))
        return np.array(img)

    # contain: scale so both dimensions fit, then center on a solid canvas.
    canvas = Image.new("RGB", (width, height), bg)
    if src_aspect > target_aspect:
        new_w = width
        new_h = int(round(new_w / src_aspect))
    else:
        new_h = height
        new_w = int(round(src_aspect * new_h))
    resized = img.resize((new_w, new_h), Image.LANCZOS)
    left = (width - new_w) // 2
    top = (height - new_h) // 2
    canvas.paste(resized, (left, top))
    return np.array(canvas)
104
+
105
+
106
+ # ---------- TTS backends ----------
107
+
108
# Cache of heavyweight TTS engine instances, keyed by backend name.
_TTS_CACHE: Dict[str, object] = {}


def _get_tts_backend(backend_name: str):
    """
    Lazy-load a TTS backend instance.
    - "Coqui (VCTK multi-speaker)" -> coqui-ai/TTS model: tts_models/en/vctk/vits
    - "gTTS (simple)" -> sentinel string "gTTS"
    Unknown backends yield None.
    """
    if backend_name == "gTTS (simple)":
        return "gTTS"
    if backend_name != "Coqui (VCTK multi-speaker)":
        return None

    engine = _TTS_CACHE.get(backend_name)
    if engine is None:
        from TTS.api import TTS  # heavy import; deferred until first use
        engine = TTS("tts_models/en/vctk/vits")
        _TTS_CACHE[backend_name] = engine
    return engine
124
+
125
+
126
def list_voices(backend_name: str) -> List[str]:
    """
    Return the sorted speaker IDs for the given backend.

    Only the Coqui VCTK backend exposes voices; any failure (model not
    installed, download error, ...) yields an empty list.
    """
    if backend_name != "Coqui (VCTK multi-speaker)":
        return []
    try:
        backend = _get_tts_backend(backend_name)
        voices = sorted(list(getattr(backend, "speakers", [])))
        # Move the first preferred (commonly male) default to the front.
        for preferred in ("p225", "p226", "p233", "p243"):
            if preferred in voices:
                voices.remove(preferred)
                voices.insert(0, preferred)
                break
        return voices
    except Exception:
        return []
143
+
144
+
145
def synth_tts_to_file(text: str, backend_name: str, voice: Optional[str], out_path: str) -> Optional[str]:
    """
    Synthesize *text* to an audio file and return the actual output path.

    Returns None for empty text, unknown backends, or any synthesis failure.
    The extension of *out_path* may be rewritten (.wav for Coqui, .mp3 for
    gTTS) to match what each engine produces.
    """
    cleaned = (text or "").strip()
    if not cleaned:
        return None

    if backend_name == "Coqui (VCTK multi-speaker)":
        try:
            engine = _get_tts_backend(backend_name)
            # Coqui writes WAV by default; we'll give a .wav path
            target = out_path if out_path.lower().endswith(".wav") else os.path.splitext(out_path)[0] + ".wav"
            engine.tts_to_file(text=cleaned, speaker=voice, file_path=target)
            return target
        except Exception:
            return None

    # gTTS fallback
    if backend_name == "gTTS (simple)":
        try:
            from gtts import gTTS
            target = out_path if out_path.lower().endswith(".mp3") else os.path.splitext(out_path)[0] + ".mp3"
            gTTS(text=cleaned, lang="en").save(target)
            return target
        except Exception:
            return None

    return None
173
+
174
+
175
+ # ---------- Helpers for per-image mapping ----------
176
+
177
def map_audio_to_images_by_name(image_paths: List[str], audio_paths: List[str]) -> List[Optional[str]]:
    """
    Pair each image with at most one audio file.

    First pass matches on lowercase basename (extension stripped); any
    images still unmatched are filled, in order, from the unused audio
    files. Returns one entry (path or None) per image.
    """
    matched: List[Optional[str]] = [None] * len(image_paths)
    if not audio_paths:
        return matched

    def stem(p: str) -> str:
        return os.path.splitext(os.path.basename(p))[0].lower()

    # Basename -> audio path (later duplicates win, as in a plain dict fill).
    by_stem = {stem(a): a for a in audio_paths}

    consumed = set()
    for idx, image in enumerate(image_paths):
        hit = by_stem.get(stem(image))
        if hit is not None:
            matched[idx] = hit
            consumed.add(hit)

    # Index-order fallback for anything still unmatched.
    remaining = [a for a in audio_paths if a not in consumed]
    for idx in range(len(image_paths)):
        if matched[idx] is None and remaining:
            matched[idx] = remaining.pop(0)

    return matched
206
+
207
+
208
+ # ---------- Core builder ----------
209
+
210
def build_variable_duration_video(
    frames: List[np.ndarray],
    per_image_durations: List[float],
    per_image_audios: List[Optional[str]],
    audio_gain_db: float
):
    """
    Build a video in which each frame has its own duration and, optionally,
    its own audio track; segments are concatenated in order.
    """
    # dB -> linear factor, computed once for all segments.
    gain = 10 ** (float(audio_gain_db) / 20.0) if audio_gain_db else 1.0

    segments = []
    for image, seconds, audio_path in zip(frames, per_image_durations, per_image_audios):
        segment = clip_with_duration(ImageClip(image), float(seconds))
        if audio_path:
            try:
                track = AudioFileClip(audio_path)
                if abs(gain - 1.0) > 1e-3:
                    track = apply_linear_gain(track, gain)
                segment = clip_with_audio(segment, track)
            except Exception:
                # Unreadable audio: keep the silent image segment.
                pass
        segments.append(segment)

    # "compose" keeps audio & size aligned across segments.
    return concatenate_videoclips(segments, method="compose")
237
+
238
+
239
def _normalize_upload_paths(files: List) -> List[str]:
    """Extract existing filesystem paths from gradio upload objects (or raw strings)."""
    found = []
    for f in files or []:
        p = getattr(f, "name", None) or getattr(f, "path", None) or f
        if p and os.path.exists(p):
            found.append(p)
    return found


def _probe_audio_duration(path: str, fallback: float) -> float:
    """Return an audio file's duration in seconds, closing the probe clip (fixes the original's handle leak); *fallback* on any failure."""
    try:
        probe = AudioFileClip(path)
        try:
            return float(probe.duration)
        finally:
            try:
                probe.close()
            except Exception:
                pass
    except Exception:
        return float(fallback)


def _uniform_slideshow_clip(frames: List[np.ndarray], seconds_per_image: float, fps: int):
    """Build a clip showing every frame for the same duration by frame repetition."""
    repeats = max(1, int(round(float(seconds_per_image) * fps)))
    expanded = []
    for frame in frames:
        expanded.extend([frame] * repeats)
    return ImageSequenceClip(expanded, fps=fps)


def _write_mp4(clip, out_path: str, fps: int = 24) -> None:
    """Encode *clip* to H.264/AAC MP4 with the app's shared encoder settings."""
    clip.write_videofile(
        out_path,
        codec="libx264",
        audio_codec="aac",
        fps=fps,
        preset="medium",
        threads=max(1, (os.cpu_count() or 2) // 2),
    )


def create_slideshow(
    image_files: List,
    narration_mode: str,  # "None" | "Single story" | "Per-image (files)" | "Per-image (TTS per line)"
    seconds_per_image: float,
    width: int,
    height: int,
    fit_mode: str,
    bg_color: str,
    sort_mode: str,
    shuffle_seed: Optional[float],

    # single-story inputs
    story_text: str,
    match_video_to_narration: bool,

    # per-image inputs
    per_image_texts: str,  # one line per image; optional "speaker| text" when using Coqui
    per_image_audio_files: List,  # uploaded audio files

    # TTS config
    tts_backend: str,
    tts_voice: Optional[str],
    audio_gain_db: float
):
    """
    Build the slideshow video according to *narration_mode* and return
    (output_path, status_message); (None, message) on input errors.

    The parameter order is the gradio `inputs=[...]` wiring order — do not
    reorder.
    """
    if not image_files:
        return None, "Please upload at least one image."

    # Normalize image paths
    paths = _normalize_upload_paths(image_files)
    if not paths:
        return None, "Could not read the uploaded images."

    # Order
    if sort_mode == "Filename (A→Z)":
        paths = sorted(paths, key=lambda p: os.path.basename(p).lower())
    elif sort_mode == "Filename (Z→A)":
        paths = sorted(paths, key=lambda p: os.path.basename(p).lower(), reverse=True)
    elif sort_mode == "Shuffle":
        # Seeded RNG so the same seed reproduces the same order.
        random.Random(int(shuffle_seed or 0)).shuffle(paths)

    # Load frames
    width = int(width); height = int(height)
    frames = [load_and_fit_image(p, width, height, fit=fit_mode, bg=bg_color) for p in paths]

    # Build outputs based on narration_mode
    out_path = os.path.join(tempfile.gettempdir(), "slideshow_output.mp4")

    # --- Per-image AUDIO FILES ---
    if narration_mode == "Per-image (files)" and per_image_audio_files:
        # Normalize audio paths & sort by filename
        aud_paths = sorted(_normalize_upload_paths(per_image_audio_files),
                           key=lambda p: os.path.basename(p).lower())
        per_img_audio = map_audio_to_images_by_name(paths, aud_paths)

        # Durations: match each audio if present, else fall back to seconds_per_image
        durations = [
            _probe_audio_duration(ap, seconds_per_image) if ap else float(seconds_per_image)
            for ap in per_img_audio
        ]

        final_clip = build_variable_duration_video(frames, durations, per_img_audio, audio_gain_db)
        _write_mp4(final_clip, out_path)
        return out_path, "Done! Per-image audio applied."

    # --- Per-image TTS per line ---
    # `or ""` guards against a None Textbox value (original crashed on None).
    if narration_mode == "Per-image (TTS per line)" and (per_image_texts or "").strip():
        lines = [ln.strip() for ln in per_image_texts.splitlines()]
        # Pad with empties / trim so there is exactly one line per image.
        lines = (lines + [""] * len(paths))[:len(paths)]

        tmp_dir = tempfile.gettempdir()
        per_img_audio = []
        durations = []
        for idx, text in enumerate(lines):
            voice = tts_voice
            # Optional "speaker| text" override for Coqui
            if "|" in text and tts_backend.startswith("Coqui"):
                maybe_speaker, maybe_text = text.split("|", 1)
                if maybe_text.strip():
                    text = maybe_text.strip()
                if maybe_speaker.strip():
                    voice = maybe_speaker.strip()

            apath = None
            if text:
                gen = synth_tts_to_file(text, tts_backend, voice,
                                        os.path.join(tmp_dir, f"tts_line_{idx}.wav"))
                apath = gen if gen and os.path.exists(gen) else None

            per_img_audio.append(apath)
            durations.append(_probe_audio_duration(apath, seconds_per_image)
                             if apath else float(seconds_per_image))

        final_clip = build_variable_duration_video(frames, durations, per_img_audio, audio_gain_db)
        _write_mp4(final_clip, out_path)
        return out_path, "Done! Per-image TTS applied."

    # --- Single story (one track) ---
    if narration_mode == "Single story" and (story_text or "").strip():
        fps = 24
        clip = _uniform_slideshow_clip(frames, seconds_per_image, fps)

        # TTS for the whole story as one narration track.
        gen = synth_tts_to_file(story_text.strip(), tts_backend, tts_voice,
                                os.path.join(tempfile.gettempdir(), "narration_single.wav"))
        audio_path = gen if gen and os.path.exists(gen) else None

        if audio_path:
            try:
                aclip = AudioFileClip(audio_path)
                if match_video_to_narration:
                    clip = clip_with_duration(clip, float(aclip.duration))
                gain = 10 ** (float(audio_gain_db) / 20.0) if audio_gain_db else 1.0
                if abs(gain - 1.0) > 1e-3:
                    aclip = apply_linear_gain(aclip, gain)
                clip = clip_with_audio(clip, aclip)
            except Exception:
                # Best-effort: ship the video without narration on failure.
                pass

        _write_mp4(clip, out_path, fps=fps)
        return out_path, "Done! Story narration applied."

    # --- No narration: uniform duration slideshow ---
    fps = 24
    clip = _uniform_slideshow_clip(frames, seconds_per_image, fps)
    _write_mp4(clip, out_path, fps=fps)
    return out_path, "Done! Video created without narration."
432
+
433
+
434
+ # ---------- UI ----------
435
+
436
def update_voice_choices(backend_name: str):
    """Refresh the voice dropdown for the chosen backend; returns (dropdown update, status markdown)."""
    voices = list_voices(backend_name)
    selected = voices[0] if voices else None
    status = f"Loaded {len(voices)} voices." if voices else "No voices found (or using gTTS)."
    return gr.update(choices=voices, value=selected), status
440
+
441
+
442
def ui():
    """
    Build the Gradio Blocks interface and wire its callbacks.

    Returns the (unlaunched) gr.Blocks app; the caller launches it.
    """
    with gr.Blocks(title="Slideshow + Per-Image Audio + Voice Picker", theme=gr.themes.Soft()) as demo:
        gr.Markdown(
            """
            # 🖼️ → 🎬 Slideshow Maker
            - **Per-image audio**: upload audio files (matched by filename or order) **or** generate per-image narration from text lines.
            - **TTS voices**: pick from **Coqui VCTK**'s multi-speaker voices (male/female), or use gTTS as a lightweight fallback.
            """
        )

        with gr.Row():
            # Left column: image inputs and frame/layout controls.
            with gr.Column(scale=1):
                image_files = gr.Files(
                    label="Upload Images (multiple)",
                    file_count="multiple",
                    file_types=["image"],
                )
                sort_mode = gr.Radio(
                    ["Filename (A→Z)", "Filename (Z→A)", "Shuffle"],
                    value="Filename (A→Z)",
                    label="Image Order",
                )
                shuffle_seed = gr.Number(value=0, precision=0, label="Shuffle Seed (integer)")

                # Only used when no per-image audio fixes each slide's length.
                seconds_per_image = gr.Slider(
                    minimum=0.1, maximum=10.0, step=0.1, value=1.5, label="Seconds per Image (used when no per-image audio)"
                )

                with gr.Row():
                    width = gr.Number(value=1280, precision=0, label="Width (px)")
                    height = gr.Number(value=720, precision=0, label="Height (px)")

                fit_mode = gr.Radio(["contain", "cover", "stretch"], value="contain", label="Sizing Mode")
                bg_color = gr.ColorPicker(value="#000000", label="Background (for 'contain')")

            # Right column: narration mode and TTS configuration.
            with gr.Column(scale=1):
                narration_mode = gr.Radio(
                    ["None", "Single story", "Per-image (files)", "Per-image (TTS per line)"],
                    value="None",
                    label="Narration mode"
                )
                # Single-story UI
                story_text = gr.Textbox(
                    label="Story (Single track narration)",
                    placeholder="Type or paste your story..."
                )
                match_video_to_narration = gr.Checkbox(
                    value=True, label="Match video duration to narration length (single-story)"
                )

                # Per-image UI
                per_image_audio_files = gr.Files(
                    label="Per-image audio files (optional) — matched by filename or order",
                    file_count="multiple",
                    file_types=["audio"]
                )
                per_image_texts = gr.Textbox(
                    label="Per-image TTS text (one line per image). For Coqui, optional 'speaker| text' per line.",
                    placeholder="Line 1 text\nLine 2 text\n..."
                )

                with gr.Row():
                    tts_backend = gr.Dropdown(
                        ["Coqui (VCTK multi-speaker)", "gTTS (simple)"],
                        value="Coqui (VCTK multi-speaker)",
                        label="TTS backend"
                    )
                    # Populated by update_voice_choices when the backend changes.
                    tts_voice = gr.Dropdown(choices=[], label="Voice (for Coqui)")
                    voice_status = gr.Markdown("")

                audio_gain_db = gr.Slider(
                    minimum=-12, maximum=12, step=1, value=0, label="Narration Gain (dB)"
                )

        run_btn = gr.Button("Create Video", variant="primary")
        status = gr.Markdown("")

        video_out = gr.Video(label="Result", autoplay=False)

        # Load voices when backend changes
        tts_backend.change(
            fn=update_voice_choices,
            inputs=[tts_backend],
            outputs=[tts_voice, voice_status]
        )

        # Main action — input order must match create_slideshow's signature.
        run_btn.click(
            fn=create_slideshow,
            inputs=[
                image_files,
                narration_mode,
                seconds_per_image,
                width, height,
                fit_mode, bg_color,
                sort_mode, shuffle_seed,
                # single-story
                story_text, match_video_to_narration,
                # per-image
                per_image_texts, per_image_audio_files,
                # tts
                tts_backend, tts_voice,
                audio_gain_db
            ],
            outputs=[video_out, status],
        )

        gr.Markdown(
            """
            **Tips**
            - *Per-image audio (files)*: name audio like your images (e.g., `001.jpg` ↔ `001.wav`) for automatic matching.
            - *Per-image TTS per line*: supply the same number of lines as images; extra/missing lines are trimmed/padded.
            - *Coqui voice per line*: prefix a line with `speaker| text` to override the dropdown voice (e.g., `p225| Hello there`).
            """
        )

    return demo
559
+
560
+
561
# Script entry point: build the Gradio app and start the local server.
if __name__ == "__main__":
    ui().launch()