Hug0endob committed
Commit 06e8576 · verified · 1 Parent(s): 28f418e

Update app.py

Files changed (1)
  1. app.py +128 -160
app.py CHANGED
Old version (lines removed in this commit are marked with "-"):

@@ -1,28 +1,27 @@
import os
import shutil
- import base64
import tempfile
import subprocess
from io import BytesIO
from PIL import Image
import requests
import gradio as gr
from mistralai import Mistral

- # --- Configuration ---
- DEFAULT_KEY = os.getenv("MISTRAL_API_KEY")
- DEFAULT_MODEL_IMAGE = "pixtral-12b-2409"  # image-only model (default for images)
- DEFAULT_MODEL_VIDEO = "voxtral-mini-latest"  # audio/video-capable model (Voxtral)
- # ---------------------

- def get_client(alt_key: str = None):
-     key = (alt_key or "").strip() or DEFAULT_KEY
-     return Mistral(api_key=key)

- def is_remote(s: str):
-     return bool(s) and (s.startswith("http://") or s.startswith("https://"))

- def fetch_bytes(src: str):
    if is_remote(src):
        r = requests.get(src, timeout=60)
        r.raise_for_status()
@@ -30,70 +29,29 @@ def fetch_bytes(src: str):
    with open(src, "rb") as f:
        return f.read()

- # ---------------- image conversion utilities (kept from your original) ----------------
- def try_ffmpeg_extract_frame(in_path: str, out_path: str):
-     ffmpeg = shutil.which("ffmpeg")
-     if not ffmpeg:
-         return False
-     cmd = [ffmpeg, "-y", "-i", in_path, "-vf", "scale=-2:512", "-frames:v", "1", out_path]
-     try:
-         subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=30)
-         return os.path.exists(out_path)
-     except Exception:
-         return False
-
- def ezgif_convert(media_bytes: bytes, filename: str = "input"):
-     files = {"new-image": (filename, media_bytes)}
-     r = requests.post("https://s.ezgif.com/upload", files=files, timeout=60)
-     r.raise_for_status()
-     import re
-     m = re.search(r'name="file" value="([^"]+)"', r.text)
-     if not m:
-         raise RuntimeError("ezgif upload failed")
-     key = m.group(1)
-     conv = requests.post("https://s.ezgif.com/gif-to-jpg", data={"file": key}, timeout=60)
-     conv.raise_for_status()
-     m2 = re.search(r'<img src="(https?://s.ezgif.com/tmp/[^"]+)"', conv.text) or re.search(r'<a href="(https?://s.ezgif.com/tmp/[^"]+)"', conv.text)
-     if not m2:
-         raise RuntimeError("ezgif conversion failed")
-     jpg_url = m2.group(1)
-     r2 = requests.get(jpg_url, timeout=60)
-     r2.raise_for_status()
-     return r2.content
-
def convert_to_jpeg_bytes(media_bytes: bytes, filename_hint: str = "input"):
-     try:
-         img = Image.open(BytesIO(media_bytes))
-         if img.mode != "RGB":
-             img = img.convert("RGB")
-         base_h = 512
-         w = int(img.width * (base_h / img.height))
-         img = img.resize((w, base_h), Image.LANCZOS)
-         buf = BytesIO()
-         img.save(buf, format="JPEG", quality=90)
-         return buf.getvalue()
-     except Exception:
-         with tempfile.TemporaryDirectory() as td:
-             in_path = os.path.join(td, filename_hint)
-             with open(in_path, "wb") as f:
-                 f.write(media_bytes)
-             out_path = os.path.join(td, "frame.jpg")
-             if try_ffmpeg_extract_frame(in_path, out_path) and os.path.exists(out_path):
-                 with open(out_path, "rb") as f:
-                     return f.read()
-             return ezgif_convert(media_bytes, filename_hint)
-
- def to_b64_jpeg(img_bytes: bytes):
    return base64.b64encode(img_bytes).decode("utf-8")
- # --------------------------------------------------------------------------------------

- # ---------------- audio/video helpers ----------------
def model_supports_audio(model_name: str) -> bool:
    if not model_name:
        return False
    mn = model_name.lower()
    return "voxtral" in mn or "audio" in mn or "video" in mn

def save_remote_to_temp(url: str, suffix: str = "") -> str:
    b = fetch_bytes(url)
    fd, path = tempfile.mkstemp(suffix=suffix or os.path.splitext(url)[1] or "")
@@ -102,122 +60,138 @@ def save_remote_to_temp(url: str, suffix: str = "") -> str:
        f.write(b)
    return path

def ffmpeg_extract_audio(in_path: str, out_path: str):
    ffmpeg = shutil.which("ffmpeg")
    if not ffmpeg:
-         raise RuntimeError("ffmpeg not available in runtime")
-     # mono 16k WAV for transcription robustness
    cmd = [ffmpeg, "-y", "-i", in_path, "-vn", "-ar", "16000", "-ac", "1", "-f", "wav", out_path]
    subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=120)
    return out_path

- def transcribe_audio_with_client(client, model, audio_bytes: bytes, language: str = None):
-     # Use the client's audio.transcriptions.complete if available
-     try:
-         # The mistralai client accepts a file-like object for "file"
-         bio = BytesIO(audio_bytes)
-         resp = client.audio.transcriptions.complete(model=model, file={"content": bio, "file_name": "audio.wav"}, language=language)  # language optional
-         # resp typically includes "text"
-         if isinstance(resp, dict):
-             return resp.get("text", "")
-         # fallback attribute access
-         return getattr(resp, "text", "")
-     except Exception as e:
-         raise
-
- # ---------------- streaming & processing ----------------
- def generate_stream_multimedia(media_src: str, custom_prompt: str, alt_key: str, model: str = DEFAULT_MODEL_VIDEO):
-     client = get_client(alt_key)
-     prompt_text = (custom_prompt.strip() if custom_prompt and custom_prompt.strip() else
        "Provide a detailed, neutral, clinical-style description focusing on observable non-sexual features, hygiene, skin condition, posture, and general anatomy. Keep language professional.")
-     # If input looks like an image (by extension) try the image path used previously
-     lower = (media_src or "").lower()
-     is_image_ext = lower.endswith((".jpg", ".jpeg", ".png", ".webp", ".gif")) or not is_remote(media_src) and os.path.isfile(media_src) and any(media_src.lower().endswith(ext) for ext in (".jpg", ".jpeg", ".png", ".webp", ".gif"))
-     # If it's an image, reuse existing image flow (convert to JPEG and send)
-     if is_image_ext:
        try:
-             raw = fetch_bytes(media_src)
-             jpg = convert_to_jpeg_bytes(raw, filename_hint=os.path.basename(media_src) or "input")
        except Exception as e:
            yield f"Error processing image: {e}"
            return
-         b64 = to_b64_jpeg(jpg)
-         # choose image-capable model (keep previous model)
-         image_model = DEFAULT_MODEL_IMAGE
        messages = [{
            "role": "user",
            "content": [
-                 {"type": "text", "text": prompt_text},
                {"type": "image_url", "image_url": f"data:image/jpeg;base64,{b64}"}
            ],
            "stream": False
        }]
        try:
            partial = ""
-             for chunk in client.chat.stream(model=image_model, messages=messages):
-                 if getattr(chunk, "data", None) and chunk.data.choices[0].delta.content is not None:
-                     partial += chunk.data.choices[0].delta.content
                yield partial
            return
        except Exception as e:
            yield f"Model error (image): {e}"
            return

-     # If model supports audio/video and input is a remote URL, try sending the video URL directly
-     if model_supports_audio(model) and is_remote(media_src):
-         # Try direct video URL block many Mistral processors accept {"type":"video","url": ...}
-         messages = [{
-             "role": "user",
-             "content": [
-                 {"type": "text", "text": prompt_text},
-                 {"type": "video", "url": media_src}
-             ],
-             "stream": False
-         }]
-         try:
-             partial = ""
-             for chunk in client.chat.stream(model=model, messages=messages):
-                 if getattr(chunk, "data", None) and chunk.data.choices[0].delta.content is not None:
-                     partial += chunk.data.choices[0].delta.content
-             yield partial
-             return
-         except Exception:
-             # if direct video URL fails, fall back to audio extraction/transcription below
-             pass

-     # Fallback: download media, extract audio, transcribe, then send transcript + prompt to chat
    tmp_media = None
    tmp_audio = None
    try:
-         tmp_media = save_remote_to_temp(media_src, suffix=".mp4")
        tmp_audio = tempfile.mktemp(suffix=".wav")
        ffmpeg_extract_audio(tmp_media, tmp_audio)
        with open(tmp_audio, "rb") as f:
            audio_bytes = f.read()
-         # Use transcription endpoint
        try:
-             transcript = transcribe_audio_with_client(client, model, audio_bytes)
        except Exception as e:
            yield f"Transcription error: {e}"
            return
-         # Send transcript + prompt to chosen chat model for streaming description
-         # Use image model (text-only) or audio-capable chat model for richer understanding
-         chat_model = model if model_supports_audio(model) else DEFAULT_MODEL_IMAGE
        messages = [{
            "role": "user",
            "content": [
-                 {"type": "text", "text": f"{prompt_text}\n\nTranscript:\n{transcript}"}
            ],
            "stream": False
        }]
        partial = ""
        for chunk in client.chat.stream(model=chat_model, messages=messages):
-             if getattr(chunk, "data", None) and chunk.data.choices[0].delta.content is not None:
-                 partial += chunk.data.choices[0].delta.content
            yield partial
        return
    except Exception as e:
-         yield f"Error processing media/audio fallback: {e}"
    finally:
        for p in (tmp_media, tmp_audio):
            try:
@@ -226,57 +200,51 @@ def generate_stream_multimedia(media_src: str, custom_prompt: str, alt_key: str,
            except Exception:
                pass

- # ---------------- Gradio UI ----------------
with gr.Blocks(title="Image/Video to Clinical Description") as demo:
-     gr.Markdown("Image/Video to Clinical Description (custom prompt optional)")

    with gr.Row():
        with gr.Column(scale=1):
-             alt_key = gr.Textbox(label="Mistral API Key (optional)", type="password", max_lines=1)
-             preview_img = gr.Image(label="Preview image (first frame)", type="pil")
-             preview_video = gr.HTML("<div style='color:gray'>Video preview will appear here when a video URL is provided.</div>")
-             url_input = gr.Textbox(label="Image/Video URL", placeholder="https://...")
-             custom = gr.Textbox(label="Custom prompt (optional)", lines=4, placeholder="Enter custom prompt to override default")
-             model_select = gr.Dropdown(label="Model", choices=[DEFAULT_MODEL_IMAGE, DEFAULT_MODEL_VIDEO], value=DEFAULT_MODEL_VIDEO)
            submit = gr.Button("Submit")
        with gr.Column(scale=1):
            output_display = gr.Markdown("", elem_id="generated_output")

    def load_preview(url):
        if not url:
            return None, "<div style='color:gray'>No URL provided.</div>"
-         # Try to preview as image first (works for image URLs)
        try:
-             r = requests.get(url, timeout=30)
            r.raise_for_status()
-             # If content-type indicates video, create <video> tag for preview
-             content_type = r.headers.get("content-type", "")
-             if content_type.startswith("video/") or any(url.lower().endswith(ext) for ext in (".mp4", ".mov", ".webm", ".mkv")):
-                 # build HTML5 video preview
-                 video_html = f"""
-                 <video controls style="max-width:100%;height:auto;">
-                     <source src="{url}" type="{content_type or 'video/mp4'}">
-                     Your browser does not support the video tag.
-                 </video>
-                 """
                return None, video_html
            # otherwise treat as image
-             img = Image.open(BytesIO(r.content)).convert("RGB")
-             return img, "<div style='color:gray'>Image preview shown. If this is a video, server didn't report video content-type.</div>"
        except Exception:
-             # If remote fetch fails for preview, show nothing
            return None, "<div style='color:red'>Preview failed to load.</div>"

-     def start_gen(url, custom_p, alt_k, model_name):
        if not url:
            return "No URL provided."
        text = ""
-         for chunk in generate_stream_multimedia(url, custom_p, alt_k, model=model_name):
-             text += chunk
        yield text

    url_input.change(fn=load_preview, inputs=[url_input], outputs=[preview_img, preview_video])
-     submit.click(fn=start_gen, inputs=[url_input, custom, alt_key, model_select], outputs=[output_display])

if __name__ == "__main__":
    demo.launch()
 
New version (lines added in this commit are marked with "+"):

import os
import shutil
import tempfile
import subprocess
from io import BytesIO
from PIL import Image
+ import base64
import requests
import gradio as gr
from mistralai import Mistral

+ # Configuration
+ DEFAULT_KEY = os.getenv("MISTRAL_API_KEY", "")
+ DEFAULT_IMAGE_MODEL = "pixtral-12b-2409"
+ DEFAULT_VIDEO_MODEL = "voxtral-mini-latest"

+ def get_client(key: str = None):
+     api_key = (key or "").strip() or DEFAULT_KEY
+     return Mistral(api_key=api_key)

+ def is_remote(src: str) -> bool:
+     return bool(src) and (src.startswith("http://") or src.startswith("https://"))

+ def fetch_bytes(src: str) -> bytes:
    if is_remote(src):
        r = requests.get(src, timeout=60)
        r.raise_for_status()
    with open(src, "rb") as f:
        return f.read()

+ # Image utilities (kept minimal)
def convert_to_jpeg_bytes(media_bytes: bytes, filename_hint: str = "input"):
+     img = Image.open(BytesIO(media_bytes))
+     if img.mode != "RGB":
+         img = img.convert("RGB")
+     base_h = 512
+     w = int(img.width * (base_h / img.height))
+     img = img.resize((w, base_h), Image.LANCZOS)
+     buf = BytesIO()
+     img.save(buf, format="JPEG", quality=90)
+     return buf.getvalue()
+
+ def b64_jpeg(img_bytes: bytes) -> str:
    return base64.b64encode(img_bytes).decode("utf-8")

+ # Model capability detection
def model_supports_audio(model_name: str) -> bool:
    if not model_name:
        return False
    mn = model_name.lower()
    return "voxtral" in mn or "audio" in mn or "video" in mn

+ # Temp file helpers
def save_remote_to_temp(url: str, suffix: str = "") -> str:
    b = fetch_bytes(url)
    fd, path = tempfile.mkstemp(suffix=suffix or os.path.splitext(url)[1] or "")
        f.write(b)
    return path

+ # ffmpeg audio extraction
def ffmpeg_extract_audio(in_path: str, out_path: str):
    ffmpeg = shutil.which("ffmpeg")
    if not ffmpeg:
+         raise RuntimeError("ffmpeg not found in runtime")
    cmd = [ffmpeg, "-y", "-i", in_path, "-vn", "-ar", "16000", "-ac", "1", "-f", "wav", out_path]
    subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=120)
    return out_path

+ # Transcription via Mistral audio.transcriptions.complete
+ def transcribe_audio(client: Mistral, model: str, audio_bytes: bytes, language: str = None) -> str:
+     bio = BytesIO(audio_bytes)
+     resp = client.audio.transcriptions.complete(model=model, file={"content": bio, "file_name": "audio.wav"}, language=language)
+     if isinstance(resp, dict):
+         return resp.get("text", "")
+     return getattr(resp, "text", "")
+
+ # Core processing + streaming
+ def generate_stream_multimedia(src: str, custom_prompt: str, api_key: str, model_name: str):
+     client = get_client(api_key)
+     prompt_base = (custom_prompt.strip() if custom_prompt and custom_prompt.strip() else
        "Provide a detailed, neutral, clinical-style description focusing on observable non-sexual features, hygiene, skin condition, posture, and general anatomy. Keep language professional.")
+     # Label / heading used once at start
+     heading = f"### {custom_prompt.strip()}" if custom_prompt and custom_prompt.strip() else "### Clinical-style Description"
+     # If input looks like an image (ext or local file), use image flow
+     lower = (src or "").lower()
+     image_exts = (".jpg", ".jpeg", ".png", ".webp", ".gif")
+     is_image = lower.endswith(image_exts) or (not is_remote(src) and os.path.isfile(src) and src.lower().endswith(image_exts))
+     if is_image:
        try:
+             raw = fetch_bytes(src)
+             jpg = convert_to_jpeg_bytes(raw, filename_hint=os.path.basename(src) or "input")
        except Exception as e:
            yield f"Error processing image: {e}"
            return
+         b64 = b64_jpeg(jpg)
        messages = [{
            "role": "user",
            "content": [
+                 {"type": "text", "text": prompt_base},
                {"type": "image_url", "image_url": f"data:image/jpeg;base64,{b64}"}
            ],
            "stream": False
        }]
+         # stream from an image-capable model
        try:
+             yielded_heading = False
            partial = ""
+             for chunk in client.chat.stream(model=DEFAULT_IMAGE_MODEL, messages=messages):
+                 delta = getattr(chunk, "data", None) and chunk.data.choices[0].delta.content
+                 if delta is not None:
+                     if not yielded_heading:
+                         partial += heading + "\n\n"
+                         yielded_heading = True
+                     partial += delta
                yield partial
            return
        except Exception as e:
            yield f"Model error (image): {e}"
            return

+     # If model supports audio/video and src is remote, try direct video URL variants
+     if model_supports_audio(model_name) and is_remote(src):
+         # Try a few common video/audio URL block shapes supported by Mistral clients
+         variants = [
+             {"type": "video", "url": src},
+             {"type": "video_url", "video_url": src},
+             {"type": "input_audio", "input_audio_url": src},  # less common; try anyway
+             {"type": "audio", "url": src}
+         ]
+         for v in variants:
+             messages = [{
+                 "role": "user",
+                 "content": [
+                     {"type": "text", "text": prompt_base},
+                     v
+                 ],
+                 "stream": False
+             }]
+             try:
+                 yielded_heading = False
+                 partial = ""
+                 for chunk in client.chat.stream(model=model_name, messages=messages):
+                     delta = getattr(chunk, "data", None) and chunk.data.choices[0].delta.content
+                     if delta is not None:
+                         if not yielded_heading:
+                             partial += heading + "\n\n"
+                             yielded_heading = True
+                         partial += delta
+                     yield partial
+                 return
+             except Exception:
+                 # try next variant
+                 pass

+     # Fallback: download, extract audio, transcribe, then send transcript + prompt to chat model
    tmp_media = None
    tmp_audio = None
    try:
+         tmp_media = save_remote_to_temp(src, suffix=".mp4")
        tmp_audio = tempfile.mktemp(suffix=".wav")
        ffmpeg_extract_audio(tmp_media, tmp_audio)
        with open(tmp_audio, "rb") as f:
            audio_bytes = f.read()
+         # Use transcription endpoint (voxtral-mini-latest recommended)
        try:
+             transcript = transcribe_audio(client, model_name, audio_bytes)
        except Exception as e:
            yield f"Transcription error: {e}"
            return
+         # Send transcript + prompt to chat model and stream response
+         chat_model = model_name if model_supports_audio(model_name) else DEFAULT_IMAGE_MODEL
        messages = [{
            "role": "user",
            "content": [
+                 {"type": "text", "text": f"{prompt_base}\n\nTranscript:\n{transcript}"}
            ],
            "stream": False
        }]
+         yielded_heading = False
        partial = ""
        for chunk in client.chat.stream(model=chat_model, messages=messages):
+             delta = getattr(chunk, "data", None) and chunk.data.choices[0].delta.content
+             if delta is not None:
+                 if not yielded_heading:
+                     partial += heading + "\n\n"
+                     yielded_heading = True
+                 partial += delta
            yield partial
        return
    except Exception as e:
+         yield f"Error processing fallback: {e}"
    finally:
        for p in (tmp_media, tmp_audio):
            try:
            except Exception:
                pass

+ # --- Gradio UI ---
with gr.Blocks(title="Image/Video to Clinical Description") as demo:
+     gr.Markdown("Image/Video to Clinical Description provides a clinical, non-sexual, neutral description of images or video (audio optional).")

    with gr.Row():
        with gr.Column(scale=1):
+             api_key = gr.Textbox(label="Mistral API Key (optional)", type="password", max_lines=1)
+             preview_img = gr.Image(label="Image preview (if image)", type="pil")
+             preview_video = gr.HTML("<div style='color:gray'>Video preview will appear here for video URLs.</div>")
+             url_input = gr.Textbox(label="Image or Video URL", placeholder="https://...")
+             custom_prompt = gr.Textbox(label="Custom heading (optional)", lines=2, placeholder="Custom heading to appear above the description")
+             model_select = gr.Dropdown(label="Model", choices=[DEFAULT_IMAGE_MODEL, DEFAULT_VIDEO_MODEL], value=DEFAULT_VIDEO_MODEL)
            submit = gr.Button("Submit")
        with gr.Column(scale=1):
            output_display = gr.Markdown("", elem_id="generated_output")

+     # Preview loader: choose image preview if image, otherwise HTML5 video tag for video
    def load_preview(url):
        if not url:
            return None, "<div style='color:gray'>No URL provided.</div>"
        try:
+             r = requests.get(url, timeout=30, stream=True)
            r.raise_for_status()
+             ctype = r.headers.get("content-type", "")
+             # treat explicit video content-type or known extensions as video
+             if ctype.startswith("video/") or any(url.lower().endswith(ext) for ext in (".mp4", ".mov", ".webm", ".mkv")):
+                 video_html = f'<video controls style="max-width:100%;height:auto;"><source src="{url}" type="{ctype or "video/mp4"}">Your browser does not support the video tag.</video>'
                return None, video_html
            # otherwise treat as image
+             data = r.content
+             img = Image.open(BytesIO(data)).convert("RGB")
+             return img, "<div style='color:gray'>Image preview shown.</div>"
        except Exception:
            return None, "<div style='color:red'>Preview failed to load.</div>"

+     def run_generation(url, custom_h, key, model_name):
        if not url:
            return "No URL provided."
        text = ""
+         for chunk in generate_stream_multimedia(url, custom_h, key, model_name):
+             text = chunk  # chunk already accumulates heading + partial text
        yield text

    url_input.change(fn=load_preview, inputs=[url_input], outputs=[preview_img, preview_video])
+     submit.click(fn=run_generation, inputs=[url_input, custom_prompt, api_key, model_select], outputs=[output_display])

if __name__ == "__main__":
    demo.launch()