CB committed on
Commit
8ce448b
·
verified ·
1 Parent(s): 59ad1a6

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +246 -396
streamlit_app.py CHANGED
@@ -5,8 +5,8 @@ import string
5
  import hashlib
6
  import traceback
7
  import inspect
8
- import re
9
  import json
 
10
  from glob import glob
11
  from pathlib import Path
12
  from difflib import SequenceMatcher
@@ -21,13 +21,10 @@ from dotenv import load_dotenv
21
 
22
  load_dotenv()
23
 
24
- # Remove phi agent code (fragile imports); keep simple flag
25
  HAS_PHI = False
26
-
27
- # google generative ai SDK (may be absent or partial in some runtimes)
28
  try:
29
  import google.generativeai as genai # type: ignore
30
- # upload_file/get_file exist in some versions
31
  try:
32
  from google.generativeai import upload_file, get_file # type: ignore
33
  except Exception:
@@ -45,22 +42,26 @@ DATA_DIR = Path("./data")
45
  DATA_DIR.mkdir(exist_ok=True)
46
 
47
  # Session defaults
48
- st.session_state.setdefault("videos", "")
49
- st.session_state.setdefault("loop_video", False)
50
- st.session_state.setdefault("uploaded_file", None)
51
- st.session_state.setdefault("processed_file", None)
52
- st.session_state.setdefault("busy", False)
53
- st.session_state.setdefault("last_loaded_path", "")
54
- st.session_state.setdefault("analysis_out", "")
55
- st.session_state.setdefault("last_error", "")
56
- st.session_state.setdefault("file_hash", None)
57
- st.session_state.setdefault("fast_mode", False)
58
- st.session_state.setdefault("api_key", os.getenv("GOOGLE_API_KEY", ""))
59
- st.session_state.setdefault("last_model", "")
60
- st.session_state.setdefault("last_url_value", "")
 
 
 
61
 
62
  HEADERS = {"User-Agent": "Mozilla/5.0 (compatible)"}
63
 
 
64
  def sanitize_filename(path_str: str):
65
  return Path(path_str).name.lower().translate(str.maketrans("", "", string.punctuation)).replace(" ", "_")
66
 
@@ -89,81 +90,16 @@ def compress_video(input_path: str, target_path: str, crf: int = 28, preset: str
89
  except Exception:
90
  return input_path
91
 
92
- def download_video_ytdlp(url: str, save_dir: str, video_password: str = None) -> str:
93
- if not url:
94
- raise ValueError("No URL provided")
95
- outtmpl = str(Path(save_dir) / "%(id)s.%(ext)s")
96
- ydl_opts = {"outtmpl": outtmpl, "format": "best"}
97
- if video_password:
98
- ydl_opts["videopassword"] = video_password
99
- with yt_dlp.YoutubeDL(ydl_opts) as ydl:
100
- info = ydl.extract_info(url, download=True)
101
- video_id = info.get("id") if isinstance(info, dict) else None
102
- if video_id:
103
- matches = glob(os.path.join(save_dir, f"{video_id}.*"))
104
- else:
105
- all_files = glob(os.path.join(save_dir, "*"))
106
- matches = sorted(all_files, key=os.path.getmtime, reverse=True)[:1] if all_files else []
107
- if not matches:
108
- raise FileNotFoundError("Downloaded video not found")
109
- return convert_video_to_mp4(matches[0])
110
-
111
def file_name_or_id(file_obj):
    """Best-effort extraction of a file identifier from an SDK file object.

    Handles None, dicts (``name``/``id`` keys), and objects exposing one of
    several attribute spellings; anything else falls back to ``str()``.
    Returns None only for None input.
    """
    if file_obj is None:
        return None
    if isinstance(file_obj, dict):
        return file_obj.get("name") or file_obj.get("id")
    # First truthy attribute wins; falsy values are treated as absent.
    candidates = (
        getattr(file_obj, attr, None)
        for attr in ("name", "id", "fileId", "file_id")
    )
    ident = next((value for value in candidates if value), None)
    return ident if ident is not None else str(file_obj)
122
-
123
- def get_effective_api_key():
124
- return st.session_state.get("api_key") or os.getenv("GOOGLE_API_KEY")
125
-
126
- def maybe_configure_genai(key):
127
- if not key or not HAS_GENAI:
128
- return False
129
- try:
130
- genai.configure(api_key=key)
131
- return True
132
- except Exception:
133
- return False
134
-
135
def clear_all_video_state():
    """Reset all video-related session keys and delete cached files in DATA_DIR."""
    for transient in ("uploaded_file", "processed_file"):
        st.session_state.pop(transient, None)
    st.session_state["videos"] = ""
    st.session_state["last_loaded_path"] = ""
    st.session_state["analysis_out"] = ""
    st.session_state["last_error"] = ""
    st.session_state["file_hash"] = None
    for cached in glob(str(DATA_DIR / "*")):
        try:
            os.remove(cached)
        except Exception:
            # Best-effort cleanup; a locked or already-deleted file must not crash the UI.
            pass
148
-
149
- # --- Twitter (t.co / X) helpers integrated into expand/extract flow ---
150
  def expand_url(short_url, timeout=10):
151
- """
152
- General URL expander. For t.co/twitter shortlinks we try multiple variants
153
- and return final URL and HTML if available.
154
- """
155
  try:
156
  r = requests.get(short_url, allow_redirects=True, timeout=timeout, headers=HEADERS)
157
  r.raise_for_status()
158
- final = r.url
159
- return final, r.text
160
  except Exception as e:
161
  return None, f"error: {e}"
162
 
163
  def extract_video_from_html(html, base_url=None):
164
- """
165
- Generic extractor tries og:video, <video>, LD+JSON, twitter tags, and links to common hosts.
166
- """
167
  soup = BeautifulSoup(html, "html.parser")
168
  og = soup.find("meta", property="og:video")
169
  if og and og.get("content"):
@@ -179,6 +115,7 @@ def extract_video_from_html(html, base_url=None):
179
  for script in soup.find_all("script", type="application/ld+json"):
180
  try:
181
  data = json.loads(script.string or "{}")
 
182
  if isinstance(data, dict):
183
  video = data.get("video") or data.get("videoObject") or data.get("mainEntity")
184
  if isinstance(video, dict):
@@ -189,63 +126,46 @@ def extract_video_from_html(html, base_url=None):
189
  return data.get("contentUrl")
190
  except Exception:
191
  continue
192
- for meta_name in ("twitter:player:stream", "twitter:player"):
193
- m = soup.find("meta", attrs={"name": meta_name})
194
- if m and m.get("content"):
195
- return m.get("content")
196
  for a in soup.find_all("a", href=True):
197
  href = a["href"]
198
- if any(domain in href for domain in ("youtube.com", "youtu.be", "vimeo.com")):
199
  return href
200
  return None
201
 
202
  def extract_video_from_twitter_html(html):
203
- """
204
- Attempt to pull direct MP4 URL from Twitter/X HTML by searching JSON blobs and OG tags.
205
- This is a best-effort extractor and may fail if Twitter/X obfuscates content.
206
- """
207
  soup = BeautifulSoup(html, "html.parser")
208
-
209
- # 1) Open Graph video tag
210
  og_video = soup.find("meta", property="og:video")
211
  if og_video and og_video.get("content"):
212
  return og_video["content"]
213
-
214
- # 2) Look for JSON blobs in <script> tags and search for variants/urls
215
  scripts = soup.find_all("script")
216
  for s in scripts:
217
  txt = s.string
218
  if not txt:
219
  continue
220
- # crude detect for embedded JSON-ish blobs that include "video_info" or "variants"
221
- if "video_info" in txt or "variants" in txt or "playbackUrl" in txt or "media" in txt:
222
- # try to extract a JSON object within the script text
223
  m = re.search(r"(?s)(\{.+\})", txt)
224
  if not m:
225
  continue
226
  try:
227
  blob = json.loads(m.group(1))
228
  except Exception:
229
- # sometimes it's not strict JSON; skip
230
  continue
231
-
232
- # deep search for urls and variants
233
  def find_media_urls(obj):
234
  if isinstance(obj, dict):
235
  for k, v in obj.items():
236
- if isinstance(v, str):
237
- if v.startswith("https://") and v.endswith(".mp4"):
238
- yield v
239
  else:
240
  yield from find_media_urls(v)
241
  elif isinstance(obj, list):
242
  for it in obj:
243
  yield from find_media_urls(it)
244
-
245
  for url in find_media_urls(blob):
246
  return url
247
-
248
- # also look for variant lists
249
  def find_variants(obj):
250
  if isinstance(obj, dict):
251
  for k, v in obj.items():
@@ -261,22 +181,14 @@ def extract_video_from_twitter_html(html):
261
  elif isinstance(obj, list):
262
  for it in obj:
263
  yield from find_variants(it)
264
-
265
  for url in find_variants(blob):
266
  return url
267
-
268
  return None
269
 
270
  def extract_direct_twitter_video(url):
271
- """
272
- Expand t.co and try several page variants (mobile, amp, x.com) and oEmbed.
273
- Returns (direct_video_url or None, info_string)
274
- """
275
  final, html_or_err = expand_url(url)
276
  if final is None:
277
  return None, html_or_err
278
-
279
- # Try several variants (mobile, x.com, with query params)
280
  variants = [
281
  final,
282
  final.replace("://twitter.com/", "://mobile.twitter.com/"),
@@ -293,8 +205,6 @@ def extract_direct_twitter_video(url):
293
  return direct, u
294
  except Exception:
295
  continue
296
-
297
- # Try oEmbed as last resort
298
  try:
299
  oembed = requests.get("https://publish.twitter.com/oembed?url=" + final, headers=HEADERS, timeout=6)
300
  if oembed.ok:
@@ -306,16 +216,46 @@ def extract_direct_twitter_video(url):
306
  return video["src"], final
307
  except Exception:
308
  pass
309
-
310
  return None, "not found"
311
 
312
- # --- Upload helpers for Generative AI SDK + HTTP fallback (fixed endpoint/use patterns) ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
313
  def upload_video_sdk(filepath: str):
314
  key = get_effective_api_key()
315
  if not key:
316
  raise RuntimeError("No API key provided")
317
  if not HAS_GENAI or upload_file is None:
318
- raise RuntimeError("google.generativeai SDK upload not available; cannot upload")
319
  genai.configure(api_key=key)
320
  return upload_file(filepath)
321
 
@@ -323,7 +263,14 @@ def wait_for_processed(file_obj, timeout=180):
323
  if not HAS_GENAI or get_file is None:
324
  return file_obj
325
  start = time.time()
326
- name = file_name_or_id(file_obj)
 
 
 
 
 
 
 
327
  if not name:
328
  return file_obj
329
  backoff = 1.0
@@ -363,18 +310,12 @@ def remove_prompt_echo(prompt: str, text: str, check_len: int = 600, ratio_thres
363
  return text
364
 
365
  def generative_model_call_flexible(model_name, messages, files=None, max_output_tokens=1024):
366
- """
367
- Try different call patterns for genai.GenerativeModel depending on its constructor/signature.
368
- Do NOT pass unsupported keywords called 'files' into generate_content() if the SDK rejects them.
369
- """
370
  if not HAS_GENAI or genai is None:
371
  raise RuntimeError("genai not available")
372
-
373
  GM = getattr(genai, "GenerativeModel", None)
374
  if GM is None:
375
  raise RuntimeError("GenerativeModel not available")
376
-
377
- # Construct instance robustly
378
  try:
379
  sig = inspect.signature(GM)
380
  params = sig.parameters
@@ -384,54 +325,36 @@ def generative_model_call_flexible(model_name, messages, files=None, max_output_
384
  gm = GM(model_name=model_name)
385
  else:
386
  gm = GM()
387
- try:
388
- if hasattr(gm, "model"):
389
  setattr(gm, "model", model_name)
390
- except Exception:
391
- pass
392
  except Exception:
393
  try:
394
  gm = GM(model=model_name)
395
- except TypeError:
396
- try:
397
- gm = GM(model_name=model_name)
398
- except TypeError:
399
- gm = GM()
400
-
401
- # Now attempt supported generate methods but avoid unsupported kwargs
402
- # 1) generate_content(messages...) may accept just messages and options (no files)
403
  if hasattr(gm, "generate_content"):
404
  try:
405
- return gm.generate_content(messages, max_output_tokens=max_output_tokens)
406
- except TypeError:
407
- # generate_content signature doesn't accept our args; try positional single string fallback
408
  try:
409
- # some versions expect a string prompt
410
- prompt = messages[-1].get("content") if isinstance(messages, (list, tuple)) and messages else str(messages)
411
- return gm.generate_content(prompt)
412
- except Exception as e:
413
- raise RuntimeError(f"GenerativeModel.generate_content unusable: {e}")
414
- # 2) generate(...) variants
415
  if hasattr(gm, "generate"):
416
  try:
417
- return gm.generate(messages, max_output_tokens=max_output_tokens)
418
  except TypeError:
419
- try:
420
- return gm.generate(messages)
421
- except Exception as e:
422
- raise RuntimeError(f"GenerativeModel.generate unusable: {e}")
423
-
424
  raise RuntimeError("No usable generate method on GenerativeModel instance")
425
 
426
  def responses_http_call(api_key, model, messages, file_name=None, max_output_tokens=1024, safety_settings=None):
427
- """
428
- Fallback to the public Responses API v1 endpoint (modern). Construct a minimal request body.
429
- Note: endpoint and schema may change; this uses a simple v1-compatible payload.
430
- """
431
- # Use the modern Responses v1 endpoint format
432
- url = "https://api.generativeai.googleapis.com/v1/models/{model}:generateMessage".format(model=model)
433
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
434
- # Build minimal 'messages' style payload expected by many GenAI endpoints
435
  payload = {
436
  "messages": [{"role": m.get("role", "user"), "content": [{"type": "text", "text": m.get("content", "")}]} for m in messages],
437
  "maxOutputTokens": max_output_tokens,
@@ -439,19 +362,83 @@ def responses_http_call(api_key, model, messages, file_name=None, max_output_tok
439
  if safety_settings:
440
  payload["safetySettings"] = safety_settings
441
  if file_name:
442
- # Some endpoints accept files as references
443
  payload["files"] = [{"name": file_name}]
444
- try:
445
- r = requests.post(url, json=payload, headers=headers, timeout=60)
446
- r.raise_for_status()
447
- return r.json()
448
- except Exception as e:
449
- raise RuntimeError(f"HTTP responses fallback failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
450
 
451
- # UI layout
452
  current_url = st.session_state.get("url", "")
453
  if current_url != st.session_state.get("last_url_value"):
454
- clear_all_video_state()
 
455
  st.session_state["last_url_value"] = current_url
456
 
457
  st.sidebar.header("Video Input")
@@ -469,7 +456,6 @@ settings_exp.checkbox("Fast mode (skip compression, smaller model, fewer tokens)
469
 
470
  key_source = "session" if st.session_state.get("api_key") else ".env" if os.getenv("GOOGLE_API_KEY") else "none"
471
  settings_exp.caption(f"Using API key from: **{key_source}**")
472
-
473
  if not get_effective_api_key():
474
  settings_exp.warning("No Google API key provided; upload/generation disabled.", icon="⚠️")
475
 
@@ -486,7 +472,7 @@ with col1:
486
  with col2:
487
  pass
488
 
489
- # Load Video flow: expand short URLs and try to extract direct video links from HTML before yt-dlp
490
  if st.sidebar.button("Load Video", use_container_width=True):
491
  try:
492
  vpw = st.session_state.get("video-password", "")
@@ -495,14 +481,11 @@ if st.sidebar.button("Load Video", use_container_width=True):
495
  html_text = None
496
  extracted = None
497
  if url_val:
498
- # Special handling for t.co / twitter shortlinks
499
  if "t.co/" in url_val or ("twitter.com" in url_val or "x.com" in url_val):
500
  extracted, src_info = extract_direct_twitter_video(url_val)
501
  if extracted:
502
  final_url = extracted
503
- html_text = None
504
  else:
505
- # fallback to expand_url to get final page HTML
506
  expanded, html_or_err = expand_url(url_val)
507
  if expanded:
508
  final_url = expanded
@@ -512,42 +495,39 @@ if st.sidebar.button("Load Video", use_container_width=True):
512
  if expanded:
513
  final_url = expanded
514
  html_text = html_or_err
515
-
516
  if html_text and not extracted:
517
  extracted = extract_video_from_html(html_text, base_url=final_url)
518
  target_url_for_ytdlp = extracted or final_url
519
  path = download_video_ytdlp(target_url_for_ytdlp, str(DATA_DIR), vpw)
520
  st.session_state["videos"] = path
521
  st.session_state["last_loaded_path"] = path
522
- st.session_state.pop("uploaded_file", None)
523
- st.session_state.pop("processed_file", None)
524
- try:
525
- st.session_state["file_hash"] = file_sha256(path)
526
- except Exception:
527
- st.session_state["file_hash"] = None
528
  except Exception as e:
529
  st.sidebar.error(f"Failed to load video: {e}")
530
 
531
- # Player / sidebar controls
532
  if st.session_state["videos"]:
533
  try:
534
  st.sidebar.video(st.session_state["videos"], loop=st.session_state.get("loop_video", False))
535
  except Exception:
536
  st.sidebar.write("Couldn't preview video")
537
-
538
  with st.sidebar.expander("Options", expanded=False):
539
  loop_checkbox = st.checkbox("Enable Loop", value=st.session_state.get("loop_video", False))
540
  st.session_state["loop_video"] = loop_checkbox
541
-
542
  if st.button("Clear Video(s)"):
543
- clear_all_video_state()
544
-
 
 
 
 
545
  try:
546
  with open(st.session_state["videos"], "rb") as vf:
547
  st.download_button("Download Video", data=vf, file_name=sanitize_filename(st.session_state["videos"]), mime="video/mp4", use_container_width=True)
548
  except Exception:
549
  st.sidebar.error("Failed to prepare download")
550
-
551
  st.sidebar.write("Title:", Path(st.session_state["videos"]).name)
552
  try:
553
  file_size_mb = os.path.getsize(st.session_state["videos"]) / (1024 * 1024)
@@ -557,7 +537,7 @@ if st.session_state["videos"]:
557
  except Exception:
558
  pass
559
 
560
- # Generation flow (robust handling of google.generativeai variants)
561
  if generate_now and not st.session_state.get("busy"):
562
  if not st.session_state.get("videos"):
563
  st.error("No video loaded. Use 'Load Video' in the sidebar.")
@@ -566,43 +546,29 @@ if generate_now and not st.session_state.get("busy"):
566
  if not key_to_use:
567
  st.error("Google API key not set.")
568
  else:
 
569
  try:
570
- st.session_state["busy"] = True
571
  maybe_configure_genai(key_to_use)
572
-
573
  model_id = (st.session_state.get("model_input") or "gemini-2.5-flash-lite").strip()
574
- if st.session_state.get("last_model") != model_id:
575
- st.session_state["last_model"] = ""
576
- processed = st.session_state.get("processed_file")
577
  current_path = st.session_state.get("videos")
578
- try:
579
- current_hash = file_sha256(current_path) if current_path and os.path.exists(current_path) else None
580
- except Exception:
581
- current_hash = None
582
 
583
  reupload_needed = True
 
584
  if processed and st.session_state.get("last_loaded_path") == current_path and st.session_state.get("file_hash") == current_hash:
585
  reupload_needed = False
586
 
 
587
  if reupload_needed:
588
- if not HAS_GENAI:
589
- raise RuntimeError("google.generativeai SDK not available; install it.")
590
- local_path = current_path
591
  fast_mode = st.session_state.get("fast_mode", False)
592
- upload_path = local_path
593
  try:
594
- file_size_mb = os.path.getsize(local_path) / (1024 * 1024)
595
  except Exception:
596
  file_size_mb = 0
597
-
598
  if not fast_mode and file_size_mb > 50:
599
- compressed_path = str(Path(local_path).with_name(Path(local_path).stem + "_compressed.mp4"))
600
- try:
601
- preset = "veryfast" if fast_mode else "fast"
602
- upload_path = compress_video(local_path, compressed_path, crf=28, preset=preset)
603
- except Exception:
604
- upload_path = local_path
605
-
606
  with st.spinner("Uploading video..."):
607
  uploaded = upload_video_sdk(upload_path)
608
  processed = wait_for_processed(uploaded, timeout=180)
@@ -611,204 +577,88 @@ if generate_now and not st.session_state.get("busy"):
611
  st.session_state["last_loaded_path"] = current_path
612
  st.session_state["file_hash"] = current_hash
613
 
614
- prompt_text = (st.session_state.get("analysis_prompt", "").strip() or default_prompt).strip()
615
-
616
- out = ""
617
- model_used = model_id
618
- max_tokens = 256 if st.session_state.get("fast_mode") else 1024
619
- est_tokens = max_tokens
620
-
621
- debug_info = {"agent_attempted": False, "agent_ok": False, "agent_error": None}
622
-
623
- if not out:
 
 
 
 
 
 
 
 
624
  try:
625
- if not HAS_GENAI or genai is None:
626
- raise RuntimeError("Responses API not available; install google.generativeai SDK.")
627
- # ensure configured (best-effort)
628
- try:
629
- genai.configure(api_key=key_to_use)
630
- except Exception:
631
- pass
632
-
633
- fname = file_name_or_id(processed)
634
- if not fname:
635
- raise RuntimeError("Uploaded file missing name/id")
636
 
637
- system_msg = {"role": "system", "content": prompt_text}
638
- user_msg = {"role": "user", "content": "Please summarize the attached video."}
 
 
 
 
 
 
639
 
 
 
 
 
 
 
 
 
 
 
 
640
  response = None
641
- diagnostics = {"attempts": []}
642
-
643
- # Attempt #1: genai.responses.generate (modern public SDK)
644
- try:
645
- if hasattr(genai, "responses") and hasattr(genai.responses, "generate"):
646
- diagnostics["attempts"].append("responses.generate")
647
- response = genai.responses.generate(
648
- model=model_used,
649
- messages=[system_msg, user_msg],
650
- files=[{"name": fname}],
651
- safety_settings=safety_settings,
652
- max_output_tokens=max_tokens,
653
- )
654
- except Exception as e:
655
- diagnostics["responses.generate_error"] = str(e)
656
- response = None
657
-
658
- # Attempt #2: GenerativeModel variants (0.8.x+), using flexible caller
659
- if response is None:
660
- try:
661
- if hasattr(genai, "GenerativeModel"):
662
- diagnostics["attempts"].append("GenerativeModel")
663
- # generative_model_call_flexible avoids passing unsupported 'files' kwarg
664
- response = generative_model_call_flexible(model_used, [system_msg, user_msg], files=[{"name": fname}], max_output_tokens=max_tokens)
665
- except Exception as e:
666
- diagnostics["GenerativeModel_error"] = str(e)
667
- response = None
668
-
669
- # Attempt #3: top-level legacy helpers
670
- if response is None:
671
- try:
672
- if hasattr(genai, "generate"):
673
- diagnostics["attempts"].append("top.generate")
674
- response = genai.generate(model=model_used, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=max_tokens)
675
- elif hasattr(genai, "create"):
676
- diagnostics["attempts"].append("top.create")
677
- response = genai.create(model=model_used, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=max_tokens)
678
- except Exception as e:
679
- diagnostics["top_level_error"] = str(e)
680
- response = None
681
-
682
- # Attempt #4: fallback HTTP Responses call (modern endpoint)
683
- if response is None:
684
- try:
685
- diagnostics["attempts"].append("http_fallback")
686
- response = responses_http_call(key_to_use, model_used, [system_msg, user_msg], file_name=fname, max_output_tokens=max_tokens, safety_settings=safety_settings)
687
- except Exception as e:
688
- diagnostics["http_fallback_error"] = str(e)
689
- response = None
690
-
691
- if response is None:
692
- diag_text = f"No supported generate method found on google.generativeai in this runtime. Diagnostics: {diagnostics}"
693
- st.session_state["last_error"] = diag_text
694
- st.error("Responses API not supported in this runtime. See Last Error for details.")
695
- out = ""
696
- else:
697
- # Normalize outputs into text pieces
698
- outputs = []
699
- try:
700
- if isinstance(response, dict):
701
- for key in ("output", "candidates", "items", "responses"):
702
- val = response.get(key)
703
- if isinstance(val, (list, tuple)) and val:
704
- outputs = list(val)
705
- break
706
- if not outputs:
707
- # some Responses v1 return {'message': {...}}
708
- msg = response.get("message") or response.get("response") or response.get("output")
709
- if isinstance(msg, dict):
710
- # try to extract text from structured message
711
- c = msg.get("content")
712
- if isinstance(c, list):
713
- for part in c:
714
- if isinstance(part, dict) and part.get("type") == "output_text":
715
- outputs.append({"text": part.get("text")})
716
- elif isinstance(part, dict) and part.get("type") == "text":
717
- outputs.append({"text": part.get("text")})
718
- else:
719
- # fallback: join string values
720
- for v in response.values():
721
- if isinstance(v, str) and v.strip():
722
- outputs.append({"text": v.strip()})
723
- else:
724
- for attr in ("output", "candidates", "items", "responses"):
725
- val = getattr(response, attr, None)
726
- if isinstance(val, (list, tuple)) and val:
727
- try:
728
- outputs = list(val)
729
- except Exception:
730
- outputs = val
731
- break
732
- except Exception:
733
- outputs = []
734
-
735
- if not outputs:
736
- candidate_text = None
737
- if isinstance(response, dict):
738
- candidate_text = response.get("text") or response.get("message") or response.get("output_text")
739
- else:
740
- candidate_text = getattr(response, "text", None) or getattr(response, "message", None)
741
- if candidate_text:
742
- outputs = [{"text": candidate_text}]
743
-
744
- text_pieces = []
745
- for item in outputs:
746
- if not item:
747
- continue
748
- if isinstance(item, dict):
749
- # common dict shapes
750
- for k in ("content", "text", "message", "output_text", "output"):
751
- v = item.get(k)
752
- if v:
753
- if isinstance(v, str):
754
- text_pieces.append(v.strip())
755
- elif isinstance(v, (list, tuple)):
756
- for e in v:
757
- if isinstance(e, str):
758
- text_pieces.append(e.strip())
759
- elif isinstance(e, dict):
760
- t = e.get("text") or e.get("content")
761
- if t:
762
- text_pieces.append(str(t).strip())
763
- break
764
- else:
765
- for k in ("content", "text", "message", "output", "output_text"):
766
- v = getattr(item, k, None)
767
- if v:
768
- if isinstance(v, str):
769
- text_pieces.append(v.strip())
770
- elif isinstance(v, (list, tuple)):
771
- for e in v:
772
- if isinstance(e, str):
773
- text_pieces.append(e.strip())
774
- else:
775
- t = getattr(e, "text", None) or getattr(e, "content", None)
776
- if t:
777
- text_pieces.append(str(t).strip())
778
- break
779
-
780
- seen = set()
781
- filtered = []
782
- for t in text_pieces:
783
- if t and t not in seen:
784
- filtered.append(t)
785
- seen.add(t)
786
- out = "\n\n".join(filtered)
787
 
 
 
 
 
 
788
  except Exception as e:
789
- tb = traceback.format_exc()
790
- st.session_state["last_error"] = f"Responses API error: {e}\n\nDebug: {debug_info}\n\nTraceback:\n{tb}"
791
- st.error("An error occurred while generating the story. You can try Generate again; the uploaded video will be reused.")
792
- out = ""
793
-
794
- if out:
795
- out = remove_prompt_echo(prompt_text, out)
796
- p = prompt_text
797
- if p and out.strip().lower().startswith(p.lower()):
798
- out = out.strip()[len(p):].lstrip(" \n:-")
 
799
  placeholders = ["enter analysis", "enter your analysis", "enter analysis here", "please enter analysis"]
800
  low = out.strip().lower()
801
  for ph in placeholders:
802
  if low.startswith(ph):
803
  out = out.strip()[len(ph):].lstrip(" \n:-")
804
  break
805
- out = out.strip()
806
 
807
  st.session_state["analysis_out"] = out
808
  st.session_state["last_error"] = "" if out else st.session_state.get("last_error", "")
809
  st.subheader("Analysis Result")
810
  st.markdown(out if out else "No analysis returned.")
811
- st.caption(f"Est. max tokens: {est_tokens}")
812
 
813
  except Exception as e:
814
  tb = traceback.format_exc()
 
5
  import hashlib
6
  import traceback
7
  import inspect
 
8
  import json
9
+ import re
10
  from glob import glob
11
  from pathlib import Path
12
  from difflib import SequenceMatcher
 
21
 
22
  load_dotenv()
23
 
24
+ # Feature flags
25
  HAS_PHI = False
 
 
26
  try:
27
  import google.generativeai as genai # type: ignore
 
28
  try:
29
  from google.generativeai import upload_file, get_file # type: ignore
30
  except Exception:
 
42
  DATA_DIR.mkdir(exist_ok=True)
43
 
44
  # Session defaults
45
# Seed per-session defaults once; setdefault leaves any values the user
# has already set in this session untouched across reruns.
for k, v in {
    "videos": "",
    "loop_video": False,
    "uploaded_file": None,
    "processed_file": None,
    "busy": False,
    "last_loaded_path": "",
    "analysis_out": "",
    "last_error": "",
    "file_hash": None,
    "fast_mode": False,
    # API key may be pre-populated from the environment (.env via load_dotenv).
    "api_key": os.getenv("GOOGLE_API_KEY", ""),
    "last_model": "",
    "last_url_value": "",
}.items():
    st.session_state.setdefault(k, v)
61
 
62
  HEADERS = {"User-Agent": "Mozilla/5.0 (compatible)"}
63
 
64
+ # Utilities --------------------------------------------------------------------
65
def sanitize_filename(path_str: str):
    """Return the basename of *path_str*: lowercased, punctuation stripped, spaces -> underscores."""
    base = Path(path_str).name.lower()
    no_punct = base.translate(str.maketrans("", "", string.punctuation))
    return no_punct.replace(" ", "_")
67
 
 
90
  except Exception:
91
  return input_path
92
 
93
+ # Downloader / extractor ------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
def expand_url(short_url, timeout=10):
    """Follow redirects for *short_url* and fetch the final page.

    Returns ``(final_url, html_text)`` on success, or ``(None, "error: ...")``
    on any network or HTTP failure.
    """
    try:
        resp = requests.get(
            short_url,
            allow_redirects=True,
            timeout=timeout,
            headers=HEADERS,
        )
        resp.raise_for_status()
        return resp.url, resp.text
    except Exception as exc:
        return None, f"error: {exc}"
101
 
102
  def extract_video_from_html(html, base_url=None):
 
 
 
103
  soup = BeautifulSoup(html, "html.parser")
104
  og = soup.find("meta", property="og:video")
105
  if og and og.get("content"):
 
115
  for script in soup.find_all("script", type="application/ld+json"):
116
  try:
117
  data = json.loads(script.string or "{}")
118
+ video = None
119
  if isinstance(data, dict):
120
  video = data.get("video") or data.get("videoObject") or data.get("mainEntity")
121
  if isinstance(video, dict):
 
126
  return data.get("contentUrl")
127
  except Exception:
128
  continue
129
+ for mname in ("twitter:player:stream", "twitter:player"):
130
+ meta = soup.find("meta", attrs={"name": mname})
131
+ if meta and meta.get("content"):
132
+ return meta.get("content")
133
  for a in soup.find_all("a", href=True):
134
  href = a["href"]
135
+ if any(d in href for d in ("youtube.com", "youtu.be", "vimeo.com")):
136
  return href
137
  return None
138
 
139
  def extract_video_from_twitter_html(html):
 
 
 
 
140
  soup = BeautifulSoup(html, "html.parser")
 
 
141
  og_video = soup.find("meta", property="og:video")
142
  if og_video and og_video.get("content"):
143
  return og_video["content"]
 
 
144
  scripts = soup.find_all("script")
145
  for s in scripts:
146
  txt = s.string
147
  if not txt:
148
  continue
149
+ if any(k in txt for k in ("video_info", "variants", "playbackUrl", "media")):
 
 
150
  m = re.search(r"(?s)(\{.+\})", txt)
151
  if not m:
152
  continue
153
  try:
154
  blob = json.loads(m.group(1))
155
  except Exception:
 
156
  continue
 
 
157
  def find_media_urls(obj):
158
  if isinstance(obj, dict):
159
  for k, v in obj.items():
160
+ if isinstance(v, str) and v.startswith("https://") and v.endswith(".mp4"):
161
+ yield v
 
162
  else:
163
  yield from find_media_urls(v)
164
  elif isinstance(obj, list):
165
  for it in obj:
166
  yield from find_media_urls(it)
 
167
  for url in find_media_urls(blob):
168
  return url
 
 
169
  def find_variants(obj):
170
  if isinstance(obj, dict):
171
  for k, v in obj.items():
 
181
  elif isinstance(obj, list):
182
  for it in obj:
183
  yield from find_variants(it)
 
184
  for url in find_variants(blob):
185
  return url
 
186
  return None
187
 
188
  def extract_direct_twitter_video(url):
 
 
 
 
189
  final, html_or_err = expand_url(url)
190
  if final is None:
191
  return None, html_or_err
 
 
192
  variants = [
193
  final,
194
  final.replace("://twitter.com/", "://mobile.twitter.com/"),
 
205
  return direct, u
206
  except Exception:
207
  continue
 
 
208
  try:
209
  oembed = requests.get("https://publish.twitter.com/oembed?url=" + final, headers=HEADERS, timeout=6)
210
  if oembed.ok:
 
216
  return video["src"], final
217
  except Exception:
218
  pass
 
219
  return None, "not found"
220
 
221
def download_video_ytdlp(url: str, save_dir: str, video_password: str = None) -> str:
    """Download *url* with yt-dlp into *save_dir* and return a path to an mp4.

    The downloaded file is located by the extractor id when yt-dlp reports
    one, otherwise by most-recent mtime in *save_dir*, and is then passed
    through convert_video_to_mp4. Raises ValueError for an empty URL and
    FileNotFoundError when no downloaded file can be found.
    """
    if not url:
        raise ValueError("No URL provided")
    options = {
        "outtmpl": str(Path(save_dir) / "%(id)s.%(ext)s"),
        "format": "best",
    }
    if video_password:
        options["videopassword"] = video_password
    with yt_dlp.YoutubeDL(options) as ydl:
        info = ydl.extract_info(url, download=True)
    video_id = info.get("id") if isinstance(info, dict) else None
    if video_id:
        matches = glob(os.path.join(save_dir, f"{video_id}.*"))
    else:
        # No id reported: fall back to the newest file in the directory.
        candidates = glob(os.path.join(save_dir, "*"))
        matches = sorted(candidates, key=os.path.getmtime, reverse=True)[:1] if candidates else []
    if not matches:
        raise FileNotFoundError("Downloaded video not found")
    return convert_video_to_mp4(matches[0])
239
+
240
+ # Generative AI helpers -------------------------------------------------------
241
def get_effective_api_key():
    """Return the Google API key, preferring the session value over the environment."""
    session_key = st.session_state.get("api_key")
    if session_key:
        return session_key
    return os.getenv("GOOGLE_API_KEY")
243
+
244
def maybe_configure_genai(key):
    """Configure the google.generativeai SDK with *key*; report success as bool."""
    if not (key and HAS_GENAI):
        return False
    try:
        genai.configure(api_key=key)
    except Exception:
        # Bad key or SDK quirk: treat as "not configured" rather than crash.
        return False
    return True
252
+
253
def upload_video_sdk(filepath: str):
    """Upload *filepath* through the google.generativeai file API.

    Raises RuntimeError when no API key is available or the SDK's
    upload helper is missing in this runtime.
    """
    api_key = get_effective_api_key()
    if not api_key:
        raise RuntimeError("No API key provided")
    sdk_ready = HAS_GENAI and upload_file is not None
    if not sdk_ready:
        raise RuntimeError("google.generativeai SDK upload not available")
    genai.configure(api_key=api_key)
    return upload_file(filepath)
261
 
 
263
  if not HAS_GENAI or get_file is None:
264
  return file_obj
265
  start = time.time()
266
+ name = None
267
+ if isinstance(file_obj, dict):
268
+ name = file_obj.get("name") or file_obj.get("id")
269
+ else:
270
+ for attr in ("name", "id", "fileId", "file_id"):
271
+ if hasattr(file_obj, attr):
272
+ name = getattr(file_obj, attr)
273
+ break
274
  if not name:
275
  return file_obj
276
  backoff = 1.0
 
310
  return text
311
 
312
  def generative_model_call_flexible(model_name, messages, files=None, max_output_tokens=1024):
 
 
 
 
313
  if not HAS_GENAI or genai is None:
314
  raise RuntimeError("genai not available")
 
315
  GM = getattr(genai, "GenerativeModel", None)
316
  if GM is None:
317
  raise RuntimeError("GenerativeModel not available")
318
+ # robust constructor
 
319
  try:
320
  sig = inspect.signature(GM)
321
  params = sig.parameters
 
325
  gm = GM(model_name=model_name)
326
  else:
327
  gm = GM()
328
+ if hasattr(gm, "model"):
329
+ try:
330
  setattr(gm, "model", model_name)
331
+ except Exception:
332
+ pass
333
  except Exception:
334
  try:
335
  gm = GM(model=model_name)
336
+ except Exception:
337
+ gm = GM()
338
+ # try generate methods but avoid unsupported kwargs
 
 
 
 
 
339
  if hasattr(gm, "generate_content"):
340
  try:
341
+ # many versions accept 'messages' and 'files'
 
 
342
  try:
343
+ return gm.generate_content(messages=messages, files=files, max_output_tokens=max_output_tokens)
344
+ except TypeError:
345
+ return gm.generate_content(messages, max_output_tokens)
346
+ except Exception as e:
347
+ raise RuntimeError(f"generate_content failed: {e}")
 
348
  if hasattr(gm, "generate"):
349
  try:
350
+ return gm.generate(messages=messages, files=files, max_output_tokens=max_output_tokens)
351
  except TypeError:
352
+ return gm.generate(messages, max_output_tokens=max_output_tokens)
 
 
 
 
353
  raise RuntimeError("No usable generate method on GenerativeModel instance")
354
 
355
  def responses_http_call(api_key, model, messages, file_name=None, max_output_tokens=1024, safety_settings=None):
356
+ url = f"https://api.generativeai.googleapis.com/v1/models/{model}:generateMessage"
 
 
 
 
 
357
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
 
358
  payload = {
359
  "messages": [{"role": m.get("role", "user"), "content": [{"type": "text", "text": m.get("content", "")}]} for m in messages],
360
  "maxOutputTokens": max_output_tokens,
 
362
  if safety_settings:
363
  payload["safetySettings"] = safety_settings
364
  if file_name:
 
365
  payload["files"] = [{"name": file_name}]
366
+ r = requests.post(url, json=payload, headers=headers, timeout=60)
367
+ r.raise_for_status()
368
+ return r.json()
369
+
370
def normalize_response_to_text(response) -> str:
    """Extract text from SDK or HTTP responses into a single string.

    Handles three shapes:
      * dict payloads (HTTP fallback or ``genai.responses``) with candidate
        lists, a message/content path, or loose top-level strings;
      * object-like SDK responses exposing candidate lists or ``text``;
      * falsy input (returns "").
    """
    if not response:
        return ""
    # dict-like responses (HTTP fallback or genai.responses)
    if isinstance(response, dict):
        # modern Responses v1 may include 'output' or 'message';
        # search keys for lists of candidates/items/responses
        for list_key in ("output", "candidates", "items", "responses"):
            val = response.get(list_key)
            if isinstance(val, (list, tuple)) and val:
                pieces = []
                for el in val:
                    if isinstance(el, dict):
                        # content field may be a list of {type, text} parts
                        c = el.get("content") or el.get("message") or el.get("text")
                        if isinstance(c, list):
                            for part in c:
                                if isinstance(part, dict):
                                    t = part.get("text") or part.get("content")
                                    if t:
                                        pieces.append(t)
                                elif isinstance(part, str):
                                    pieces.append(part)
                        elif isinstance(c, str):
                            pieces.append(c)
                    elif isinstance(el, str):
                        pieces.append(el)
                if pieces:
                    return "\n\n".join(pieces)
        # message/content path
        msg = response.get("message") or response.get("response") or response.get("output")
        if isinstance(msg, dict):
            c = msg.get("content")
            if isinstance(c, str):
                # bug fix: plain-string content was previously dropped
                # (only list-shaped content was handled), yielding "".
                return c
            if isinstance(c, list):
                texts = []
                for part in c:
                    if isinstance(part, dict) and "text" in part:
                        texts.append(part.get("text"))
                    elif isinstance(part, str):
                        texts.append(part)
                return "\n\n".join(t for t in texts if t)
        # fallback: join non-empty top-level string values
        flat = [v.strip() for v in response.values() if isinstance(v, str) and v.strip()]
        return "\n\n".join(flat)
    # object-like SDK responses
    for attr in ("output", "candidates", "items", "responses", "message"):
        val = getattr(response, attr, None)
        if isinstance(val, (list, tuple)) and val:
            pieces = []
            for el in val:
                if hasattr(el, "text"):
                    pieces.append(getattr(el, "text"))
                elif isinstance(el, dict):
                    t = el.get("text") or el.get("content")
                    if t:
                        pieces.append(t)
                else:
                    pieces.append(str(el))
            return "\n\n".join(p for p in pieces if p)
    # final fallback: a bare text/message attribute
    text = getattr(response, "text", None) or getattr(response, "message", None)
    return text or ""
436
 
437
+ # UI --------------------------------------------------------------------------
438
  current_url = st.session_state.get("url", "")
439
  if current_url != st.session_state.get("last_url_value"):
440
+ # clear when user changes URL
441
+ st.session_state.update({"videos": "", "uploaded_file": None, "processed_file": None, "last_loaded_path": "", "analysis_out": "", "last_error": "", "file_hash": None})
442
  st.session_state["last_url_value"] = current_url
443
 
444
  st.sidebar.header("Video Input")
 
456
 
457
  key_source = "session" if st.session_state.get("api_key") else ".env" if os.getenv("GOOGLE_API_KEY") else "none"
458
  settings_exp.caption(f"Using API key from: **{key_source}**")
 
459
  if not get_effective_api_key():
460
  settings_exp.warning("No Google API key provided; upload/generation disabled.", icon="⚠️")
461
 
 
472
  with col2:
473
  pass
474
 
475
+ # Load Video button
476
  if st.sidebar.button("Load Video", use_container_width=True):
477
  try:
478
  vpw = st.session_state.get("video-password", "")
 
481
  html_text = None
482
  extracted = None
483
  if url_val:
 
484
  if "t.co/" in url_val or ("twitter.com" in url_val or "x.com" in url_val):
485
  extracted, src_info = extract_direct_twitter_video(url_val)
486
  if extracted:
487
  final_url = extracted
 
488
  else:
 
489
  expanded, html_or_err = expand_url(url_val)
490
  if expanded:
491
  final_url = expanded
 
495
  if expanded:
496
  final_url = expanded
497
  html_text = html_or_err
 
498
  if html_text and not extracted:
499
  extracted = extract_video_from_html(html_text, base_url=final_url)
500
  target_url_for_ytdlp = extracted or final_url
501
  path = download_video_ytdlp(target_url_for_ytdlp, str(DATA_DIR), vpw)
502
  st.session_state["videos"] = path
503
  st.session_state["last_loaded_path"] = path
504
+ st.session_state["file_hash"] = file_sha256(path) if os.path.exists(path) else None
505
+ st.session_state["uploaded_file"] = None
506
+ st.session_state["processed_file"] = None
 
 
 
507
  except Exception as e:
508
  st.sidebar.error(f"Failed to load video: {e}")
509
 
510
+ # Sidebar preview + controls
511
  if st.session_state["videos"]:
512
  try:
513
  st.sidebar.video(st.session_state["videos"], loop=st.session_state.get("loop_video", False))
514
  except Exception:
515
  st.sidebar.write("Couldn't preview video")
 
516
  with st.sidebar.expander("Options", expanded=False):
517
  loop_checkbox = st.checkbox("Enable Loop", value=st.session_state.get("loop_video", False))
518
  st.session_state["loop_video"] = loop_checkbox
 
519
  if st.button("Clear Video(s)"):
520
+ for f in glob(str(DATA_DIR / "*")):
521
+ try:
522
+ os.remove(f)
523
+ except Exception:
524
+ pass
525
+ st.session_state.update({"videos": "", "uploaded_file": None, "processed_file": None, "last_loaded_path": "", "analysis_out": "", "last_error": "", "file_hash": None})
526
  try:
527
  with open(st.session_state["videos"], "rb") as vf:
528
  st.download_button("Download Video", data=vf, file_name=sanitize_filename(st.session_state["videos"]), mime="video/mp4", use_container_width=True)
529
  except Exception:
530
  st.sidebar.error("Failed to prepare download")
 
531
  st.sidebar.write("Title:", Path(st.session_state["videos"]).name)
532
  try:
533
  file_size_mb = os.path.getsize(st.session_state["videos"]) / (1024 * 1024)
 
537
  except Exception:
538
  pass
539
 
540
+ # Generation flow --------------------------------------------------------------
541
  if generate_now and not st.session_state.get("busy"):
542
  if not st.session_state.get("videos"):
543
  st.error("No video loaded. Use 'Load Video' in the sidebar.")
 
546
  if not key_to_use:
547
  st.error("Google API key not set.")
548
  else:
549
+ st.session_state["busy"] = True
550
  try:
 
551
  maybe_configure_genai(key_to_use)
 
552
  model_id = (st.session_state.get("model_input") or "gemini-2.5-flash-lite").strip()
 
 
 
553
  current_path = st.session_state.get("videos")
554
+ current_hash = file_sha256(current_path) if current_path and os.path.exists(current_path) else None
 
 
 
555
 
556
  reupload_needed = True
557
+ processed = st.session_state.get("processed_file")
558
  if processed and st.session_state.get("last_loaded_path") == current_path and st.session_state.get("file_hash") == current_hash:
559
  reupload_needed = False
560
 
561
+ upload_path = current_path
562
  if reupload_needed:
563
+ # compress if large and not fast
 
 
564
  fast_mode = st.session_state.get("fast_mode", False)
 
565
  try:
566
+ file_size_mb = os.path.getsize(current_path) / (1024 * 1024)
567
  except Exception:
568
  file_size_mb = 0
 
569
  if not fast_mode and file_size_mb > 50:
570
+ compressed_path = str(Path(current_path).with_name(Path(current_path).stem + "_compressed.mp4"))
571
+ upload_path = compress_video(current_path, compressed_path, crf=28, preset="fast")
 
 
 
 
 
572
  with st.spinner("Uploading video..."):
573
  uploaded = upload_video_sdk(upload_path)
574
  processed = wait_for_processed(uploaded, timeout=180)
 
577
  st.session_state["last_loaded_path"] = current_path
578
  st.session_state["file_hash"] = current_hash
579
 
580
+ prompt_text = (st.session_state.get("analysis_prompt", "") or default_prompt).strip()
581
+ system_msg = {"role": "system", "content": prompt_text}
582
+ user_msg = {"role": "user", "content": "Please summarize the attached video."}
583
+ fname = None
584
+ if processed:
585
+ if isinstance(processed, dict):
586
+ fname = processed.get("name") or processed.get("id")
587
+ else:
588
+ for attr in ("name", "id", "fileId", "file_id"):
589
+ if hasattr(processed, attr):
590
+ fname = getattr(processed, attr)
591
+ break
592
+ # prefer SDK methods that support 'files' / file references
593
+ response = None
594
+ diagnostics = {"attempts": []}
595
+
596
+ # 1) genai.responses.generate (if available) - supports files param
597
+ if response is None and HAS_GENAI and genai is not None and hasattr(genai, "responses") and hasattr(genai.responses, "generate"):
598
  try:
599
+ diagnostics["attempts"].append("responses.generate")
600
+ response = genai.responses.generate(
601
+ model=model_id,
602
+ messages=[system_msg, user_msg],
603
+ files=[{"name": fname}] if fname else None,
604
+ safety_settings=safety_settings,
605
+ max_output_tokens=(256 if st.session_state.get("fast_mode") else 1024),
606
+ )
607
+ except Exception as e:
608
+ diagnostics["responses.generate_error"] = str(e)
609
+ response = None
610
 
611
+ # 2) GenerativeModel flexible call
612
+ if response is None and HAS_GENAI and genai is not None and hasattr(genai, "GenerativeModel"):
613
+ try:
614
+ diagnostics["attempts"].append("GenerativeModel")
615
+ response = generative_model_call_flexible(model_id, [system_msg, user_msg], files=[{"name": fname}] if fname else None, max_output_tokens=(256 if st.session_state.get("fast_mode") else 1024))
616
+ except Exception as e:
617
+ diagnostics["GenerativeModel_error"] = str(e)
618
+ response = None
619
 
620
+ # 3) top-level legacy helpers
621
+ if response is None and HAS_GENAI and genai is not None:
622
+ try:
623
+ if hasattr(genai, "generate"):
624
+ diagnostics["attempts"].append("top.generate")
625
+ response = genai.generate(model=model_id, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=(256 if st.session_state.get("fast_mode") else 1024))
626
+ elif hasattr(genai, "create"):
627
+ diagnostics["attempts"].append("top.create")
628
+ response = genai.create(model=model_id, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=(256 if st.session_state.get("fast_mode") else 1024))
629
+ except Exception as e:
630
+ diagnostics["top_level_error"] = str(e)
631
  response = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
632
 
633
+ # 4) HTTP fallback to Responses endpoint (supports file references)
634
+ if response is None:
635
+ try:
636
+ diagnostics["attempts"].append("http_fallback")
637
+ response = responses_http_call(key_to_use, model_id, [system_msg, user_msg], file_name=fname, max_output_tokens=(256 if st.session_state.get("fast_mode") else 1024), safety_settings=safety_settings)
638
  except Exception as e:
639
+ diagnostics["http_fallback_error"] = str(e)
640
+ response = None
641
+
642
+ if response is None:
643
+ st.session_state["last_error"] = f"No supported generation method found. Diagnostics: {diagnostics}"
644
+ st.error("Unable to call a supported Responses method in this runtime. See Last Error.")
645
+ out = ""
646
+ else:
647
+ out = normalize_response_to_text(response)
648
+ out = remove_prompt_echo(prompt_text, out).strip()
649
+ # additional cleanup of obvious echoes/placeholders
650
  placeholders = ["enter analysis", "enter your analysis", "enter analysis here", "please enter analysis"]
651
  low = out.strip().lower()
652
  for ph in placeholders:
653
  if low.startswith(ph):
654
  out = out.strip()[len(ph):].lstrip(" \n:-")
655
  break
 
656
 
657
  st.session_state["analysis_out"] = out
658
  st.session_state["last_error"] = "" if out else st.session_state.get("last_error", "")
659
  st.subheader("Analysis Result")
660
  st.markdown(out if out else "No analysis returned.")
661
+ st.caption(f"Est. max tokens: {256 if st.session_state.get('fast_mode') else 1024}")
662
 
663
  except Exception as e:
664
  tb = traceback.format_exc()