CB committed on
Commit
b722be7
·
verified ·
1 Parent(s): 4ed7ca7

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +97 -72
streamlit_app.py CHANGED
@@ -3,6 +3,7 @@ import os
3
  import time
4
  import string
5
  import hashlib
 
6
  from glob import glob
7
  from pathlib import Path
8
  from difflib import SequenceMatcher
@@ -353,6 +354,7 @@ if generate_now and not st.session_state.get("busy"):
353
  est_cost_caption = f"Est. max tokens: {est_tokens}"
354
 
355
  agent = maybe_create_agent(model_used)
 
356
  if agent:
357
  with st.spinner("Generating description via Agent..."):
358
  if not processed:
@@ -369,7 +371,6 @@ if generate_now and not st.session_state.get("busy"):
369
  system_msg = {"role": "system", "content": prompt_text}
370
  user_msg = {"role": "user", "content": "Please summarize the attached video."}
371
 
372
- # Try the modern and legacy signatures; fail clearly if both fail
373
  try:
374
  response = genai.responses.generate(
375
  model=model_used,
@@ -386,92 +387,100 @@ if generate_now and not st.session_state.get("busy"):
386
  max_output_tokens=max_tokens,
387
  )
388
 
389
- # Normalize response into iterable items safely
 
 
 
390
  outputs = []
391
- if response is None:
392
- outputs = []
393
- else:
394
- # response might be object or dict; try known attributes/keys
395
- if isinstance(response, dict):
396
- # common dict keys
397
- if isinstance(response.get("output"), list):
398
- outputs = response.get("output") or []
399
- elif isinstance(response.get("candidates"), list):
400
- outputs = response.get("candidates") or []
401
- elif isinstance(response.get("items"), list):
402
- outputs = response.get("items") or []
403
- elif isinstance(response.get("responses"), list):
404
- outputs = response.get("responses") or []
405
- else:
406
- # fallback: try to find list-valued entries
407
  for v in response.values():
408
  if isinstance(v, list):
409
  outputs = v
410
  break
411
  else:
412
- # try attribute access
413
- attr_candidates = []
414
  for attr in ("output", "candidates", "items", "responses"):
415
  val = getattr(response, attr, None)
416
- if isinstance(val, list):
417
- attr_candidates = val
418
  break
419
- outputs = attr_candidates or []
 
 
 
420
 
421
- # Ensure we have a list
422
- if not isinstance(outputs, list):
423
- outputs = list(outputs) if outputs else []
424
 
 
425
  text_pieces = []
426
- # Iterate safely through outputs (may be dicts or objects)
427
- for item in outputs:
428
- if item is None:
429
- continue
430
- # attempt to extract a 'content' bag
431
- contents = None
432
- if isinstance(item, dict):
433
- contents = item.get("content") or item.get("text") or item.get("message") or item.get("output")
434
- else:
435
- contents = getattr(item, "content", None) or getattr(item, "text", None) or getattr(item, "message", None) or getattr(item, "output", None)
436
-
437
- # If contents is a single string, take it
438
- if isinstance(contents, str):
439
- if contents.strip():
440
- text_pieces.append(contents.strip())
441
- continue
442
-
443
- # If contents is list-like, iterate
444
- if isinstance(contents, (list, tuple)):
445
- for c in contents:
446
- if c is None:
447
- continue
448
- if isinstance(c, str):
449
- if c.strip():
450
- text_pieces.append(c.strip())
451
- continue
452
- c_text = None
453
- if isinstance(c, dict):
454
- c_text = c.get("text") or c.get("content") or None
455
- else:
456
- c_text = getattr(c, "text", None) or getattr(c, "content", None)
457
- if c_text:
458
- text_pieces.append(str(c_text).strip())
459
- continue
460
 
461
- # If the item itself contains direct text fields
462
- direct_txt = None
463
- if isinstance(item, dict):
464
- direct_txt = item.get("text") or item.get("output_text") or item.get("message")
465
- else:
466
- direct_txt = getattr(item, "text", None) or getattr(item, "output_text", None) or getattr(item, "message", None)
467
- if direct_txt:
468
- text_pieces.append(str(direct_txt).strip())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
469
 
470
- # final fallback: top-level text on response
471
  if not text_pieces:
472
  top_text = None
473
  if isinstance(response, dict):
474
- top_text = response.get("text") or response.get("message") or None
475
  else:
476
  top_text = getattr(response, "text", None) or getattr(response, "message", None)
477
  if top_text:
@@ -507,18 +516,34 @@ if generate_now and not st.session_state.get("busy"):
507
  st.subheader("Analysis Result")
508
  st.markdown(out if out else "No analysis returned.")
509
  st.caption(est_cost_caption)
 
510
  except Exception as e:
511
- st.session_state["last_error"] = str(e)
 
 
 
 
 
512
  st.error("An error occurred while generating the story. You can try Generate again; the uploaded video will be reused.")
513
  finally:
514
  st.session_state["busy"] = False
515
 
 
516
  if st.session_state.get("analysis_out"):
517
  just_loaded_same = (st.session_state.get("last_loaded_path") == st.session_state.get("videos"))
518
  if not just_loaded_same:
519
  st.subheader("Analysis Result")
520
  st.markdown(st.session_state.get("analysis_out"))
521
 
 
522
  if st.session_state.get("last_error"):
523
- with st.expander("Last Error", expanded=False):
524
  st.write(st.session_state.get("last_error"))
 
 
 
 
 
 
 
 
 
3
  import time
4
  import string
5
  import hashlib
6
+ import traceback
7
  from glob import glob
8
  from pathlib import Path
9
  from difflib import SequenceMatcher
 
354
  est_cost_caption = f"Est. max tokens: {est_tokens}"
355
 
356
  agent = maybe_create_agent(model_used)
357
+ debug_info = {"response_shape": None, "outputs_len": None, "outputs_types": None}
358
  if agent:
359
  with st.spinner("Generating description via Agent..."):
360
  if not processed:
 
371
  system_msg = {"role": "system", "content": prompt_text}
372
  user_msg = {"role": "user", "content": "Please summarize the attached video."}
373
 
 
374
  try:
375
  response = genai.responses.generate(
376
  model=model_used,
 
387
  max_output_tokens=max_tokens,
388
  )
389
 
390
+ # record raw shape for debugging
391
+ debug_info["response_shape"] = type(response).__name__ if response is not None else "None"
392
+
393
+ # SAFE normalization into list
394
  outputs = []
395
+ try:
396
+ if response is None:
397
+ outputs = []
398
+ elif isinstance(response, dict):
399
+ # typical dict shapes
400
+ for key in ("output", "candidates", "items", "responses"):
401
+ val = response.get(key)
402
+ if isinstance(val, list) and val:
403
+ outputs = val
404
+ break
405
+ if not outputs:
406
+ # pick first list-valued entry if any
 
 
 
 
407
  for v in response.values():
408
  if isinstance(v, list):
409
  outputs = v
410
  break
411
  else:
 
 
412
  for attr in ("output", "candidates", "items", "responses"):
413
  val = getattr(response, attr, None)
414
+ if isinstance(val, list) and val:
415
+ outputs = val
416
  break
417
+ except Exception as e:
418
+ # unexpected structure -> capture for debug and continue with empty outputs
419
+ st.session_state["last_error"] = f"Response parsing error: {e}\n{traceback.format_exc()}"
420
+ outputs = []
421
 
422
+ debug_info["outputs_len"] = len(outputs)
423
+ debug_info["outputs_types"] = [type(o).__name__ for o in outputs]
 
424
 
425
+ # iterate without indexing
426
  text_pieces = []
427
+ try:
428
+ for item in outputs:
429
+ if item is None:
430
+ continue
431
+ # get potential content container(s)
432
+ cand_contents = None
433
+ if isinstance(item, dict):
434
+ # common keys that may hold text
435
+ for k in ("content", "text", "message", "output_text", "output"):
436
+ if k in item and item[k]:
437
+ cand_contents = item[k]
438
+ break
439
+ else:
440
+ for k in ("content", "text", "message", "output", "output_text"):
441
+ cand_contents = getattr(item, k, None)
442
+ if cand_contents:
443
+ break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
444
 
445
+ # handle string content
446
+ if isinstance(cand_contents, str):
447
+ if cand_contents.strip():
448
+ text_pieces.append(cand_contents.strip())
449
+ continue
450
+
451
+ # handle list-like content
452
+ if isinstance(cand_contents, (list, tuple)):
453
+ for c in cand_contents:
454
+ if c is None:
455
+ continue
456
+ if isinstance(c, str):
457
+ if c.strip():
458
+ text_pieces.append(c.strip())
459
+ continue
460
+ if isinstance(c, dict):
461
+ t = c.get("text") or c.get("content")
462
+ else:
463
+ t = getattr(c, "text", None) or getattr(c, "content", None)
464
+ if t:
465
+ text_pieces.append(str(t).strip())
466
+ continue
467
+
468
+ # fallback to direct text on item
469
+ direct = None
470
+ if isinstance(item, dict):
471
+ direct = item.get("text") or item.get("output_text") or item.get("message")
472
+ else:
473
+ direct = getattr(item, "text", None) or getattr(item, "output_text", None) or getattr(item, "message", None)
474
+ if direct:
475
+ text_pieces.append(str(direct).strip())
476
+ except Exception as e:
477
+ st.session_state["last_error"] = f"Error while extracting text pieces: {e}\n{traceback.format_exc()}"
478
 
479
+ # last resort: top-level text
480
  if not text_pieces:
481
  top_text = None
482
  if isinstance(response, dict):
483
+ top_text = response.get("text") or response.get("message")
484
  else:
485
  top_text = getattr(response, "text", None) or getattr(response, "message", None)
486
  if top_text:
 
516
  st.subheader("Analysis Result")
517
  st.markdown(out if out else "No analysis returned.")
518
  st.caption(est_cost_caption)
519
+
520
  except Exception as e:
521
+ # Build improved error info to display in the UI
522
+ tb = traceback.format_exc()
523
+ # If we have debug_info, include it
524
+ dbg = locals().get("debug_info") or {}
525
+ # Save a concise message + trace to last_error so UI shows it
526
+ st.session_state["last_error"] = f"{str(e)}\n\nDebug: {dbg}\n\nTraceback:\n{tb}"
527
  st.error("An error occurred while generating the story. You can try Generate again; the uploaded video will be reused.")
528
  finally:
529
  st.session_state["busy"] = False
530
 
531
+ # show analysis if present
532
  if st.session_state.get("analysis_out"):
533
  just_loaded_same = (st.session_state.get("last_loaded_path") == st.session_state.get("videos"))
534
  if not just_loaded_same:
535
  st.subheader("Analysis Result")
536
  st.markdown(st.session_state.get("analysis_out"))
537
 
538
+ # show last error and debug helper
539
  if st.session_state.get("last_error"):
540
+ with st.expander("Last Error (click to expand)", expanded=True):
541
  st.write(st.session_state.get("last_error"))
542
+ # If we extracted debug_info earlier, show short diagnostics
543
+ try:
544
+ di = locals().get("debug_info") or {}
545
+ if di:
546
+ st.write("Debug info (if available):")
547
+ st.write(di)
548
+ except Exception:
549
+ pass