CB committed on
Commit
29eb03e
·
verified ·
1 Parent(s): 1abf538

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +53 -30
streamlit_app.py CHANGED
@@ -406,7 +406,7 @@ if generate_now and not st.session_state.get("busy"):
406
  # Do not re-raise; we'll fall back to genai.responses.generate below
407
 
408
  if not out:
409
- # Fallback to direct Responses API flow
410
  try:
411
  if not HAS_GENAI or genai is None:
412
  raise RuntimeError("Responses API not available; install google.generativeai SDK.")
@@ -414,26 +414,51 @@ if generate_now and not st.session_state.get("busy"):
414
  fname = file_name_or_id(processed)
415
  if not fname:
416
  raise RuntimeError("Uploaded file missing name/id")
 
417
  system_msg = {"role": "system", "content": prompt_text}
418
  user_msg = {"role": "user", "content": "Please summarize the attached video."}
419
-
 
 
420
  try:
421
- response = genai.responses.generate(
422
- model=model_used,
423
- messages=[system_msg, user_msg],
424
- files=[{"name": fname}],
425
- safety_settings=safety_settings,
426
- max_output_tokens=max_tokens,
427
- )
428
- except TypeError:
429
- response = genai.responses.generate(
430
- model=model_used,
431
- input=[{"text": prompt_text, "files": [{"name": fname}]}],
432
- safety_settings=safety_settings,
433
- max_output_tokens=max_tokens,
434
- )
435
-
436
- # Defensive normalization of response -> outputs list
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
437
  outputs = []
438
  if response is None:
439
  outputs = []
@@ -449,22 +474,20 @@ if generate_now and not st.session_state.get("busy"):
449
  outputs = v
450
  break
451
  else:
452
- for attr in ("output", "candidates", "items", "responses"):
453
  val = getattr(response, attr, None)
454
  if isinstance(val, list) and val:
455
  outputs = val
456
  break
457
-
458
- # ensure list
459
  if not isinstance(outputs, list):
460
  outputs = list(outputs) if outputs else []
461
-
462
- # extract text pieces safely
463
  text_pieces = []
464
  for item in outputs:
465
  if item is None:
466
  continue
467
- # item may be dict or object; attempt to find text-rich fields
468
  cand_contents = None
469
  if isinstance(item, dict):
470
  for k in ("content", "text", "message", "output_text", "output"):
@@ -476,12 +499,12 @@ if generate_now and not st.session_state.get("busy"):
476
  cand_contents = getattr(item, k, None)
477
  if cand_contents:
478
  break
479
-
480
  if isinstance(cand_contents, str):
481
  if cand_contents.strip():
482
  text_pieces.append(cand_contents.strip())
483
  continue
484
-
485
  if isinstance(cand_contents, (list, tuple)):
486
  for c in cand_contents:
487
  if c is None:
@@ -497,7 +520,7 @@ if generate_now and not st.session_state.get("busy"):
497
  if t:
498
  text_pieces.append(str(t).strip())
499
  continue
500
-
501
  direct = None
502
  if isinstance(item, dict):
503
  direct = item.get("text") or item.get("output_text") or item.get("message")
@@ -505,7 +528,7 @@ if generate_now and not st.session_state.get("busy"):
505
  direct = getattr(item, "text", None) or getattr(item, "output_text", None) or getattr(item, "message", None)
506
  if direct:
507
  text_pieces.append(str(direct).strip())
508
-
509
  if not text_pieces:
510
  top_text = None
511
  if isinstance(response, dict):
@@ -514,7 +537,7 @@ if generate_now and not st.session_state.get("busy"):
514
  top_text = getattr(response, "text", None) or getattr(response, "message", None)
515
  if top_text:
516
  text_pieces.append(str(top_text).strip())
517
-
518
  # dedupe preserving order
519
  seen = set()
520
  filtered = []
@@ -525,8 +548,8 @@ if generate_now and not st.session_state.get("busy"):
525
  filtered.append(t)
526
  seen.add(t)
527
  out = "\n\n".join(filtered)
 
528
  except Exception as e:
529
- # Capture clear error to UI and include debug_info
530
  tb = traceback.format_exc()
531
  st.session_state["last_error"] = f"Responses API error: {e}\n\nDebug: {debug_info}\n\nTraceback:\n{tb}"
532
  st.error("An error occurred while generating the story. You can try Generate again; the uploaded video will be reused.")
 
406
  # Do not re-raise; we'll fall back to genai.responses.generate below
407
 
408
  if not out:
409
+ # Fallback to direct Responses API flow (robust multi-version support)
410
  try:
411
  if not HAS_GENAI or genai is None:
412
  raise RuntimeError("Responses API not available; install google.generativeai SDK.")
 
414
  fname = file_name_or_id(processed)
415
  if not fname:
416
  raise RuntimeError("Uploaded file missing name/id")
417
+
418
  system_msg = {"role": "system", "content": prompt_text}
419
  user_msg = {"role": "user", "content": "Please summarize the attached video."}
420
+
421
+ response = None
422
+ # Try 1: new-style responses API (genai.responses.generate)
423
  try:
424
+ if hasattr(genai, "responses") and hasattr(genai.responses, "generate"):
425
+ response = genai.responses.generate(
426
+ model=model_used,
427
+ messages=[system_msg, user_msg],
428
+ files=[{"name": fname}],
429
+ safety_settings=safety_settings,
430
+ max_output_tokens=max_tokens,
431
+ )
432
+ except Exception:
433
+ response = None
434
+
435
+ # Try 2: model-based interface (GenerativeModel / model.generate_content)
436
+ if response is None:
437
+ try:
438
+ # prefer GenerativeModel if present
439
+ if hasattr(genai, "GenerativeModel"):
440
+ model_obj = genai.GenerativeModel(model_name=model_used)
441
+ if hasattr(model_obj, "generate_content"):
442
+ response = model_obj.generate_content([system_msg, user_msg], files=[{"name": fname}], max_output_tokens=max_tokens)
443
+ elif hasattr(model_obj, "generate"):
444
+ response = model_obj.generate([system_msg, user_msg], files=[{"name": fname}], max_output_tokens=max_tokens)
445
+ except Exception:
446
+ response = None
447
+
448
+ # Try 3: generic genai.generate / genai.create
449
+ if response is None:
450
+ try:
451
+ if hasattr(genai, "generate"):
452
+ response = genai.generate(model=model_used, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=max_tokens)
453
+ elif hasattr(genai, "create"):
454
+ response = genai.create(model=model_used, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=max_tokens)
455
+ except Exception:
456
+ response = None
457
+
458
+ if response is None:
459
+ raise RuntimeError("No supported generate method found on google.generativeai; check SDK version.")
460
+
461
+ # Defensive normalization of response -> outputs list (keeps your existing extraction logic)
462
  outputs = []
463
  if response is None:
464
  outputs = []
 
474
  outputs = v
475
  break
476
  else:
477
+ for attr in ("output", "candidates", "items", "responses"):
478
  val = getattr(response, attr, None)
479
  if isinstance(val, list) and val:
480
  outputs = val
481
  break
482
+
 
483
  if not isinstance(outputs, list):
484
  outputs = list(outputs) if outputs else []
485
+
486
+ # extract text pieces safely (identical to your prior logic)
487
  text_pieces = []
488
  for item in outputs:
489
  if item is None:
490
  continue
 
491
  cand_contents = None
492
  if isinstance(item, dict):
493
  for k in ("content", "text", "message", "output_text", "output"):
 
499
  cand_contents = getattr(item, k, None)
500
  if cand_contents:
501
  break
502
+
503
  if isinstance(cand_contents, str):
504
  if cand_contents.strip():
505
  text_pieces.append(cand_contents.strip())
506
  continue
507
+
508
  if isinstance(cand_contents, (list, tuple)):
509
  for c in cand_contents:
510
  if c is None:
 
520
  if t:
521
  text_pieces.append(str(t).strip())
522
  continue
523
+
524
  direct = None
525
  if isinstance(item, dict):
526
  direct = item.get("text") or item.get("output_text") or item.get("message")
 
528
  direct = getattr(item, "text", None) or getattr(item, "output_text", None) or getattr(item, "message", None)
529
  if direct:
530
  text_pieces.append(str(direct).strip())
531
+
532
  if not text_pieces:
533
  top_text = None
534
  if isinstance(response, dict):
 
537
  top_text = getattr(response, "text", None) or getattr(response, "message", None)
538
  if top_text:
539
  text_pieces.append(str(top_text).strip())
540
+
541
  # dedupe preserving order
542
  seen = set()
543
  filtered = []
 
548
  filtered.append(t)
549
  seen.add(t)
550
  out = "\n\n".join(filtered)
551
+
552
  except Exception as e:
 
553
  tb = traceback.format_exc()
554
  st.session_state["last_error"] = f"Responses API error: {e}\n\nDebug: {debug_info}\n\nTraceback:\n{tb}"
555
  st.error("An error occurred while generating the story. You can try Generate again; the uploaded video will be reused.")