CB committed
Commit 0ac35e4 · verified · 1 Parent(s): 835ac93

Update streamlit_app.py

Files changed (1)
  1. streamlit_app.py +57 -45
streamlit_app.py CHANGED
@@ -21,11 +21,14 @@ try:
     from phi.tools.duckduckgo import DuckDuckGo
     HAS_PHI = True
 except Exception:
-    # disable phi Agent usage to avoid phi IndexError in production
+    Agent = Gemini = DuckDuckGo = None
     HAS_PHI = False
-    Agent = None
-    Gemini = None
-    DuckDuckGo = None
+
+    # disable phi Agent to avoid phi IndexError in production by default
+    HAS_PHI = False
+    Agent = None
+    Gemini = None
+    DuckDuckGo = None
 
 try:
     import google.generativeai as genai
@@ -347,7 +350,7 @@ if generate_now and not st.session_state.get("busy"):
 
     out = ""
     model_used = model_id
-    max_tokens = 512 if st.session_state.get("fast_mode") else 1024
+    max_tokens = 256 if st.session_state.get("fast_mode") else 1024
     est_tokens = max_tokens
 
     agent = maybe_create_agent(model_used)
@@ -383,19 +386,10 @@ if generate_now and not st.session_state.get("busy"):
         system_msg = {"role": "system", "content": prompt_text}
         user_msg = {"role": "user", "content": "Please summarize the attached video."}
 
-        response = None
-
         # normalize response generation across google-generativeai versions
         response = None
-        genai.configure(api_key=key_to_use)
-        fname = file_name_or_id(processed)
-        if not fname:
-            raise RuntimeError("Uploaded file missing name/id")
-
-        system_msg = {"role": "system", "content": prompt_text}
-        user_msg = {"role": "user", "content": "Please summarize the attached video."}
-
-        # 1) responses.generate (modern)
+
+        # 1) responses.generate
         try:
             if hasattr(genai, "responses") and hasattr(genai.responses, "generate"):
                 response = genai.responses.generate(
@@ -407,8 +401,8 @@ if generate_now and not st.session_state.get("busy"):
                 )
         except Exception:
             response = None
-
-        # 2) GenerativeModel (another modern interface)
+
+        # 2) GenerativeModel (present in 0.8.5)
         if response is None:
             try:
                 if hasattr(genai, "GenerativeModel"):
@@ -419,8 +413,8 @@ if generate_now and not st.session_state.get("busy"):
                     response = gm.generate([system_msg, user_msg], files=[{"name": fname}], max_output_tokens=max_tokens)
             except Exception:
                 response = None
-
-        # 3) older top-level helpers (legacy)
+
+        # 3) older top-level helpers
         if response is None:
             try:
                 if hasattr(genai, "generate"):
@@ -429,19 +423,17 @@ if generate_now and not st.session_state.get("busy"):
                     response = genai.create(model=model_used, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=max_tokens)
             except Exception:
                 response = None
-
+
         if response is None:
             raise RuntimeError("No supported generate method found on google.generativeai; check SDK version or model compatibility.")
-
-        # normalize output extraction robustly
+
         outputs = []
        try:
             if isinstance(response, dict):
-                # common keys that hold lists
-                for k in ("output", "candidates", "items", "responses"):
-                    v = response.get(k)
-                    if isinstance(v, list) and v:
-                        outputs = v
+                for key in ("output", "candidates", "items", "responses"):
+                    val = response.get(key)
+                    if isinstance(val, list) and val:
+                        outputs = val
                         break
             if not outputs:
                 for v in response.values():
@@ -450,14 +442,13 @@ if generate_now and not st.session_state.get("busy"):
                         break
             else:
                 for attr in ("output", "candidates", "items", "responses"):
-                    v = getattr(response, attr, None)
-                    if isinstance(v, (list, tuple)) and v:
-                        outputs = list(v)
+                    val = getattr(response, attr, None)
+                    if isinstance(val, (list, tuple)) and val:
+                        outputs = list(val)
                         break
         except Exception:
             outputs = []
-
-        # fall back to message/text fields if no list found
+
         if not outputs:
             candidate_text = None
             if isinstance(response, dict):
@@ -466,13 +457,11 @@ if generate_now and not st.session_state.get("busy"):
                 candidate_text = getattr(response, "text", None) or getattr(response, "message", None)
             if candidate_text:
                 outputs = [ {"text": candidate_text} ]
-
-        # extract strings from outputs
+
         text_pieces = []
         for item in outputs:
             if not item:
                 continue
-            # dict style
             if isinstance(item, dict):
                 for k in ("content", "text", "message", "output_text", "output"):
                     v = item.get(k)
@@ -489,7 +478,6 @@ if generate_now and not st.session_state.get("busy"):
                             text_pieces.append(str(t).strip())
                         break
             else:
-                # object style
                 for k in ("content", "text", "message", "output", "output_text"):
                     v = getattr(item, k, None)
                     if v:
@@ -504,8 +492,7 @@ if generate_now and not st.session_state.get("busy"):
                         if t:
                             text_pieces.append(str(t).strip())
                         break
-
-        # dedupe and join
+
         seen = set()
         filtered = []
         for t in text_pieces:
@@ -514,7 +501,6 @@ if generate_now and not st.session_state.get("busy"):
                 seen.add(t)
         out = "\n\n".join(filtered)
 
-
     except Exception as e:
         tb = traceback.format_exc()
         st.session_state["last_error"] = f"Responses API error: {e}\n\nDebug: {debug_info}\n\nTraceback:\n{tb}"
@@ -557,8 +543,34 @@ if st.session_state.get("last_error"):
     with st.expander("Last Error", expanded=False):
         st.write(st.session_state.get("last_error"))
 
-    import streamlit as st
-    from diagnostics import env_check
-
-    st.header("Environment")
-    st.code(env_check())
+with st.expander("Errors / Environment", expanded=False):
+    try:
+        # diagnostics snippet requested by user, expects diagnostics.env_check() to be present
+        import streamlit as _st  # keep namespace local
+        try:
+            from diagnostics import env_check  # optional helper in user's repo
+            _st.header("Environment")
+            _st.code(env_check())
+        except Exception:
+            # fallback basic env info
+            py = os.popen("python -V").read().strip()
+            try:
+                ver = getattr(genai, "__version__", None)
+                f = getattr(genai, "__file__", None)
+                has_responses = hasattr(genai, "responses")
+                has_GenerativeModel = hasattr(genai, "GenerativeModel")
+                has_generate = hasattr(genai, "generate")
+                info = {
+                    "python_version": py,
+                    "version": ver,
+                    "file": f,
+                    "has_responses": has_responses,
+                    "has_GenerativeModel": has_GenerativeModel,
+                    "has_generate": has_generate,
+                }
+            except Exception:
+                info = {"python_version": py}
+            _st.header("Environment")
+            _st.code(str(info))
+    except Exception:
+        pass
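
Note: the core of this commit is the guarded fallback chain for producing a response across google-generativeai versions. As a rough standalone illustration of that pattern only, the sketch below factors the three guarded attempts into one helper. The helper name generate_with_fallback and the exact keyword arguments are illustrative assumptions; the attribute names (responses.generate, GenerativeModel, generate, create) are the ones the patch itself probes with hasattr, precisely because none of them is guaranteed to exist in a given SDK release.

from typing import Any, List, Optional

def generate_with_fallback(genai_module: Any, model_id: str, messages: List[dict],
                           files: List[dict], max_output_tokens: int) -> Optional[Any]:
    """Try the interfaces the patch probes, newest first; return the first response or None."""
    # 1) responses.generate, if this SDK build exposes it (keyword args assumed)
    try:
        if hasattr(genai_module, "responses") and hasattr(genai_module.responses, "generate"):
            return genai_module.responses.generate(
                model=model_id,
                input=messages,
                files=files,
                max_output_tokens=max_output_tokens,
            )
    except Exception:
        pass
    # 2) GenerativeModel, if present
    try:
        if hasattr(genai_module, "GenerativeModel"):
            gm = genai_module.GenerativeModel(model_id)
            return gm.generate(messages, files=files, max_output_tokens=max_output_tokens)
    except Exception:
        pass
    # 3) older top-level helpers
    try:
        if hasattr(genai_module, "generate"):
            return genai_module.generate(model=model_id, input=messages,
                                         max_output_tokens=max_output_tokens)
        if hasattr(genai_module, "create"):
            return genai_module.create(model=model_id, input=messages,
                                       max_output_tokens=max_output_tokens)
    except Exception:
        pass
    return None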
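
The normalization step that follows generation (find a candidate list, fall back to a bare text/message field, then collect and de-duplicate strings) is pure Python and can be read in isolation. A minimal sketch of that logic, assuming the same key names as the patch and treating each field as a plain string (the patch also walks nested content lists, which this omits):

from typing import Any, List

def extract_text_pieces(response: Any) -> List[str]:
    """Collect text strings from a dict- or object-shaped response (simplified)."""
    list_keys = ("output", "candidates", "items", "responses")
    outputs: List[Any] = []
    # find the first list-like field that holds candidates
    if isinstance(response, dict):
        for k in list_keys:
            v = response.get(k)
            if isinstance(v, list) and v:
                outputs = v
                break
    else:
        for k in list_keys:
            v = getattr(response, k, None)
            if isinstance(v, (list, tuple)) and v:
                outputs = list(v)
                break
    # fall back to a bare text/message field if no list was found
    if not outputs:
        if isinstance(response, dict):
            text = response.get("text") or response.get("message")
        else:
            text = getattr(response, "text", None) or getattr(response, "message", None)
        if text:
            outputs = [{"text": text}]
    # extract and de-duplicate strings
    pieces: List[str] = []
    seen = set()
    for item in outputs:
        if not item:
            continue
        # read dict keys and object attributes the same way
        get = item.get if isinstance(item, dict) else (lambda k, _i=item: getattr(_i, k, None))
        for k in ("content", "text", "message", "output_text", "output"):
            v = get(k)
            if v:
                t = str(v).strip()
                if t and t not in seen:
                    seen.add(t)
                    pieces.append(t)
                break
    return pieces

# tiny demo with a dict-shaped response
print(extract_text_pieces({"candidates": [{"content": "Summary of the attached video."}]}))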