Update streamlit_app.py
CB committed on
- streamlit_app.py  +83 -82

streamlit_app.py  CHANGED
@@ -1,11 +1,3 @@
-import streamlit as st
-import pkg_resources
-try:
-    st.write("google-generativeai:", pkg_resources.get_distribution("google-generativeai").version)
-except Exception:
-    st.write("google-generativeai not installed")
-
-
 # streamlit_app.py
 import os
 import time
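The removed header above was a temporary debug probe that printed the installed google-generativeai version via pkg_resources. If a probe like that is wanted again, the standard-library importlib.metadata does the same job without the setuptools dependency; a minimal sketch, assuming the distribution is still published as google-generativeai:

    import streamlit as st
    from importlib.metadata import PackageNotFoundError, version

    try:
        # Show the installed SDK version in the app UI for debugging.
        st.write("google-generativeai:", version("google-generativeai"))
    except PackageNotFoundError:
        st.write("google-generativeai not installed")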
@@ -390,7 +382,17 @@ if generate_now and not st.session_state.get("busy"):
 
         response = None
 
-        #
+        # normalize response generation across google-generativeai versions
+        response = None
+        genai.configure(api_key=key_to_use)
+        fname = file_name_or_id(processed)
+        if not fname:
+            raise RuntimeError("Uploaded file missing name/id")
+
+        system_msg = {"role": "system", "content": prompt_text}
+        user_msg = {"role": "user", "content": "Please summarize the attached video."}
+
+        # 1) responses.generate (modern)
         try:
             if hasattr(genai, "responses") and hasattr(genai.responses, "generate"):
                 response = genai.responses.generate(
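The added guard code leans on a file_name_or_id helper that appears to be defined elsewhere in streamlit_app.py; it is not part of the hunks shown here. Purely as an assumed sketch of the shape such a helper would need, accepting either a dict-like or attribute-style upload result:

    def file_name_or_id(processed):
        # Hypothetical sketch only; the real helper lives elsewhere in streamlit_app.py.
        if processed is None:
            return None
        if isinstance(processed, dict):
            return processed.get("name") or processed.get("id")
        return getattr(processed, "name", None) or getattr(processed, "id", None)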
@@ -402,20 +404,20 @@ if generate_now and not st.session_state.get("busy"):
                 )
         except Exception:
             response = None
-
-        #
+
+        # 2) GenerativeModel (another modern interface)
         if response is None:
             try:
                 if hasattr(genai, "GenerativeModel"):
-
-                    if hasattr(
-                        response =
-                    elif hasattr(
-                        response =
+                    gm = genai.GenerativeModel(model=model_used)
+                    if hasattr(gm, "generate_content"):
+                        response = gm.generate_content([system_msg, user_msg], files=[{"name": fname}], max_output_tokens=max_tokens)
+                    elif hasattr(gm, "generate"):
+                        response = gm.generate([system_msg, user_msg], files=[{"name": fname}], max_output_tokens=max_tokens)
             except Exception:
                 response = None
-
-        #
+
+        # 3) older top-level helpers (legacy)
         if response is None:
             try:
                 if hasattr(genai, "generate"):
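This fallback chain probes several possible SDK surfaces because google-generativeai has changed its API across releases. For context, a minimal sketch of the primary path it is aiming for, using only calls documented for recent releases of the SDK; the API key, model name, file path, and token limit are illustrative placeholders, and note that in the releases I'm aware of GenerativeModel expects the model name as model_name (usually positional) and generate_content takes a generation_config dict rather than the files=/max_output_tokens= keywords used in the hunk above, so those keyword arguments may need adjusting:

    import google.generativeai as genai

    genai.configure(api_key="YOUR_API_KEY")             # placeholder key
    uploaded = genai.upload_file("video.mp4")           # upload, then pass the handle as content
    model = genai.GenerativeModel("gemini-1.5-flash")   # placeholder model name
    response = model.generate_content(
        [uploaded, "Please summarize the attached video."],
        generation_config={"max_output_tokens": 1024},
    )
    print(response.text)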
@@ -424,93 +426,92 @@ if generate_now and not st.session_state.get("busy"):
                     response = genai.create(model=model_used, input=[{"text": prompt_text, "files": [{"name": fname}]}], max_output_tokens=max_tokens)
             except Exception:
                 response = None
-
+
         if response is None:
             raise RuntimeError("No supported generate method found on google.generativeai; check SDK version or model compatibility.")
-
+
+        # normalize output extraction robustly
         outputs = []
-
-
-
-
-
-                        break
-            if not outputs:
-                for v in response.values():
+        try:
+            if isinstance(response, dict):
+                # common keys that hold lists
+                for k in ("output", "candidates", "items", "responses"):
+                    v = response.get(k)
                     if isinstance(v, list) and v:
                         outputs = v
                         break
-
-
-
-
-
-
-
-
-
-
+                if not outputs:
+                    for v in response.values():
+                        if isinstance(v, list) and v:
+                            outputs = v
+                            break
+            else:
+                for attr in ("output", "candidates", "items", "responses"):
+                    v = getattr(response, attr, None)
+                    if isinstance(v, (list, tuple)) and v:
+                        outputs = list(v)
+                        break
+        except Exception:
+            outputs = []
+
+        # fall back to message/text fields if no list found
+        if not outputs:
+            candidate_text = None
+            if isinstance(response, dict):
+                candidate_text = response.get("text") or response.get("message")
+            else:
+                candidate_text = getattr(response, "text", None) or getattr(response, "message", None)
+            if candidate_text:
+                outputs = [ {"text": candidate_text} ]
+
+        # extract strings from outputs
         text_pieces = []
         for item in outputs:
-            if
+            if not item:
                 continue
-
+            # dict style
             if isinstance(item, dict):
                 for k in ("content", "text", "message", "output_text", "output"):
-
-
+                    v = item.get(k)
+                    if v:
+                        if isinstance(v, str):
+                            text_pieces.append(v.strip())
+                        elif isinstance(v, list):
+                            for e in v:
+                                if isinstance(e, str):
+                                    text_pieces.append(e.strip())
+                                elif isinstance(e, dict):
+                                    t = e.get("text") or e.get("content")
+                                    if t:
+                                        text_pieces.append(str(t).strip())
                         break
             else:
+                # object style
                 for k in ("content", "text", "message", "output", "output_text"):
-
-                    if
+                    v = getattr(item, k, None)
+                    if v:
+                        if isinstance(v, str):
+                            text_pieces.append(v.strip())
+                        elif isinstance(v, (list, tuple)):
+                            for e in v:
+                                if isinstance(e, str):
+                                    text_pieces.append(e.strip())
+                                else:
+                                    t = getattr(e, "text", None) or getattr(e, "content", None)
+                                    if t:
+                                        text_pieces.append(str(t).strip())
                         break
-
-
-                text_pieces.append(cand.strip())
-                continue
-            if isinstance(cand, (list, tuple)):
-                for c in cand:
-                    if c is None:
-                        continue
-                    if isinstance(c, str):
-                        if c.strip():
-                            text_pieces.append(c.strip())
-                        continue
-                    if isinstance(c, dict):
-                        t = c.get("text") or c.get("content")
-                    else:
-                        t = getattr(c, "text", None) or getattr(c, "content", None)
-                    if t:
-                        text_pieces.append(str(t).strip())
-                    continue
-            direct = None
-            if isinstance(item, dict):
-                direct = item.get("text") or item.get("output_text") or item.get("message")
-            else:
-                direct = getattr(item, "text", None) or getattr(item, "output_text", None) or getattr(item, "message", None)
-            if direct:
-                text_pieces.append(str(direct).strip())
-
-        if not text_pieces:
-            top_text = None
-            if isinstance(response, dict):
-                top_text = response.get("text") or response.get("message")
-            else:
-                top_text = getattr(response, "text", None) or getattr(response, "message", None)
-            if top_text:
-                text_pieces.append(str(top_text).strip())
-
+
+        # dedupe and join
         seen = set()
         filtered = []
         for t in text_pieces:
-            if not isinstance(t, str):
-                continue
             if t and t not in seen:
                 filtered.append(t)
                 seen.add(t)
         out = "\n\n".join(filtered)
 
+
     except Exception as e:
         tb = traceback.format_exc()
         st.session_state["last_error"] = f"Responses API error: {e}\n\nDebug: {debug_info}\n\nTraceback:\n{tb}"
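For GenerativeModel-style responses specifically, the generic extraction above approximates what recent google-generativeai releases already expose as response.text and response.candidates[*].content.parts[*].text. A minimal sketch of an extractor written directly against that documented shape (the helper name extract_text is mine, not from the app):

    def extract_text(response) -> str:
        # Prefer the convenience accessor when the response carries simple text parts.
        try:
            if response.text:
                return response.text
        except (ValueError, AttributeError):
            pass  # .text raises when no simple text parts are present
        pieces = []
        for candidate in getattr(response, "candidates", None) or []:
            content = getattr(candidate, "content", None)
            for part in getattr(content, "parts", None) or []:
                t = getattr(part, "text", None)
                if t and t.strip():
                    pieces.append(t.strip())
        # Dedupe while preserving order, then join.
        return "\n\n".join(dict.fromkeys(pieces))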