Spaces:
Build error
Build error
CB committed on
Update streamlit_app.py
Browse files- streamlit_app.py +1 -11
streamlit_app.py
CHANGED
|
@@ -30,7 +30,6 @@ except Exception:
|
|
| 30 |
# google.generativeai SDK (try both legacy and newer patterns)
|
| 31 |
try:
|
| 32 |
import google.generativeai as genai
|
| 33 |
-
# some installs expose a top-level `responses` object, others require attribute access
|
| 34 |
genai_responses = getattr(genai, "responses", None) or getattr(genai, "Responses", None)
|
| 35 |
upload_file = getattr(genai, "upload_file", None)
|
| 36 |
get_file = getattr(genai, "get_file", None)
|
|
@@ -388,9 +387,6 @@ def generate_via_responses_api(prompt_text: str, processed, model_used: str, max
|
|
| 388 |
user_msg = {"role": "user", "content": "Please summarize the attached video."}
|
| 389 |
call_variants = []
|
| 390 |
|
| 391 |
-
# Two common payload shapes: `genai.responses.generate(model=..., **payload)`
|
| 392 |
-
# and legacy `genai.Responses.create(...)` or model/chat wrappers.
|
| 393 |
-
# Build payloads for both styles.
|
| 394 |
call_variants.append({"method": "responses.generate", "payload": {"model": model_used, "messages": [system_msg, user_msg], "files": [{"name": fname}], "safety_settings": safety_settings, "max_output_tokens": max_tokens}})
|
| 395 |
call_variants.append({"method": "responses.generate_alt", "payload": {"model": model_used, "input": [{"text": prompt_text, "files": [{"name": fname}]}], "safety_settings": safety_settings, "max_output_tokens": max_tokens}})
|
| 396 |
call_variants.append({"method": "legacy_responses_create", "payload": {"model": model_used, "input": prompt_text, "file": fname, "max_output_tokens": max_tokens}})
|
|
@@ -411,25 +407,21 @@ def generate_via_responses_api(prompt_text: str, processed, model_used: str, max
|
|
| 411 |
try:
|
| 412 |
if progress_callback:
|
| 413 |
progress_callback("starting", int(time.time() - start), {"model": model_used, "attempt": attempts, "method": method})
|
| 414 |
-
# Preferred new API style if available
|
| 415 |
if genai_responses is not None and hasattr(genai_responses, "generate"):
|
| 416 |
-
response = genai_responses.generate(**payload)
|
| 417 |
text = _normalize_genai_response(response)
|
| 418 |
if progress_callback:
|
| 419 |
progress_callback("done", int(time.time() - start), {"model": model_used, "attempt": attempts, "method": method})
|
| 420 |
return text
|
| 421 |
-
# Some versions expose a top-level Responses class/or function `genai.Responses.create`
|
| 422 |
if hasattr(genai, "Responses") and hasattr(genai.Responses, "create"):
|
| 423 |
response = genai.Responses.create(**payload) # type: ignore
|
| 424 |
text = _normalize_genai_response(response)
|
| 425 |
if progress_callback:
|
| 426 |
progress_callback("done", int(time.time() - start), {"model": model_used, "attempt": attempts, "method": method})
|
| 427 |
return text
|
| 428 |
-
# Legacy model object style (older gemini SDK wrappers)
|
| 429 |
if hasattr(genai, "GenerativeModel"):
|
| 430 |
try:
|
| 431 |
model_obj = genai.GenerativeModel(model_name=model_used)
|
| 432 |
-
# try chat pattern
|
| 433 |
if hasattr(model_obj, "start_chat"):
|
| 434 |
chat = model_obj.start_chat()
|
| 435 |
resp = chat.send_message(prompt_text, timeout=timeout)
|
|
@@ -440,14 +432,12 @@ def generate_via_responses_api(prompt_text: str, processed, model_used: str, max
|
|
| 440 |
return text
|
| 441 |
except Exception:
|
| 442 |
pass
|
| 443 |
-
# If none matched, raise to be caught below and trigger helpful error
|
| 444 |
raise RuntimeError("No supported response generation method available in installed google-generativeai package.")
|
| 445 |
except Exception as e:
|
| 446 |
last_exc = e
|
| 447 |
msg = str(e)
|
| 448 |
logger.warning("Responses.generate error (model=%s attempt=%s method=%s): %s", model_used, attempts, method, msg)
|
| 449 |
if not is_transient_error(msg):
|
| 450 |
-
# non-transient -> surface meaningful hint for common misconfig issues
|
| 451 |
if "No supported response generation method" in msg or "has no attribute" in msg or "module 'google.generativeai' has no attribute" in msg:
|
| 452 |
raise RuntimeError(
|
| 453 |
"Installed google-generativeai package does not expose a compatible Responses API. "
|
|
|
|
| 30 |
# google.generativeai SDK (try both legacy and newer patterns)
|
| 31 |
try:
|
| 32 |
import google.generativeai as genai
|
|
|
|
| 33 |
genai_responses = getattr(genai, "responses", None) or getattr(genai, "Responses", None)
|
| 34 |
upload_file = getattr(genai, "upload_file", None)
|
| 35 |
get_file = getattr(genai, "get_file", None)
|
|
|
|
| 387 |
user_msg = {"role": "user", "content": "Please summarize the attached video."}
|
| 388 |
call_variants = []
|
| 389 |
|
|
|
|
|
|
|
|
|
|
| 390 |
call_variants.append({"method": "responses.generate", "payload": {"model": model_used, "messages": [system_msg, user_msg], "files": [{"name": fname}], "safety_settings": safety_settings, "max_output_tokens": max_tokens}})
|
| 391 |
call_variants.append({"method": "responses.generate_alt", "payload": {"model": model_used, "input": [{"text": prompt_text, "files": [{"name": fname}]}], "safety_settings": safety_settings, "max_output_tokens": max_tokens}})
|
| 392 |
call_variants.append({"method": "legacy_responses_create", "payload": {"model": model_used, "input": prompt_text, "file": fname, "max_output_tokens": max_tokens}})
|
|
|
|
| 407 |
try:
|
| 408 |
if progress_callback:
|
| 409 |
progress_callback("starting", int(time.time() - start), {"model": model_used, "attempt": attempts, "method": method})
|
|
|
|
| 410 |
if genai_responses is not None and hasattr(genai_responses, "generate"):
|
| 411 |
+
response = genai_responses.generate(**payload)
|
| 412 |
text = _normalize_genai_response(response)
|
| 413 |
if progress_callback:
|
| 414 |
progress_callback("done", int(time.time() - start), {"model": model_used, "attempt": attempts, "method": method})
|
| 415 |
return text
|
|
|
|
| 416 |
if hasattr(genai, "Responses") and hasattr(genai.Responses, "create"):
|
| 417 |
response = genai.Responses.create(**payload) # type: ignore
|
| 418 |
text = _normalize_genai_response(response)
|
| 419 |
if progress_callback:
|
| 420 |
progress_callback("done", int(time.time() - start), {"model": model_used, "attempt": attempts, "method": method})
|
| 421 |
return text
|
|
|
|
| 422 |
if hasattr(genai, "GenerativeModel"):
|
| 423 |
try:
|
| 424 |
model_obj = genai.GenerativeModel(model_name=model_used)
|
|
|
|
| 425 |
if hasattr(model_obj, "start_chat"):
|
| 426 |
chat = model_obj.start_chat()
|
| 427 |
resp = chat.send_message(prompt_text, timeout=timeout)
|
|
|
|
| 432 |
return text
|
| 433 |
except Exception:
|
| 434 |
pass
|
|
|
|
| 435 |
raise RuntimeError("No supported response generation method available in installed google-generativeai package.")
|
| 436 |
except Exception as e:
|
| 437 |
last_exc = e
|
| 438 |
msg = str(e)
|
| 439 |
logger.warning("Responses.generate error (model=%s attempt=%s method=%s): %s", model_used, attempts, method, msg)
|
| 440 |
if not is_transient_error(msg):
|
|
|
|
| 441 |
if "No supported response generation method" in msg or "has no attribute" in msg or "module 'google.generativeai' has no attribute" in msg:
|
| 442 |
raise RuntimeError(
|
| 443 |
"Installed google-generativeai package does not expose a compatible Responses API. "
|