fix models
Browse files
agent.py
CHANGED
|
@@ -57,8 +57,8 @@ def smart_invoke(msgs, use_tools=False, start_tier=0):
|
|
| 57 |
Retries next tier if a 429 (rate limit), 402 (credits), or 404 (model not found) error occurs.
|
| 58 |
"""
|
| 59 |
|
| 60 |
-
# Adaptive Gemini names
|
| 61 |
-
gemini_alternatives = ["gemini-2.
|
| 62 |
|
| 63 |
tiers_config = [
|
| 64 |
{"name": "OpenRouter", "key": "OPENROUTER_API_KEY", "provider": "openai", "model_name": "meta-llama/llama-3.3-70b-instruct", "base_url": "https://openrouter.ai/api/v1"},
|
|
@@ -179,11 +179,10 @@ def get_vision_models():
|
|
| 179 |
"""Returns a list of vision models to try, in order of preference."""
|
| 180 |
configs = [
|
| 181 |
{"name": "OpenRouter-Gemini-2.0", "key": "OPENROUTER_API_KEY", "provider": "openai", "model_name": "google/gemini-2.0-flash-001", "base_url": "https://openrouter.ai/api/v1"},
|
| 182 |
-
{"name": "Google-Gemini-2.0-
|
| 183 |
-
{"name": "Google-Gemini-
|
| 184 |
{"name": "NVIDIA-Vision-Llama-11b", "key": "NVIDIA_API_KEY", "provider": "openai", "model_name": "meta/llama-3.2-11b-vision-instruct", "base_url": "https://integrate.api.nvidia.com/v1"},
|
| 185 |
{"name": "NVIDIA-Vision-Llama-90b", "key": "NVIDIA_API_KEY", "provider": "openai", "model_name": "meta/llama-3.2-90b-vision-instruct", "base_url": "https://integrate.api.nvidia.com/v1"},
|
| 186 |
-
{"name": "Groq-Vision", "key": "GROQ_API_KEY", "provider": "groq", "model_name": "llama-3.2-90b-vision-preview"},
|
| 187 |
]
|
| 188 |
models = []
|
| 189 |
for cfg in configs:
|
|
@@ -356,7 +355,10 @@ def analyze_video(video_path: str, question: str) -> str:
|
|
| 356 |
|
| 357 |
return f"Video Summary based on extracted frames and audio:\n{video_context}"
|
| 358 |
except Exception as e:
|
| 359 |
-
|
|
|
|
|
|
|
|
|
|
| 360 |
finally:
|
| 361 |
if downloaded_video and os.path.exists(downloaded_video):
|
| 362 |
try:
|
|
@@ -520,7 +522,12 @@ def answer_message(state: AgentState) -> AgentState:
|
|
| 520 |
time.sleep(4)
|
| 521 |
|
| 522 |
print(f"--- ReAct Step {step + 1} ---")
|
| 523 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 524 |
messages.append(ai_msg)
|
| 525 |
|
| 526 |
# Check if the model requested tools
|
|
|
|
| 57 |
Retries next tier if a 429 (rate limit), 402 (credits), or 404 (model not found) error occurs.
|
| 58 |
"""
|
| 59 |
|
| 60 |
+
# Adaptive Gemini names verified via list_models (REST API)
|
| 61 |
+
gemini_alternatives = ["gemini-2.5-flash", "gemini-2.0-flash", "gemini-flash-latest", "gemini-pro-latest"]
|
| 62 |
|
| 63 |
tiers_config = [
|
| 64 |
{"name": "OpenRouter", "key": "OPENROUTER_API_KEY", "provider": "openai", "model_name": "meta-llama/llama-3.3-70b-instruct", "base_url": "https://openrouter.ai/api/v1"},
|
|
|
|
| 179 |
"""Returns a list of vision models to try, in order of preference."""
|
| 180 |
configs = [
|
| 181 |
{"name": "OpenRouter-Gemini-2.0", "key": "OPENROUTER_API_KEY", "provider": "openai", "model_name": "google/gemini-2.0-flash-001", "base_url": "https://openrouter.ai/api/v1"},
|
| 182 |
+
{"name": "Google-Gemini-2.0-Flash", "key": "GOOGLE_API_KEY", "provider": "google", "model_name": "gemini-2.0-flash"},
|
| 183 |
+
{"name": "Google-Gemini-Flash-Latest", "key": "GOOGLE_API_KEY", "provider": "google", "model_name": "gemini-flash-latest"},
|
| 184 |
{"name": "NVIDIA-Vision-Llama-11b", "key": "NVIDIA_API_KEY", "provider": "openai", "model_name": "meta/llama-3.2-11b-vision-instruct", "base_url": "https://integrate.api.nvidia.com/v1"},
|
| 185 |
{"name": "NVIDIA-Vision-Llama-90b", "key": "NVIDIA_API_KEY", "provider": "openai", "model_name": "meta/llama-3.2-90b-vision-instruct", "base_url": "https://integrate.api.nvidia.com/v1"},
|
|
|
|
| 186 |
]
|
| 187 |
models = []
|
| 188 |
for cfg in configs:
|
|
|
|
| 355 |
|
| 356 |
return f"Video Summary based on extracted frames and audio:\n{video_context}"
|
| 357 |
except Exception as e:
|
| 358 |
+
err_msg = str(e)
|
| 359 |
+
if "No address associated with hostname" in err_msg or "Failed to resolve" in err_msg:
|
| 360 |
+
return f"Error: The environment cannot access the internet (DNS failure). Please use 'web_search' or 'wiki_search' to find information about this video content instead of trying to download it."
|
| 361 |
+
return f"Error analyzing video: {err_msg}"
|
| 362 |
finally:
|
| 363 |
if downloaded_video and os.path.exists(downloaded_video):
|
| 364 |
try:
|
|
|
|
| 522 |
time.sleep(4)
|
| 523 |
|
| 524 |
print(f"--- ReAct Step {step + 1} ---")
|
| 525 |
+
|
| 526 |
+
# Max history truncation to avoid 413 Request Too Large errors
|
| 527 |
+
# Keep SystemMessage, first HumanMessage, and the last 6 messages
|
| 528 |
+
safe_messages = messages[:2] + messages[-6:] if len(messages) > 10 else messages
|
| 529 |
+
|
| 530 |
+
ai_msg, current_tier = smart_invoke(safe_messages, use_tools=True, start_tier=current_tier)
|
| 531 |
messages.append(ai_msg)
|
| 532 |
|
| 533 |
# Check if the model requested tools
|