Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,206 +1,1426 @@
|
|
| 1 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
import gradio as gr
|
| 3 |
-
import requests
|
| 4 |
-
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
|
|
|
|
|
|
|
|
| 10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
)
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
print(f"Agent returning fixed answer: {fixed_answer}")
|
| 31 |
-
return fixed_answer
|
| 32 |
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
"""
|
| 35 |
-
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
"""
|
| 38 |
-
|
| 39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
-
if profile:
|
| 42 |
-
username= f"{profile.username}"
|
| 43 |
-
print(f"User logged in: {username}")
|
| 44 |
-
else:
|
| 45 |
-
print("User not logged in.")
|
| 46 |
-
return "Please Login to Hugging Face with the button.", None
|
| 47 |
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
|
| 52 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
try:
|
| 54 |
-
|
|
|
|
|
|
|
| 55 |
except Exception as e:
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
try:
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
return
|
| 75 |
-
except requests.exceptions.JSONDecodeError as e:
|
| 76 |
-
print(f"Error decoding JSON response from questions endpoint: {e}")
|
| 77 |
-
print(f"Response text: {response.text[:500]}")
|
| 78 |
-
return f"Error decoding server response for questions: {e}", None
|
| 79 |
except Exception as e:
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
|
| 110 |
-
|
| 111 |
-
|
| 112 |
try:
|
| 113 |
-
|
|
|
|
| 114 |
response.raise_for_status()
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
except Exception as e:
|
| 148 |
-
|
| 149 |
-
print(status_message)
|
| 150 |
-
results_df = pd.DataFrame(results_log)
|
| 151 |
-
return status_message, results_df
|
| 152 |
|
| 153 |
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
gr.Markdown("# Basic Agent Evaluation Runner")
|
| 157 |
-
gr.Markdown(
|
| 158 |
-
"""
|
| 159 |
-
**Instructions:**
|
| 160 |
-
1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
|
| 161 |
-
2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
|
| 162 |
-
3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
|
| 163 |
-
---
|
| 164 |
-
**Disclaimers:**
|
| 165 |
-
Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
|
| 166 |
-
This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
|
| 167 |
-
Please note that this version requires an OpenAI Key to run.
|
| 168 |
-
"""
|
| 169 |
-
)
|
| 170 |
|
| 171 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
|
| 173 |
-
run_button = gr.Button("Run Evaluation & Submit All Answers")
|
| 174 |
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
|
| 179 |
-
run_button.click(
|
| 180 |
-
fn=run_and_submit_all,
|
| 181 |
-
outputs=[status_output, results_table]
|
| 182 |
-
)
|
| 183 |
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 189 |
|
| 190 |
-
if space_host_startup:
|
| 191 |
-
print(f"✅ SPACE_HOST found: {space_host_startup}")
|
| 192 |
-
print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
|
| 193 |
-
else:
|
| 194 |
-
print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
|
| 195 |
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 200 |
else:
|
| 201 |
-
print("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 202 |
|
| 203 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 204 |
|
| 205 |
-
|
| 206 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
+
import io
|
| 3 |
+
import json
|
| 4 |
+
import re
|
| 5 |
+
import traceback
|
| 6 |
+
import contextlib
|
| 7 |
+
import uuid
|
| 8 |
+
import time
|
| 9 |
+
import ast
|
| 10 |
+
from typing import List, Optional, TypedDict, Annotated, Dict
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from collections import Counter
|
| 13 |
import gradio as gr
|
|
|
|
|
|
|
| 14 |
import pandas as pd
|
| 15 |
+
import numpy as np
|
| 16 |
+
import torch
|
| 17 |
+
from pydantic import BaseModel, Field
|
| 18 |
+
|
| 19 |
+
# Multimodal & Web Tools
|
| 20 |
+
from transformers import pipeline
|
| 21 |
+
from youtube_transcript_api import YouTubeTranscriptApi
|
| 22 |
+
from bs4 import BeautifulSoup
|
| 23 |
+
import requests
|
| 24 |
+
|
| 25 |
+
# LangChain & LangGraph
|
| 26 |
+
from langgraph.graph.message import add_messages
|
| 27 |
+
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage, SystemMessage, AnyMessage, ToolCall
|
| 28 |
+
from langchain_core.tools import tool
|
| 29 |
+
from langgraph.prebuilt import ToolNode
|
| 30 |
+
from langgraph.graph import START, END, StateGraph
|
| 31 |
+
from langchain_groq import ChatGroq
|
| 32 |
+
|
| 33 |
+
# RAG
|
| 34 |
+
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
| 35 |
+
from langchain_community.vectorstores import FAISS
|
| 36 |
+
from langchain_community.embeddings import HuggingFaceEmbeddings
|
| 37 |
+
from langchain_community.tools import DuckDuckGoSearchRun
|
| 38 |
+
from langchain_core.documents import Document
|
| 39 |
+
|
| 40 |
+
# =============================================================================
|
| 41 |
+
# CONFIGURATION
|
| 42 |
+
# =============================================================================
|
| 43 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 44 |
+
MAX_TURNS = 25
|
| 45 |
+
MAX_MESSAGE_LENGTH = 8000
|
| 46 |
+
REFLECT_EVERY_N_TURNS = 5
|
| 47 |
|
| 48 |
+
# =============================================================================
|
| 49 |
+
# GLOBAL RAG COMPONENTS
|
| 50 |
+
# =============================================================================
|
| 51 |
+
global_embeddings = None
|
| 52 |
+
global_text_splitter = None
|
| 53 |
|
| 54 |
+
def initialize_rag_components():
    """Lazily build the module-level RAG helpers.

    Populates the module globals ``global_embeddings`` (HuggingFace
    sentence-transformer embeddings forced onto CPU) and
    ``global_text_splitter`` (recursive character splitter, 1000-char
    chunks with 200-char overlap) the first time it is called; later
    calls are cheap no-ops.

    Returns:
        bool: True when both components are available, False when
        initialization of either component failed.
    """
    global global_embeddings, global_text_splitter

    if global_embeddings is None:
        print("Initializing RAG embeddings...")
        try:
            global_embeddings = HuggingFaceEmbeddings(
                model_name="sentence-transformers/all-MiniLM-L6-v2",
                model_kwargs={'device': 'cpu'}
            )
            print("✅ Embeddings initialized.")
        except Exception as e:
            print(f"⚠️ Failed to initialize embeddings: {e}")
            return False

    if global_text_splitter is None:
        print("Initializing text splitter...")
        # Wrapped in try/except for consistency with the embeddings path:
        # previously a failure here propagated instead of returning False,
        # breaking the function's boolean contract.
        try:
            global_text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000,
                chunk_overlap=200,
                length_function=len,
                separators=["\n\n", "\n", ". ", " ", ""]
            )
            print("✅ Text splitter initialized.")
        except Exception as e:
            print(f"⚠️ Failed to initialize text splitter: {e}")
            return False

    return True
|
|
|
|
|
|
|
| 81 |
|
| 82 |
+
# =============================================================================
|
| 83 |
+
# ASR INITIALIZATION
|
| 84 |
+
# =============================================================================
|
| 85 |
+
# ---------------------------------------------------------------------------
# Module-level ASR setup: attempt to load Whisper once at import time.
# On any failure (no GPU, missing weights, transformers error) the agent
# keeps running and the audio tool reports the pipeline as unavailable.
# ---------------------------------------------------------------------------
asr_pipeline = None
try:
    print("Loading ASR (Whisper) pipeline globally...")
    use_gpu = torch.cuda.is_available()
    device = 0 if use_gpu else -1
    device_name = "cuda:0" if device == 0 else "cpu"
    print(f"Attempting to use device: {device_name} for ASR.")
    # fp16 only makes sense on GPU; CPU inference stays in fp32.
    dtype = torch.float16 if device == 0 else torch.float32
    asr_pipeline = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-base",
        torch_dtype=dtype,
        device=device,
    )
    print("✅ ASR (Whisper) pipeline loaded successfully.")
except Exception as e:
    print(f"⚠️ Warning: Could not load ASR pipeline globally. Error: {e}")
    asr_pipeline = None
|
| 101 |
+
|
| 102 |
+
# =============================================================================
|
| 103 |
+
# UTILITY FUNCTIONS
|
| 104 |
+
# =============================================================================
|
| 105 |
+
def remove_fences_simple(text):
    """Strip a surrounding Markdown code fence (```...```) from *text*.

    If the fenced body begins with a short alphanumeric language tag on
    its own line (e.g. ``python``, ``json``), that tag line is dropped
    as well.  Text that is not wrapped in a fence is returned exactly
    as given, including any surrounding whitespace.
    """
    stripped = text.strip()
    if not (stripped.startswith("```") and stripped.endswith("```")):
        return text

    inner = stripped[3:-3].strip()
    if '\n' not in inner:
        return inner

    tag, body = inner.split('\n', 1)
    tag = tag.strip()
    # Heuristic: a short, purely alphanumeric first line (underscores
    # allowed) is treated as a fence language tag, not code.
    if tag.replace('_', '').isalnum() and len(tag) < 15:
        return body.strip()
    return inner
|
| 117 |
+
|
| 118 |
+
def truncate_if_needed(content: str, max_length: int = MAX_MESSAGE_LENGTH) -> str:
    """Clip *content* to *max_length* characters.

    Over-long content is cut and suffixed with a marker recording the
    original total length; anything at or under the limit is returned
    untouched.
    """
    if len(content) <= max_length:
        return content
    return content[:max_length] + f"\n...[truncated, {len(content)} total chars]"
|
| 123 |
+
|
| 124 |
+
def find_file(path: str) -> Optional[Path]:
    """Resolve *path* to an existing file, trying several interpretations.

    Candidates, in order: relative to the current working directory, as
    given (possibly absolute), and by bare filename inside the CWD.
    Returns the first candidate that exists, or None when nothing matches.
    """
    cwd = Path.cwd()
    posix = Path(path).as_posix()

    candidates = (
        cwd / posix,
        Path(posix),
        cwd / Path(path).name,
    )
    return next((candidate for candidate in candidates if candidate.exists()), None)
|
| 140 |
+
|
| 141 |
+
# =============================================================================
|
| 142 |
+
# PLANNING & REFLECTION TOOLS
|
| 143 |
+
# =============================================================================
|
| 144 |
+
|
| 145 |
+
class ThinkInput(BaseModel):
    # Kept short so the model does not dump its whole chain of thought
    # into the tool call payload.
    reasoning: str = Field(description="Brief reasoning summary (under 150 chars)")

@tool(args_schema=ThinkInput)
def think_through_logic(reasoning: str) -> str:
    """
    Use this to work through logic puzzles, riddles, or reasoning problems.

    Call this when:
    - The question is a riddle or brain teaser
    - You need to reason through a logical problem
    - No external information is needed, just thinking

    After thinking, use calculator if math is involved, then validate and submit answer.
    """
    preview = reasoning[:100]
    print(f"🧠 Thinking: {preview}...")

    # Constant nudge back to the tool loop; no placeholders needed.
    return """✅ Logic reasoning recorded.

Next steps:
1. If math needed → use calculator()
2. Once you have answer → use validate_answer()
3. Then → use final_answer_tool()

Remember: You MUST call another tool. Do not output reasoning text."""
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class PlanInput(BaseModel):
    # A short summary keeps the tool-call payload small.
    task_summary: str = Field(description="Very brief task summary (under 80 chars)")

@tool(args_schema=PlanInput)
def create_plan(task_summary: str) -> str:
    """
    Creates a plan for multi-step questions. Use for complex tasks only.
    Keep the summary VERY brief to avoid errors.
    """
    snippet = task_summary[:80]
    print(f"📋 Planning: {snippet}...")

    header = f"✅ Plan created for: {task_summary}"
    return header + """

FRAMEWORK:
1. What info do I need?
2. What tools will I use?
3. In what order?

Now execute step 1. You MUST call a tool next."""
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class ReflectInput(BaseModel):
    # Brief on purpose: this tool is a prompt nudge, not a transcript.
    situation: str = Field(description="Brief situation summary (under 80 chars)")

@tool(args_schema=ReflectInput)
def reflect_on_progress(situation: str) -> str:
    """
    Reflects on progress when stuck. Use after 5+ turns without progress.
    Keep situation summary VERY brief.
    """
    summary = situation[:80]
    print(f"🤔 Reflecting: {summary}...")

    return (
        f"🔍 REFLECTION on: {situation}\n"
        "\n"
        "QUESTIONS:\n"
        "1. Am I using the right approach?\n"
        "2. Should I try a different tool?\n"
        "3. Do I actually have the answer already?\n"
        "\n"
        "Take a DIFFERENT approach now. You MUST call a tool next."
    )
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class ValidateInput(BaseModel):
    proposed_answer: str = Field(description="The answer to validate")
    original_question: str = Field(description="Original question (first 100 chars)")

@tool(args_schema=ValidateInput)
def validate_answer(proposed_answer: str, original_question: str) -> str:
    """
    Validates answer format before submission. ALWAYS use before final_answer_tool.
    """
    print(f"✓ Validating: '{proposed_answer[:50]}...'")

    answer_lc = proposed_answer.lower()
    question_lc = original_question.lower()

    issues = []
    warnings = []

    # Hard failures: conversational framing or code fences in the answer.
    conversational_markers = ("the answer is", "based on", "according to", "i found", "here is")
    if any(marker in answer_lc for marker in conversational_markers):
        issues.append("❌ Remove conversational text. Answer only.")
    if "```" in proposed_answer:
        issues.append("❌ Remove code fences (```).")

    # Soft warnings: suspicious length, or a numeric question answered
    # without a single digit.
    if len(proposed_answer) > 500:
        warnings.append("⚠️ Answer very long. Just the answer?")
    numeric_cues = ("how many", "what number", "count")
    if any(cue in question_lc for cue in numeric_cues):
        if not any(ch.isdigit() for ch in proposed_answer):
            warnings.append("⚠️ Question asks for number but answer has no digits.")

    if issues:
        return "🚫 VALIDATION FAILED:\n" + "\n".join(issues) + "\n\nFix then retry."
    if warnings:
        return "⚠️ WARNINGS:\n" + "\n".join(warnings) + "\n\nConsider fixing, or proceed if confident."
    return "✅ VALIDATION PASSED! Now call final_answer_tool() with this answer."
|
| 253 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 254 |
|
| 255 |
+
# =============================================================================
|
| 256 |
+
# CORE TOOLS
|
| 257 |
+
# =============================================================================
|
| 258 |
|
| 259 |
+
class SearchInput(BaseModel):
    query: str = Field(description="Search query (concise)")

@tool(args_schema=SearchInput)
def search_tool(query: str) -> str:
    """Searches web via DuckDuckGo. Use for facts, recent info."""
    # Reject non-string or blank queries up front.
    if not isinstance(query, str) or not query.strip():
        return "Error: Invalid query."

    print(f"🔍 Searching: {query}")
    try:
        raw = DuckDuckGoSearchRun().run(query)
        return truncate_if_needed(raw)
    except Exception as e:
        return f"Search error: {str(e)}"
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class CalcInput(BaseModel):
    expression: str = Field(description="Math expression (e.g., '2+2', 'sqrt(16)')")

@tool(args_schema=CalcInput)
def calculator(expression: str) -> str:
    """
    Evaluates math expressions. Use for ANY calculations.
    Supports: +, -, *, /, **, sqrt, sin, cos, log, pi, e, etc.
    """
    if not isinstance(expression, str) or not expression.strip():
        return "Error: Invalid expression."

    print(f"🧮 Calculating: {expression}")

    import math
    import operator as op

    # Same whitelist of names/functions the previous eval() exposed.
    safe_dict = {
        'sqrt': math.sqrt, 'sin': math.sin, 'cos': math.cos, 'tan': math.tan,
        'log': math.log, 'log10': math.log10, 'exp': math.exp,
        'pi': math.pi, 'e': math.e, 'abs': abs, 'round': round,
        'pow': pow, 'sum': sum, 'min': min, 'max': max
    }
    # Whitelisted operators.  Anything outside these node types (attribute
    # access, subscripts, lambdas, comprehensions, ...) is rejected —
    # unlike the previous eval() with blanked __builtins__, which could be
    # escaped via attribute traversal on literals.
    bin_ops = {
        ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
        ast.Div: op.truediv, ast.FloorDiv: op.floordiv,
        ast.Mod: op.mod, ast.Pow: op.pow,
    }
    unary_ops = {ast.UAdd: op.pos, ast.USub: op.neg}

    def _eval(node):
        """Recursively evaluate a node, allowing only whitelisted forms."""
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in bin_ops:
            return bin_ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
            return unary_ops[type(node.op)](_eval(node.operand))
        if isinstance(node, ast.Name) and node.id in safe_dict:
            return safe_dict[node.id]
        if (isinstance(node, ast.Call) and isinstance(node.func, ast.Name)
                and node.func.id in safe_dict and not node.keywords):
            return safe_dict[node.func.id](*[_eval(a) for a in node.args])
        # List/tuple literals support e.g. sum([1, 2, 3]).
        if isinstance(node, (ast.List, ast.Tuple)):
            return [_eval(elt) for elt in node.elts]
        raise ValueError(f"unsupported element: {type(node).__name__}")

    try:
        result = _eval(ast.parse(expression, mode="eval"))
        return str(result)
    except Exception as e:
        return f"Calculation error for '{expression}': {str(e)}"
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
class CodeInput(BaseModel):
    code: str = Field(description="Python code (MUST include print() for output)")

@tool(args_schema=CodeInput)
def code_interpreter(code: str) -> str:
    """
    Executes Python code. Use for data processing, complex logic.
    Available: pandas, numpy, json, re, datetime
    CRITICAL: Always use print() to output results!
    """
    if not isinstance(code, str):
        return "Error: code must be string."

    # Safety checks — best-effort substring matching, NOT a real sandbox.
    dangerous = ['__import__', 'eval(', 'compile(', 'subprocess', 'os.system', 'exec(']
    if any(d in code.lower() for d in dangerous):
        return "Error: Dangerous operation not allowed."

    if 'open(' in code.lower() and any(m in code for m in ["'w'", '"w"', "'a'", '"a"']):
        return "Error: File writing not allowed. Use write_file tool."

    print(f"💻 Executing code ({len(code)} chars)...")
    output_stream = io.StringIO()
    error_stream = io.StringIO()

    try:
        with contextlib.redirect_stdout(output_stream), contextlib.redirect_stderr(error_stream):
            safe_globals = {
                "pd": pd,
                "np": np,
                "json": json,
                "re": re,
                "__builtins__": __builtins__
            }
            # Single namespace for globals AND locals.  The previous
            # exec(code, safe_globals, {}) put top-level names into the
            # locals dict, so functions defined by the snippet could not
            # see each other (or snippet-level variables) at call time.
            exec(code, safe_globals)

        stdout = output_stream.getvalue()
        stderr = error_stream.getvalue()

        if stderr:
            return f"Error:\n{stderr}\n\nStdout:\n{stdout}"

        if stdout:
            return truncate_if_needed(stdout)

        return "Code executed but no output. Remember to use print()!"

    except Exception as e:
        return f"Execution failed:\n{traceback.format_exc()}"
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class ReadFileInput(BaseModel):
    path: str = Field(description="File path")

@tool(args_schema=ReadFileInput)
def read_file(path: str) -> str:
    """Reads file content."""
    if not isinstance(path, str) or not path.strip():
        return "Error: Invalid path."

    print(f"📄 Reading: {path}")

    located = find_file(path)
    if not located:
        # List CWD contents so the agent can correct a bad filename.
        return f"Error: File not found: '{path}'\nCWD files: {os.listdir('.')}"

    try:
        return truncate_if_needed(located.read_text(encoding='utf-8'))
    except UnicodeDecodeError:
        return f"Error: Binary file. Size: {located.stat().st_size} bytes. Try audio_transcription_tool for audio."
    except Exception as e:
        return f"Read error: {str(e)}"
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
class WriteFileInput(BaseModel):
    path: str = Field(description="File path")
    content: str = Field(description="Content to write")

@tool(args_schema=WriteFileInput)
def write_file(path: str, content: str) -> str:
    """Writes content to file."""
    if not path or not isinstance(content, str):
        return "Error: Invalid inputs."

    print(f"✍️ Writing: {path}")

    try:
        target = Path.cwd() / path
        # Create intermediate directories so nested paths work.
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding='utf-8')
    except Exception as e:
        return f"Write error: {str(e)}"
    return f"Wrote {len(content)} chars to '{path}'."
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
class ListDirInput(BaseModel):
    path: str = Field(description="Directory path", default=".")

@tool(args_schema=ListDirInput)
def list_directory(path: str = ".") -> str:
    """Lists directory contents."""
    print(f"📁 Listing: {path}")

    try:
        target = Path.cwd() if path == "." else Path.cwd() / path

        if not target.is_dir():
            return f"Error: '{path}' not a directory."

        entries = sorted(target.iterdir())
        if not entries:
            return f"Directory '{path}' is empty."

        # Split into subdirectories and files, each with a display line.
        dirs = [f"📁 {entry.name}/" for entry in entries if entry.is_dir()]
        files = [f"📄 {entry.name} ({entry.stat().st_size} bytes)"
                 for entry in entries if not entry.is_dir()]

        listing = f"Contents of '{path}':\n\n"
        if dirs:
            listing += "Directories:\n" + "\n".join(dirs) + "\n\n"
        if files:
            listing += "Files:\n" + "\n".join(files)
        return listing
    except Exception as e:
        return f"List error: {str(e)}"
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
class AudioInput(BaseModel):
    file_path: str = Field(description="Audio file path")

@tool(args_schema=AudioInput)
def audio_transcription_tool(file_path: str) -> str:
    """Transcribes audio using Whisper."""
    if not file_path:
        return "Error: Invalid file path."

    print(f"🎤 Transcribing: {file_path}")

    # The Whisper pipeline is loaded (or not) once at module import.
    if asr_pipeline is None:
        return "Error: ASR not available."

    located = find_file(file_path)
    if not located:
        return f"Error: Audio file not found: '{file_path}'"

    try:
        result = asr_pipeline(str(located))
        text = result.get("text", "")
        if not text:
            return "Error: Transcription empty."
        return f"Transcription:\n{truncate_if_needed(text)}"
    except Exception as e:
        return f"Transcription error: {str(e)}"
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
class YoutubeInput(BaseModel):
    video_url: str = Field(description="YouTube URL")

@tool(args_schema=YoutubeInput)
def get_youtube_transcript(video_url: str) -> str:
    """Fetches YouTube video transcript."""
    if not video_url:
        return "Error: Invalid URL."

    print(f"📺 YouTube transcript: {video_url}")

    try:
        url = video_url.strip()
        video_id = None
        if "watch?v=" in url:
            video_id = url.split("v=")[1].split("&")[0]
        elif "youtu.be/" in url:
            video_id = url.split("youtu.be/")[1].split("?")[0]
        else:
            # Also accept /shorts/, /embed/ and /live/ URL shapes,
            # which the previous version could not parse.
            for marker in ("/shorts/", "/embed/", "/live/"):
                if marker in url:
                    video_id = url.split(marker)[1].split("?")[0].split("/")[0]
                    break
        if video_id:
            # Drop any #fragment that would corrupt the id.
            video_id = video_id.split("#")[0]

        if not video_id:
            return "Error: Could not extract video ID."

        transcript_list = YouTubeTranscriptApi.get_transcript(video_id)

        if not transcript_list:
            return "Error: No transcript found."

        full_transcript = " ".join(item["text"] for item in transcript_list)
        return f"Transcript:\n{truncate_if_needed(full_transcript)}"
    except Exception as e:
        return f"Transcript error: {str(e)}"
|
| 500 |
+
|
| 501 |
|
| 502 |
+
class ScrapeInput(BaseModel):
    # Both field descriptions are surfaced to the LLM as tool-argument docs.
    url: str = Field(description="URL (must start with http:// or https://)")
    query: str = Field(description="What to find on the page")

@tool(args_schema=ScrapeInput)
def scrape_and_retrieve(url: str, query: str) -> str:
    """
    Scrapes webpage and uses RAG to find relevant info.
    Use when you need specific info from a known URL.
    """
    # Cheap input validation before any network or model work.
    if not url.startswith(('http://', 'https://')):
        return f"Error: Invalid URL format."
    if not query:
        return "Error: Query required."

    # Lazily (re)build the module-global embeddings/splitter if missing.
    if global_embeddings is None or global_text_splitter is None:
        if not initialize_rag_components():
            return "Error: RAG not initialized."

    print(f"🌐 Scraping: {url}")

    try:
        # Browser-like UA: some sites reject the default requests UA.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
        response = requests.get(url, headers=headers, timeout=20)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        # Strip boilerplate/navigation tags so only body text is embedded.
        for tag in soup(["script", "style", "nav", "footer", "aside", "header", "iframe"]):
            tag.extract()

        # Prefer semantic containers, falling back to the whole <body>.
        main = soup.find('main') or soup.find('article') or soup.body

        if not main:
            return "Error: No main content found."

        # Flatten to plain text and drop blank lines.
        text = main.get_text(separator='\n', strip=True)
        lines = [l.strip() for l in text.splitlines() if l.strip()]
        text = '\n'.join(lines)

        # Guard against empty shells / bot-blocked pages.
        if len(text) < 50:
            return f"Error: Content too short ({len(text)} chars)."

        chunks = global_text_splitter.split_text(text)

        if not chunks:
            return "Error: Could not chunk text."

        docs = [Document(page_content=c, metadata={"source": url}) for c in chunks]

        # Throwaway in-memory index scoped to this single page; top-5 chunks.
        db = FAISS.from_documents(docs, global_embeddings)
        retriever = db.as_retriever(search_kwargs={"k": 5})
        retrieved = retriever.invoke(query)

        if not retrieved:
            return f"No relevant info found for: '{query}'"

        context = "\n\n---\n\n".join([f"[Chunk {i+1}]\n{d.page_content}" for i, d in enumerate(retrieved)])

        # truncate_if_needed keeps the tool result within the context budget.
        return truncate_if_needed(f"From {url}:\n\n{context}")

    except requests.RequestException as e:
        # Network/HTTP failures reported separately from parsing failures.
        return f"Fetch error: {str(e)}"
    except Exception as e:
        return f"Scrape error: {str(e)}\n{traceback.format_exc()}"
|
|
|
|
|
|
|
|
|
|
| 567 |
|
| 568 |
|
| 569 |
+
class FinalAnswerInput(BaseModel):
    answer: str = Field(description="Final answer - EXACTLY what was asked, nothing more")

@tool(args_schema=FinalAnswerInput)
def final_answer_tool(answer: str) -> str:
    """
    Submit final answer. CRITICAL RULES:
    1. ALWAYS call validate_answer() first
    2. Answer must be EXACTLY what was asked
    3. NO conversational text
    4. NO explanations
    5. Match requested format exactly
    """
    # Defensively coerce any non-string payload before echoing it back.
    answer = answer if isinstance(answer, str) else str(answer)

    print(f"✅ FINAL ANSWER SUBMITTED: {answer}")
    return answer
|
| 587 |
|
|
|
|
| 588 |
|
| 589 |
+
# =============================================================================
# DEFINED TOOLS LIST
# =============================================================================
# Master tool registry bound to the LLM; this list's order is also the order
# in which tools are described in the system prompt.
defined_tools = [
    # Planning & Reflection
    think_through_logic,
    create_plan,
    reflect_on_progress,
    validate_answer,

    # Core tools
    search_tool,
    calculator,
    code_interpreter,

    # File operations
    read_file,
    write_file,
    list_directory,

    # Specialized
    audio_transcription_tool,
    get_youtube_transcript,
    scrape_and_retrieve,

    # Final answer submission (routing treats this name as terminal)
    final_answer_tool
]
|
| 617 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 618 |
|
| 619 |
+
# =============================================================================
# AGENT STATE
# =============================================================================
class AgentState(TypedDict):
    """Graph state threaded through the LangGraph agent/tool nodes."""
    # Conversation so far; add_messages appends new messages instead of
    # replacing the list.
    messages: Annotated[List[AnyMessage], add_messages]
    # Number of agent turns taken; compared against MAX_TURNS for termination.
    turn: int
    # Set to True once the create_plan tool has been called.
    has_plan: bool
    # Count of back-to-back tool results containing "Error"; reset on success.
    consecutive_errors: int
    # Names of every tool invoked so far, in call order.
    tool_history: List[str]
    # True when the most recent tool call was think_through_logic.
    last_tool_was_thinking: bool
|
| 629 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 630 |
|
| 631 |
+
# =============================================================================
# ENHANCED FALLBACK PARSER
# =============================================================================
def parse_tool_call_from_string(content: str, tools: List) -> List[ToolCall]:
    """Best-effort extraction of a single tool call from raw LLM text.

    Tries five strategies in order: Groq's ``<function=name{...}>`` syntax, a
    generic ``<function(name)>{...}`` syntax, an embedded ```python block
    (wrapped as a code_interpreter call), a plain mention of a known tool
    name, and finally a forced think_through_logic call for reasoning-looking
    text. Returns a one-element list of ToolCall, or [] if nothing usable was
    found.
    """
    print(f"🔧 Fallback parsing (first 300 chars):\n{content[:300]}")

    tool_name = None
    tool_input = None

    # STRATEGY 1: Groq's <function=name{...}> format
    groq_match = re.search(r"<function=(\w+)\s*(\{.*?\})\s*(?:>|</function>)", content, re.DOTALL)
    if groq_match:
        try:
            tool_name = groq_match.group(1).strip()
            json_str = groq_match.group(2).strip()
            try:
                # FIX: parse the payload as-is first; the old unconditional
                # unicode_escape pass corrupted valid JSON containing \uXXXX
                # escapes or non-ASCII text. Unescape only as a fallback.
                tool_input = json.loads(json_str)
            except json.JSONDecodeError:
                tool_input = json.loads(json_str.encode().decode('unicode_escape'))
            print(f"✓ Parsed Groq format: {tool_name}")
        except (json.JSONDecodeError, UnicodeDecodeError, ValueError):
            # FIX: narrow exceptions (was a bare `except:`) and reset BOTH
            # variables so later strategies are not blocked by a stale name.
            tool_name = None
            tool_input = None

    # STRATEGY 2: Standard <function(name)>{...} format
    if not tool_name:
        func_match = re.search(r"<function[(=]\s*([^)]+)\s*[)>](.*)", content, re.DOTALL | re.IGNORECASE)
        if func_match:
            try:
                tool_name = func_match.group(1).strip().replace("'", "").replace('"', '')
                remaining = func_match.group(2)
                json_start = remaining.find('{')
                if json_start != -1:
                    json_str = remaining[json_start:].strip().rstrip(',')
                    tool_input = json.loads(json_str)
                    print(f"✓ Parsed standard format: {tool_name}")
                else:
                    # FIX: with no JSON payload the old code kept a stale
                    # tool_name with tool_input=None, which skipped strategies
                    # 3-5 and then failed the final validation → returned [].
                    tool_name = None
            except (json.JSONDecodeError, ValueError):
                tool_name = None
                tool_input = None

    # STRATEGY 3: Embedded python code block → wrap in code_interpreter
    if not tool_name and "```python" in content:
        code_match = re.search(r"```python\n(.*?)```", content, re.DOTALL)
        if code_match:
            code = code_match.group(1).strip()
            tool_name = "code_interpreter"
            tool_input = {"code": code}
            print(f"✓ Extracted Python code → code_interpreter")

    # STRATEGY 4: Direct tool mention → create minimal valid call
    if not tool_name:
        for tool in tools:
            if tool.name.lower() in content.lower():
                tool_name = tool.name
                tool_input = {}

                # Fill required args with placeholders so the call validates.
                if tool.args_schema:
                    schema = tool.args_schema.model_json_schema()
                    for prop in schema.get('properties', {}).keys():
                        if prop in schema.get('required', []):
                            tool_input[prop] = "auto_extracted"

                print(f"✓ Found mention of '{tool_name}' → creating default call")
                break

    # STRATEGY 5: Emergency - reasoning-looking text → think_through_logic
    if not tool_name:
        if len(content) > 50 and not any(kw in content.lower() for kw in ["error", "failed", "invalid"]):
            tool_name = "think_through_logic"
            tool_input = {"reasoning": content[:150]}
            print(f"⚠️ No tool detected → forcing think_through_logic")

    # Validate against the registry and build the call.
    if tool_name and tool_input is not None:
        if any(t.name == tool_name for t in tools):
            return [ToolCall(name=tool_name, args=tool_input, id=str(uuid.uuid4()))]
        print(f"❌ Tool '{tool_name}' not in available tools")

    print("❌ All parsing strategies failed")
    return []
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
# =============================================================================
# CONDITIONAL EDGE FUNCTION (FIXED)
# =============================================================================
def should_continue(state: AgentState):
    """Route the graph after a step.

    Returns "tools" when the agent requested a non-final tool, END when the
    run should stop (final answer submitted, turn budget exhausted, or a
    reasoning loop detected), and "agent" in every other case.
    """
    history = state.get('messages', [])
    if not history:
        return "agent"

    newest = history[-1]
    turn_count = state.get('turn', 0)

    # Trace which message kind is being routed.
    print(f"📍 Conditional check - Turn {turn_count}, Last msg type: {type(newest).__name__}")

    # Turn budget exhausted → terminate.
    if turn_count >= MAX_TURNS:
        print(f"🛑 Max turns ({MAX_TURNS}) reached")
        return END

    # A tool result must always be handed back to the agent for processing.
    if isinstance(newest, ToolMessage):
        print(f"📨 Tool result received from '{newest.name}' → back to agent")
        return "agent"

    if isinstance(newest, AIMessage):
        if newest.tool_calls:
            # Route on the FIRST requested tool only; the final-answer tool
            # terminates the run, anything else goes to the tool node.
            requested = newest.tool_calls[0].get("name", "")
            return END if requested == "final_answer_tool" else "tools"

        # Two consecutive tool-free AI messages means the model is spinning.
        prior_was_bare_ai = (
            len(history) >= 2
            and isinstance(history[-2], AIMessage)
            and not history[-2].tool_calls
        )
        if prior_was_bare_ai:
            print(f"⚠️ Loop detected: 2 consecutive AI messages without tools")
            return END

        print(f"💭 AI message without tool call → continuing to agent (will force tool)")
        return "agent"

    # Anything unexpected falls back to the agent node.
    print(f"🔄 Default → continuing to agent")
    return "agent"
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
# =============================================================================
# ENHANCED AGENT CLASS
# =============================================================================
class PlanningReflectionAgent:
    """LangGraph tool-calling agent for the GAIA benchmark.

    Builds a two-node graph (agent ↔ tools) around a Groq-hosted Llama model
    bound to `defined_tools`, with an aggressive system prompt plus multiple
    fallbacks that force the model to emit exactly one tool call per turn.
    """

    def __init__(self):
        """Validate secrets, build the system prompt, the LLM, and the graph.

        Raises:
            ValueError: if GROQ_API_KEY or HUGGINGFACEHUB_API_TOKEN is unset.
        """
        print("🧠 PlanningReflectionAgent initializing...")

        GROQ_API_KEY = os.getenv("GROQ_API_KEY")
        if not GROQ_API_KEY:
            raise ValueError("GROQ_API_KEY not set!")
        # NOTE(review): the HF token is validated but not used below after the
        # HuggingFaceEndpoint path was replaced with ChatGroq — presumably
        # kept for other parts of the Space; confirm before removing.
        HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
        if not HUGGINGFACEHUB_API_TOKEN:
            raise ValueError("HUGGINGFACEHUB_API_TOKEN secret is not set! Please add it to your Space secrets.")

        self.tools = defined_tools

        # Initialize RAG (best-effort; scrape_and_retrieve retries on demand)
        if not initialize_rag_components():
            print("⚠️ RAG components failed to initialize.")

        # Build a human-readable tool catalogue, interpolated into the
        # system prompt below as {tool_descriptions}.
        tool_desc_list = []
        for tool in self.tools:
            if tool.args_schema:
                schema = tool.args_schema.model_json_schema()
                args_desc = [f" - {p}: {d.get('description', '')}"
                             for p, d in schema.get('properties', {}).items()]
                desc = f"- {tool.name}:\n {tool.description}\n" + "\n".join(args_desc)
            else:
                desc = f"- {tool.name}: {tool.description}"
            tool_desc_list.append(desc)
        tool_descriptions = "\n".join(tool_desc_list)

        # ULTRA-AGGRESSIVE SYSTEM PROMPT (runtime string — do not edit lightly)
        self.system_prompt = f"""You are an elite AI agent for GAIA benchmark. Your ONLY job: provide the EXACT answer requested.

═══════════════════════════════════════════════════════════════
⚠️ ABSOLUTE RULES - VIOLATE THESE AND YOU FAIL:
═══════════════════════════════════════════════════════════════

1. **EVERY TURN MUST CALL EXACTLY ONE TOOL** - No exceptions
2. **NEVER OUTPUT REASONING TEXT WITHOUT A TOOL CALL** - You will fail
3. **IDENTIFY QUESTION TYPE FIRST** - Logic? Factual? Data? Math?
4. **LOGIC PUZZLES**: think_through_logic → calculator (if needed) → validate → final_answer
5. **FACTUAL QUESTIONS**: search_tool → validate → final_answer
6. **DATA QUESTIONS**: read_file → code_interpreter → validate → final_answer
7. **ALWAYS VALIDATE**: Call validate_answer() before final_answer_tool()
8. **FINAL ANSWER FORMAT**: EXACTLY what was asked. NO "The answer is..." or explanations

═══════════════════════════════════════════════════════════════
📋 QUESTION TYPE GUIDE:
═══════════════════════════════════════════════════════════════

**RIDDLES/LOGIC PUZZLES** (No web search needed):
- Brain teasers, puzzles, logical deduction
- Strategy: think_through_logic → calculator (if math) → validate → final_answer
- Example: "If 200 coins, 30 face-down, divide into equal piles..."
  Turn 1: think_through_logic("Adventurer takes 30 coins and flips them")
  Turn 2: calculator("30") [if needed]
  Turn 3: validate_answer("30", question)
  Turn 4: final_answer_tool("30")

**FACTUAL/RESEARCH** (Need web):
- Who, what, when, where questions
- Strategy: search_tool → scrape_and_retrieve → validate → final_answer
- Example: "What was Einstein's birthplace population in 1900?"
  Turn 1: search_tool("Albert Einstein birthplace")
  Turn 2: search_tool("Ulm Germany population 1900")
  Turn 3: validate_answer("50000", question)
  Turn 4: final_answer_tool("50000")

**DATA ANALYSIS** (Need files):
- CSV/Excel questions
- Strategy: list_directory → read_file → code_interpreter → validate → final_answer

**SIMPLE MATH**:
- Calculations
- Strategy: calculator() → validate_answer() → final_answer_tool()

═══════════════════════════════════════════════════════════════
🎓 CRITICAL EXAMPLES:
═══════════════════════════════════════════════════════════════

Example 1: Logic Puzzle
Q: "Coin riddle with 200 coins, 30 face-down..."
✅ CORRECT:
Turn 1: think_through_logic("Take 30 coins, flip all")
Turn 2: validate_answer("30", "coin riddle...")
Turn 3: final_answer_tool("30")

❌ WRONG:
Turn 1: [reasoning text without tool] ← FAILS!

Example 2: Letter Bank Puzzle
Q: "Use letters to spell sentences, which letters need changing?"
✅ CORRECT:
Turn 1: code_interpreter("code to count letters...")
Turn 2: validate_answer("A, B, C", question)
Turn 3: final_answer_tool("A, B, C")

Example 3: Math Problem
Q: "System of equations to solve..."
✅ CORRECT:
Turn 1: code_interpreter("import numpy; solve equations...")
Turn 2: validate_answer("0, 1, 2", question)
Turn 3: final_answer_tool("0, 1, 2")

═══════════════════════════════════════════════════════════════
📚 AVAILABLE TOOLS:
═══════════════════════════════════════════════════════════════

{tool_descriptions}

═══════════════════════════════════════════════════════════════
⚡ EXECUTION RULES:
═══════════════════════════════════════════════════════════════

- If you output text without a tool call, you have FAILED
- If you're unsure, use think_through_logic() to organize thoughts
- ALWAYS call a tool - preferably the right one for the question type
- After EVERY tool result, decide: "Do I have the answer? → validate → submit"
- If stuck after 3 turns: call reflect_on_progress()

REMEMBER: One tool per turn. No reasoning without tools. Exact answer format.
═══════════════════════════════════════════════════════════════
"""

        print("Initializing Groq LLM...")
        try:
            # NOTE: an earlier HuggingFaceEndpoint (zephyr-7b-beta) setup was
            # replaced by ChatGroq; tool_choice="auto" leaves tool selection
            # to the model (retry logic below compensates when it skips one).
            self.llm_with_tools = ChatGroq(
                temperature=0,
                groq_api_key=GROQ_API_KEY,
                model_name="llama-3.3-70b-versatile",
                max_tokens=4096,
                timeout=60
            ).bind_tools(self.tools, tool_choice="auto")
            print("✅ LLM initialized without FORCED tool usage.")

        except Exception as e:
            print(f"❌ Error initializing HuggingFace: {e}")
            raise

        # Agent Node with AGGRESSIVE tool forcing
        def agent_node(state: AgentState):
            # One LLM turn: possibly inject coercion/reflection hints, invoke
            # the model (with retries + fallback parsing), and always return
            # an AIMessage that carries at least one tool call.
            current_turn = state.get('turn', 0) + 1
            print(f"\n{'='*70}")
            print(f"🤖 AGENT TURN {current_turn}/{MAX_TURNS}")
            print('='*70)

            if current_turn > MAX_TURNS:
                return {
                    "messages": [SystemMessage(content="Max turns reached.")],
                    "turn": current_turn
                }

            # Periodic reflection, or after 3 consecutive tool errors.
            consecutive_errors = state.get('consecutive_errors', 0)
            should_reflect = (current_turn > 5 and current_turn % REFLECT_EVERY_N_TURNS == 0) or consecutive_errors >= 3

            messages_to_send = state["messages"].copy()

            # Add tool-forcing message if last turn had no tool call
            if len(messages_to_send) >= 2:
                last_msg = messages_to_send[-1]
                if isinstance(last_msg, AIMessage) and not last_msg.tool_calls:
                    force_msg = SystemMessage(
                        content="⚠️ CRITICAL: You MUST call a tool this turn. NO reasoning text. Pick the most appropriate tool and call it now."
                    )
                    messages_to_send.append(force_msg)
                    print("🚨 Injecting tool-forcing message")

            # Add reflection hint if needed
            if should_reflect:
                hint = SystemMessage(
                    content="⚠️ HINT: Multiple turns without progress. Consider calling reflect_on_progress() or try a different approach."
                )
                messages_to_send.append(hint)
                print("🤔 Injecting reflection hint")

            # Invoke LLM with retries and fallback
            max_retries = 3
            ai_message = None

            for attempt in range(max_retries):
                try:
                    ai_message = self.llm_with_tools.invoke(messages_to_send)

                    # Success only when the model actually called a tool.
                    if ai_message.tool_calls:
                        break

                    print(f"⚠️ LLM returned no tool calls on attempt {attempt+1}")

                except Exception as e:
                    error_str = str(e)
                    print(f"⚠️ LLM attempt {attempt+1}/{max_retries} failed: {error_str[:200]}")

                    # Groq "tool_use_failed": retry with an unbound model and
                    # recover the tool call from raw text via the fallback
                    # parser instead.
                    if "tool_use_failed" in error_str and attempt < max_retries - 1:
                        print("🔧 Trying without strict tool enforcement...")
                        try:
                            simple_llm = ChatGroq(
                                temperature=0,
                                groq_api_key=os.getenv("GROQ_API_KEY"),
                                model_name="llama-3.3-70b-versatile",
                                max_tokens=4096,
                                timeout=60
                            )

                            force_tool_msg = SystemMessage(
                                content="You MUST call a tool. Respond with a tool call, not reasoning text."
                            )
                            ai_message = simple_llm.invoke(messages_to_send + [force_tool_msg])

                            # Try to parse tool calls from the plain content.
                            if ai_message.content and not ai_message.tool_calls:
                                parsed = parse_tool_call_from_string(ai_message.content, self.tools)
                                if parsed:
                                    ai_message.tool_calls = parsed
                                    ai_message.content = ""
                                    print("✓ Fallback parsing succeeded")
                            break
                        except Exception as e2:
                            print(f"⚠️ Fallback also failed: {e2}")

                    if attempt == max_retries - 1:
                        # Last resort: inject a default tool call so the graph
                        # can keep moving rather than dead-end.
                        print("🚨 All attempts failed - forcing think_through_logic")
                        ai_message = AIMessage(
                            content="",
                            tool_calls=[ToolCall(
                                name="think_through_logic",
                                args={"reasoning": "Processing question"},
                                id=str(uuid.uuid4())
                            )]
                        )
                    else:
                        # Exponential backoff between retries (1s, 2s, 4s).
                        time.sleep(2 ** attempt)

            # If still no tool calls after all attempts, force one.
            if not ai_message.tool_calls:
                if isinstance(ai_message.content, str) and ai_message.content.strip():
                    # One final parse of the raw content.
                    parsed = parse_tool_call_from_string(ai_message.content, self.tools)
                    if parsed:
                        ai_message.tool_calls = parsed
                        ai_message.content = ""
                        print("✓ Final parse succeeded")
                    else:
                        # Absolute last resort.
                        print("🚨 EMERGENCY: Forcing think_through_logic")
                        ai_message.tool_calls = [ToolCall(
                            name="think_through_logic",
                            args={"reasoning": "analyzing question"},
                            id=str(uuid.uuid4())
                        )]
                        ai_message.content = ""

            # Track tool usage in the shared state.
            tool_history = state.get('tool_history', [])
            has_plan = state.get('has_plan', False)

            if ai_message.tool_calls:
                tool_name = ai_message.tool_calls[0]['name']
                print(f"🔧 Tool Call: {tool_name}")
                tool_history.append(tool_name)

                if tool_name == "create_plan":
                    has_plan = True
            else:
                print(f"⚠️ No tool call (this shouldn't happen!)")
                print(f"💭 Content: {ai_message.content[:200]}...")

            return {
                "messages": [ai_message],
                "turn": current_turn,
                "has_plan": has_plan,
                "tool_history": tool_history,
                "last_tool_was_thinking": ai_message.tool_calls and ai_message.tool_calls[0]['name'] == 'think_through_logic'
            }

        # Tool Node with Error Tracking (FIXED)
        def tool_node_wrapper(state: AgentState):
            """Executes tools and tracks errors."""
            print(f"🔧 Executing tools...")

            # Fresh ToolNode per invocation avoids any shared executor state.
            tool_executor = ToolNode(self.tools)

            result = tool_executor.invoke(state)

            # Count consecutive error results; reset on first clean result.
            consecutive_errors = state.get('consecutive_errors', 0)

            if result.get('messages'):
                last_msg = result['messages'][-1]
                if isinstance(last_msg, ToolMessage):
                    if "Error" in last_msg.content or "error" in last_msg.content.lower():
                        consecutive_errors += 1
                        print(f"⚠️ Tool error detected (consecutive: {consecutive_errors})")
                    else:
                        consecutive_errors = 0

            result['consecutive_errors'] = consecutive_errors
            return result

        # Build Graph: START → agent ⇄ tools, routed by should_continue.
        print("Building graph...")
        graph_builder = StateGraph(AgentState)

        graph_builder.add_node("agent", agent_node)
        graph_builder.add_node("tools", tool_node_wrapper)

        graph_builder.add_edge(START, "agent")

        graph_builder.add_conditional_edges(
            "agent",
            should_continue,
            {
                "tools": "tools",
                "agent": "agent",
                END: END
            }
        )

        graph_builder.add_edge("tools", "agent")

        self.graph = graph_builder.compile()
        print("✅ Graph compiled successfully.")

    def __call__(self, question: str) -> str:
        """Execute agent on a question.

        Streams the graph, captures the final_answer_tool argument (or falls
        back to the last short tool result), then normalizes the answer text
        before returning it.
        """
        print(f"\n{'='*70}")
        print(f"🎯 NEW QUESTION")
        print(f"{'='*70}")
        print(f"Q: {question[:200]}{'...' if len(question) > 200 else ''}")
        print(f"{'='*70}\n")

        graph_input = {
            "messages": [
                SystemMessage(content=self.system_prompt),
                HumanMessage(content=question)
            ],
            "turn": 0,
            "has_plan": False,
            "consecutive_errors": 0,
            "tool_history": [],
            "last_tool_was_thinking": False
        }

        final_answer = "AGENT FAILED TO PRODUCE ANSWER"
        all_messages = []

        try:
            # Headroom over MAX_TURNS so LangGraph's own limit fires last.
            config = {"recursion_limit": MAX_TURNS + 10}

            for event in self.graph.stream(graph_input, stream_mode="values", config=config):
                if not event.get('messages'):
                    continue

                all_messages = event["messages"]
                last_message = all_messages[-1]

                # Capture the final answer as soon as the model submits one.
                # (break exits only this inner loop; streaming then drains.)
                if isinstance(last_message, AIMessage) and last_message.tool_calls:
                    for tool_call in last_message.tool_calls:
                        if tool_call.get("name") == "final_answer_tool":
                            args = tool_call.get('args', {})
                            if 'answer' in args:
                                final_answer = args['answer']
                                print(f"\n{'='*70}")
                                print(f"✅ FINAL ANSWER: '{final_answer}'")
                                print(f"{'='*70}\n")
                                break

                elif isinstance(last_message, ToolMessage):
                    preview = last_message.content[:200].replace('\n', ' ')
                    print(f"📊 Tool '{last_message.name}' result: {preview}...")

                elif isinstance(last_message, AIMessage) and not last_message.tool_calls:
                    print(f"💭 AI: {last_message.content[:200]}...")

            # Fallback: salvage an answer from the most recent short result of
            # an answer-producing tool. Only the newest ToolMessage is tried.
            if final_answer == "AGENT FAILED TO PRODUCE ANSWER":
                print("⚠️ No final_answer_tool called. Checking tool results...")

                for msg in reversed(all_messages):
                    if isinstance(msg, ToolMessage):
                        if msg.name in ["calculator", "think_through_logic", "code_interpreter"]:
                            content = msg.content.strip()
                            # Look for short, answer-like content.
                            if content and len(content) < 200 and not content.startswith("Error"):
                                # Take the last non-decorative line as the result.
                                lines = content.split('\n')
                                for line in reversed(lines):
                                    if line.strip() and not line.startswith(('✅', '⚠️', 'Next', 'Remember')):
                                        final_answer = line.strip()
                                        print(f"📝 Extracted from {msg.name}: '{final_answer}'")
                                        break
                        break

            # --- Answer normalization ---
            cleaned = str(final_answer).strip()

            # Strip common conversational prefixes (first match wins).
            prefixes = [
                "the answer is:", "here is the answer:", "based on",
                "final answer:", "answer:", "the final answer is:",
                "my answer is:", "according to", "i found that",
                "the result is:", "result:"
            ]
            for prefix in prefixes:
                if cleaned.lower().startswith(prefix.lower()):
                    potential = cleaned[len(prefix):].strip()
                    if potential:
                        cleaned = potential
                        break

            # Remove code fences and wrapping quotes/backticks.
            cleaned = remove_fences_simple(cleaned)

            while cleaned.startswith("`") and cleaned.endswith("`"):
                cleaned = cleaned[1:-1].strip()

            if (cleaned.startswith('"') and cleaned.endswith('"')) or \
               (cleaned.startswith("'") and cleaned.endswith("'")):
                cleaned = cleaned[1:-1].strip()

            # Remove trailing period for short answers (GAIA exact matching).
            if cleaned.endswith('.') and len(cleaned.split()) < 10:
                cleaned = cleaned[:-1]

            print(f"\n{'='*70}")
            print(f"🎉 RETURNING ANSWER")
            print(f"{'='*70}")
            print(f"{cleaned}")
            print(f"{'='*70}\n")

            return cleaned

        except Exception as e:
            print(f"❌ Graph error: {e}")
            print(traceback.format_exc())
            return f"AGENT ERROR: {e}"
|
| 1224 |
+
|
| 1225 |
+
|
| 1226 |
+
# =============================================================================
# GLOBAL AGENT INSTANTIATION
# =============================================================================
# Module-level singleton consumed by the Gradio handler; stays None on any
# initialization failure so the UI can report a clear error.
agent = None

try:
    # Idempotent warm-up of embeddings/splitter before the agent builds.
    initialize_rag_components()

    agent = PlanningReflectionAgent()
    print("✅ Global PlanningReflectionAgent instantiated.")

    # Verify it's callable (the agent is invoked as agent(question)).
    if not callable(agent):
        print("❌ ERROR: Agent not callable!")
        agent = None
    else:
        print("✅ Agent is callable.")

    # Non-fatal: audio questions will fail, everything else still works.
    if asr_pipeline is None:
        print("⚠️ ASR Pipeline not loaded.")

except Exception as e:
    print(f"❌ FATAL: Agent initialization failed: {e}")
    traceback.print_exc()
    agent = None
|
| 1251 |
|
| 1252 |
+
# ====================================================
# --- (Original Template Code - Mock Questions Version) ---
def run_and_submit_all( profile: gr.OAuthProfile | None): # Corrected type hint
    """Run the global agent on a fixed set of MOCK questions (no submission).

    Fetches MOCK questions, runs the agent on them, simulates submission
    prep, and displays the results. DOES NOT SUBMIT to the scoring server.

    Args:
        profile: OAuth profile of the logged-in user, or None for local runs.

    Returns:
        Tuple of (status message string, pandas DataFrame of per-question
        results), or (error string, None) when the global agent is missing.
    """
    space_id = os.getenv("SPACE_ID")
    username = profile.username if profile else "local_test_user"
    print(f"User: {username}{'' if profile else ' (dummy)'}")

    # Check if global agent initialized
    if not agent:
        return "FATAL ERROR: Global agent failed to initialize. Check logs.", None

    print("Using globally instantiated agent.")
    # BUG FIX: scheme was "httpsS://", which produced an invalid URL.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "local_run"
    print(f"Agent code URL: {agent_code}")
    print("--- USING MOCK QUESTIONS ---")

    # --- MOCK QUESTIONS ---
    #
    # vvv PASTE YOUR FULL LIST OF 20 MOCK QUESTIONS HERE vvv
    #
    mock_questions_data = [
        {
            "task_id": "mock_level1_001",
            "question": r"""Here's a fun riddle that I'd like you to try.\n\nAn adventurer exploring an ancient tomb came across a horde of gold coins, all neatly stacked in columns. As he reached to scoop them into his backpack, a mysterious voice filled the room. \"You have fallen for my trap adventurer,\" the voice began, and suddenly the doorway to the chamber was sealed by a heavy rolling disk of stone. The adventurer tried to move the stone disk but was unable to budge the heavy stone. Trapped, he was startled when the voice again spoke. \n\n\"If you solve my riddle, I will reward you with a portion of my riches, but if you are not clever, you will never leave this treasure chamber. Before you are 200 gold coins. I pose a challenge to you, adventurer. Within these stacks of coins, all but 30 are face-up. You must divide the coins into two piles, one is yours, and one is mine. You may place as many coins as you like in either pile. You may flip any coins over, but you may not balance any coins on their edges. For every face-down coin in your pile, you will be rewarded with two gold coins. But be warned, if both piles do not contain the same number of face-down coins, the door will remain sealed for all eternity!\"\n\nThe adventurer smiled, as this would be an easy task. All he had to do was flip over every coin so it was face down, and he would win the entire treasure! As he moved to the columns of coins, however, the light suddenly faded, and he was left in total darkness. The adventurer reached forward and picked up one of the coins, and was shocked when he realized that both sides felt almost the same. Without the light, he was unable to determine which side of the coin was heads and which side was tails. He carefully replaced the coin in its original orientation and tried to think of a way to solve the puzzle. Finally, out of desperation, the adventurer removed 30 coins to create his pile. 
He then carefully flipped over each coin in his pile, so its orientation was inverted from its original state.\n\n\"I've finished,\" he said, and the lights returned. Looking at the two piles, he noticed that the larger pile contained 14 face-down coins.\n\nWhat was the outcome for the adventurer? If he failed the challenge, please respond with \"The adventurer died.\" Otherwise, please provide the number of coins the adventurer won at the conclusion of the riddle. If the adventurer won any coins, provide your response as the number of coins, with no other text."""
        },
        {
            "task_id": "mock_level1_002",
            "question": r"""If you use some of the letters in the given Letter Bank to spell out the sentence "I am a penguin halfway to the moon", which of the remaining unused letters would have to be changed to spell out, "The moon is made of cheese"? Return a comma-separated alphabetized list.\nLetter Bank: {OAMFETIMPECRFSHTDNIWANEPNOFAAIYOOMGUTNAHHLNEHCME}"""
        },
        {
            "task_id": "mock_level1_003",
            "question": r"""A data annotator stayed up too late creating test questions to check that a system was working properly and submitted several questions with mathematical errors. On nights when they created 15 test questions, they made 1 error. On nights when they created fewer than 15 questions, they also corrected 3 errors. On nights they created 20 questions, they made 0 errors. On nights when they created 25 or more, they made 4 errors. Over the course of five nights, the worker produced a total of 6 errors. When asked how many nights they created 15 questions, they gave three possible numbers as responses. What are the three numbers, presented in the format x, y, z in ascending order?"""
        },
        {
            "task_id": "mock_level1_004",
            "question": r"""Please solve the following crossword:\n\n|1|2|3|4|5|\n|6| | | | |\n|7| | | | |\n|8| | | | |\n|X|9| | | |\n\nI have indicated by numbers where the hints start, so you should replace numbers and spaces by the answers.\nAnd X denotes a black square that isn\u2019t to fill.\n\nACROSS\n- 1 Wooden strips on a bed frame\n- 6 _ Minhaj, Peabody-winning comedian for "Patriot Act"\n- 7 Japanese city of 2.6+ million\n- 8 Stopwatch, e.g.\n- 9 Pain in the neck\n\nDOWN\n- 1 Quick drink of whiskey\n- 2 Eye procedure\n- 3 "Same here," in a three-word phrase\n- 4 Already occupied, as a seat\n- 5 Sarcastically critical commentary. Answer by concatenating the characters you choose to fill the crossword, in row-major order."""
        },
        {
            "task_id": "mock_level1_005",
            "question": r"""I wanted to make another batch of cherry melomel. I remember liking the last recipe I tried, but I can't remember it off the top of my head. It was from the Reddit, r/mead. I remember that the user who made it had a really distinct name, I think it was StormBeforeDawn. Could you please look up the recipe for me? I'm not sure if it has been changed, so please make sure that the recipe you review wasn't updated after July 14, 2022. That's the last time I tried the recipe.\n\nWhat I want to know is how many cherries I'm supposed to use. I'm making a 10-gallon batch in two 5-gallon carboys. Please just respond with the integer number of pounds of whole cherries with pits that are supposed to be used for a 10-gallon batch."""
        },
        {
            "task_id": "mock_level1_006",
            "question": r"""Verify each of the following ISBN 13 numbers:\n\n1. 9783518188156\n2. 9788476540746\n3. 9788415091004\n4. 9788256014590\n5. 9782046407331\n\nIf any are invalid, correct them by changing the final digit. Then, return the list, comma separated, in the same order as in the question."""
        },
        {
            "task_id": "mock_level1_007",
            "question": r"""A porterhouse by any other name is centered around a letter. What does Three Dog Night think about the first natural number that starts with that letter? Give the first line from the lyrics that references it."""
        },
        {
            "task_id": "mock_level1_008",
            "question": r"""Bob has genome type Aa, and Linda has genome type Aa. Assuming that a child of theirs also has a child with someone who also has genome type Aa, what is the probability that Bob and Linda's grandchild will have Genome type Aa? Write the answer as a percentage, rounding to the nearest integer if necessary."""
        },
        {
            "task_id": "mock_level1_009",
            "question": r"""An array of candy is set out to choose from including gumballs, candy corn, gumdrops, banana taffy, chocolate chips, and gummy bears. There is one bag of each type of candy. The gumballs come in red, orange, yellow, green, blue, and brown. The candy corn is yellow, white, and orange. The gumdrops are red, green, purple, yellow, and orange. The banana taffy is yellow. The chocolate chips are brown and white. The gummy bears are red, green, yellow, and orange. Five people pass through and each selects one bag. The first selects one with only primary colors. The second selects one with no primary colors. The third selects one with all the primary colors. The fourth selects one that has neither the most nor the least colors of the remaining bags. The fifth selects the one with their favorite color, green. A second bag of the candy the first person chose is added to the remaining bag of candy. Which two candies are in the remaining bag after the addition? Give me them in a comma separated list, in alphabetical order"""
        },
        {
            "task_id": "mock_level1_010",
            "question": r"""In the year 2020, where were koi fish found in the watershed with the id 02040203? Give only the name of the pond, lake, or stream where the fish were found, and not the name of the city or county."""
        },
        {
            "task_id": "mock_level1_011",
            "question": r"""In Sonia Sanchez\u2019s poem \u201cfather\u2019s voice\u201d, what primary colour is evoked by the imagery in the beginning of the tenth stanza? Answer with a capitalized word."""
        },
        {
            "task_id": "mock_level1_012",
            "question": r"""According to Papers with Code, what was the name of the first model to go beyond 70% of accuracy on ImageNet ?"""
        },
        {
            "task_id": "mock_level1_013",
            "question": r"""What is the dimension of the boundary of the tame twindragon rounded to two decimal places?"""
        },
        {
            "task_id": "mock_level1_014",
            "question": r"""In what year was the home village of the subject of British Museum item #Bb,11.118 founded?"""
        },
        {
            "task_id": "mock_level1_015",
            "question": r"""What is the ISSN of the journal that included G. Scott's potato article that mentioned both a fast food restaurant and a Chinese politician in the title in a 2012 issue?"""
        },
        {
            "task_id": "mock_level1_016",
            "question": r"""VNV Nation has a song that shares its title with the nickname of Louis XV. What album was it released with?"""
        },
        {
            "task_id": "mock_level1_017",
            "question": r"""If I combine a Beatle's first name and a type of beer, in what category and year of Nobel Prize do I have a winner? Answer using the format CATEGORY, YEAR."""
        },
        {
            "task_id": "mock_level1_018",
            "question": r"""In the version of NumPy where the numpy.msort function was deprecated, which attribute was added to the numpy.polynomial package's polynomial classes?"""
        },
        {
            "task_id": "mock_level1_019",
            "question": r"""A word meaning dramatic or theatrical forms a species of duck when appended with two letters and then duplicated. What is that word?"""
        },
        {
            "task_id": "mock_level1_020",
            "question": r"""As of August 2023, how many in-text citations on the West African Vodun Wikipedia page reference a source that was cited using Scopus?"""
        }
    ]

    questions_data = mock_questions_data
    print(f"Using {len(questions_data)} mock questions.")

    results_log, answers_payload = [], []
    print(f"Running agent on {len(questions_data)} mock questions...")

    for i, item in enumerate(questions_data):
        task_id, question_text = item.get("task_id"), item.get("question")
        # Skip malformed entries rather than crashing the whole run.
        if not task_id or question_text is None:
            print(f"Skipping mock item {i+1}")
            continue

        print(f"\n--- Running Mock Task {i+1} (ID: {task_id}) ---")
        try:
            # Mock items may carry an attached file; surface it in the prompt.
            file_path = item.get("file_path")
            question_text_with_context = question_text
            if file_path:
                question_text_with_context = f"{question_text}\n\n[Attached File: {file_path}]"
                print(f"Q includes file: {file_path}")

            submitted_answer = agent(question_text_with_context)
            submitted_answer_str = str(submitted_answer) if submitted_answer is not None else ""
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer_str})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer_str})
            print(f"--- Mock Task {task_id} Complete ---")
        except Exception as e:
            # Record the crash as the answer so the table still shows the task.
            print(f"FATAL ERROR on mock task {task_id}: {e}")
            import traceback
            traceback.print_exc()
            submitted_answer = f"AGENT CRASH: {e}"
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})

    if not answers_payload:
        return "Agent produced no answers.", pd.DataFrame(results_log)

    status_update = f"Finished mock run. Processed {len(answers_payload)} answers for '{username}'."
    print(status_update)
    print("--- MOCK RUN - SUBMISSION SKIPPED ---")
    final_status = "--- Mock RUN COMPLETE ---\n" + status_update + "\nSubmission SKIPPED." # Corrected typo
    results_df = pd.DataFrame(results_log)
    results_df['Correct'] = 'N/A (Mock)'
    return final_status, results_df
|
| 1396 |
+
|
| 1397 |
+
|
| 1398 |
+
# --- Build Gradio Interface ---
# `demo` is referenced by the __main__ launcher, so its name must not change.
with gr.Blocks() as demo:
    gr.Markdown("# GAIA Agent - MOCK TEST (Groq Llama3.1)")
    gr.Markdown("""
    **Instructions:** Click 'Run Mock Evaluation'.
    **Notes:** Uses Groq (Llama-3.3-70b Executor). Ensure `GROQ_API_KEY` secret/env var exists. **DOES NOT** fetch official Qs or submit. Check logs for details.
    """)
    gr.LoginButton()

    # Trigger button plus the two output panes it populates.
    trigger_btn = gr.Button("Run Mock Evaluation")
    status_box = gr.Textbox(label="Run Status / Mock Result", lines=5, interactive=False)
    answers_table = gr.DataFrame(label="Mock Qs, Agent Answers, Results", wrap=True)
    trigger_btn.click(fn=run_and_submit_all, outputs=[status_box, answers_table])
|
| 1410 |
+
|
| 1411 |
+
if __name__ == "__main__":
    # Startup diagnostics: environment, paths, then launch the Gradio app.
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # BUG FIX: SPACE_HOST was mistakenly read from SPACE_ID, so the printed
    # runtime URL was wrong on Spaces.
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")
    if space_host_startup:
        print(f"✅ SPACE_HOST: {space_host_startup}\n Runtime URL: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ No SPACE_HOST (local?).")
    if space_id_startup:
        print(f"✅ SPACE_ID: {space_id_startup}\n Repo URL: https://huggingface.co/spaces/{space_id_startup}\n Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ No SPACE_ID (local?).")
    # __file__ is undefined in some embedded interpreters; fall back to CWD.
    try:
        script_dir = os.path.dirname(os.path.realpath(__file__))
    except NameError:
        script_dir = os.getcwd()
    print(f"Script directory: {script_dir}")
    print(f"CWD: {os.getcwd()}")
    try:
        print("Files in CWD:", os.listdir("."))
    except FileNotFoundError:
        print("Warning: CWD listing failed.")
    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface...")
    demo.queue().launch(debug=True, share=False)
|