Update app.py
Browse files
app.py
CHANGED
|
@@ -3,6 +3,7 @@ import gradio as gr
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
|
|
|
| 6 |
|
| 7 |
# (Keep Constants as is)
|
| 8 |
# --- Constants ---
|
|
@@ -51,6 +52,20 @@ class BasicAgent:
|
|
| 51 |
except Exception as e:
|
| 52 |
return f"[{self.agent_name} Error: {e}]"
|
| 53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
|
| 55 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
| 56 |
"""
|
|
@@ -106,17 +121,18 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 106 |
results_log = []
|
| 107 |
answers_payload = []
|
| 108 |
print(f"Running agent on {len(questions_data)} questions...")
|
| 109 |
-
for idx, item in enumerate(questions_data):
|
| 110 |
task_id = item.get("task_id")
|
| 111 |
question_text = item.get("question")
|
| 112 |
print(f"===== [Celum is answering No. {idx+1}/{len(questions_data)} ] =====")
|
| 113 |
try:
|
| 114 |
-
submitted_answer = agent
|
| 115 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
| 116 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
| 117 |
except Exception as e:
|
| 118 |
print(f"[Celum Error at Q{idx+1}]: {e}")
|
| 119 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
|
|
|
|
| 120 |
|
| 121 |
if not answers_payload:
|
| 122 |
print("Agent did not produce any answers to submit.")
|
|
@@ -135,7 +151,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 135 |
result_data = response.json()
|
| 136 |
final_status = (
|
| 137 |
f"Submission Successful!\n"
|
| 138 |
-
f"AI:
|
| 139 |
f"Overall Score: {result_data.get('score', 'N/A')}% "
|
| 140 |
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
|
| 141 |
f"Message: {result_data.get('message', 'No message received.')}"
|
|
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
+
import time
|
| 7 |
|
| 8 |
# (Keep Constants as is)
|
| 9 |
# --- Constants ---
|
|
|
|
| 52 |
except Exception as e:
|
| 53 |
return f"[{self.agent_name} Error: {e}]"
|
| 54 |
|
| 55 |
+
def safe_run_agent(agent, question, idx, total, max_retries=3):
    """Invoke *agent* on a single question, retrying only on rate-limit errors.

    Args:
        agent: callable invoked as ``agent(question, idx, total)``.
        question: the question text passed through to the agent.
        idx: zero-based index of this question (forwarded to the agent).
        total: total number of questions (forwarded to the agent).
        max_retries: how many rate-limited attempts to make before giving up.

    Returns:
        The agent's answer, or a ``"[Agent Error: ...]"`` string when the
        agent raises — either immediately (non-rate-limit errors) or after
        exhausting the retry budget (rate-limit errors).
    """
    for attempt in range(max_retries):
        try:
            return agent(question, idx, total)
        except Exception as err:
            text = str(err)
            # Anything that doesn't look like a rate limit fails fast.
            if "RateLimitError" not in text and "rate limit" not in text.lower():
                return f"[Agent Error: {err}]"
            # Linear backoff: 30s, 40s, 50s, ... per successive attempt.
            wait_time = 30 + attempt * 10
            print(f"Rate limit hit, sleeping {wait_time}s before retry... (try {attempt+1}/{max_retries})")
            time.sleep(wait_time)
    return "[Agent Error: Rate limit retries exceeded]"
|
| 69 |
|
| 70 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
| 71 |
"""
|
|
|
|
| 121 |
results_log = []
|
| 122 |
answers_payload = []
|
| 123 |
print(f"Running agent on {len(questions_data)} questions...")
|
| 124 |
+
for idx, item in enumerate(questions_data[:3]):
|
| 125 |
task_id = item.get("task_id")
|
| 126 |
question_text = item.get("question")
|
| 127 |
print(f"===== [Celum is answering No. {idx+1}/{len(questions_data)} ] =====")
|
| 128 |
try:
|
| 129 |
+
submitted_answer = safe_run_agent(agent, question_text, idx, len(questions_data))
|
| 130 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
| 131 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
| 132 |
except Exception as e:
|
| 133 |
print(f"[Celum Error at Q{idx+1}]: {e}")
|
| 134 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
|
| 135 |
+
time.sleep(7)
|
| 136 |
|
| 137 |
if not answers_payload:
|
| 138 |
print("Agent did not produce any answers to submit.")
|
|
|
|
| 151 |
result_data = response.json()
|
| 152 |
final_status = (
|
| 153 |
f"Submission Successful!\n"
|
| 154 |
+
f"AI: Celum\n"
|
| 155 |
f"Overall Score: {result_data.get('score', 'N/A')}% "
|
| 156 |
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
|
| 157 |
f"Message: {result_data.get('message', 'No message received.')}"
|