Update app.py
app.py
CHANGED
@@ -313,8 +313,7 @@ def tavily_search(query: str) -> str:
     try:
         result = tavily.search(query=query, search_depth="advanced")
         return f"Search results for '{query}':\n" + "\n".join([f"- {r['content']}" for r in result['results']])
-    except Exception as e:
-        return f"Error during Tavily search: {e}"
+    except Exception as e: return f"Error during Tavily search: {e}"
 
 @tool
 def read_file(url: str) -> str:
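For reference, a minimal sketch of the response shape the one-line formatting above expects. The field names follow the list comprehension in the hunk; any other fields the Tavily client returns (url, score, etc.) are assumptions rather than something shown here.

# Illustrative only: the formatting line needs result['results'] to be a list
# of dicts that each carry a 'content' key.
result = {
    "results": [
        {"content": "First retrieved snippet."},
        {"content": "Second retrieved snippet."},
    ]
}
formatted = "Search results for 'demo':\n" + "\n".join(f"- {r['content']}" for r in result["results"])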
@@ -325,7 +324,6 @@ def read_file(url: str) -> str:
     response = requests.get(url)
     response.raise_for_status()
     with open(filename, 'wb') as f: f.write(response.content)
-
     if url.lower().endswith('.pdf'):
         try:
             pdf_reader = pypdf.PdfReader(filename)
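The surrounding read_file tool downloads the URL to a local file and, for PDFs, opens it with pypdf. The page-iteration details fall outside this hunk, so the following is only a sketch of how pypdf text extraction typically proceeds, not necessarily the exact code in app.py.

import pypdf

pdf_reader = pypdf.PdfReader("downloaded_file.pdf")
# Concatenate text page by page; extract_text() can return None for image-only pages.
text = "\n".join((page.extract_text() or "") for page in pdf_reader.pages)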
@@ -349,7 +347,7 @@ def python_interpreter(code: str) -> str:
 
 #
 # ================================================================================================
-# ✅ 2. CONFIGURE AND BUILD THE AGENT GRAPH
+# ✅ 2. CONFIGURE AND BUILD THE AGENT GRAPH
 # ================================================================================================
 #
 class AgentState(TypedDict):
@@ -361,43 +359,33 @@ def build_agent_graph():
     tool_map = {tool.name: tool for tool in tools}
 
     repo_id = "CohereForAI/c4ai-command-r-plus"
+
+    # <<<--- CHANGE 1: Explicitly set `task="conversational"` --->>>
+    # This is the crucial fix. We are telling the endpoint to use the correct API pipeline.
     llm = HuggingFaceEndpoint(
-        repo_id=repo_id,
+        repo_id=repo_id,
+        task="conversational", # This is the key fix!
+        max_new_tokens=1024,
+        temperature=0.1,
         huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN")
     )
 
     def call_model(state: AgentState):
-        """Invokes the LLM
-        messages
-        #
-
-
-            if isinstance(msg, SystemMessage):
-                prompt_str += f"<|SYSTEM|>\n{msg.content}\n"
-            elif isinstance(msg, HumanMessage):
-                prompt_str += f"<|USER|>\n{msg.content}\n"
-            elif isinstance(msg, AIMessage):
-                prompt_str += f"<|ASSISTANT|>\n{msg.content}\n"
-            elif isinstance(msg, ToolMessage):
-                prompt_str += f"<|TOOL_RESULT|>\n{msg.content}\n"
-
-        prompt_str += "<|ASSISTANT|>"
-
-        response_text = llm.invoke(prompt_str)
-        return {"messages": [AIMessage(content=response_text)]}
+        """Invokes the LLM using the conversational task."""
+        # <<<--- CHANGE 2: The conversational task takes a list of messages directly --->>>
+        # This is cleaner and the correct way to use this pipeline.
+        response = llm.invoke(state['messages'])
+        return {"messages": [response]}
 
     def should_continue(state: AgentState) -> str:
         """Determines whether to call a tool or end the loop."""
         last_message_content = state['messages'][-1].content.strip()
-
-        # A simple check: if the response looks like a JSON object, it's a tool call.
         if last_message_content.startswith('{') and last_message_content.endswith('}'):
-            # More robust check for JSON tool call
             try:
                 json.loads(last_message_content)
                 return "action"
             except json.JSONDecodeError:
-                return "end"
+                return "end"
         else:
             return "end"
 
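The should_continue router treats any well-formed JSON object as a pending tool call and everything else as a final answer. Restated as a standalone helper for illustration; the sample payload uses a hypothetical "tool"/"parameters" schema, and only the "parameters" key is actually visible in the execution node below.

import json

def route(last_message_content: str) -> str:
    text = last_message_content.strip()
    if text.startswith('{') and text.endswith('}'):
        try:
            json.loads(text)
            return "action"  # looks like a tool call -> run the tool node
        except json.JSONDecodeError:
            return "end"
    return "end"             # plain text -> treat as the final answer

route('{"tool": "tavily_search", "parameters": {"query": "GAIA benchmark"}}')  # -> "action"
route("FINAL ANSWER: 42")                                                      # -> "end"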
@@ -410,16 +398,14 @@ def build_agent_graph():
             parameters = tool_call_data.get("parameters", {})
 
             if tool_name not in tool_map:
-
-                return {"messages": [ToolMessage(content=error_message, tool_call_id="error")]}
+                return {"messages": [ToolMessage(content=f"Error: Tool '{tool_name}' not found.", tool_call_id="error")]}
 
             selected_tool = tool_map[tool_name]
             tool_output = selected_tool.invoke(parameters)
             return {"messages": [ToolMessage(content=str(tool_output), tool_call_id=tool_name)]}
 
         except Exception as e:
-
-            return {"messages": [ToolMessage(content=error_message, tool_call_id="error")]}
+            return {"messages": [ToolMessage(content=f"Error processing tool call: {e}. Content: '{last_message_content}'", tool_call_id="error")]}
 
     workflow = StateGraph(AgentState)
     workflow.add_node("agent", call_model)
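On the happy path, the node above looks the tool up in tool_map and invokes it with the parsed parameters dict, wrapping the output in a ToolMessage. A minimal sketch with a hypothetical tool call; the names and payload are illustrative, not taken from app.py.

from langchain_core.messages import ToolMessage

tool_name = "read_file"                                  # hypothetical example
parameters = {"url": "https://example.com/report.pdf"}   # hypothetical example

selected_tool = tool_map[tool_name]        # tool_map is built at the top of build_agent_graph()
tool_output = selected_tool.invoke(parameters)
result_message = ToolMessage(content=str(tool_output), tool_call_id=tool_name)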
@@ -436,23 +422,16 @@ def build_agent_graph():
 #
 class GaiaAgent:
     def __init__(self):
-        print("GaiaAgent initialized. Building Command R+ agent with
+        print("GaiaAgent initialized. Building Command R+ agent with 'conversational' task...")
         self.agent_app = build_agent_graph()
 
     def __call__(self, question: str) -> str:
         print(f"\n{'='*60}\nAgent received question: {question[:100]}...\n{'='*60}")
-        initial_input = {
-            "messages": [
-                SystemMessage(content=AGENT_SYSTEM_PROMPT),
-                HumanMessage(content=question)
-            ]
-        }
+        initial_input = {"messages": [SystemMessage(content=AGENT_SYSTEM_PROMPT), HumanMessage(content=question)]}
         final_state = None
         for i, step in enumerate(self.agent_app.stream(initial_input, {"recursion_limit": 15})):
             if i == 0: print("--- Starting Agentic Loop ---")
-
-            print(step)
-            final_state = list(step.values())[0] # Get the state from the graph step
+            final_state = list(step.values())[0]
 
         final_answer_message = final_state['messages'][-1]
         final_answer = str(final_answer_message.content).strip()
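The streaming loop above relies on LangGraph's default update stream, where each step is a dict keyed by the node that just ran, so list(step.values())[0] pulls out that node's state update. An illustrative shape with made-up values:

from langchain_core.messages import AIMessage

step = {"agent": {"messages": [AIMessage(content="FINAL ANSWER: Paris")]}}
node_name = list(step.keys())[0]       # e.g. "agent" or "action"
state_update = list(step.values())[0]  # {"messages": [...]}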
@@ -465,26 +444,21 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     if not profile: return "Please Login to Hugging Face with the button.", None
     username = f"{profile.username}"
     print(f"User logged in: {username}")
-
     api_url = DEFAULT_API_URL
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
-
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-
+
     try:
         response = requests.get(questions_url, timeout=15)
         response.raise_for_status()
         questions_data = response.json()
-        print(f"Fetched {len(questions_data)} questions.")
     except Exception as e:
         return f"An unexpected error occurred fetching questions: {e}", None
 
     results_log = []
     answers_payload = []
-
-
-    agent_instance = GaiaAgent() # Instantiate the agent once
+    agent_instance = GaiaAgent()
 
     for item in questions_data:
         task_id = item.get("task_id")
@@ -502,7 +476,6 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=90)
        response.raise_for_status()
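For clarity, the submission body assembled above has the following shape. The top-level keys are exactly those in the code; the per-answer keys (task_id / submitted_answer) are assumed here, since they are not visible in this hunk.

submission_data = {
    "username": "your-hf-username",
    "agent_code": "https://huggingface.co/spaces/<space_id>/tree/main",
    "answers": [
        {"task_id": "example-task-id", "submitted_answer": "42"},  # assumed per-answer schema
    ],
}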
@@ -514,19 +487,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
             f"Message: {result_data.get('message', 'No message received.')}"
         )
-
-        return final_status, results_df
+        return final_status, pd.DataFrame(results_log)
     except Exception as e:
-
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
+        return f"An unexpected error occurred during submission: {e}", pd.DataFrame(results_log)
 
 with gr.Blocks() as demo:
-    gr.Markdown("# GAIA Agent Final Assessment (Open Source: Command R+ -
+    gr.Markdown("# GAIA Agent Final Assessment (Open Source: Command R+ - Corrected Task)")
     gr.Markdown(
         """
-        **Instructor's Note:** This version
-
+        **Instructor's Note:** This version corrects the `HuggingFaceEndpoint` invocation by specifying `task="conversational"`.
+        This is the final key required to make the Command R+ model work correctly with the Hugging Face Inference API for our agent.
         """
     )
     gr.LoginButton()
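If task="conversational" is ever not accepted by HuggingFaceEndpoint, a commonly documented way to get chat-style invocation with langchain_huggingface is to wrap the endpoint in ChatHuggingFace. A hedged sketch of that alternative (not what app.py does):

from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_core.messages import HumanMessage, SystemMessage

base_llm = HuggingFaceEndpoint(
    repo_id="CohereForAI/c4ai-command-r-plus",
    max_new_tokens=1024,
    temperature=0.1,
)
chat_model = ChatHuggingFace(llm=base_llm)  # applies the model's chat template to the message list
reply = chat_model.invoke([
    SystemMessage(content="You are a helpful agent."),
    HumanMessage(content="What is the GAIA benchmark?"),
])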