kamorou committed on
Commit
815ebf4
·
verified ·
1 Parent(s): 075f1f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -29
app.py CHANGED
@@ -248,7 +248,6 @@
248
 
249
  #
250
 
251
- ##################
252
  import os
253
  import io
254
  import requests
@@ -261,8 +260,8 @@ import operator
261
  # --- LangChain & LangGraph Imports ---
262
  from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage, AIMessage, SystemMessage
263
  from langchain_core.tools import tool
264
- # <<<--- CHANGE 1: Import Google Gemini instead of OpenAI --->>>
265
- from langchain_google_genai import ChatGoogleGenerativeAI
266
  from langgraph.graph import StateGraph, END
267
  from langgraph.prebuilt import ToolNode
268
  from tavily import TavilyClient
@@ -273,7 +272,7 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
273
  FILES_DIR = "./files"
274
  os.makedirs(FILES_DIR, exist_ok=True)
275
 
276
- # --- System Prompt (Unchanged, it's strong) ---
277
  AGENT_SYSTEM_PROMPT = """You are a world-class AI agent, specialized in solving complex problems from the GAIA benchmark.
278
  Your task is to analyze the user's question, think step-by-step, and use the provided tools to find the correct answer.
279
  CRITICAL INSTRUCTIONS:
@@ -292,7 +291,7 @@ Think, use your tools, and then provide ONLY the final, precise answer.
292
 
293
  #
294
  # ================================================================================================
295
- # ✅ 1. AGENT'S TOOLS (Unchanged)
296
  # ================================================================================================
297
  #
298
  tavily = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
@@ -314,7 +313,6 @@ def read_file(url: str) -> str:
314
  """
315
  Downloads a file from a given URL, saves it locally, and returns its content.
316
  It can handle both plain text files and PDF files.
317
- Use this tool when a question provides a URL to a file that needs to be read.
318
  """
319
  print(f"--- Calling Read File Tool with URL: {url} ---")
320
  try:
@@ -363,7 +361,7 @@ def python_interpreter(code: str) -> str:
363
 
364
  #
365
  # ================================================================================================
366
- # ✅ 2. CONFIGURE AND BUILD THE AGENT GRAPH (NOW WITH GEMINI 1.5 PRO)
367
  # ================================================================================================
368
  #
369
  class AgentState(TypedDict):
@@ -373,24 +371,37 @@ def build_agent_graph():
373
  """Builds the LangGraph agent."""
374
  tools = [tavily_search, read_file, python_interpreter]
375
 
376
- # <<<--- CHANGE 2: Instantiate the Gemini Model --->>>
377
- # It will use the GOOGLE_API_KEY from your secrets.
378
- # Note: `convert_system_message_to_human=True` is a useful flag for compatibility,
379
- # ensuring our powerful system prompt is always understood correctly by the Gemini model.
380
- llm = ChatGoogleGenerativeAI(
381
- model="gemini-1.5-pro-latest",
382
- temperature=0,
383
- convert_system_message_to_human=True
 
384
  )
385
 
386
  llm_with_tools = llm.bind_tools(tools)
387
 
388
  def call_model(state: AgentState) -> dict:
 
389
  messages = state['messages']
390
- response = llm_with_tools.invoke(messages)
 
 
 
 
 
 
 
 
 
 
391
  return {"messages": [response]}
392
 
393
  def should_continue(state: AgentState) -> str:
 
394
  return "action" if state['messages'][-1].tool_calls else "end"
395
 
396
  tool_node = ToolNode(tools)
@@ -404,13 +415,13 @@ def build_agent_graph():
404
 
405
  #
406
  # ================================================================================================
407
- # ✅ 3. AGENT CLASS AND EVALUATION LOGIC (Unchanged)
408
  # ================================================================================================
409
  #
410
  class GaiaAgent:
411
  def __init__(self):
412
- # <<<--- CHANGE 3: Update the print statement for clarity --->>>
413
- print("GaiaAgent initialized. Building fresh Gemini 1.5 Pro graph...")
414
  self.agent_app = build_agent_graph()
415
 
416
  def __call__(self, question: str) -> str:
@@ -422,8 +433,7 @@ class GaiaAgent:
422
  ]
423
  }
424
  final_state = None
425
- # We increase the recursion limit slightly, as some models might take an extra step for reasoning.
426
- for i, step in enumerate(self.agent_app.stream(initial_input, {"recursion_limit": 20})):
427
  if i == 0: print("--- Starting Agentic Loop ---")
428
  final_state = step
429
 
@@ -432,7 +442,6 @@ class GaiaAgent:
432
  print(f"\n--- Agent finished. Final Answer: {final_answer} ---\n")
433
  return final_answer
434
 
435
- # (The rest of the file remains exactly the same)
436
  def run_and_submit_all( profile: gr.OAuthProfile | None):
437
  space_id = os.getenv("SPACE_ID")
438
  if not profile: return "Please Login to Hugging Face with the button.", None
@@ -483,7 +492,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
483
  f"Submission Successful!\n"
484
  f"User: {result_data.get('username')}\n"
485
  f"Overall Score: {result_data.get('score', 'N/A')}% "
486
- f"({result_data.get('correct_count', '?')}/{result_a.get('total_attempted', '?')} correct)\n"
487
  f"Message: {result_data.get('message', 'No message received.')}"
488
  )
489
  results_df = pd.DataFrame(results_log)
@@ -495,14 +504,15 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
495
 
496
  # --- Gradio Interface ---
497
  with gr.Blocks() as demo:
498
- # <<<--- CHANGE 4: Update the title in the UI --->>>
499
- gr.Markdown("# GAIA Agent Final Assessment (V6 - Gemini 1.5 Pro)")
500
  gr.Markdown(
501
  """
502
- **Instructor's Note:** This version has been upgraded to use Google's `gemini-1.5-pro-latest` model.
503
- 1. Ensure `GOOGLE_API_KEY` and `TAVILY_API_KEY` are set in your Space secrets.
504
- 2. Ensure `requirements.txt` is updated with `langchain-google-genai`.
505
- 3. Let's see how Gemini performs! Good luck.
 
506
  """
507
  )
508
  gr.LoginButton()
 
248
 
249
  #
250
 
 
251
  import os
252
  import io
253
  import requests
 
260
  # --- LangChain & LangGraph Imports ---
261
  from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage, AIMessage, SystemMessage
262
  from langchain_core.tools import tool
263
+ # <<<--- CHANGE: Import the HuggingFaceEndpoint for open-source models --->>>
264
+ from langchain_huggingface import HuggingFaceEndpoint
265
  from langgraph.graph import StateGraph, END
266
  from langgraph.prebuilt import ToolNode
267
  from tavily import TavilyClient
 
272
  FILES_DIR = "./files"
273
  os.makedirs(FILES_DIR, exist_ok=True)
274
 
275
+ # --- System Prompt (Unchanged, it's strong and model-agnostic) ---
276
  AGENT_SYSTEM_PROMPT = """You are a world-class AI agent, specialized in solving complex problems from the GAIA benchmark.
277
  Your task is to analyze the user's question, think step-by-step, and use the provided tools to find the correct answer.
278
  CRITICAL INSTRUCTIONS:
 
291
 
292
  #
293
  # ================================================================================================
294
+ # ✅ 1. DEFINE THE AGENT'S TOOLS (Unchanged)
295
  # ================================================================================================
296
  #
297
  tavily = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
 
313
  """
314
  Downloads a file from a given URL, saves it locally, and returns its content.
315
  It can handle both plain text files and PDF files.
 
316
  """
317
  print(f"--- Calling Read File Tool with URL: {url} ---")
318
  try:
 
361
 
362
  #
363
  # ================================================================================================
364
+ # ✅ 2. CONFIGURE AND BUILD THE AGENT GRAPH (WITH HUGGING FACE)
365
  # ================================================================================================
366
  #
367
  class AgentState(TypedDict):
 
371
  """Builds the LangGraph agent."""
372
  tools = [tavily_search, read_file, python_interpreter]
373
 
374
+ # <<<--- CHANGE: Instantiate the Hugging Face Model Endpoint --->>>
375
+ # This uses the recommended Command R+ model for its excellent tool-use capabilities.
376
+ # It will automatically use the HUGGINGFACEHUB_API_TOKEN secret.
377
+ repo_id = "CohereForAI/c4ai-command-r-plus"
378
+ llm = HuggingFaceEndpoint(
379
+ repo_id=repo_id,
380
+ max_new_tokens=1024,
381
+ temperature=0, # Keep temperature low for fact-based tasks
382
+ huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN")
383
  )
384
 
385
  llm_with_tools = llm.bind_tools(tools)
386
 
387
  def call_model(state: AgentState) -> dict:
388
+ """Helper function to prepare messages and call the model."""
389
  messages = state['messages']
390
+ # The HuggingFaceEndpoint doesn't support a separate SystemMessage.
391
+ # We'll format the system prompt and the latest human message together.
392
+ if isinstance(messages[0], SystemMessage):
393
+ # Start with the system message content
394
+ formatted_messages = [HumanMessage(content=messages[0].content + "\n\nHere is the user's question:\n" + messages[-1].content)]
395
+ # Add any previous tool outputs
396
+ formatted_messages.extend(messages[1:-1])
397
+ else:
398
+ formatted_messages = messages
399
+
400
+ response = llm_with_tools.invoke(formatted_messages)
401
  return {"messages": [response]}
402
 
403
  def should_continue(state: AgentState) -> str:
404
+ """Determines whether to continue the loop or end."""
405
  return "action" if state['messages'][-1].tool_calls else "end"
406
 
407
  tool_node = ToolNode(tools)
 
415
 
416
  #
417
  # ================================================================================================
418
+ # ✅ 3. AGENT CLASS AND EVALUATION LOGIC
419
  # ================================================================================================
420
  #
421
  class GaiaAgent:
422
  def __init__(self):
423
+ # <<<--- CHANGE: Update print statement for new model --->>>
424
+ print("GaiaAgent initialized. Building fresh Command R+ agent graph...")
425
  self.agent_app = build_agent_graph()
426
 
427
  def __call__(self, question: str) -> str:
 
433
  ]
434
  }
435
  final_state = None
436
+ for i, step in enumerate(self.agent_app.stream(initial_input, {"recursion_limit": 15})):
 
437
  if i == 0: print("--- Starting Agentic Loop ---")
438
  final_state = step
439
 
 
442
  print(f"\n--- Agent finished. Final Answer: {final_answer} ---\n")
443
  return final_answer
444
 
 
445
  def run_and_submit_all( profile: gr.OAuthProfile | None):
446
  space_id = os.getenv("SPACE_ID")
447
  if not profile: return "Please Login to Hugging Face with the button.", None
 
492
  f"Submission Successful!\n"
493
  f"User: {result_data.get('username')}\n"
494
  f"Overall Score: {result_data.get('score', 'N/A')}% "
495
+ f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
496
  f"Message: {result_data.get('message', 'No message received.')}"
497
  )
498
  results_df = pd.DataFrame(results_log)
 
504
 
505
  # --- Gradio Interface ---
506
  with gr.Blocks() as demo:
507
+ # <<<--- CHANGE: Update UI titles and descriptions for the new model --->>>
508
+ gr.Markdown("# GAIA Agent Final Assessment (Open Source: Command R+)")
509
  gr.Markdown(
510
  """
511
+ **Instructor's Note:** This version runs a top-tier open-source model from the Hugging Face Hub: **`CohereForAI/c4ai-command-r-plus`**.
512
+ This model is state-of-the-art for agentic tool use.
513
+ 1. Ensure you have a **`HUGGINGFACEHUB_API_TOKEN`** and a **`TAVILY_API_KEY`** set in your Space secrets.
514
+ 2. Ensure your `requirements.txt` includes `langchain-huggingface`.
515
+ 3. Good luck! Let's see how this powerful open model performs.
516
  """
517
  )
518
  gr.LoginButton()