diepala committed on
Commit
cc197ca
·
1 Parent(s): 41eef3b

First passing run, with 35 points

Browse files
Files changed (5) hide show
  1. .gitignore +1 -0
  2. agent.py +72 -7
  3. agent_lc.py +52 -0
  4. app.py +2 -2
  5. requirements.txt +9 -1
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
agent.py CHANGED
@@ -1,15 +1,80 @@
1
- from smolagents import CodeAgent, HfApiModel, DuckDuckGoSearchTool
2
 
3
- model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
- agent = CodeAgent(
6
- tools=[
7
- DuckDuckGoSearchTool(),
 
 
 
 
 
8
  ],
9
- model=model,
10
  )
11
 
12
 
13
  def run_agent(question: str) -> str:
14
- response = agent.run(question)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  return str(response)
 
1
import os
import sys
import logging
import asyncio

from smolagents import (
    CodeAgent,
    HfApiModel,
    OpenAIServerModel,
    DuckDuckGoSearchTool,
    ToolCallingAgent,
    WikipediaSearchTool,
)

from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.gemini import Gemini
from llama_index.tools.wikipedia import WikipediaToolSpec

# Route all logging to stdout. basicConfig already installs a stdout
# StreamHandler on the root logger; the original additionally called
# addHandler(StreamHandler(sys.stdout)), which installed a second handler
# and printed every log record twice. One handler is enough.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

# NOTE(review): earlier experiments (removed as dead commented-out code):
#   - HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct") — free-tier rate limit
#     was reached very fast;
#   - Gemini via OpenAIServerModel + smolagents CodeAgent/ToolCallingAgent
#     with DuckDuckGo/Wikipedia tools (api_key from GEMINI_API_KEY).
# The llama_index + Gemini setup below replaced them.

# Gemini LLM used by the workflow agent. Credentials are read from the
# environment by the client — presumably GOOGLE_API_KEY; TODO confirm.
model = Gemini(
    model="gemini-2.0-flash",
)

# Workflow agent equipped only with the Wikipedia tool set; the GAIA-style
# answer-formatting instructions are added per question in run_agent().
agent = AgentWorkflow.from_tools_or_functions(
    [
        *WikipediaToolSpec().to_tool_list(),
    ],
    llm=model,
)
60
 
61
 
62
def run_agent(question: str) -> str:
    """Run the workflow agent on *question* and return its answer as text.

    The prompt wraps the question in GAIA-style formatting instructions so
    the model replies with a line starting with "FINAL ANSWER: ".

    Args:
        question: The raw question to answer.

    Returns:
        The agent's full response, stringified (including the
        "FINAL ANSWER: ..." line; extraction is left to the caller).
    """
    prompt = f"""
    You are a helpful assistant that answers requested questions using tools.
    I will give you a question at the end. Report your thoughts, and give the final answer with the following template:
    FINAL ANSWER: [YOUR FINAL ANSWER].
    YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
    If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
    If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
    If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.

    Remember that your answer should start with "FINAL ANSWER: " and be followed by the answer.

    The question is:

    {question}
    """

    # NOTE(review): AgentWorkflow.run() returns an awaitable workflow
    # handler, not the final answer; without awaiting it, str(response)
    # stringified the handler object. asyncio.run drives the workflow to
    # completion here (the module already imports asyncio for this) —
    # confirm against the llama_index version in use.
    response = asyncio.run(agent.run(prompt))
    return str(response)
agent_lc.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_community.tools.tavily_search import TavilySearchResults
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.messages import HumanMessage, AIMessage
import re

# Gemini chat model at temperature 0 for deterministic answers.
model = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)

# Tavily web search, capped at five results per query.
search_tool = TavilySearchResults(max_results=5)

# ReAct-style agent with in-memory conversation checkpointing.
memory = MemorySaver()
agent = create_react_agent(model, [search_tool], checkpointer=memory)
17
+
18
+
19
def run_agent(question: str) -> str:
    """Answer *question* with the ReAct agent and extract the final answer.

    Streams the agent's steps (pretty-printing each one for debugging) and
    returns the text following "FINAL ANSWER: " from the first AI message
    that contains it.

    Args:
        question: The raw question to answer.

    Returns:
        The extracted final answer, or "No final answer found." if no AI
        message matched the expected template.
    """
    import uuid  # local import: keeps the module's import block untouched

    prompt = f"""
    You are a helpful assistant that answers requested questions using tools.
    I will give you a question at the end. Report your thoughts, and give the final answer with the following template:
    FINAL ANSWER: [YOUR FINAL ANSWER].
    YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
    If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
    If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
    If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.

    Remember that your answer should start with "FINAL ANSWER: " and be followed by the answer.

    The question is:

    {question}
    """

    # Each call gets a fresh checkpointer thread. The original reused a
    # fixed thread_id ("tid1"): with MemorySaver, every question was then
    # appended to one shared conversation, so later questions saw the full
    # history of earlier ones and the context grew without bound.
    config = {"configurable": {"thread_id": uuid.uuid4().hex}}

    for step in agent.stream(
        {"messages": [HumanMessage(content=prompt)]},
        config,
        stream_mode="values",
    ):
        last = step["messages"][-1]
        last.pretty_print()

        # Only AI messages can carry the final answer; extract it as soon
        # as the template appears.
        if isinstance(last, AIMessage):
            match = re.search(r"FINAL ANSWER: (.*)", last.content)
            if match:
                return match.group(1).strip()

    return "No final answer found."
app.py CHANGED
@@ -4,7 +4,7 @@ import requests
4
  import pandas as pd
5
  from concurrent.futures import ThreadPoolExecutor
6
 
7
- from agent import run_agent
8
 
9
  # (Keep Constants as is)
10
  # --- Constants ---
@@ -103,7 +103,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
103
  results_log = []
104
  answers_payload = []
105
  print(f"Running agent on {len(questions_data)} questions...")
106
- with ThreadPoolExecutor(max_workers=20) as executor:
107
  futures = {
108
  executor.submit(process_item, agent, item): item for item in questions_data
109
  }
 
4
  import pandas as pd
5
  from concurrent.futures import ThreadPoolExecutor
6
 
7
+ from agent_lc import run_agent
8
 
9
  # (Keep Constants as is)
10
  # --- Constants ---
 
103
  results_log = []
104
  answers_payload = []
105
  print(f"Running agent on {len(questions_data)} questions...")
106
+ with ThreadPoolExecutor(max_workers=1) as executor:
107
  futures = {
108
  executor.submit(process_item, agent, item): item for item in questions_data
109
  }
requirements.txt CHANGED
@@ -1,3 +1,11 @@
1
  gradio
2
  requests
3
- smolagents
 
 
 
 
 
 
 
 
 
1
  gradio
2
  requests
3
+ smolagents[openai]
4
+ wikipedia-api
5
+ beautifulsoup4
6
+ llama-index
7
+ llama-index-tools-wikipedia
8
+ llama-index-llms-gemini
9
+ langchain-google-genai
10
+ langchain-community
11
+ langgraph