beitemian commited on
Commit
2df4234
·
1 Parent(s): 81917a3
Files changed (4) hide show
  1. agent.py +103 -0
  2. app.py +25 -3
  3. requirements.txt +4 -2
  4. tools.py +89 -0
agent.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from llama_index.core.workflow import (
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)
from llama_index.llms.deepseek import DeepSeek
from llama_index.llms.openai import OpenAI
import sys
from pathlib import Path

# Ensure this file's own directory is on sys.path so the sibling-module
# import `from tools import ...` below resolves regardless of the current
# working directory the app is launched from.
project_root = str(Path(__file__).parent)
if project_root not in sys.path:
    sys.path.insert(0, project_root)

import os
from tools import web_search, math_tools
#from memory import set_memory
from llama_index.core.agent.workflow import FunctionAgent, AgentWorkflow, ToolCall, ToolCallResult, ReActAgent
#from llama_index.core.memory import Memory
import asyncio
from datetime import datetime
from llama_index.core.chat_engine.types import ChatMessage
from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager
from llama_index.utils.workflow import (
    draw_most_recent_execution,
)
# NOTE(review): several names imported above (Event, StartEvent, StopEvent,
# Workflow, step, asyncio, datetime, ChatMessage, Settings, CallbackManager,
# draw_most_recent_execution, ToolCall, ToolCallResult, ReActAgent) are not
# referenced elsewhere in this file — confirm whether they are needed.
28
# OpenAI GPT-4o-mini client.  Low temperature for more deterministic output;
# retries/timeout guard against transient API failures.
# NOTE(review): this client is not referenced elsewhere in this file — the
# agent below is built on the DeepSeek client; confirm it is still needed.
llm_gpt4o_mini = OpenAI(
    model="gpt-4o-mini",
    api_key=os.getenv("OPENAI_API_KEY"),
    temperature=0.3,
    max_retries=5,
    timeout=100
)

# DeepSeek client used by the researcher agent below.
# NOTE(review): the variable name says "r1" but the model id is
# "deepseek-chat" (the chat model), not the R1 reasoner — confirm which
# model is actually intended.
llm_deepseek_r1 = DeepSeek(
    model="deepseek-chat",
    api_key=os.getenv("DEEPSEEK_API_KEY"),
    temperature=0.2,
    max_retries=5,
    timeout=100
)
#test the llm
#answerr = llm_deepseek_r1.complete("What is the capital of France?")
#print(answerr)
46
# "Viktor": a single function-calling research agent.  It plans, queries the
# web via the `web_search` tool, and can do basic arithmetic through the
# bound MathTools methods.  Runs on the DeepSeek client configured above.
# NOTE(review): the system prompt is an f-string but contains no
# {placeholders}; the `f` prefix is unnecessary (left unchanged here).
# NOTE(review): the workflow section of the prompt asks for comprehensive,
# structured answers while the Response Guidelines demand a bare, direct
# answer — confirm which output style is intended.
researcher_agent = FunctionAgent(
    llm=llm_deepseek_r1,
    name = "Viktor",
    description="An agent that researches local documentation and the web, and creates succint reports about a given topic based on the information it found",
    system_prompt=f"""Viktor is a multi-step reasoning assistant designed to provide comprehensive answers to user questions through systematic analysis and research. Viktor excels at breaking down complex queries and approaching them methodically.

Available tools for Viktor:
1. 'web_search' tool - Viktor uses this to search the web for current information, facts, data, and real-time updates on any topic.
2. Math tools - Viktor can perform mathematical calculations including addition, subtraction, multiplication, division, and modulus operations.

Viktor's Multi-Step Reasoning Workflow:
1. **Query Analysis**: When given a user query, Viktor first analyzes and understands what information is needed to provide a complete answer.

2. **Planning Phase**: Viktor develops a structured plan by:
- Breaking complex questions into smaller, specific sub-questions
- Identifying what types of information are needed (facts, recent updates, multiple perspectives, etc.)
- Determining the logical sequence for gathering information

3. **Information Gathering**: Viktor systematically searches for information by:
- Formulating targeted search queries for each aspect of the question
- Conducting multiple searches to gather comprehensive information
- Seeking current and relevant data to ensure accuracy

4. **Analysis and Synthesis**: Viktor processes the gathered information by:
- Analyzing different sources and perspectives
- Identifying patterns, connections, and key insights
- Cross-referencing information for consistency and accuracy

5. **Structured Response**: Viktor provides a well-organized answer that:
- Directly addresses the user's question
- Presents information in a logical, easy-to-follow structure
- Uses clear headings and formatting when appropriate
- Includes relevant details while maintaining focus

Reasoning Approach:
- Viktor approaches each question methodically, thinking through multiple angles
- For complex topics, Viktor gathers information from different perspectives
- Viktor considers both current information and broader context
- Viktor reasons through cause-and-effect relationships and implications
- Viktor provides balanced, well-researched responses

Response Guidelines:
- STRICT!!! Only answer the question directly, concisely. For example, if the question is "What is the capital of France?", the answer should be "Paris".
- No elaboration or explanation, only straight answer to the question.
- Plain text response formatting.
""",
    # Tools exposed to the LLM: one async web-search function plus the five
    # arithmetic methods of the shared math_tools instance.
    tools = [
        web_search,
        math_tools.add,
        math_tools.subtract,
        math_tools.multiply,
        math_tools.divide,
        math_tools.modulus
    ]
)
101
+
102
# Single-agent workflow: Viktor is both the only registered agent and the
# root agent that receives every user message.
workflow_agent = AgentWorkflow(agents = [researcher_agent], root_agent=researcher_agent.name, verbose=True)
103
+
app.py CHANGED
@@ -3,7 +3,13 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
 
 
 
 
 
6
 
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
@@ -12,12 +18,28 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
13
  class BasicAgent:
14
  def __init__(self):
 
15
  print("BasicAgent initialized.")
16
  def __call__(self, question: str) -> str:
17
  print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
from llama_index.core.llms import ChatMessage
from dotenv import load_dotenv
from Final_Assignment_Agent.agent import workflow_agent
# Load environment variables from .env file
load_dotenv()

# NOTE(review): `os` is used here but no `import os` is visible in this
# hunk — confirm it is imported at the top of app.py.
# NOTE(review): HF_TOKEN is read but not referenced in the visible code —
# confirm it is used further down the file.
HF_TOKEN = os.getenv("HF_TOKEN")
13
  # (Keep Constants as is)
14
  # --- Constants ---
15
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
18
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
19
class BasicAgent:
    """Synchronous facade over the async llama-index AgentWorkflow.

    The grading harness treats an instance as a plain callable, so
    ``__call__`` bridges the synchronous call into the async workflow.
    """

    def __init__(self):
        # Reuse the module-level workflow so agent construction happens
        # once at import time rather than per question.
        self.agent = workflow_agent
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the workflow on `question` and return the answer as a string.

        Never raises: any failure is reported as the returned string so one
        bad question cannot abort a whole submission run.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        try:
            import asyncio

            async def _ask():
                # The workflow must be started from inside a running event
                # loop, hence the wrapper coroutine handed to asyncio.run.
                return await self.agent.run(user_msg=question)

            # asyncio.run creates a fresh loop and tears it down afterwards.
            answer = asyncio.run(_ask())
            print(f"Agent returning answer: {answer}")
            return str(answer)
        except Exception as e:
            # Best effort: surface the error text instead of propagating.
            error_msg = f"Error running agent: {str(e)}"
            print(error_msg)
            return error_msg
43
 
44
  def run_and_submit_all( profile: gr.OAuthProfile | None):
45
  """
requirements.txt CHANGED
@@ -1,2 +1,4 @@
1
- gradio
2
- requests
 
 
 
1
+ gradio[oauth]
2
+ requests
3
+ pandas
4
+ python-dotenv
tools.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from linkup import LinkupClient
from dotenv import load_dotenv
import os

# Pull LINKUP_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Module-level client shared by web_search; created once at import time.
# NOTE(review): if LINKUP_API_KEY is unset this passes api_key=None and the
# failure only surfaces at request time — confirm that is acceptable.
linkup_client = LinkupClient(api_key=os.getenv("LINKUP_API_KEY"))
6
+
7
async def web_search(query: str) -> dict:
    """Search the web for information about a given topic, or access real-time data through the web_search tool about anything on the internet. Returns a dictionary containing the answer and a list of sources."""
    import asyncio

    # Fix: linkup_client.search is a blocking HTTP call, but this function
    # is async and runs on the agent's event loop.  Running the call in a
    # worker thread keeps the loop responsive while the request is in
    # flight; the returned value is identical to a direct call.
    response = await asyncio.to_thread(
        linkup_client.search,
        query=query,
        depth='standard',
        output_type="sourcedAnswer",
    )
    answer = response.answer
    # Format sources as a list of markdown links
    sources = [f"- [{source.name}]({source.url})" for source in response.sources]
    return {"answer": answer, "sources": sources}
18
+
19
# MathTools
class MathTools:
    """A collection of basic arithmetic operations.

    None of the operations touch instance state, so they are exposed as
    static methods (the original methods took an unused ``self``).  They
    remain callable through the module-level ``math_tools`` instance, so
    existing callers such as ``math_tools.add`` are unaffected.
    """

    @staticmethod
    def multiply(a: float, b: float) -> float:
        """Multiply two numbers.

        Args:
            a: first number
            b: second number
        Returns:
            The product of a and b
        """
        return a * b

    @staticmethod
    def add(a: float, b: float) -> float:
        """Add two numbers.

        Args:
            a: first number
            b: second number
        Returns:
            The sum of a and b
        """
        return a + b

    @staticmethod
    def subtract(a: float, b: float) -> float:
        """Subtract two numbers.

        Args:
            a: first number
            b: second number
        Returns:
            The difference of a and b
        """
        return a - b

    @staticmethod
    def divide(a: float, b: float) -> float:
        """Divide two numbers.

        Args:
            a: dividend
            b: divisor
        Returns:
            The quotient of a divided by b
        Raises:
            ValueError: If b is zero
        """
        if b == 0:
            raise ValueError("Cannot divide by zero.")
        return a / b

    @staticmethod
    def modulus(a: int, b: int) -> int:
        """Get the modulus of two numbers.

        Args:
            a: dividend
            b: divisor
        Returns:
            The remainder of a divided by b
        Raises:
            ValueError: If b is zero
        """
        if b == 0:
            raise ValueError("Cannot perform modulus with zero.")
        return a % b


# Shared instance for easy importing (used by agent.py as tool callables).
math_tools = MathTools()
87
+
88
+
89
+