Spaces:
Sleeping
Sleeping
Upload 10 files
Browse files- app.py +47 -32
- gitattributes +35 -0
- prompts/system_prompt.txt +9 -0
- requirements.txt +5 -0
- results/.gitkeep +0 -0
- src/__pycache__/agent.cpython-310.pyc +0 -0
- src/__pycache__/tools.cpython-310.pyc +0 -0
- src/agent.py +145 -0
- src/tools.py +62 -0
app.py
CHANGED
|
@@ -1,34 +1,27 @@
|
|
| 1 |
-
import os
|
| 2 |
import gradio as gr
|
|
|
|
| 3 |
import requests
|
| 4 |
-
|
| 5 |
import pandas as pd
|
| 6 |
|
|
|
|
|
|
|
| 7 |
# (Keep Constants as is)
|
| 8 |
# --- Constants ---
|
| 9 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
class BasicAgent:
    """Placeholder agent that answers every question with a fixed string.

    Used as a stand-in implementation so the evaluation pipeline can be
    exercised end-to-end before a real agent is wired in.
    """

    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Return the canned answer, logging a preview of the question."""
        preview = question[:50]
        print(f"Agent received question (first 50 chars): {preview}...")
        answer = "This is a default answer."
        print(f"Agent returning fixed answer: {answer}")
        return answer
|
| 21 |
-
|
| 22 |
-
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
| 23 |
"""
|
| 24 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
| 25 |
and displays the results.
|
| 26 |
"""
|
| 27 |
# --- Determine HF Space Runtime URL and Repo URL ---
|
| 28 |
-
|
|
|
|
| 29 |
|
| 30 |
if profile:
|
| 31 |
-
username= f"{profile.username}"
|
| 32 |
print(f"User logged in: {username}")
|
| 33 |
else:
|
| 34 |
print("User not logged in.")
|
|
@@ -40,11 +33,12 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 40 |
|
| 41 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
| 42 |
try:
|
| 43 |
-
agent =
|
| 44 |
except Exception as e:
|
| 45 |
print(f"Error instantiating agent: {e}")
|
| 46 |
return f"Error initializing agent: {e}", None
|
| 47 |
-
# In the case of an app running as a hugging Face space, this link points
|
|
|
|
| 48 |
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
|
| 49 |
print(agent_code)
|
| 50 |
|
|
@@ -55,16 +49,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 55 |
response.raise_for_status()
|
| 56 |
questions_data = response.json()
|
| 57 |
if not questions_data:
|
| 58 |
-
|
| 59 |
-
|
| 60 |
print(f"Fetched {len(questions_data)} questions.")
|
| 61 |
except requests.exceptions.RequestException as e:
|
| 62 |
print(f"Error fetching questions: {e}")
|
| 63 |
return f"Error fetching questions: {e}", None
|
| 64 |
except requests.exceptions.JSONDecodeError as e:
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
except Exception as e:
|
| 69 |
print(f"An unexpected error occurred fetching questions: {e}")
|
| 70 |
return f"An unexpected error occurred fetching questions: {e}", None
|
|
@@ -81,18 +75,38 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 81 |
continue
|
| 82 |
try:
|
| 83 |
submitted_answer = agent(question_text)
|
| 84 |
-
answers_payload.append(
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
except Exception as e:
|
| 87 |
-
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
|
| 90 |
if not answers_payload:
|
| 91 |
print("Agent did not produce any answers to submit.")
|
| 92 |
-
return
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
-
# 4. Prepare Submission
|
| 95 |
-
submission_data = {
|
|
|
|
|
|
|
|
|
|
| 96 |
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
|
| 97 |
print(status_update)
|
| 98 |
|
|
@@ -175,7 +189,8 @@ if __name__ == "__main__":
|
|
| 175 |
print("\n" + "-"*30 + " App Starting " + "-"*30)
|
| 176 |
# Check for SPACE_HOST and SPACE_ID at startup for information
|
| 177 |
space_host_startup = os.getenv("SPACE_HOST")
|
| 178 |
-
|
|
|
|
| 179 |
|
| 180 |
if space_host_startup:
|
| 181 |
print(f"✅ SPACE_HOST found: {space_host_startup}")
|
|
@@ -193,4 +208,4 @@ if __name__ == "__main__":
|
|
| 193 |
print("-"*(60 + len(" App Starting ")) + "\n")
|
| 194 |
|
| 195 |
print("Launching Gradio Interface for Basic Agent Evaluation...")
|
| 196 |
-
demo.launch(debug=True, share=False)
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
import requests
|
| 4 |
+
|
| 5 |
import pandas as pd
|
| 6 |
|
| 7 |
+
from src.agent import Agent
|
| 8 |
+
|
| 9 |
# (Keep Constants as is)
|
| 10 |
# --- Constants ---
|
| 11 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 12 |
|
| 13 |
+
|
| 14 |
+
def run_and_submit_all(profile: gr.OAuthProfile | None):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
"""
|
| 16 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
| 17 |
and displays the results.
|
| 18 |
"""
|
| 19 |
# --- Determine HF Space Runtime URL and Repo URL ---
|
| 20 |
+
# Get the SPACE_ID for sending link to the code
|
| 21 |
+
space_id = os.getenv("SPACE_ID")
|
| 22 |
|
| 23 |
if profile:
|
| 24 |
+
username = f"{profile.username}"
|
| 25 |
print(f"User logged in: {username}")
|
| 26 |
else:
|
| 27 |
print("User not logged in.")
|
|
|
|
| 33 |
|
| 34 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
| 35 |
try:
|
| 36 |
+
agent = Agent()
|
| 37 |
except Exception as e:
|
| 38 |
print(f"Error instantiating agent: {e}")
|
| 39 |
return f"Error initializing agent: {e}", None
|
| 40 |
+
# In the case of an app running as a hugging Face space, this link points
|
| 41 |
+
# toward your codebase (useful for others so please keep it public)
|
| 42 |
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
|
| 43 |
print(agent_code)
|
| 44 |
|
|
|
|
| 49 |
response.raise_for_status()
|
| 50 |
questions_data = response.json()
|
| 51 |
if not questions_data:
|
| 52 |
+
print("Fetched questions list is empty.")
|
| 53 |
+
return "Fetched questions list is empty or invalid format.", None
|
| 54 |
print(f"Fetched {len(questions_data)} questions.")
|
| 55 |
except requests.exceptions.RequestException as e:
|
| 56 |
print(f"Error fetching questions: {e}")
|
| 57 |
return f"Error fetching questions: {e}", None
|
| 58 |
except requests.exceptions.JSONDecodeError as e:
|
| 59 |
+
print(f"Error decoding JSON response from questions endpoint: {e}")
|
| 60 |
+
print(f"Response text: {response.text[:500]}")
|
| 61 |
+
return f"Error decoding server response for questions: {e}", None
|
| 62 |
except Exception as e:
|
| 63 |
print(f"An unexpected error occurred fetching questions: {e}")
|
| 64 |
return f"An unexpected error occurred fetching questions: {e}", None
|
|
|
|
| 75 |
continue
|
| 76 |
try:
|
| 77 |
submitted_answer = agent(question_text)
|
| 78 |
+
answers_payload.append(
|
| 79 |
+
{"task_id": task_id, "submitted_answer": submitted_answer}
|
| 80 |
+
)
|
| 81 |
+
results_log.append(
|
| 82 |
+
{
|
| 83 |
+
"Task ID": task_id,
|
| 84 |
+
"Question": question_text,
|
| 85 |
+
"Submitted Answer": submitted_answer
|
| 86 |
+
}
|
| 87 |
+
)
|
| 88 |
except Exception as e:
|
| 89 |
+
print(f"Error running agent on task {task_id}: {e}")
|
| 90 |
+
results_log.append(
|
| 91 |
+
{
|
| 92 |
+
"Task ID": task_id,
|
| 93 |
+
"Question": question_text,
|
| 94 |
+
"Submitted Answer": f"AGENT ERROR: {e}"
|
| 95 |
+
}
|
| 96 |
+
)
|
| 97 |
|
| 98 |
if not answers_payload:
|
| 99 |
print("Agent did not produce any answers to submit.")
|
| 100 |
+
return (
|
| 101 |
+
"Agent did not produce any answers to submit.",
|
| 102 |
+
pd.DataFrame(results_log)
|
| 103 |
+
)
|
| 104 |
|
| 105 |
+
# 4. Prepare Submission
|
| 106 |
+
submission_data = {
|
| 107 |
+
"username": username.strip(),
|
| 108 |
+
"agent_code": agent_code, "answers": answers_payload
|
| 109 |
+
}
|
| 110 |
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
|
| 111 |
print(status_update)
|
| 112 |
|
|
|
|
| 189 |
print("\n" + "-"*30 + " App Starting " + "-"*30)
|
| 190 |
# Check for SPACE_HOST and SPACE_ID at startup for information
|
| 191 |
space_host_startup = os.getenv("SPACE_HOST")
|
| 192 |
+
# Get SPACE_ID at startup
|
| 193 |
+
space_id_startup = os.getenv("SPACE_ID")
|
| 194 |
|
| 195 |
if space_host_startup:
|
| 196 |
print(f"✅ SPACE_HOST found: {space_host_startup}")
|
|
|
|
| 208 |
print("-"*(60 + len(" App Starting ")) + "\n")
|
| 209 |
|
| 210 |
print("Launching Gradio Interface for Basic Agent Evaluation...")
|
| 211 |
+
demo.launch(debug=True, share=False)
|
gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
prompts/system_prompt.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
You are a helpful agent capable of answering the provided questions. To provide proper answers you must:
|
| 2 |
+
|
| 3 |
+
1. Think step by step and feel free to write down your whole thought.
|
| 4 |
+
2. Reason about your reply.
|
| 5 |
+
3. Use the provided tools if necessary.
|
| 6 |
+
4. The format of your reply should be in json containing two fields, one called "reasoning" that will include your whole thought and one called "answer" containing the final answer. Remember, WE NEED A VALID JSON, no markdown fence around it!
|
| 7 |
+
5. The "answer" field should include just the answer and nothing more, no explaination, no units after any number, JUST THE ANSWER!
|
| 8 |
+
6. In the "reasoning" field, be precise on what tools you use in the process.
|
| 9 |
+
7. Do not come up with information that you do not know or you are not sure about. If you don't know something, search for it on the web using the provided tools.
|
requirements.txt
CHANGED
|
@@ -1,2 +1,7 @@
|
|
|
|
|
| 1 |
gradio
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
requests
|
|
|
|
| 1 |
+
duckduckgo_search==8.0.4
|
| 2 |
gradio
|
| 3 |
+
langchain==0.3.26
|
| 4 |
+
langchain-community==0.3.26
|
| 5 |
+
langfuse==3.0.5
|
| 6 |
+
langgraph==0.4.8
|
| 7 |
requests
|
results/.gitkeep
ADDED
|
File without changes
|
src/__pycache__/agent.cpython-310.pyc
ADDED
|
Binary file (3.26 kB). View file
|
|
|
src/__pycache__/tools.cpython-310.pyc
ADDED
|
Binary file (1.35 kB). View file
|
|
|
src/agent.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
from langchain_core.messages import SystemMessage, HumanMessage
|
| 4 |
+
from langchain_openai.chat_models import ChatOpenAI
|
| 5 |
+
from langfuse import Langfuse, get_client
|
| 6 |
+
from langfuse.langchain import CallbackHandler
|
| 7 |
+
from langgraph.graph import START, StateGraph, MessagesState
|
| 8 |
+
from langgraph.prebuilt import tools_condition
|
| 9 |
+
from langgraph.prebuilt import ToolNode
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Agent:
    """
    Class representing a basic agent that can answer questions.

    Internally the agent is a small LangGraph state machine: an "assistant"
    node backed by an OpenAI chat model with tools bound, and a "tools" node
    that executes tool calls, looping until the model emits a final answer.
    """

    def __init__(
        self,
        model: str = "gpt-4o",
        tools: list = None,
        system_prompt_path: str = "prompts/system_prompt.txt",
        openai_api_key: str = None,
        langfuse_callback_handler: CallbackHandler = None
    ):
        """
        Initialize the agent object.

        All parameters now have defaults so a bare ``Agent()`` works —
        app.py instantiates the agent with no arguments, which previously
        raised a TypeError for the three required parameters.

        :param model: The OpenAI model to use.
        :param tools: List of tools the agent can use (defaults to none).
        :param system_prompt_path: Path to the system prompt file.
        :param openai_api_key: OpenAI API key for authentication; when None,
            ChatOpenAI falls back to the OPENAI_API_KEY environment variable.
        :param langfuse_callback_handler: Langfuse callback handler for
            tracking and logging interactions.
        """
        self.chat_model = ChatOpenAI(
            model=model,
            api_key=openai_api_key,
        )
        # Explicit encoding so the prompt reads identically on all platforms.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read()
        self.tools = tools if tools is not None else []
        if langfuse_callback_handler is not None:
            self.chat_model.callbacks = [langfuse_callback_handler]
        self.chat_model_with_tools = self.chat_model.bind_tools(
            tools=self.tools,
            parallel_tool_calls=False
        )
        self.graph = self.__build_graph()

    def __call__(self, question: str) -> str:
        """
        Call the agent with a question.

        :param question: The question to ask the agent.
        :return: The agent's final answer (the "answer" field of its JSON
            reply), or the raw reply if it cannot be parsed as JSON.
        """
        reply = self.reply(question)
        # The system prompt demands bare JSON, but models sometimes wrap the
        # payload in a markdown fence anyway — strip one before parsing.
        cleaned = reply.strip()
        if cleaned.startswith("```"):
            cleaned = cleaned.strip("`")
            if cleaned.startswith("json"):
                cleaned = cleaned[len("json"):]
        try:
            parsed = json.loads(cleaned)
        except json.JSONDecodeError:
            # Fall back to the raw reply rather than crashing the whole run.
            return reply
        return parsed.get("answer", "")

    def reply(self, question: str) -> str:
        """
        Reply to a question using the agent and return the agent's full reply
        with reasoning included.

        :param question: The question to ask the agent.
        :return: The content of the final message produced by the graph.
        """
        final_state = self.graph.invoke(
            input={
                "messages": [
                    SystemMessage(content=self.system_prompt),
                    HumanMessage(content=question)
                ]
            },
            config={
                # Propagate the Langfuse handler (if any) to every node.
                "callbacks": self.chat_model.callbacks
            }
        )
        return final_state["messages"][-1].content

    def __build_graph(self):
        """
        Build the LangGraph graph for the agent.

        :return: The compiled assistant/tools graph.
        """
        builder = StateGraph(MessagesState)

        # Define nodes: these do the work.
        builder.add_node("assistant", self.__assistant)
        builder.add_node("tools", ToolNode(self.tools))

        # Define edges: these determine how the control flow moves.
        # tools_condition routes to "tools" when the model requested a tool
        # call, otherwise ends the run.
        builder.add_edge(START, "assistant")
        builder.add_conditional_edges(
            "assistant",
            tools_condition,
        )
        builder.add_edge("tools", "assistant")
        return builder.compile()

    def __assistant(self, state: MessagesState) -> MessagesState:
        """
        The assistant node: invoke the tool-bound model on the conversation.

        :param state: The current state of the agent.
        :return: Updated state with the assistant's response appended.
        """
        response = self.chat_model_with_tools.invoke(state["messages"])
        return {"messages": [response]}
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
if __name__ == "__main__":

    import os
    from langchain_community.tools import DuckDuckGoSearchResults

    from tools import multiply, add, subtract, divide, modulus

    # Initialize Langfuse client with constructor arguments.
    Langfuse(
        public_key=os.environ.get("LANGFUSE_PUBLIC_KEY"),
        secret_key=os.environ.get("LANGFUSE_SECRET_KEY"),
        host='https://cloud.langfuse.com'
    )

    # Get the configured client instance and a callback handler so every
    # model call in this demo run is traced.
    langfuse = get_client()
    langfuse_handler = CallbackHandler()

    # Arithmetic tools plus a web-search tool.
    agent_tools = [
        multiply, add, subtract, divide, modulus,
        DuckDuckGoSearchResults(),
    ]

    demo_agent = Agent(
        model="gpt-4o",
        tools=agent_tools,
        system_prompt_path="prompts/system_prompt.txt",
        openai_api_key=os.environ.get("OPENAI_API_KEY"),
        langfuse_callback_handler=langfuse_handler
    )
    answer = demo_agent(
        question="""
        Search for Tom Cruise and summarize the results for me.
        """
    )
    print(answer)
|
src/tools.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain_core.tools import tool
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
@tool
def multiply(a: int, b: int) -> int:
    """
    Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    # Docstring layout normalized to match the other tools in this module
    # (summary on its own line after the opening quotes).
    return a * b
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@tool
def add(a: int, b: int) -> int:
    """
    Add two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The sum ``a + b``.
    """
    return a + b
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@tool
def subtract(a: int, b: int) -> int:
    """
    Subtract two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The difference ``a - b``.
    """
    return a - b
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@tool
def divide(a: int, b: int) -> float:
    """
    Divide two numbers.

    Args:
        a: numerator
        b: denominator

    Returns:
        The true-division quotient ``a / b``.

    Raises:
        ValueError: If ``b`` is zero.
    """
    # True division always produces a float, so the return annotation is
    # float rather than the original (incorrect) int.
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@tool
def modulus(a: int, b: int) -> int:
    """
    Get the modulus of two numbers.

    Args:
        a: first int
        b: second int

    Returns:
        The remainder ``a % b`` (Python semantics: sign follows ``b``).
    """
    return a % b
|