AGiorni committed on
Commit
20e8a32
·
1 Parent(s): 969a1ea

added DuckDuckGo search tool

Browse files
.env CHANGED
@@ -1,4 +1,9 @@
1
  AZURE_OPENAI_API_KEY = "3wCRrQmKGc0k5eYxe2USK4yXeuQj7EtEWjF4fw0Z7aICCKDHJKYXJQQJ99BHACHYHv6XJ3w3AAAAACOGALEY"
2
  AZURE_OPENAI_ENDPOINT = "https://agent-training-foundryres.openai.azure.com/"
3
  AZURE_OPENAI_DEPLOYMENT_NAME = "gpt-4o"
4
- OPENAI_API_VERSION = "2025-04-01-preview"
 
 
 
 
 
 
1
  AZURE_OPENAI_API_KEY = "3wCRrQmKGc0k5eYxe2USK4yXeuQj7EtEWjF4fw0Z7aICCKDHJKYXJQQJ99BHACHYHv6XJ3w3AAAAACOGALEY"
2
  AZURE_OPENAI_ENDPOINT = "https://agent-training-foundryres.openai.azure.com/"
3
  AZURE_OPENAI_DEPLOYMENT_NAME = "gpt-4o"
4
+ OPENAI_API_VERSION = "2025-04-01-preview"
5
+
6
+ # Get keys for your project from the project settings page: https://cloud.langfuse.com
7
+ LANGFUSE_PUBLIC_KEY = "pk-lf-7eab47cc-6608-4d4d-95e9-1e830b60b9e9"
8
+ LANGFUSE_SECRET_KEY = "sk-lf-438ec200-3367-425a-8283-4f858ba924f1"
9
+ LANGFUSE_HOST = "https://cloud.langfuse.com" # 🇪🇺 EU region
__pycache__/main_agent.cpython-313.pyc CHANGED
Binary files a/__pycache__/main_agent.cpython-313.pyc and b/__pycache__/main_agent.cpython-313.pyc differ
 
__pycache__/prompts_lib.cpython-313.pyc CHANGED
Binary files a/__pycache__/prompts_lib.cpython-313.pyc and b/__pycache__/prompts_lib.cpython-313.pyc differ
 
__pycache__/tools.cpython-313.pyc ADDED
Binary file (304 Bytes). View file
 
app.py CHANGED
@@ -21,7 +21,7 @@ class BasicAgent:
21
 
22
  message = prompts_lib.system_prompt2 + question
23
 
24
- response = main_agent.graph.invoke({'messages': message})
25
 
26
  answer = response.get('messages')[1].content
27
 
 
21
 
22
  message = prompts_lib.system_prompt2 + question
23
 
24
+ response = main_agent.agent.invoke({'messages': message})
25
 
26
  answer = response.get('messages')[1].content
27
 
main_agent.py CHANGED
@@ -4,6 +4,8 @@ from langgraph.graph.message import add_messages
4
  from langchain_core.messages import AnyMessage, SystemMessage
5
  from langchain_openai import AzureChatOpenAI
6
  from langgraph.graph import START, StateGraph
 
 
7
 
8
  import prompts_lib as my_prompts
9
 
@@ -26,7 +28,11 @@ llm = AzureChatOpenAI(
26
  temperature=0
27
  )
28
 
 
 
 
29
 
 
30
  system_prompt = my_prompts.system_prompt
31
  system_message = SystemMessage(content=system_prompt)
32
 
@@ -36,15 +42,17 @@ def assistant(state: State):
36
  "messages": [llm.invoke(state["messages"])]
37
  }
38
 
 
39
  # define graph
40
  builder = StateGraph(State)
41
 
42
  # add nodes
43
  builder.add_node("assistant", assistant)
 
44
 
45
  # define edges
46
  builder.add_edge(START, "assistant")
47
- # No conditional edges in this simple example
48
-
49
  # compile gtaph
50
  agent = builder.compile()
 
4
  from langchain_core.messages import AnyMessage, SystemMessage
5
  from langchain_openai import AzureChatOpenAI
6
  from langgraph.graph import START, StateGraph
7
+ from tools import duckduck_tool
8
+ from langgraph.prebuilt import ToolNode, tools_condition
9
 
10
  import prompts_lib as my_prompts
11
 
 
28
  temperature=0
29
  )
30
 
31
+ # build tools
32
+ tools = [duckduck_tool]
33
+ chat_w_tools = llm.bind_tools(tools)
34
 
35
+ # load system prompt
36
  system_prompt = my_prompts.system_prompt
37
  system_message = SystemMessage(content=system_prompt)
38
 
 
42
  "messages": [llm.invoke(state["messages"])]
43
  }
44
 
45
+
46
  # define graph
47
  builder = StateGraph(State)
48
 
49
  # add nodes
50
  builder.add_node("assistant", assistant)
51
+ builder.add_node("tools", ToolNode(tools))
52
 
53
  # define edges
54
  builder.add_edge(START, "assistant")
55
+ builder.add_conditional_edges("assistant", tools_condition)
56
+ builder.add_edge("tools", "assistant")
57
  # compile graph
58
  agent = builder.compile()
prompts_lib.py CHANGED
@@ -12,7 +12,8 @@ digits in plain text unless specified otherwise.
12
  If you are asked for a comma separated list, apply the above rules depending of whether the element
13
  to be put in the list is a number or a string.'''
14
 
15
- system_prompt2 = ''' You are a general AI assistant. I will ask you a question. Report just YOUR FINAL ANSWER.
 
16
  YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated
17
  list of numbers and/or strings.
18
  If you are asked for a number, don’t use comma to write your number neither use units such as $ or
 
12
  If you are asked for a comma separated list, apply the above rules depending of whether the element
13
  to be put in the list is a number or a string.'''
14
 
15
+ system_prompt2 = ''' You are a general AI assistant. I will ask you a question. Use the tools you have available if necessary.
16
+ Report just YOUR FINAL ANSWER.
17
  YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated
18
  list of numbers and/or strings.
19
  If you are asked for a number, don’t use comma to write your number neither use units such as $ or
requirements.txt CHANGED
@@ -2,4 +2,7 @@ gradio
2
  requests
3
  langgraph
4
  langchain-openai
5
- dotenv
 
 
 
 
2
  requests
3
  langgraph
4
  langchain-openai
5
+ dotenv
6
+ langchain-community
7
+ ddgs
8
+ langfuse
test.ipynb CHANGED
@@ -14,7 +14,7 @@
14
  },
15
  {
16
  "cell_type": "code",
17
- "execution_count": 18,
18
  "id": "bbd5b78b",
19
  "metadata": {},
20
  "outputs": [],
@@ -25,61 +25,39 @@
25
  },
26
  {
27
  "cell_type": "code",
28
- "execution_count": 12,
29
  "id": "76d480cd",
30
  "metadata": {},
31
  "outputs": [],
32
  "source": [
33
- "message = pl.system_prompt2 + 'what is the capital of france'"
34
  ]
35
  },
36
  {
37
  "cell_type": "code",
38
- "execution_count": 13,
39
- "id": "62435d86",
40
- "metadata": {},
41
- "outputs": [],
42
- "source": [
43
- "response = ma.graph.invoke({'messages': message})"
44
- ]
45
- },
46
- {
47
- "cell_type": "code",
48
- "execution_count": 14,
49
- "id": "1ad3fa57",
50
- "metadata": {},
51
- "outputs": [
52
- {
53
- "data": {
54
- "text/plain": [
55
- "'Paris'"
56
- ]
57
- },
58
- "execution_count": 14,
59
- "metadata": {},
60
- "output_type": "execute_result"
61
- }
62
- ],
63
- "source": [
64
- "response.get('messages')[1].content"
65
- ]
66
- },
67
- {
68
- "cell_type": "code",
69
- "execution_count": 28,
70
  "id": "e097a098",
71
  "metadata": {},
72
  "outputs": [],
73
  "source": [
 
 
 
 
 
 
74
  "class BasicAgent:\n",
75
  " def __init__(self):\n",
76
- " pass\n",
77
  " def __call__(self, question: str) -> str:\n",
78
  " print(f\"Agent received question (first 50 chars): {question[:50]}...\")\n",
79
  "\n",
80
  " message = pl.system_prompt2 + question\n",
81
  "\n",
82
- " response = ma.agent.invoke({'messages': message})\n",
 
 
 
83
  "\n",
84
  " answer = response.get('messages')[1].content\n",
85
  "\n",
@@ -90,7 +68,7 @@
90
  },
91
  {
92
  "cell_type": "code",
93
- "execution_count": 29,
94
  "id": "af68a6a5",
95
  "metadata": {},
96
  "outputs": [],
@@ -100,7 +78,7 @@
100
  },
101
  {
102
  "cell_type": "code",
103
- "execution_count": 30,
104
  "id": "02c87f0c",
105
  "metadata": {},
106
  "outputs": [
@@ -108,13 +86,13 @@
108
  "name": "stdout",
109
  "output_type": "stream",
110
  "text": [
111
- "Agent received question (first 50 chars): What is the capital of Italy?...\n",
112
- "Agent returning answer: Rome\n"
113
  ]
114
  }
115
  ],
116
  "source": [
117
- "answer = agent(\"What is the capital of Italy?\")"
118
  ]
119
  },
120
  {
 
14
  },
15
  {
16
  "cell_type": "code",
17
+ "execution_count": 3,
18
  "id": "bbd5b78b",
19
  "metadata": {},
20
  "outputs": [],
 
25
  },
26
  {
27
  "cell_type": "code",
28
+ "execution_count": 5,
29
  "id": "76d480cd",
30
  "metadata": {},
31
  "outputs": [],
32
  "source": [
33
+ "message = pl.system_prompt2 + 'How many studio albums were published by Mercedes Sousa between 2000 and 2009 included?'"
34
  ]
35
  },
36
  {
37
  "cell_type": "code",
38
+ "execution_count": 18,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  "id": "e097a098",
40
  "metadata": {},
41
  "outputs": [],
42
  "source": [
43
+ "from langfuse.langchain import CallbackHandler\n",
44
+ "\n",
45
+ "# Initialize Langfuse CallbackHandler for LangGraph/Langchain (tracing)\n",
46
+ "langfuse_handler = CallbackHandler()\n",
47
+ "langfuse_handler2 = CallbackHandler()\n",
48
+ "\n",
49
  "class BasicAgent:\n",
50
  " def __init__(self):\n",
51
+ " self.agent = ma.agent\n",
52
  " def __call__(self, question: str) -> str:\n",
53
  " print(f\"Agent received question (first 50 chars): {question[:50]}...\")\n",
54
  "\n",
55
  " message = pl.system_prompt2 + question\n",
56
  "\n",
57
+ " response = self.agent.invoke(\n",
58
+ " input={'messages': message},\n",
59
+ " config={\"callbacks\": [langfuse_handler2]}\n",
60
+ " )\n",
61
  "\n",
62
  " answer = response.get('messages')[1].content\n",
63
  "\n",
 
68
  },
69
  {
70
  "cell_type": "code",
71
+ "execution_count": 19,
72
  "id": "af68a6a5",
73
  "metadata": {},
74
  "outputs": [],
 
78
  },
79
  {
80
  "cell_type": "code",
81
+ "execution_count": 20,
82
  "id": "02c87f0c",
83
  "metadata": {},
84
  "outputs": [
 
86
  "name": "stdout",
87
  "output_type": "stream",
88
  "text": [
89
+ "Agent received question (first 50 chars): How many studio albums were published by Mercedes ...\n",
90
+ "Agent returning answer: Seven\n"
91
  ]
92
  }
93
  ],
94
  "source": [
95
+ "answer = agent(\"How many studio albums were published by Mercedes Sousa between 2000 and 2009 included? use duckduckgo tool you have available to search wikipedia to respond\")"
96
  ]
97
  },
98
  {
tools.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from langchain_community.tools import DuckDuckGoSearchRun
2
+
3
+
4
+ duckduck_tool = DuckDuckGoSearchRun()