Upload 2 files
- src/agent.py +83 -2
- src/utils.py +16 -0
src/agent.py
CHANGED
@@ -1,4 +1,6 @@
+import base64
 import json
+from os.path import join
 
 from langchain_core.messages import SystemMessage, HumanMessage
 from langchain_core.rate_limiters import InMemoryRateLimiter
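The two new imports serve the attachment handling added further down: base64 encodes binary files for inline message content, and os.path.join resolves attachments under the new data_path. A minimal sketch of the encoding step they enable, with an illustrative directory and file name:

```python
import base64
from os.path import join

# Read a binary attachment and encode it as a UTF-8 base64 string,
# the form the 'data' field of the content blocks below expects.
with open(join("data", "example.png"), "rb") as file:
    encoded = base64.b64encode(file.read()).decode("utf-8")
```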
@@ -20,6 +22,7 @@ class Agent:
         model: str,
         tools: list,
         system_prompt_path: str,
+        data_path: str,
         openai_api_key: str = None,
         langfuse_callback_handler: CallbackHandler = None
     ):
@@ -28,6 +31,7 @@
         :param model: The OpenAI model to use.
         :param tools: List of tools the agent can use.
         :param system_prompt_path: Path to the system prompt file.
+        :param data_path: Path to the directory holding data files used by the agent.
         :param openai_api_key: OpenAI API key for authentication.
         :param langfuse_callback_handler: Langfuse callback handler for
             tracking and logging interactions.
@@ -47,6 +51,7 @@
         )
         with open(system_prompt_path, "r") as file:
             self.system_prompt = file.read()
+        self.data_path = data_path
         self.tools = tools
         if langfuse_callback_handler is not None:
             self.chat_model.callbacks = [langfuse_callback_handler]
@@ -56,18 +61,27 @@
         )
         self.graph = self.__build_graph()
 
-    def __call__(self, question: str) -> str:
+    def __call__(
+        self,
+        question: str,
+        question_file: str | None
+    ) -> tuple[str, str]:
         """
         Reply to a question using the agent and return the agent's full reply
         with reasoning included.
         :param question: The question to ask the agent.
+        :param question_file: The file that comes with the question.
         :return: The agent's response.
         """
+        human_message = self.__format_human_message(
+            question=question,
+            question_file=question_file
+        )
         final_state = self.graph.invoke(
             input={
                 "messages": [
                     SystemMessage(content=self.system_prompt),
-                    HumanMessage(content=question)
+                    human_message
                 ]
             },
             config={
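A hedged usage sketch of the new call signature. The constructor arguments below are placeholders (the real values live in this repo's __main__ block and environment), and the unpacking assumes the tuple[str, str] annotation above pairs the final answer with the reasoning trace:

```python
# Illustrative only: model name, prompt path, and file names are
# assumptions, not values taken from this commit.
agent = Agent(
    model="gpt-4o-mini",
    tools=[],
    system_prompt_path="system_prompt.txt",
    data_path="data",
)

# Text-only question: no attachment, so pass None explicitly
# (question_file has no default value).
answer, reasoning = agent("What is 6 * 7?", None)

# Question with an attachment resolved under data_path.
answer, reasoning = agent("What does this chart show?", "chart.png")
```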
@@ -105,10 +119,77 @@
         response = self.chat_model_with_tools.invoke(state["messages"])
         return {"messages": [response]}
 
+    def __format_human_message(
+        self,
+        question: str,
+        question_file: str | None
+    ) -> HumanMessage:
+        """
+        Format the human message for the agent.
+        :param question: The question to ask the agent.
+        :param question_file: The file that comes with the question.
+        :return: Formatted HumanMessage.
+        """
+        if question_file is None:
+            human_message = HumanMessage(content=question)
+        else:
+            if '.png' in question_file:
+                with open(join(self.data_path, question_file), "rb") as file:
+                    file_content = base64.b64encode(file.read()).\
+                        decode("utf-8")
+                human_message = HumanMessage(
+                    content=[
+                        {
+                            'type': 'text',
+                            'text': question
+                        },
+                        {
+                            'type': 'image',
+                            'source_type': 'base64',
+                            'data': file_content,
+                            'mime_type': 'image/png'
+                        }
+                    ]
+                )
+            elif '.mp3' in question_file:
+                with open(join(self.data_path, question_file), "rb") as file:
+                    file_content = base64.b64encode(file.read()).\
+                        decode("utf-8")
+                human_message = HumanMessage(
+                    content=[
+                        {
+                            'type': 'text',
+                            'text': question
+                        },
+                        {
+                            'type': 'audio',
+                            'source_type': 'base64',
+                            'data': file_content,
+                            'mime_type': 'audio/mp3'
+                        }
+                    ]
+                )
+            elif '.py' in question_file:
+                with open(join(self.data_path, question_file), "r") as file:
+                    file_content = file.read()
+                human_message = HumanMessage(
+                    content=[
+                        {
+                            'type': 'text',
+                            'text': f'''{question}\n\nHere is the code:
+                            ```python\n{file_content}\n```'''
+                        },
+                    ]
+                )
+            elif '.xlsx' in question_file:
+                human_message = HumanMessage(content=question)
+        return human_message
+
 
 if __name__ == "__main__":
 
     import os
+
     from langchain_community.tools import DuckDuckGoSearchResults
 
     from tools import multiply, add, subtract, divide, modulus
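One edge case worth flagging in the new method: the extension checks use substring matching ('.png' in question_file), which also matches names like 'notes.png.bak', and there is no fallback branch, so a file name with an unhandled extension leaves human_message unassigned and the final return raises UnboundLocalError. A hedged sketch of a stricter variant, not part of this commit, written as a free function for brevity:

````python
import base64
from os.path import join

from langchain_core.messages import HumanMessage


def format_human_message(data_path: str, question: str,
                         question_file: str | None) -> HumanMessage:
    """Sketch only: endswith() avoids substring false positives and a
    final fallback keeps every path covered."""
    if question_file is None:
        return HumanMessage(content=question)
    path = join(data_path, question_file)
    if question_file.endswith((".png", ".mp3")):
        with open(path, "rb") as file:
            data = base64.b64encode(file.read()).decode("utf-8")
        # Pick the content-block type and MIME type from the extension.
        if question_file.endswith(".png"):
            kind, mime = "image", "image/png"
        else:
            kind, mime = "audio", "audio/mp3"
        return HumanMessage(content=[
            {"type": "text", "text": question},
            {"type": kind, "source_type": "base64",
             "data": data, "mime_type": mime},
        ])
    if question_file.endswith(".py"):
        with open(path, "r") as file:
            code = file.read()
        return HumanMessage(
            content=f"{question}\n\nHere is the code:\n```python\n{code}\n```"
        )
    # Fallback (covers .xlsx and anything unhandled): send the text alone,
    # matching what the commit does for spreadsheets.
    return HumanMessage(content=question)
````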
src/utils.py
ADDED
@@ -0,0 +1,16 @@
+import requests
+
+
+def download_file(url: str, local_filepath: str) -> None:
+    """
+    Download a file from a URL and save it locally.
+
+    Args:
+        url (str): The URL of the file to download.
+        local_filepath (str): The local path where the file will be saved.
+    """
+    with requests.get(url, stream=True) as response:
+        response.raise_for_status()
+        with open(local_filepath, 'wb') as file:
+            for chunk in response.iter_content(chunk_size=8192):
+                file.write(chunk)