"""LangGraph question-answering agent with OpenAI tools and Langfuse tracing."""
| import json | |
| from langchain_core.messages import SystemMessage, HumanMessage | |
| from langchain_openai.chat_models import ChatOpenAI | |
| from langfuse import Langfuse, get_client | |
| from langfuse.langchain import CallbackHandler | |
| from langgraph.graph import START, StateGraph, MessagesState | |
| from langgraph.prebuilt import tools_condition | |
| from langgraph.prebuilt import ToolNode | |
class Agent:
    """
    Class representing a basic agent that can answer questions.

    Wraps an OpenAI chat model bound to a set of tools inside a LangGraph
    state graph that alternates between the assistant node and tool
    execution until a final answer is produced.
    """

    def __init__(
        self,
        model: str,
        tools: list,
        system_prompt_path: str,
        openai_api_key: str | None = None,
        langfuse_callback_handler: CallbackHandler | None = None
    ):
        """
        Initialize the agent object.

        :param model: The OpenAI model to use.
        :param tools: List of tools the agent can use.
        :param system_prompt_path: Path to the system prompt file.
        :param openai_api_key: OpenAI API key for authentication.
        :param langfuse_callback_handler: Langfuse callback handler for
            tracking and logging interactions.
        """
        self.chat_model = ChatOpenAI(
            model=model,
            api_key=openai_api_key,
        )
        # Explicit encoding so the prompt read does not depend on the
        # platform's default locale encoding.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read()
        self.tools = tools
        if langfuse_callback_handler is not None:
            self.chat_model.callbacks = [langfuse_callback_handler]
        # One tool call at a time keeps the graph's assistant/tools loop
        # simple to follow in traces.
        self.chat_model_with_tools = self.chat_model.bind_tools(
            tools=tools,
            parallel_tool_calls=False
        )
        self.graph = self.__build_graph()

    def __call__(self, question: str) -> tuple[str, str]:
        """
        Reply to a question using the agent and return the agent's full
        reply with reasoning included.

        The final model message is expected to be a JSON object with
        "reasoning" and "answer" keys.

        :param question: The question to ask the agent.
        :return: A ``(reasoning, answer)`` tuple parsed from the agent's
            final JSON reply.
        :raises json.JSONDecodeError: If the final message is not valid JSON.
        :raises KeyError: If the JSON lacks "reasoning" or "answer".
        """
        final_state = self.graph.invoke(
            input={
                "messages": [
                    SystemMessage(content=self.system_prompt),
                    HumanMessage(content=question)
                ]
            },
            config={
                # Forward the model's callbacks (the Langfuse handler,
                # when configured) so graph runs are traced too.
                "callbacks": self.chat_model.callbacks
            }
        )
        reply = json.loads(final_state["messages"][-1].content)
        return reply["reasoning"], reply["answer"]

    def __build_graph(self):
        """
        Build the graph for the agent.

        :return: A compiled LangGraph graph with an assistant node and a
            tools node connected in a loop.
        """
        builder = StateGraph(MessagesState)
        # Define nodes: these do the work
        builder.add_node("assistant", self.__assistant)
        builder.add_node("tools", ToolNode(self.tools))
        # Define edges: these determine how the control flow moves
        builder.add_edge(START, "assistant")
        # tools_condition routes to "tools" when the last message carries
        # tool calls, otherwise to the graph's end.
        builder.add_conditional_edges(
            "assistant",
            tools_condition,
        )
        builder.add_edge("tools", "assistant")
        return builder.compile()

    def __assistant(self, state: MessagesState) -> MessagesState:
        """
        The assistant function that processes the state and returns a response.

        :param state: The current state of the agent.
        :return: Updated state with the assistant's response appended.
        """
        response = self.chat_model_with_tools.invoke(state["messages"])
        return {"messages": [response]}
if __name__ == "__main__":
    import os

    from langchain_community.tools import DuckDuckGoSearchResults
    from tools import multiply, add, subtract, divide, modulus

    # Configure the global Langfuse client from environment credentials.
    Langfuse(
        public_key=os.environ.get("LANGFUSE_PUBLIC_KEY"),
        secret_key=os.environ.get("LANGFUSE_SECRET_KEY"),
        host='https://cloud.langfuse.com'
    )
    # Fetch the configured client and build the tracing callback handler.
    langfuse = get_client()
    langfuse_handler = CallbackHandler()

    # Arithmetic tools plus a web-search tool for the agent to call.
    agent_tools = [
        multiply,
        add,
        subtract,
        divide,
        modulus,
        DuckDuckGoSearchResults(),
    ]

    agent = Agent(
        model="gpt-4o",
        tools=agent_tools,
        system_prompt_path="prompts/system_prompt.txt",
        openai_api_key=os.environ.get("OPENAI_API_KEY"),
        langfuse_callback_handler=langfuse_handler
    )
    response = agent(
        question="""
Search for Tom Cruise and summarize the results for me.
"""
    )
    print(response)