import os

from dotenv import load_dotenv
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_groq import ChatGroq
from langgraph.prebuilt import tools_condition, ToolNode
from langgraph.graph import START, StateGraph, MessagesState
from langchain_huggingface import HuggingFaceEmbeddings, ChatHuggingFace
from supabase.client import Client, create_client

load_dotenv()


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b


@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b


@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b


@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return a maximum of 2 results.

    Args:
        query: The search query.
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [f"\n{doc.page_content}\n" for doc in search_docs]
    )
    return formatted_search_docs


@tool
def web_search(query: str) -> str:
    """Search Tavily for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    # TavilySearchResults returns a list of dicts, each with a "content" field.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [f"\n{doc.get('content', '')}\n" for doc in search_docs]
    )
    return formatted_search_docs


@tool
def arvix_search(query: str) -> str:
    """Search arXiv for a query and return a maximum of 3 results.
    Args:
        query: The search query.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # Keep only the first 1000 characters of each paper to limit context size.
    formatted_search_docs = "\n\n---\n\n".join(
        [f"\n{doc.page_content[:1000]}\n" for doc in search_docs]
    )
    return formatted_search_docs


# Load the system prompt from a file.
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

# System message
sys_msg = SystemMessage(content=system_prompt)

# Build a retriever backed by a Supabase vector store.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2"
)  # dim=768
supabase: Client = create_client(
    os.environ.get("SUPABASE_URL"),
    os.environ.get("SUPABASE_SERVICE_KEY"),
)
vector_store = SupabaseVectorStore(
    client=supabase,
    embedding=embeddings,
    table_name="documents",
    query_name="get_docs",
)
retriever_tool = create_retriever_tool(
    retriever=vector_store.as_retriever(),
    name="Question Search",
    description="A tool to retrieve similar questions from a vector store.",
)

tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arvix_search,
]


# Build the state graph.
def build_graph():
    llm = ChatGroq(model="qwen-qwq-32b", temperature=0)
    llm_with_tools = llm.bind_tools(tools)

    def assistant_node(state: MessagesState):
        """Assistant node: call the tool-enabled LLM on the current messages."""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    def retriever_node(state: MessagesState):
        """Retriever node: prepend the system prompt and a similar solved question."""
        similar_question = vector_store.similarity_search(state["messages"][0].content)
        if not similar_question:
            return {"messages": [HumanMessage(content="No similar questions found in the database.")]}
        example_msg = HumanMessage(
            content=f"Here I provide a similar question and answer for reference:\n\n{similar_question[0].page_content}",
        )
        return {"messages": [sys_msg] + state["messages"] + [example_msg]}

    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever_node)
    builder.add_node("assistant", assistant_node)
    builder.add_node("tools", ToolNode(tools=tools))
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")
    return builder.compile()


# Test
if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # Build the graph
    graph = build_graph()
    # Run the graph
    messages = [HumanMessage(content=question)]
    messages = graph.invoke({"messages": messages})
    for m in messages["messages"]:
        m.pretty_print()