| """LangGraph Agent""" | |
| import os | |
| from dotenv import load_dotenv | |
| from langchain_core.tools import tool | |
| from langchain_tavily import TavilySearch | |
| from langchain_community.document_loaders import ArxivLoader, WikipediaLoader | |
| from langchain_core.messages import AIMessage | |
| from langgraph.graph import StateGraph, MessagesState | |
| from langchain_google_genai import ChatGoogleGenerativeAI | |
| from langchain_groq import ChatGroq | |
| from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings | |
| from langchain_community.vectorstores import SupabaseVectorStore | |
| from langchain.tools.retriever import create_retriever_tool | |
| from supabase.client import Client, create_client | |
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b


@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b


@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b
@tool
def web_search(query: str) -> str:
    """Search the web for a query.

    Args:
        query: The search query string.

    Returns:
        The search results as a string.
    """
    raw_result = TavilySearch(max_results=3).invoke(query)
    search_results = raw_result.get("results", [])
    return "\n\n---\n\n".join(
        f'<Document source="{res.get("url", "")}" page=""/>\n{res.get("content", "")}\n</Document>'
        for res in search_results
    )
@tool
def arxiv_search(query: str) -> str:
    """Search Arxiv for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    loader = ArxivLoader(query=query, load_max_docs=3, load_all_available_meta=True)
    docs = loader.load()
    formatted_list = []
    for doc in docs:
        # With load_all_available_meta=True, "entry_id" holds the arxiv.org/abs URL.
        source = doc.metadata.get("entry_id", "")
        formatted = f'<Document source="{source}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        formatted_list.append(formatted)
    return "\n\n---\n\n".join(formatted_list)
@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    loader = WikipediaLoader(query=query, load_max_docs=3)
    docs = loader.load()
    return "\n\n---\n\n".join(
        f'<Document source="{doc.metadata.get("source", "")}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        for doc in docs
    )
tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arxiv_search,
]
# Build retriever
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")  # dim=768
supabase: Client = create_client(
    os.environ.get("SUPABASE_URL"),
    os.environ.get("SUPABASE_SERVICE_KEY"),
)
vector_store = SupabaseVectorStore(
    client=supabase,
    embedding=embeddings,
    table_name="documents",
    query_name="match_documents_langchain",
)
# Bind the result to a new name so the imported create_retriever_tool function
# is not shadowed by the tool it returns.
retriever_tool = create_retriever_tool(
    retriever=vector_store.as_retriever(),
    name="question_search",  # tool names must not contain spaces for most providers
    description="A tool to retrieve similar questions from a vector store.",
)
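# Note: retriever_tool is constructed here but not included in `tools`, so the
# LLM cannot call it. If question lookup should be exposed as a tool call, it
# could be appended (a sketch, not part of the original wiring):
# tools.append(retriever_tool)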
# Build graph function
def build_graph(provider: str = "google"):
    """Build the graph."""
    if provider == "google":
        # Google Gemini
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        # Groq https://console.groq.com/docs/models
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0)  # alternatives: qwen-qwq-32b, gemma2-9b-it
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")

    # Bind tools to LLM (available for an agent node; the current graph only
    # uses the retriever node below).
    llm_with_tools = llm.bind_tools(tools)

    def retriever(state: MessagesState):
        """Answer from the closest stored question/answer pair in the vector store."""
        query = state["messages"][-1].content
        similar_docs = vector_store.similarity_search(query, k=1)
        if not similar_docs:
            return {"messages": [AIMessage(content="No similar question found.")]}
        content = similar_docs[0].page_content
        if "Final answer :" in content:
            answer = content.split("Final answer :")[-1].strip()
        else:
            answer = content.strip()
        return {"messages": [AIMessage(content=answer)]}

    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    # The retriever node is both the entry and finish point of this single-node graph
    builder.set_entry_point("retriever")
    builder.set_finish_point("retriever")
    # Compile graph
    return builder.compile()
if __name__ == "__main__":
    # Example usage
    print("testing agent tools")
    print(web_search.invoke("LangGraph Agent"))  # prints formatted search results as a string
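    # A minimal sketch of running the compiled single-node graph (assumes the
    # Supabase credentials and GOOGLE_API_KEY are present in .env; the question
    # below is a hypothetical example, not one guaranteed to be in the store):
    graph = build_graph(provider="google")
    result = graph.invoke({"messages": [HumanMessage(content="What is the capital of France?")]})
    for message in result["messages"]:
        message.pretty_print()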