import os
import ast
from typing import Any, Dict, List

import pandas as pd
import chromadb
from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool


load_dotenv()

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b


@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b


@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b

@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return a maximum of two results.

    Args:
        query: The search query.
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ]
    )
    return formatted_search_docs


@tool
def web_search(query: str) -> str:
    """Search Tavily for a query and return a maximum of three results.

    Args:
        query: The search query.
    """
    # TavilySearchResults returns a list of dicts with "url" and "content" keys.
    search_results = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{result["url"]}">\n{result["content"]}\n</Document>'
            for result in search_results
        ]
    )
    return formatted_search_docs


@tool
def arxiv_search(query: str) -> str:
    """Search arXiv for a query and return a maximum of three results.

    Args:
        query: The search query.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document title="{doc.metadata.get("Title", "")}" published="{doc.metadata.get("Published", "")}">\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ]
    )
    return formatted_search_docs

| system_prompt="You are a helpful assistant tasked with answering questions using a set of tools. Now, I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, do not use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, do not use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.Your answer should only start with 'FINAL ANSWER: ', then follows with the answer." |
| sys_msg = SystemMessage(content=system_prompt) |
|
|
# Load pre-computed question embeddings and their metadata from a CSV file.
csv_file_path = 'embeddings.csv'
df = pd.read_csv(csv_file_path)

# The embedding and metadata columns are stored as string literals; parse them back into Python objects.
embeddings = df['embedding'].apply(ast.literal_eval).tolist()
metadata = df['metadata'].apply(ast.literal_eval).tolist()
ids = [str(i) for i in range(len(embeddings))]

# Build an in-memory ChromaDB collection from the pre-computed embeddings.
client = chromadb.Client()
collection = client.create_collection(name="my_collection")
collection.add(
    embeddings=embeddings,
    metadatas=metadata,
    # Assumption: the CSV has no separate text column, so the stringified metadata is
    # stored as the document text; this is what the retriever node later reads back.
    documents=[str(meta) for meta in metadata],
    ids=ids,
)

# Embedding model used by the standalone retriever below and by the graph's retriever node.
embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")


def as_retriever():
    """Return a callable that fetches the most similar stored question for a query."""
    def retriever(query):
        query_embedding = embeddings_model.embed_query(query)
        results = collection.query(
            query_embeddings=[query_embedding],
            n_results=1
        )
        return results
    return retriever


# Simple description of the question retriever. It is kept as a plain dict and is not
# registered with the agent; the graph below uses its own retriever node instead.
question_retriever_tool = {
    "retriever": as_retriever(),
    "name": "Question Search",
    "description": "A tool to retrieve similar questions from a vector store.",
}

tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arxiv_search,
]

def build_graph(provider: str = "huggingface"):
    """Build the agent graph for the chosen LLM provider."""
    if provider == "google":
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                endpoint_url="https://api-inference.huggingface.co/models/HuggingFaceTB/SmolLM2-1.7B-Instruct",
                huggingfacehub_api_token=os.getenv("HF_INFERENCE_ENDPOINT"),
            )
        )
    else:
        raise ValueError("Invalid provider. Choose 'google' or 'huggingface'.")

    # Expose the tool set to the model.
    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        """Assistant node: call the tool-enabled LLM on the current message history."""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    def retriever(state: Dict[str, Any]) -> Dict[str, List[HumanMessage]]:
        """Retriever node: look up the most similar stored question via ChromaDB."""
        query = state["messages"][0].content

        # Embed the incoming question and fetch the single closest match from the collection.
        query_embedding = embeddings_model.embed_query(query)
        results = collection.query(
            query_embeddings=[query_embedding],
            n_results=1
        )
        similar_question_content = results['documents'][0][0]

        # Hand the retrieved question/answer pair to the assistant as an extra reference message.
        example_msg = HumanMessage(
            content=f"Here I provide a similar question and answer for reference: \n\n{similar_question_content}",
        )

        return {"messages": [sys_msg] + state["messages"] + [example_msg]}

    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()
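

# Minimal usage sketch (not part of the original flow, included as an assumption): compile
# the graph and run a single question end to end. This assumes the relevant API keys
# (e.g. TAVILY_API_KEY and the HuggingFace token read above) are set in the environment
# and that embeddings.csv sits next to this file.
if __name__ == "__main__":
    graph = build_graph(provider="huggingface")
    question = "What is 12 multiplied by 8?"
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for message in result["messages"]:
        message.pretty_print()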