"""LangGraph Agent""" import os from langgraph.graph import START, StateGraph, MessagesState from langgraph.prebuilt import tools_condition from langgraph.prebuilt import ToolNode from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings from langchain_community.document_loaders import WikipediaLoader from langchain_community.document_loaders import ArxivLoader from langchain_core.messages import SystemMessage, HumanMessage from langchain_core.tools import tool from langchain.tools.retriever import create_retriever_tool from langchain_community.tools import DuckDuckGoSearchResults from langchain_community.vectorstores import Chroma import json import chromadb chromadb.config.Settings.telemetry_enabled = False @tool def multiply(a: int, b: int) -> int: """Multiply two numbers. Args: a: first int b: second int """ return a * b @tool def add(a: int, b: int) -> int: """Add two numbers. Args: a: first int b: second int """ return a + b @tool def subtract(a: int, b: int) -> int: """Subtract two numbers. Args: a: first int b: second int """ return a - b @tool def divide(a: int, b: int) -> int: """Divide two numbers. Args: a: first int b: second int """ if b == 0: raise ValueError("Cannot divide by zero.") return a / b @tool def modulus(a: int, b: int) -> int: """Get the modulus of two numbers. Args: a: first int b: second int """ return a % b @tool def wiki_search(query: str) -> str: """Search Wikipedia for a query and return maximum 2 results. Args: query: The search query.""" search_docs = WikipediaLoader(query=query, load_max_docs=2).load() formatted_search_docs = "\n\n---\n\n".join( [ f'\n{doc.page_content}\n' for doc in search_docs ]) return {"wiki_results": formatted_search_docs} @tool def web_search(query: str) -> dict: """Search DuckDuckGo for a query and return maximum 3 results using LangChain.""" # Crea il tool DuckDuckGo search = DuckDuckGoSearchResults(max_results=3) docs = search.run(query) # restituisce una lista di dict con 'title', 'link', 'snippet' # Formattiamo i risultati per il LLM formatted = "\n\n---\n\n".join( f'\n{doc["title"]}: {doc["snippet"]}\n' for doc in docs ) return {"web_results": formatted} @tool def arxiv_search(query: str) -> str: """Search Arxiv for a query and return maximum 3 result. 
@tool
def arxiv_search(query: str) -> dict:
    """Search arXiv for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata.get("Title", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        for doc in search_docs
    )
    return {"arxiv_results": formatted_search_docs}


# Load the system prompt from a file.
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()
print(system_prompt)

# System message
sys_msg = SystemMessage(content=system_prompt)

# Load the question/answer metadata (one JSON object per line).
json_QA = []
with open("metadata.jsonl", "r") as jsonl_file:
    for json_str in jsonl_file:
        json_QA.append(json.loads(json_str))

# Use the same embedding model for indexing and retrieval.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

# Build the list of documents to index.
docs = []
for sample in json_QA:
    content = f"Question : {sample['Question']}\n\nFinal answer : {sample['Final answer']}"
    metadata = {"source": sample["task_id"]}
    docs.append(Document(page_content=content, metadata=metadata))
print(f"Prepared {len(docs)} documents")

# Initialize the Chroma vector store.
vector_store = Chroma.from_documents(
    documents=docs,
    embedding=embeddings,
    persist_directory="./chroma_db",
)

# Retriever tool over the vector store. Note: it is not added to `tools`;
# the graph's retriever node below queries the store directly instead.
retriever_tool = create_retriever_tool(
    retriever=vector_store.as_retriever(),
    name="Question Search",
    description="A tool to retrieve similar questions from a local Chroma vector store.",
)

tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arxiv_search,
]


# Build graph function
def build_graph():
    """Build the graph."""
    llm = ChatHuggingFace(
        llm=HuggingFaceEndpoint(
            repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            temperature=0,
        ),
    )
    llm_with_tools = llm.bind_tools(tools)

    # Nodes
    def assistant(state: MessagesState):
        """Assistant node: call the tool-enabled LLM on the running message list."""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    def retriever(state: MessagesState):
        """Retriever node: prepend the system prompt and a similar solved question."""
        similar_question = vector_store.similarity_search(state["messages"][0].content)
        example_msg = HumanMessage(
            content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
        )
        return {"messages": [sys_msg] + state["messages"] + [example_msg]}

    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    # Route to the tools node when the LLM requests a tool call, else to END.
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    # Compile graph
    return builder.compile()


# test
if __name__ == "__main__":
    graph = build_graph()

    # Load the questions file.
    with open("questions.json", "r", encoding="utf-8") as f:
        data = json.load(f)

    # Extract the questions.
    questions = [entry["question"] for entry in data if "question" in entry]

    for q in questions:
        print("orig:", q)
        messages = [HumanMessage(content=q)]
        result = graph.invoke({"messages": messages})
        m = result["messages"][-1]
        content = m.content if hasattr(m, "content") else str(m)
        print("Full response:", content)
        if "FINAL ANSWER:" in content:
            answer = content.rsplit("FINAL ANSWER:", 1)[-1].strip()
            print("✅ Extracted final answer:", answer)
        else:
            print("❌ No final answer found.")
        break  # only test the first question
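
# Expected input file formats (illustrative examples inferred from the fields
# this script reads; the real files may carry additional keys):
#
#   metadata.jsonl: one JSON object per line, e.g.
#     {"task_id": "abc123", "Question": "What is 2 + 2?", "Final answer": "4"}
#
#   questions.json: a JSON array of objects, e.g.
#     [{"question": "What is 2 + 2?"}]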