import os
from dotenv import load_dotenv

load_dotenv()

# --- Supabase Setup (only if credentials are provided) ---
supabase_url = os.getenv("SUPABASE_URL")
supabase_key = os.getenv("SUPABASE_SERVICE_KEY") or os.getenv("SUPABASE_KEY")
if supabase_url and supabase_key:
    from supabase.client import Client, create_client
    from langchain_community.vectorstores import SupabaseVectorStore
    from langchain.tools.retriever import create_retriever_tool

    supabase: Client = create_client(supabase_url, supabase_key)
else:
    supabase = None

# --- Standard Imports ---
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.tools import tool

# LLM adapter: Hugging Face only
from langchain_huggingface import ChatHuggingFace, HuggingFaceEmbeddings, HuggingFacePipeline

# Optional document loaders
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
# --- Simple Math Tools ---
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result."""
    return a * b

@tool
def add(a: int, b: int) -> int:
    """Add two integers and return the sum."""
    return a + b

@tool
def subtract(a: int, b: int) -> int:
    """Subtract the second integer from the first and return the difference."""
    return a - b

@tool
def divide(a: int, b: int) -> float:
    """Divide the first integer by the second and return the quotient."""
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b

@tool
def modulus(a: int, b: int) -> int:
    """Return the remainder of dividing the first integer by the second."""
    return a % b
# --- Search Tools ---
@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for the query and return up to 2 documents."""
    docs = WikipediaLoader(query=query, load_max_docs=2).load()
    return "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}"/>\n{doc.page_content}' for doc in docs
    )
@tool
def web_search(query: str) -> str:
    """Search the web using Tavily and return up to 3 results."""
    # TavilySearchResults returns a list of dicts with "url" and "content" keys
    results = TavilySearchResults(max_results=3).invoke({"query": query})
    return "\n\n---\n\n".join(
        f'<Document source="{r["url"]}"/>\n{r["content"]}' for r in results
    )
@tool
def arxiv_search(query: str) -> str:
    """Search arXiv for the query and return up to 3 documents (first 1000 characters each)."""
    docs = ArxivLoader(query=query, load_max_docs=3).load()
    return "\n\n---\n\n".join(
        f'<Document source="{d.metadata.get("source", d.metadata.get("Title", ""))}"/>\n{d.page_content[:1000]}' for d in docs
    )
# --- Assemble Tools List ---
tools = [multiply, add, subtract, divide, modulus, wiki_search, web_search, arxiv_search]
# If Supabase is configured, add a retriever tool backed by the vector store
if supabase:
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    vector_store = SupabaseVectorStore(
        client=supabase,
        embedding=embeddings,
        table_name="documents",
        query_name="match_documents_langchain",
    )
    retriever_tool = create_retriever_tool(
        retriever=vector_store.as_retriever(),
        name="question_search",
        description="Retrieve similar questions from the vector store",
    )
    tools.append(retriever_tool)
# --- Load System Prompt ---
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    sys_msg = SystemMessage(content=f.read())
# --- Graph Builder (HF-only) ---
def build_graph():
    """
    Build and return a compiled StateGraph using a Hugging Face chat LLM with tools.
    """
    try:
        hf_token = os.getenv("HUGGINGFACE_TOKEN") or os.getenv("HF_TOKEN") or os.getenv("HF_API_TOKEN")
        if hf_token:
            print("Using HuggingFace Inference API...")
            from langchain_huggingface import HuggingFaceEndpoint

            llm = HuggingFaceEndpoint(
                repo_id="microsoft/DialoGPT-medium",
                huggingfacehub_api_token=hf_token,
                temperature=0.1,
                max_new_tokens=512,
            )
            llm = ChatHuggingFace(llm=llm)
            print("✓ Successfully initialized HF Inference API")
        else:
            print("No HF token found, creating mock LLM for demo…")

            class SimpleMockLLM:
                def bind_tools(self, tools):
                    return self

                def invoke(self, messages):
                    last_msg = messages[-1] if messages else None
                    content = getattr(last_msg, 'content', str(last_msg)).lower() if last_msg else ""
                    if any(word in content for word in ['math', 'calculate', 'add', 'multiply']):
                        return AIMessage(content="I can help with math! Try asking me to add, multiply, subtract, or divide numbers.")
                    elif any(word in content for word in ['search', 'find', 'look up']):
                        return AIMessage(content="I can search Wikipedia, Arxiv, or the web for information. What would you like me to search for?")
                    else:
                        return AIMessage(content=f"Hello! I'm a demo assistant. You said: {content[:100]}...")

            llm = SimpleMockLLM()
            print("✓ Created demo LLM")
    except Exception as e:
        print(f"Error initializing LLM: {e}")

        class BasicMockLLM:
            def bind_tools(self, tools):
                return self

            def invoke(self, messages):
                return AIMessage(content="Demo mode: Please configure a token for full functionality.")

        llm = BasicMockLLM()
        print("✓ Using basic fallback LLM")

    llm_with_tools = llm.bind_tools(tools)
    def retriever(state: MessagesState):
        """If Supabase is configured, surface the answer of the most similar stored question."""
        if supabase:
            query = state["messages"][-1].content
            docs = vector_store.similarity_search(query, k=1)
            if docs:
                content = docs[0].page_content
                answer = content.split("Final answer :")[-1].strip() if "Final answer :" in content else content.strip()
                return {"messages": [AIMessage(content=answer)]}
        return {"messages": state["messages"]}

    def assistant(state: MessagesState):
        """Call the tool-bound LLM with the system prompt prepended to the conversation."""
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
    # Wire up the graph: START -> retriever -> assistant, with tool calls routed through the tools node
    g = StateGraph(MessagesState)
    g.add_node("retriever", retriever)
    g.add_node("assistant", assistant)
    g.add_node("tools", ToolNode(tools))
    g.add_edge(START, "retriever")
    g.add_edge("retriever", "assistant")
    # tools_condition routes to "tools" when the LLM requests a tool call, otherwise to END
    g.add_conditional_edges("assistant", tools_condition)
    g.add_edge("tools", "assistant")
    return g.compile()
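
# --- Example usage (illustrative sketch, not part of the original module) ---
# A minimal way to exercise the compiled graph, assuming the environment
# variables above are set and system_prompt.txt exists; the question below
# is only a placeholder.
if __name__ == "__main__":
    graph = build_graph()
    question = "What is 12 multiplied by 7?"
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for message in result["messages"]:
        message.pretty_print()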