import os
from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.document_loaders import ArxivLoader
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_community.retrievers import WikipediaRetriever
from langchain.tools.retriever import create_retriever_tool
from langchain_community.llms import YandexGPT
from langchain_core.tools import tool
from supabase.client import Client, create_client
from langchain_deepseek import ChatDeepSeek

# Load environment variables (API keys, Supabase credentials) from .env
load_dotenv()

@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return a maximum of 2 results.

    Args:
        query: The search query.
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ])
    return {"wiki_results": formatted_search_docs}

@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    # TavilySearchResults returns a list of dicts with "url" and "content" keys
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
            for doc in search_docs
        ])
    return {"web_results": formatted_search_docs}

@tool
def arxiv_search(query: str) -> dict:
    """Search arXiv for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            # ArxivLoader documents carry metadata keys such as "Title" and "Published"
            f'<Document source="{doc.metadata.get("Title", "")}" published="{doc.metadata.get("Published", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ])
    return {"arxiv_results": formatted_search_docs}

with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

# System message
sys_msg = SystemMessage(content=system_prompt)

# build a retriever
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")  # dim=768
supabase: Client = create_client(
    os.environ.get("SUPABASE_URL"),
    os.environ.get("SUPABASE_SERVICE_KEY"),
)
vector_store = SupabaseVectorStore(
    client=supabase,
    embedding=embeddings,
    table_name="documents",
    query_name="match_documents_langchain",
)
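
# Note: SupabaseVectorStore assumes the "documents" table and the
# "match_documents_langchain" SQL function already exist in the Supabase project,
# and that the stored embeddings match the 768-dimensional model used above.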

retriever_tool = create_retriever_tool(
    retriever=vector_store.as_retriever(
        search_type="similarity",
        search_kwargs={"k": 5},
    ),
    name="question_search",
    description="A tool to retrieve similar questions from a vector store.",
)

tools = [
    wiki_search,
    web_search,
    arxiv_search,
    retriever_tool,
]

def build_graph():
    """Build the agent graph: retriever -> assistant, with a tool-calling loop."""
    llm = ChatHuggingFace(
        llm=HuggingFaceEndpoint(
            repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        ),
    )
    # Alternative LLM backends:
    # llm = YandexGPT(
    #     api_key=os.environ["YANDEX_API_KEY"],
    #     folder_id=os.environ["YANDEX_FOLDER_ID"],
    #     model_uri=os.environ["YANDEX_MODEL_URI"],
    # )
    # llm = ChatDeepSeek(
    #     model="deepseek-chat",
    #     temperature=0,
    #     max_tokens=None,
    #     timeout=None,
    #     max_retries=2,
    # )

    # Bind the tools so the model can emit tool calls
    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        """Assistant node"""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    def retriever(state: MessagesState):
        """Retriever node"""
        similar_question = vector_store.similarity_search(state["messages"][0].content)
        print('Similar questions:')
        print(similar_question)
        if len(similar_question) > 0:
            example_msg = HumanMessage(
                content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
            )
            # return {"messages": [{"role": "system", "content": similar_question[0].page_content}]}
            return {"messages": [sys_msg] + state["messages"] + [example_msg]}
        return {"messages": [sys_msg] + state["messages"]}

    # Wire the graph: START -> retriever -> assistant, with a conditional hop to tools
    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()
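

# A minimal usage sketch (illustrative only). It assumes the required credentials
# (e.g. HUGGINGFACEHUB_API_TOKEN, SUPABASE_URL, SUPABASE_SERVICE_KEY, TAVILY_API_KEY)
# are set in the environment and that system_prompt.txt sits next to this file.
if __name__ == "__main__":
    graph = build_graph()
    question = "What is the capital of France?"
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for message in result["messages"]:
        message.pretty_print()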