# articles_time_news / langchain_react.py
# Uploaded by SylvainWei (commit 6e02d4b, verified)
# Complete deployment example code.
from langchain_community.llms import Ollama
from langchain_community.document_loaders import DirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_ollama import OllamaEmbeddings
from langchain import hub
from langchain.agents import create_react_agent, AgentExecutor
from langchain.tools.retriever import create_retriever_tool
import nltk
# NLTK tokenizer/tagger data — presumably needed by the `unstructured`-backed
# DirectoryLoader below; confirm against the loader's requirements.
nltk.download('punkt_tab')
nltk.download('averaged_perceptron_tagger_eng')
# Also make sure the following packages are installed first:
# pip install unstructured
# pip install chromadb
# pip install typing_extensions (if the Python version is 3.8 or below)
# 1. Initialize the LLM.
llm = Ollama(model="llama3.1:8b-instruct", temperature=0, num_ctx=8192)

# 2. Load and prepare documents.
# TODO: change the path below to the actual directory for each TCE.
# test_articles is a directory; the .txt files inside it form the corpus
# that will be retrieved against.
loader = DirectoryLoader('/home/weishaohang/workspace/Omni-Temp/test_articles', glob="**/*.txt")
documents = loader.load()
# Example of what loader.load() returns:
# [Document(metadata={'source': '.../article2.txt'}, page_content='bbbbbb'),
#  Document(metadata={'source': '.../article3.txt'}, page_content='cccccc'),
#  Document(metadata={'source': '.../article1.txt'}, page_content='aaaaaa')]

# chunk_size=512 is the size of each chunk; chunk_overlap=128 is the overlap
# between adjacent chunks — the repeated content bridges neighbouring chunks
# so that key information is not lost at split boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=128)
texts = text_splitter.split_documents(documents)

# 3. Create the vector store (persisted on disk under ./chroma_db).
embeddings = OllamaEmbeddings(model="nomic-embed-text")
vectorstore = Chroma.from_documents(documents=texts, embedding=embeddings, persist_directory="./chroma_db")

# 4. Create the retriever tool.
# search_kwargs holds the search parameters; k is the number of documents returned.
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
retriever_tool = create_retriever_tool(
    retriever,
    name="local_knowledge_base",
    description="Search for information in local articles collection."
)

# 5. Create the ReAct agent.
# NOTE(review): the previous revision aborted here with a debug
# print(vectorstore) + exit() and left steps 5-6 commented out; the
# debug leftovers were removed so the pipeline runs end to end.
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, [retriever_tool], prompt)
agent_executor = AgentExecutor(agent=agent, tools=[retriever_tool], verbose=True)

# 6. Run the agent.
# TODO: replace the placeholder with a real benchmark question.
response = agent_executor.invoke({"input": "Your benchmark question here?"})
print(response['output'])