# deepseek / app.py
# ikun520 — Update app.py (commit 254aa75, verified)
# coding: utf-8
from openai import OpenAI
from docx import Document
import numpy as np
import faiss
from sentence_transformers import SentenceTransformer
import os
import gradio as gr
# Configuration parameters
WORD_DOC_PATH = "知识库.docx" # Path to the Word knowledge-base document
VECTOR_INDEX_PATH = "faiss_index.index" # Where the FAISS vector index is persisted
TEXT_DATA_PATH = "text_data.npy" # Where the chunked text data is persisted
EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2" # Sentence-embedding model name
# Initialize the API client and the embedding model.
# SECURITY: the API key used to be hard-coded in source. It is now read from
# the environment; the hard-coded value remains only as a backward-compatible
# fallback and should be treated as leaked — rotate it and set
# MODELSCOPE_API_KEY instead.
client = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1/',
    api_key=os.environ.get(
        'MODELSCOPE_API_KEY',
        '7ed44f86-e2c6-4b85-9c4a-26eacfc2e5ee',  # legacy fallback; rotate this key
    ),
)
embedder = SentenceTransformer(EMBEDDING_MODEL)
def process_word_document(doc_path=WORD_DOC_PATH, chunk_size=300):
    """Read a Word document and split its text into fixed-size word chunks.

    Paragraph text is accumulated and emitted as chunks of at most
    ``chunk_size`` whitespace-separated words (a trailing partial chunk is
    kept).

    Args:
        doc_path: Path to the .docx file. Defaults to ``WORD_DOC_PATH``
            (previously hard-coded).
        chunk_size: Maximum number of words per chunk. Defaults to 300
            (previously hard-coded).

    Returns:
        list[str]: The text chunks; empty if the document has no text.
    """
    doc = Document(doc_path)
    chunks = []
    buffer = []
    for para in doc.paragraphs:
        text = para.text.strip()
        if not text:
            continue
        buffer.extend(text.split())
        # Emit full chunks as soon as enough words have accumulated.
        while len(buffer) > chunk_size:
            chunks.append(" ".join(buffer[:chunk_size]))
            buffer = buffer[chunk_size:]
    if buffer:
        chunks.append(" ".join(buffer))
    return chunks
def create_vector_store():
    """Build and persist the FAISS index and the chunk store.

    No-op when both artifacts already exist on disk. The original code only
    checked the index file, so a missing ``TEXT_DATA_PATH`` could leave the
    store half-built and break retrieval later.

    Raises:
        ValueError: If the source document yields no text chunks (encoding an
            empty list would otherwise fail with an opaque shape error).
    """
    if os.path.exists(VECTOR_INDEX_PATH) and os.path.exists(TEXT_DATA_PATH):
        return
    chunks = process_word_document()
    if not chunks:
        raise ValueError(
            f"No text extracted from '{WORD_DOC_PATH}'; cannot build vector store"
        )
    embeddings = np.array(
        embedder.encode(chunks, convert_to_tensor=False)
    ).astype('float32')
    # Exact L2 (brute-force) index; fine for a single-document knowledge base.
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings)
    faiss.write_index(index, VECTOR_INDEX_PATH)
    np.save(TEXT_DATA_PATH, np.array(chunks))
def search_knowledge(query, top_k=3):
    """Return the ``top_k`` most relevant knowledge chunks, newline-joined.

    BUG FIX: when the index holds fewer than ``top_k`` vectors, FAISS pads
    the result with index -1; via Python's negative indexing that silently
    returned the *last* chunk. Negative indices are now filtered out.

    Args:
        query: The user question to embed and search with.
        top_k: Maximum number of chunks to retrieve.

    Returns:
        str: The matching chunks joined by newlines (may be fewer than
        ``top_k`` entries, or empty).
    """
    # NOTE(review): index and text data are re-loaded on every query; this is
    # simple and stateless but could be cached if latency matters.
    index = faiss.read_index(VECTOR_INDEX_PATH)
    text_data = np.load(TEXT_DATA_PATH, allow_pickle=True)
    query_embedding = np.array(
        embedder.encode([query], convert_to_tensor=False)
    ).astype('float32')
    _, indices = index.search(query_embedding, top_k)
    return "\n".join(text_data[i] for i in indices[0] if i >= 0)
def respond(message, history, max_tokens, temperature, top_p, user_input):
    """Gradio response function: RAG-grounded answer with streamed reasoning.

    Args:
        message: Chat-box message supplied by gr.ChatInterface (unused; the
            question is taken from ``user_input``).
        history: Chat history supplied by gr.ChatInterface (unused; each turn
            is answered statelessly).
        max_tokens: Generation token limit.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        user_input: The question, from the extra textbox.

    Returns:
        str: The model's reasoning followed by the final answer.
    """
    # Retrieve relevant knowledge to ground the system prompt.
    context = search_knowledge(user_input)
    messages = [
        {"role": "system", "content": f"基于以下知识回答问题,如果不知道就说不知道:\n{context}"},
        {"role": "user", "content": user_input},
    ]
    full_response = ""
    response = client.chat.completions.create(
        model='deepseek-ai/DeepSeek-R1',
        messages=messages,
        stream=True,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    done_reasoning = False
    for chunk in response:
        delta = chunk.choices[0].delta
        reasoning_chunk = delta.reasoning_content or ""
        answer_chunk = delta.content or ""
        if reasoning_chunk:
            full_response += reasoning_chunk
            print(reasoning_chunk, end='', flush=True)
        elif answer_chunk:
            if not done_reasoning:
                print('\n\n=== 最终答案 ===\n')
                full_response += '\n\n=== 最终答案 ===\n'
                done_reasoning = True
            # BUG FIX: answer tokens were only printed to stdout and never
            # accumulated, so the chat UI received reasoning but no answer.
            full_response += answer_chunk
            print(answer_chunk, end='', flush=True)
    print("\n" + "=" * 50)
    return full_response
# Initialize the vector store at import time (built on first run, no-op after).
create_vector_store()
# Build the Gradio chat UI.
# BUG FIX: gr.ChatInterface passes additional_inputs positionally after
# (message, history). respond's signature is
# (message, history, max_tokens, temperature, top_p, user_input), but the
# original input order put the question Textbox first — feeding its value
# into max_tokens and a slider value into user_input. The sliders now come
# first and the textbox last, matching the signature.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Slider(512, 2048, value=512, step=1, label="最大Token数"),
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="温度参数"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p采样"),
        gr.Textbox(label="用户提问"),
    ],
    theme=gr.themes.Soft(),
    title="制度文档问答系统",
    description="输入关于广西警察学院制度的问题进行问答",
)

if __name__ == "__main__":
    demo.launch()