Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import random
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
from typing import List
|
| 6 |
+
|
| 7 |
+
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 8 |
+
from langchain.vectorstores import FAISS
|
| 9 |
+
from langchain.chains import RetrievalQA
|
| 10 |
+
from langchain.chat_models import ChatOpenAI
|
| 11 |
+
|
| 12 |
+
# enable_chat = False # 初始化为False
|
| 13 |
+
|
| 14 |
+
# def toggle_enable_chat():
|
| 15 |
+
# global enable_chat
|
| 16 |
+
# enable_chat = not enable_chat
|
| 17 |
+
# return f"Enable Chat set to {enable_chat}"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def initialize_aliyun_qa_bot(vector_store_dir: str="aliyun_qa"):
    """Build the module-level retrieval-QA bot from a local FAISS index.

    Loads the FAISS vector store found at *vector_store_dir* (embedded with
    OpenAI embeddings), wires it to a gpt-3.5-turbo chat model through a
    RetrievalQA chain, and publishes the chain as the global ``ALIYUN_BOT``.

    Args:
        vector_store_dir: Path of the persisted FAISS index directory.

    Returns:
        The constructed RetrievalQA chain (also stored in ``ALIYUN_BOT``).
    """
    print(vector_store_dir)
    vector_db = FAISS.load_local(vector_store_dir, OpenAIEmbeddings())
    print(vector_db)

    chat_model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

    # Only documents scoring >= 0.8 on similarity are retrieved; below that
    # the chain sees no sources and the caller falls back to a canned reply.
    retriever = vector_db.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"score_threshold": 0.8},
    )

    global ALIYUN_BOT
    ALIYUN_BOT = RetrievalQA.from_chain_type(chat_model, retriever=retriever)
    # Include the retrieved source documents in the chain's output dict.
    ALIYUN_BOT.return_source_documents = True

    return ALIYUN_BOT
|
| 34 |
+
|
| 35 |
+
def aliyun_chat(message, history, enable_chat=False):
    """Answer one chat turn using the global ``ALIYUN_BOT`` retrieval chain.

    Args:
        message: The user's current question.
        history: Prior conversation turns supplied by ``gr.ChatInterface``
            (unused here; the chain is stateless per query).
        enable_chat: When True, fall through to the LLM's generated answer
            even if retrieval found no matching documents. Defaults to False
            so the Gradio callback signature stays backward-compatible.

    Returns:
        The chain's answer string, or a canned fallback reply when no source
        documents were retrieved and ``enable_chat`` is False.
    """
    print(f"[message]{message}")
    print(f"[history]{history}")

    ans = ALIYUN_BOT({"query": message})
    # Answer only when retrieval produced supporting documents, or when
    # free-form LLM chat is explicitly enabled.
    if ans["source_documents"] or enable_chat:
        print(f"[result]{ans['result']}")
        print(f"[source_documents]{ans['source_documents']}")
        return ans["result"]
    # Otherwise return the scripted "can't answer" response.
    else:
        return "基于现有文档数据,无法回答你的问题,请您联系对应的阿里云企业咨询顾问。"
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def launch_gradio():
    """Start the Gradio chat UI for the Aliyun consulting QA bot.

    Builds a ``gr.ChatInterface`` around :func:`aliyun_chat` and serves it
    with public sharing enabled and basic-auth protection. Blocks until the
    server is shut down.
    """
    demo = gr.ChatInterface(
        fn=aliyun_chat,
        title="企业数字化转型咨询顾问(aliyun)问答机器人",
        examples=["阿里云有没有成熟的方法或者模板能够供我们去把业务价值和底座组件对应起来?","我们要建设中交二航局的数字化底座,阿里云有什么建议","公共云上,EMR和MaxCompute的使用量多少?","阿里公共云上能用maxcompute吗?"],
        description='<div style="font-family: \'KaiTi\', \'楷体\', serif; font-size: 18px; color: red;text-align: center;">问答数据基于<a href="https://kdocs.cn/l/cszrR7raahdS" style="font-family: \'KaiTi\', \'楷体\', serif; font-size: 18px; color: blue;">阿里云咨询QA文档</a> 请以官方文档为准。</div>'+'<div style="text-align: center;">\
        <img src="https://i.postimg.cc/y8T53gjr/logo.png" alt="精益价值logo" style="height: 80px; margin:0px auto;"></div>',
        # cache_examples=True,
        # retry_btn=None,
        # undo_btn=None,
        chatbot=gr.Chatbot(height=450),
    )

    # share=True exposes a public gradio.live URL; basic auth guards access.
    demo.launch(share=True, server_name="localhost", auth=("admin", "leanvalue"))
    # BUG FIX: removed `demo.deploy()` — Gradio interfaces have no `deploy`
    # method, so the original call raised AttributeError once launch() returned.
|
| 68 |
+
|
| 69 |
+
if __name__ == "__main__":
    # Build the retrieval-QA bot from the local FAISS index.
    initialize_aliyun_qa_bot(vector_store_dir="aliyun_qa")
    # Serve the Gradio chat UI (blocks until shutdown).
    launch_gradio()
|