Update app.py
app.py CHANGED
@@ -1,35 +1,48 @@
-# ✅ app.py - 向量
+# ✅ app.py - auto-build vector store version (vector-based RAG)
 
 import json
+import os
 import gradio as gr
 import faiss
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 from sentence_transformers import SentenceTransformer
 
-# ✅
+# ✅ Path and model settings
 QA_FILE = "qa.json"
+TEXT_FILE = "web_data.txt"
 DOCS_FILE = "docs.json"
 VECTOR_FILE = "faiss_index.faiss"
 EMBED_MODEL = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
 GEN_MODEL = "Qwen/Qwen1.5-0.5B-Chat"
 
-# ✅
+# ✅ Build the vector store automatically if it does not exist
+if not (os.path.exists(VECTOR_FILE) and os.path.exists(DOCS_FILE)):
+    print("⚙️ 未偵測到向量資料庫,開始自動建構...")
+    with open(TEXT_FILE, "r", encoding="utf-8") as f:
+        content = f.read()
+    docs = [chunk.strip() for chunk in content.split("\n\n") if chunk.strip()]
+    embedder = SentenceTransformer(EMBED_MODEL)
+    embeddings = embedder.encode(docs, show_progress_bar=True)
+    index = faiss.IndexFlatL2(embeddings[0].shape[0])
+    index.add(embeddings)
+    faiss.write_index(index, VECTOR_FILE)
+    with open(DOCS_FILE, "w", encoding="utf-8") as f:
+        json.dump(docs, f, ensure_ascii=False, indent=2)
+    print("✅ 嵌入建構完成,共儲存段落:", len(docs))
+
+# ✅ Load data and models
 with open(QA_FILE, "r", encoding="utf-8") as f:
     qa_data = json.load(f)
-
-# ✅ Load the matching document paragraphs
 with open(DOCS_FILE, "r", encoding="utf-8") as f:
     docs = json.load(f)
-
-# ✅ Load the vector store and models
 index = faiss.read_index(VECTOR_FILE)
 embedder = SentenceTransformer(EMBED_MODEL)
 tokenizer = AutoTokenizer.from_pretrained(GEN_MODEL, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(GEN_MODEL, trust_remote_code=True).to("cuda" if torch.cuda.is_available() else "cpu")
 model.eval()
 
-# ✅ QA keywords
+# ✅ QA keyword responses take priority
 
 def retrieve_qa_context(user_input):
     for item in qa_data:
@@ -41,15 +54,14 @@ def retrieve_qa_context(user_input):
             return item["response"]
     return None
 
-# ✅
+# ✅ Vector search over stored paragraphs
 
 def search_context_faiss(user_input, top_k=3):
     vec = embedder.encode([user_input])
     D, I = index.search(vec, top_k)
-
-    return "\n".join(retrieved)
+    return "\n".join([docs[i] for i in I[0] if i < len(docs)])
 
-# ✅
+# ✅ LLM generation of Traditional Chinese answers
 
 def generate_answer(user_input, context):
     prompt = f"""
@@ -71,7 +83,7 @@ def generate_answer(user_input, context):
             return line.strip()
     return response[-90:]
 
-# ✅
+# ✅ Main question-answering flow
 
 def answer(user_input):
     direct = retrieve_qa_context(user_input)
@@ -81,13 +93,13 @@ def answer(user_input):
     context = search_context_faiss(user_input)
     return generate_answer(user_input, context)
 
-# ✅ Gradio
+# ✅ Launch the Gradio interface
 interface = gr.Interface(
     fn=answer,
     inputs=gr.Textbox(lines=2, placeholder="請輸入與南臺科技大學相關的問題..."),
     outputs="text",
-    title="南臺科技大學 問答機器人(向量式 RAG)",
-    description="
+    title="南臺科技大學 問答機器人(向量式 RAG 自動建構)",
+    description="首次啟動會自動建立向量資料庫,支援 QA 關鍵字與語意檢索,繁體中文回答。",
     theme="default"
 )
 
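Review note on the new return line in search_context_faiss: when the index holds fewer than top_k vectors, FAISS pads the missing result ids with -1. Since -1 < len(docs) is true, the committed guard lets the sentinel through and Python's negative indexing silently returns the last paragraph. A stricter guard, as a suggestion rather than part of the commit:

def search_context_faiss(user_input, top_k=3):
    vec = embedder.encode([user_input])
    D, I = index.search(vec, top_k)
    # 0 <= i drops FAISS's -1 "no result" sentinel, which the committed
    # check (i < len(docs)) would pass through to docs[-1].
    return "\n".join(docs[i] for i in I[0] if 0 <= i < len(docs))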
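For a quick check of the auto-build path, a minimal sketch of the build-and-search round trip. The file names and embedding model mirror the commit; the script name and sample query are illustrative assumptions:

# smoke_test.py - hypothetical round-trip check, run after the first app start.
import json
import faiss
from sentence_transformers import SentenceTransformer

embedder = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
index = faiss.read_index("faiss_index.faiss")
with open("docs.json", "r", encoding="utf-8") as f:
    docs = json.load(f)

# The index should hold exactly one vector per saved paragraph.
assert index.ntotal == len(docs)

# encode() returns a float32 numpy array, the dtype FAISS expects,
# so it can be passed to index.search() directly.
query = embedder.encode(["獎學金申請"])  # illustrative query
D, I = index.search(query, 3)
for dist, i in zip(D[0], I[0]):
    if 0 <= i < len(docs):
        print(f"{dist:.3f}  {docs[i][:60]}")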