# wallacechu3 / data.py
# Uploaded to Hugging Face Spaces ("Upload 3 files", commit 1b43f88, verified).
import gradio as gr
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import faiss
import numpy as np
import torch
# 1️⃣ Load the text corpus: one document per line.
# Strip trailing newlines and drop blank lines — the original readlines()
# kept "\n" suffixes and embedded empty lines as (meaningless) documents.
with open("data.txt", "r", encoding="utf-8") as f:
    docs = [line.strip() for line in f if line.strip()]

# 2️⃣ Encode every document into a dense vector (NumPy array, one row per doc).
embedder = SentenceTransformer("all-MiniLM-L6-v2")
doc_embeddings = embedder.encode(docs, convert_to_numpy=True)

# 3️⃣ Build an exact L2-distance FAISS index over the document embeddings.
index = faiss.IndexFlatL2(doc_embeddings.shape[1])
index.add(doc_embeddings)
# 4️⃣ Load the Chinese causal-LM used for answer generation.
model_name = "IDEA-CCNL/Wenzhong-GPT2-110M"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Run on GPU 0 when CUDA is available, otherwise fall back to CPU (-1).
device_id = 0 if torch.cuda.is_available() else -1
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=device_id,
)
# 5️⃣ 定義 RAG 聊天功能
def rag_chat(question):
    """Answer *question* with retrieval-augmented generation.

    Embeds the question, retrieves the closest documents from the FAISS
    index, and prompts the generator with the retrieved context.

    Args:
        question: User question as a plain string.

    Returns:
        The model's generated answer text (prompt not included).
    """
    # Encode the query with the same embedder used for the corpus.
    q_emb = embedder.encode([question], convert_to_numpy=True)
    # Retrieve up to 2 nearest documents, never more than the corpus holds
    # (FAISS pads missing results with index -1 otherwise).
    top_k = min(2, len(docs))
    _, hit_ids = index.search(q_emb, k=top_k)
    context = "\n".join(docs[i].strip() for i in hit_ids[0])
    prompt = f"根據以下資料回答問題:\n{context}\n\n問題:{question}\n答案:"
    # max_new_tokens bounds only the continuation; the original max_length=120
    # counted the prompt too, so a long retrieved context could leave no room
    # for (or outright abort) the answer. return_full_text=False makes the
    # pipeline return only the generated answer instead of prompt + answer.
    output = generator(
        prompt,
        max_new_tokens=120,
        num_return_sequences=1,
        do_sample=True,
        return_full_text=False,
    )[0]["generated_text"]
    return output.strip()
# 6️⃣ Assemble the Gradio UI: one question textbox in, one answer textbox out.
question_box = gr.Textbox(label="輸入你的問題", placeholder="例如:RAG 是什麼?")
answer_box = gr.Textbox(label="模型回答")

demo = gr.Interface(
    fn=rag_chat,
    inputs=question_box,
    outputs=answer_box,
    title="🧠 中文 RAG 聊天機器人",
    description="這個模型會根據提供的文本資料找出相關資訊,並用中文回答。",
)

# Launch the web server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()