Update app.py

app.py CHANGED
@@ -62,7 +62,6 @@ print(f"👉 最終使用模型:{MODEL_NAME}")
 tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_DIR)
 model = AutoModelForCausalLM.from_pretrained(LOCAL_MODEL_DIR)
 
-# Fix the missing pad_token issue
 if tokenizer.pad_token is None:
     tokenizer.pad_token = tokenizer.eos_token
 
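The comment is dropped but the fallback itself stays, and it matters: many causal LM checkpoints ship with an eos_token but no pad_token, and generation that needs padding warns or fails without one. A minimal standalone sketch of the same fix (the checkpoint path is a placeholder, and the last line is an assumed companion step, not part of this diff):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

LOCAL_MODEL_DIR = "./model"  # placeholder; app.py resolves the real path earlier

tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_DIR)
model = AutoModelForCausalLM.from_pretrained(LOCAL_MODEL_DIR)

# GPT-style tokenizers often define eos_token but not pad_token;
# reusing eos as pad is the usual fallback for generation.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = tokenizer.eos_token_id  # assumed, not in the diff
```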
@@ -76,7 +75,7 @@ generator = pipeline(
 def call_local_inference(prompt, max_new_tokens=256):
     try:
         if "中文" not in prompt:
-            prompt += "\n…
+            prompt += "\n(請用中文回答,且只依據提供的內容生成,不可加入其他知識)"
         outputs = generator(
             prompt,
             max_new_tokens=max_new_tokens,
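The replacement suffix only fires when the prompt does not already mention 中文, so it is appended at most once. Pieced together, the helper looks roughly like this; a sketch only, since the pipeline's remaining arguments and the return handling sit outside the visible hunk and are assumed here:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

LOCAL_MODEL_DIR = "./model"  # placeholder path
tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_DIR)
model = AutoModelForCausalLM.from_pretrained(LOCAL_MODEL_DIR)

# Assumed shape of the Space's generator; the real pipeline( call is cut off above.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def call_local_inference(prompt, max_new_tokens=256):
    try:
        # Append the "answer in Chinese, context only" instruction at most once.
        if "中文" not in prompt:
            prompt += "\n(請用中文回答,且只依據提供的內容生成,不可加入其他知識)"
        outputs = generator(prompt, max_new_tokens=max_new_tokens)
        # text-generation pipelines return [{"generated_text": ...}]
        return outputs[0]["generated_text"]
    except Exception as e:
        return f"(生成失敗:{e})"
```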
@@ -89,59 +88,64 @@ def call_local_inference(prompt, max_new_tokens=256):
         return f"(生成失敗:{e})"
 
 # -------------------------------
-# 3. …
+# 3. Article generation based only on RAG
 # -------------------------------
-def …
+def generate_article_rag_only(query, segments=3):
     docx_file = "/tmp/generated_article.docx"
     doc = DocxDocument()
     doc.add_heading(query, level=1)
-    doc.save(docx_file)
+    doc.save(docx_file)
 
     all_text = []
 
-    # 🔍 RAG …
+    # 🔍 RAG retrieval
     retrieved_docs = retriever.get_relevant_documents(query)
     context_texts = [d.page_content for d in retrieved_docs]
-    …
+    full_context = "\n".join(context_texts)
 
-    …
+    # Split into small chunks to avoid overloading the model
+    splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
+    chunks = splitter.split_text(full_context)
+
+    for i, chunk in enumerate(chunks[:segments]):
         progress_text = f"⏳ 正在生成第 {i+1}/{segments} 段..."
         prompt = (
-            f"…
-            f"…
-            f"主題:{query}。\n…
+            f"以下是唯一可用的參考內容:\n{chunk}\n\n"
+            f"請基於這些內容,寫一段約150-200字的中文文章,"
+            f"主題:{query}。\n"
+            f"⚠️ 僅能使用參考內容,不可加入其他知識。"
        )
         paragraph = call_local_inference(prompt)
         all_text.append(paragraph)
 
-        # …
+        # Write into the DOCX immediately
         doc = DocxDocument(docx_file)
         doc.add_paragraph(f"第{i+1}段:\n{paragraph}")
         doc.save(docx_file)
 
-        yield "\n\n".join(all_text), None, f"本次使用模型:{MODEL_NAME}", …
+        yield "\n\n".join(all_text), None, f"本次使用模型:{MODEL_NAME}", full_context, progress_text
 
     final_progress = f"✅ 已完成全部 {segments} 段生成!"
-    yield "\n\n".join(all_text), docx_file, f"本次使用模型:{MODEL_NAME}", …
+    yield "\n\n".join(all_text), docx_file, f"本次使用模型:{MODEL_NAME}", full_context, final_progress
 
 # -------------------------------
 # 4. Gradio interface
 # -------------------------------
 with gr.Blocks() as demo:
     gr.Markdown("# 📺 電視弘法視頻生成文章RAG系統")
-    gr.Markdown("…
+    gr.Markdown("只基於 faiss_db 內容生成文章,不加入外部知識。")
 
     query_input = gr.Textbox(lines=2, placeholder="請輸入文章主題", label="文章主題")
-    segments_input = gr.Slider(minimum=1, maximum=10, step=1, value=…
+    segments_input = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="段落數")
     output_text = gr.Textbox(label="生成文章")
     output_file = gr.File(label="下載 DOCX")
     model_used_text = gr.Textbox(label="實際使用模型", interactive=False)
-    context_text = gr.Textbox(label="…
+    context_text = gr.Textbox(label="檢索到的內容", interactive=False, lines=6)
     progress_text = gr.Textbox(label="生成進度", interactive=False)
 
     btn = gr.Button("生成文章")
     btn.click(
-        …
+        generate_article_rag_only,
         inputs=[query_input, segments_input],
         outputs=[output_text, output_file, model_used_text, context_text, progress_text]
     )
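The hunk uses a module-level retriever that this diff never defines; only the faiss_db name in the UI copy hints at its backing store. A hypothetical reconstruction of that setup, with the index path, embedding model, and k all placeholders (import paths also vary across LangChain releases):

```python
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# All names below are guesses; the Space's real embedding model and k are not in this diff.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = FAISS.load_local("faiss_db", embeddings, allow_dangerous_deserialization=True)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})

retrieved_docs = retriever.get_relevant_documents("範例主題")
context_texts = [d.page_content for d in retrieved_docs]
```

Note that newer LangChain versions deprecate get_relevant_documents in favour of retriever.invoke(query); the diff keeps the older call.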
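chunk_size=300 with chunk_overlap=50 keeps each prompt's reference text short while letting adjacent segments share 50 characters of context. In isolation the splitting step behaves like this (sample text is a stand-in for the joined retrieval results; the import moves to langchain_text_splitters in newer releases):

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)

full_context = "段落內容。" * 200  # stand-in for "\n".join(context_texts)
chunks = splitter.split_text(full_context)

# Only the first `segments` chunks feed prompts, so a small index or a
# narrow query can yield fewer paragraphs than the slider requests.
segments = 3
for i, chunk in enumerate(chunks[:segments]):
    print(f"chunk {i + 1}: {len(chunk)} chars")
```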
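Reopening and saving the .docx on every iteration means /tmp/generated_article.docx is a complete, valid document at all times, at the cost of rewriting the whole file per paragraph. The pattern in isolation, using python-docx with sample strings:

```python
from docx import Document as DocxDocument

docx_file = "/tmp/generated_article.docx"

# Create the file once with the heading...
doc = DocxDocument()
doc.add_heading("範例主題", level=1)
doc.save(docx_file)

# ...then reopen, append, and save per paragraph, mirroring the loop above,
# so a partial download is never a corrupt document.
for i, paragraph in enumerate(["第一段內容", "第二段內容"]):
    doc = DocxDocument(docx_file)
    doc.add_paragraph(f"第{i+1}段:\n{paragraph}")
    doc.save(docx_file)
```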
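Because generate_article_rag_only is a generator, btn.click streams each yielded five-tuple into the five outputs: the article, context, and progress boxes update per segment, and the file handle arrives only with the final yield. A toy sketch of that contract (all names hypothetical, file output left as None for brevity):

```python
import gradio as gr

def stream_segments(topic, segments):
    parts = []
    for i in range(int(segments)):
        parts.append(f"{topic}: segment {i + 1}")
        # Intermediate yields: no file yet, progress keeps moving.
        yield "\n\n".join(parts), None, f"⏳ {i + 1}/{int(segments)}"
    # The final yield is where a real file path would replace None.
    yield "\n\n".join(parts), None, f"✅ done, {int(segments)} segments"

with gr.Blocks() as demo:
    topic = gr.Textbox(label="Topic")
    n = gr.Slider(1, 10, step=1, value=3, label="Segments")
    text_out = gr.Textbox(label="Output")
    file_out = gr.File(label="File")
    progress = gr.Textbox(label="Progress")
    gr.Button("Run").click(stream_segments, inputs=[topic, n],
                           outputs=[text_out, file_out, progress])

demo.launch()
```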