Update app.py
app.py CHANGED
@@ -1,72 +1,122 @@
import os
import gradio as gr
-from langchain_core.prompts import PromptTemplate
from langchain_community.document_loaders import PyPDFLoader
-from langchain_google_genai import ChatGoogleGenerativeAI
-import google.generativeai as genai
-from langchain.chains.question_answering import load_qa_chain
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

-#
-genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
-
-# Load Mistral model
model_path = "nvidia/Mistral-NeMo-Minitron-8B-Base"
-mistral_tokenizer = AutoTokenizer.from_pretrained(model_path)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = torch.bfloat16
-

def initialize(file_path, question):
    try:
-
-
-
-
-
-        Answer:
-        """
        prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

        if os.path.exists(file_path):
            pdf_loader = PyPDFLoader(file_path)
            pages = pdf_loader.load_and_split()
-            context = "\n".join(str(page.page_content) for page in pages[:30])
-            stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-            stuff_answer = stuff_chain({"input_documents": pages, "question": question, "context": context}, return_only_outputs=True)
-            gemini_answer = stuff_answer['output_text']

-            #
-
-
-            with torch.no_grad():
-                mistral_outputs = mistral_model.generate(mistral_inputs, max_length=50)
-                mistral_output = mistral_tokenizer.decode(mistral_outputs[0], skip_special_tokens=True)

-
-
        else:
-            return "
    except Exception as e:
-        return f"
-
-# Define Gradio Interface
-input_file = gr.File(label="Upload PDF File")
-input_question = gr.Textbox(label="Ask about the document")
-output_text = gr.Textbox(label="Answer - Combined Gemini and Mistral")

def pdf_qa(file, question):
    if file is None:
-        return "
    return initialize(file.name, question)

-#
-gr.Interface(
    fn=pdf_qa,
-    inputs=[
-
-
-
-
-
import os
import gradio as gr
from langchain_community.document_loaders import PyPDFLoader
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
+from langchain_community.llms import HuggingFacePipeline
+from langchain.chains.question_answering import load_qa_chain
+from langchain_core.prompts import PromptTemplate
+from transformers import pipeline

+# Load the Mistral model
model_path = "nvidia/Mistral-NeMo-Minitron-8B-Base"
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = torch.bfloat16
+print(f"Using device: {device}")
+
+# Initialize the tokenizer
+mistral_tokenizer = AutoTokenizer.from_pretrained(model_path)
+
+# Initialize the model
+mistral_model = AutoModelForCausalLM.from_pretrained(
+    model_path,
+    torch_dtype=dtype,
+    device_map=device,
+    low_cpu_mem_usage=True
+)
+
+# Create the text-generation pipeline
+text_generation_pipeline = pipeline(
+    "text-generation",
+    model=mistral_model,
+    tokenizer=mistral_tokenizer,
+    max_length=512,
+    temperature=0.3,
+    top_p=0.95,
+    device_map=device
+)
+
+# Create a LangChain wrapper around the pipeline
+llm = HuggingFacePipeline(pipeline=text_generation_pipeline)

def initialize(file_path, question):
    try:
+        prompt_template = """Answer the question as accurately as possible from the provided context. If the context does not contain the answer, say "the answer is not provided in the context."\n\n
+        Context: \n {context}?\n
+        Question: \n {question} \n
+        Answer:
+        """
        prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

        if os.path.exists(file_path):
            pdf_loader = PyPDFLoader(file_path)
            pages = pdf_loader.load_and_split()

+            # Limit the context to avoid exceeding the token limit
+            max_pages = 5  # adjust to the model capacity and the document length
+            context = "\n".join(str(page.page_content) for page in pages[:max_pages])

+            try:
+                # Create the question-answering chain with Mistral
+                stuff_chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
+
+                # Get the answer using the limited set of pages
+                stuff_answer = stuff_chain(
+                    {"input_documents": pages[:max_pages], "question": question, "context": context},
+                    return_only_outputs=True
+                )
+
+                main_answer = stuff_answer['output_text']
+
+                # Generate a follow-up question
+                follow_up_prompt = f"Based on this answer: {main_answer}\nGenerate a related follow-up question:"
+                follow_up_inputs = mistral_tokenizer.encode(follow_up_prompt, return_tensors='pt').to(device)
+
+                with torch.no_grad():
+                    follow_up_outputs = mistral_model.generate(
+                        follow_up_inputs,
+                        max_length=256,
+                        temperature=0.7,
+                        top_p=0.9,
+                        do_sample=True
+                    )
+
+                follow_up = mistral_tokenizer.decode(follow_up_outputs[0], skip_special_tokens=True)
+
+                # Extract the question (the decoded output echoes the prompt, so keep only the text after the marker)
+                if "follow-up question:" in follow_up.lower():
+                    follow_up = follow_up.split("follow-up question:", 1)[1].strip()
+
+                combined_output = f"Answer: {main_answer}\n\nPossible follow-up question: {follow_up}"
+                return combined_output
+
+            except Exception as e:
+                if "exceeds the maximum token count" in str(e):
+                    return "Error: the document is too large to process. Please try a smaller document."
+                else:
+                    raise e
        else:
+            return "Error: unable to process the document. Please make sure the PDF file exists and is valid."
    except Exception as e:
+        return f"An error occurred: {str(e)}"

+# Define the Gradio interface
def pdf_qa(file, question):
    if file is None:
+        return "Please upload a PDF file first."
    return initialize(file.name, question)

+# Create the Gradio interface
+demo = gr.Interface(
    fn=pdf_qa,
+    inputs=[
+        gr.File(label="Upload a PDF file", file_types=[".pdf"]),
+        gr.Textbox(label="Ask about the document", placeholder="What is this document mainly about?")
+    ],
+    outputs=gr.Textbox(label="Mistral answer"),
+    title="Mistral-based PDF Q&A System",
+    description="Upload a PDF file and ask a question; the Mistral model will analyze the content and provide an answer along with a possible follow-up question."
+)
+
+if __name__ == "__main__":
+    demo.launch()
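For a quick local check of the updated script outside the Gradio UI, a minimal sketch along these lines can call initialize() directly; the file name sample.pdf and the question text are placeholders, and importing app loads the 8B model at import time, so enough memory is assumed to be available.

# Minimal local smoke test for the updated app.py (sample.pdf is a placeholder)
from app import initialize

if __name__ == "__main__":
    answer = initialize("sample.pdf", "What is this document mainly about?")
    print(answer)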