ziphai committed on
Commit
6360c9f
·
verified ·
1 Parent(s): b1b7468

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -36
app.py CHANGED
@@ -7,23 +7,23 @@ from langchain_community.vectorstores import Chroma
7
  from langchain_openai import ChatOpenAI, OpenAIEmbeddings
8
  from dotenv import load_dotenv
9
 
10
# Load environment variables from a local .env file, if present.
load_dotenv()

# Validate the OpenAI API key BEFORE exporting it to os.environ:
# os.environ rejects a None value with a TypeError, so the original order
# (assign first, check second) crashed with an unhelpful error when the
# variable was missing. Check first to fail with a clear message.
api_key = os.getenv('OPENAI_API_KEY')
if not api_key:
    raise ValueError("請設置 'OPENAI_API_KEY' 環境變數")
os.environ["OPENAI_API_KEY"] = api_key

# OpenAI API key used by the LangChain/OpenAI clients below.
openai_api_key = api_key
21
 
22
def transform_history_for_langchain(history):
    """Convert chat history into LangChain's list of (question, answer) tuples.

    Entries with an empty/falsy user message are dropped.
    """
    pairs = []
    for turn in history:
        if turn[0]:
            pairs.append((turn[0], turn[1]))
    return pairs
25
 
26
- # Gradio 的歷史紀錄轉換為 OpenAI 格式
27
  def transform_history_for_openai(history):
28
  new_history = []
29
  for chat in history:
@@ -33,7 +33,7 @@ def transform_history_for_openai(history):
33
  new_history.append({"role": "assistant", "content": chat[1]})
34
  return new_history
35
 
36
- # 載入和處理文件的函數
37
  def load_and_process_documents(folder_path):
38
  documents = []
39
  for file in os.listdir(folder_path):
@@ -58,24 +58,24 @@ def load_and_process_documents(folder_path):
58
  )
59
  return vectordb
60
 
61
- # 初始化向量數據庫為全局變量
62
  if 'vectordb' not in globals():
63
  vectordb = load_and_process_documents("./")
64
 
65
- # 定義查詢處理函數
66
  def handle_query(user_message, temperature, chat_history):
67
  try:
68
  if not user_message:
69
- return chat_history # 返回不變的聊天記錄
70
 
71
- # 使用 LangChain ConversationalRetrievalChain 處理查詢
72
  preface = """
73
- 指令: 全部以繁體中文呈現,200字以內。
74
- 除了與文件相關內容可回答之外,與文件內容不相關的問題都必須回答:這問題很深奧,需要請示JohnLiao大神...
75
  """
76
- query = f"{preface} 查詢內容:{user_message}"
77
 
78
- # 提取之前的回答作為上下文,並轉換成 LangChain 支持的格式
79
  previous_answers = transform_history_for_langchain(chat_history)
80
 
81
  pdf_qa = ConversationalRetrievalChain.from_llm(
@@ -85,54 +85,54 @@ def handle_query(user_message, temperature, chat_history):
85
  verbose=False
86
  )
87
 
88
- # 調用模型進行查詢
89
  result = pdf_qa.invoke({"question": query, "chat_history": previous_answers})
90
 
91
- # 確保 'answer' 在結果中
92
  if "answer" not in result:
93
- return chat_history + [("系統", "抱歉,出現了一個錯誤。")]
94
 
95
- # 更新對話歷史中的 AI 回應
96
- chat_history[-1] = (user_message, result["answer"]) # 更新最後一個記錄,配對用戶輸入和 AI 回應
97
 
98
  return chat_history
99
 
100
  except Exception as e:
101
- return chat_history + [("系統", f"出現錯誤: {str(e)}")]
102
 
103
- # 使用 Gradio Blocks API 創建自訂聊天介面
104
  with gr.Blocks() as demo:
105
- gr.Markdown("<h1 style='text-align: center;'>AI 小助教</h1>")
106
 
107
  chatbot = gr.Chatbot()
108
  state = gr.State([])
109
 
110
  with gr.Row():
111
  with gr.Column(scale=0.85):
112
- txt = gr.Textbox(show_label=False, placeholder="請輸入您的問題...")
113
  with gr.Column(scale=0.15, min_width=0):
114
- submit_btn = gr.Button("提問")
115
 
116
def user_input(user_message, history):
    """Echo the user's question immediately with an empty reply slot.

    The assistant part is left blank here and filled in later by
    bot_response. Returns (chatbot display, cleared textbox, state).
    """
    pending_turn = (user_message, "")
    history.append(pending_turn)
    # Empty string clears the input box; history is returned twice, once
    # for the chatbot component and once for the state component.
    return history, "", history
120
 
121
def bot_response(history):
    """Produce the assistant's reply for the most recent user turn.

    Delegates to handle_query (temperature fixed at 0.7) and returns the
    updated history for both the chatbot display and the state component.
    """
    latest_question = history[-1][0]
    updated_history = handle_query(latest_question, 0.7, history)
    return updated_history, updated_history
126
 
127
- # 先顯示提問文字,然後處理 AI 回應,並清空輸入框
128
  submit_btn.click(user_input, [txt, state], [chatbot, txt, state], queue=False).then(
129
  bot_response, state, [chatbot, state]
130
  )
131
 
132
- # 支援按 "Enter" 提交問題,立即顯示提問文字並清空輸入框
133
  txt.submit(user_input, [txt, state], [chatbot, txt, state], queue=False).then(
134
  bot_response, state, [chatbot, state]
135
  )
136
 
137
- # 啟動 Gradio 應用
138
- demo.launch()
 
7
  from langchain_openai import ChatOpenAI, OpenAIEmbeddings
8
  from dotenv import load_dotenv
9
 
10
# Load environment variables from a local .env file, if present.
load_dotenv()

# Validate the OpenAI API key BEFORE exporting it to os.environ:
# os.environ rejects a None value with a TypeError, so the original order
# (assign first, check second) crashed with an unhelpful error when the
# variable was missing. Check first to fail with a clear message.
api_key = os.getenv('OPENAI_API_KEY')
if not api_key:
    raise ValueError("Please set the 'OPENAI_API_KEY' environment variable")
os.environ["OPENAI_API_KEY"] = api_key

# OpenAI API key used by the LangChain/OpenAI clients below.
openai_api_key = api_key
21
 
22
def transform_history_for_langchain(history):
    """Convert chat history into LangChain's list of (question, answer) tuples.

    Entries with an empty/falsy user message are dropped.
    """
    pairs = []
    for turn in history:
        if turn[0]:
            pairs.append((turn[0], turn[1]))
    return pairs
25
 
26
+ # Transform chat history for OpenAI format
27
  def transform_history_for_openai(history):
28
  new_history = []
29
  for chat in history:
 
33
  new_history.append({"role": "assistant", "content": chat[1]})
34
  return new_history
35
 
36
+ # Load and process documents function
37
  def load_and_process_documents(folder_path):
38
  documents = []
39
  for file in os.listdir(folder_path):
 
58
  )
59
  return vectordb
60
 
61
+ # Initialize vector database as a global variable
62
  if 'vectordb' not in globals():
63
  vectordb = load_and_process_documents("./")
64
 
65
+ # Define query handling function for RAG
66
  def handle_query(user_message, temperature, chat_history):
67
  try:
68
  if not user_message:
69
+ return chat_history # Return unchanged chat history
70
 
71
+ # Use LangChain's ConversationalRetrievalChain to handle the query
72
  preface = """
73
+ Instruction: Answer in Traditional Chinese, within 200 characters.
74
+ If the question is unrelated to the documents, respond with: 此事無可奉告,話說這件事須請教海虔王...
75
  """
76
+ query = f"{preface} Query content: {user_message}"
77
 
78
+ # Extract previous answers as context, converting them to LangChain format
79
  previous_answers = transform_history_for_langchain(chat_history)
80
 
81
  pdf_qa = ConversationalRetrievalChain.from_llm(
 
85
  verbose=False
86
  )
87
 
88
+ # Invoke the model to handle the query
89
  result = pdf_qa.invoke({"question": query, "chat_history": previous_answers})
90
 
91
+ # Ensure 'answer' is present in the result
92
  if "answer" not in result:
93
+ return chat_history + [("System", "Sorry, an error occurred.")]
94
 
95
+ # Update the AI response in chat history
96
+ chat_history[-1] = (user_message, result["answer"]) # Update the last record, pairing user input with AI response
97
 
98
  return chat_history
99
 
100
  except Exception as e:
101
+ return chat_history + [("System", f"An error occurred: {str(e)}")]
102
 
103
+ # Create a custom chat interface using Gradio Blocks API
104
  with gr.Blocks() as demo:
105
+ gr.Markdown("<h1 style='text-align: center;'>AI Assistant for AI Forum</h1>")
106
 
107
  chatbot = gr.Chatbot()
108
  state = gr.State([])
109
 
110
  with gr.Row():
111
  with gr.Column(scale=0.85):
112
+ txt = gr.Textbox(show_label=False, placeholder="Please enter your question...")
113
  with gr.Column(scale=0.15, min_width=0):
114
+ submit_btn = gr.Button("Ask")
115
 
116
def user_input(user_message, history):
    """Echo the user's question immediately with an empty reply slot.

    The assistant part is left blank here and filled in later by
    bot_response. Returns (chatbot display, cleared textbox, state).
    """
    pending_turn = (user_message, "")
    history.append(pending_turn)
    # Empty string clears the input box; history is returned twice, once
    # for the chatbot component and once for the state component.
    return history, "", history
120
 
121
def bot_response(history):
    """Produce the assistant's reply for the most recent user turn.

    Delegates to handle_query (temperature fixed at 0.7) and returns the
    updated history for both the chatbot display and the state component.
    """
    latest_question = history[-1][0]
    updated_history = handle_query(latest_question, 0.7, history)
    return updated_history, updated_history
126
 
127
+ # First show user message, then handle AI response, clear input box
128
  submit_btn.click(user_input, [txt, state], [chatbot, txt, state], queue=False).then(
129
  bot_response, state, [chatbot, state]
130
  )
131
 
132
+ # Support pressing "Enter" to submit question, immediately show user input, clear input box
133
  txt.submit(user_input, [txt, state], [chatbot, txt, state], queue=False).then(
134
  bot_response, state, [chatbot, state]
135
  )
136
 
137
+ # Launch Gradio app
138
+ demo.launch()