Paul720810 committed on
Commit
a79c4da
·
verified ·
1 Parent(s): 6981dec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +257 -90
app.py CHANGED
@@ -10,18 +10,21 @@ from sentence_transformers import SentenceTransformer, util
10
  from huggingface_hub import hf_hub_download
11
  from llama_cpp import Llama
12
  from typing import List, Dict, Tuple, Optional
 
 
13
 
14
  # ==================== 配置區 ====================
15
  DATASET_REPO_ID = "Paul720810/Text-to-SQL-Softline"
16
  GGUF_REPO_ID = "Paul720810/gguf-models"
17
  GGUF_FILENAME = "qwen2.5-coder-1.5b-sql-finetuned.q4_k_m.gguf"
18
 
19
- FEW_SHOT_EXAMPLES_COUNT = 1
20
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
21
 
22
  print("=" * 60)
23
- print("🤖 Text-to-SQL (GGUF) 系統啟動中...")
24
  print(f"📊 數據集: {DATASET_REPO_ID}")
 
25
  print(f"💻 設備: {DEVICE}")
26
  print("=" * 60)
27
 
@@ -33,52 +36,62 @@ def format_log(message: str, level: str = "INFO") -> str:
33
  return f"[{get_current_time()}] [{level.upper()}] {message}"
34
 
35
  def parse_sql_from_response(response_text: str) -> Optional[str]:
 
36
  # 1. 先找 ```sql ... ```
37
  match = re.search(r"```sql\n(.*?)\n```", response_text, re.DOTALL)
38
  if match:
39
  return match.group(1).strip()
40
 
41
- # 2. 如果沒找到,嘗試找最長的 SELECT 語句
42
  match = re.search(r"(SELECT .*?;)", response_text, re.DOTALL | re.IGNORECASE)
43
  if match:
44
  return match.group(1).strip()
45
 
46
- return None
 
 
 
 
 
 
47
 
 
48
 
49
  # ==================== Text-to-SQL 核心類 ====================
50
  class TextToSQLSystem:
51
- def __init__(self, embed_model='sentence-transformers/paraphrase-multilingual-mpnet-base-v2'):
52
  self.log_history = []
53
- self._log("初始化系統...")
54
-
55
- # 1. 載入 schema
56
- self.schema = self._load_schema()
57
-
58
- # 2. 載入檢索模型
59
- self.model = SentenceTransformer(embed_model, device=DEVICE)
60
- self.dataset, self.corpus_embeddings = self._load_and_encode_dataset()
61
-
62
- # 3. 載入 GGUF 模型
63
- model_path = hf_hub_download(
64
- repo_id=GGUF_REPO_ID,
65
- filename=GGUF_FILENAME,
66
- repo_type="dataset"
67
- )
68
- self.llm = Llama(
69
- model_path=model_path,
70
- n_ctx=2048, # 少一半上下文 → 快
71
- n_threads=os.cpu_count(),# 用滿 CPU
72
- n_batch=512, # 增加 batch size → 減少迭代
73
- verbose=False
74
- )
75
- self._log(f"✅ 已載入 GGUF 模型: {GGUF_FILENAME}")
 
76
 
77
  def _log(self, message: str, level: str = "INFO"):
78
  self.log_history.append(format_log(message, level))
79
  print(format_log(message, level))
80
 
81
- def _load_schema(self) -> Dict:
 
82
  try:
83
  schema_path = hf_hub_download(
84
  repo_id=DATASET_REPO_ID,
@@ -86,97 +99,224 @@ class TextToSQLSystem:
86
  repo_type="dataset"
87
  )
88
  with open(schema_path, "r", encoding="utf-8") as f:
89
- self._log("成功載入資料庫結構 (sqlite_schema_FULL.json)")
90
- return json.load(f)
91
  except Exception as e:
92
  self._log(f"❌ 載入 schema 失敗: {e}", "ERROR")
93
- return {}
94
-
95
- def _format_schema_for_prompt(self) -> str:
96
- formatted = "資料庫結構:\n"
97
- for table, cols in self.schema.items():
98
- formatted += f"Table: {table}\n"
99
- for col in cols:
100
- formatted += f" - {col['name']} ({col['type']}) # {col.get('description','')}\n"
101
- formatted += "\n"
102
- return formatted
103
 
104
- def _load_and_encode_dataset(self):
 
105
  try:
 
106
  dataset = load_dataset(DATASET_REPO_ID, data_files="training_data.jsonl", split="train")
 
107
  corpus = [item['messages'][0]['content'] for item in dataset]
108
  self._log(f"正在編碼 {len(corpus)} 個問題...")
 
109
  embeddings = self.model.encode(corpus, convert_to_tensor=True, device=DEVICE)
110
- self._log("✅ 範例問題編碼完成。")
111
- return dataset, embeddings
 
 
 
 
 
 
112
  except Exception as e:
113
- self._log(f"❌ 載入或編碼數據集失敗: {e}", "ERROR")
114
- return None, None
115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  def find_most_similar(self, question: str, top_k: int) -> List[Dict]:
117
- if self.corpus_embeddings is None: return []
118
- q_emb = self.model.encode(question, convert_to_tensor=True, device=DEVICE)
119
- scores = util.cos_sim(q_emb, self.corpus_embeddings)[0]
120
- top = torch.topk(scores, k=min(top_k, len(self.corpus_embeddings)))
121
- results = []
122
- for score, idx in zip(top[0], top[1]):
123
- item = self.dataset[idx.item()]
124
- q = item['messages'][0]['content']
125
- a = item['messages'][1]['content']
126
- sql = parse_sql_from_response(a) or "無法解析範例SQL"
127
- results.append({"similarity": score.item(), "question": q, "sql": sql})
128
- return results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
 
130
  def _build_prompt(self, user_q: str, examples: List[Dict]) -> str:
131
- system_instruction = (
132
- "你是一位頂尖的 SQLite 專家。\n"
133
- "請嚴格遵守以下規則:\n"
134
- "1. 僅輸出最終的 SQL 語法,不要包含解釋或自然語言。\n"
135
- "2. 必須使用 ```sql 開頭 和 ``` 結尾包住查詢。\n"
136
- "3. 查詢必須能直接在 SQLite 執行。\n"
137
- )
138
- schema_str = self._format_schema_for_prompt()
139
- ex_str = "--- 範例 ---\n"
140
- for i, ex in enumerate(examples, 1):
141
- ex_str += f"範例 {i} 問題: {ex['question']}\nSQL:\n```sql\n{ex['sql']}\n```\n\n"
142
- return f"{system_instruction}\n\n{schema_str}\n{ex_str}\n--- 使用者問題 ---\n請根據以上資訊,生成 SQL 查詢:\n\"{user_q}\""
 
 
 
 
 
 
 
 
 
 
143
 
144
  def huggingface_api_call(self, prompt: str) -> str:
 
 
 
 
145
  try:
146
- self._log("🧠 使用 GGUF 模型生成 SQL...")
 
 
 
147
  output = self.llm(
148
  prompt,
149
- max_tokens=128,
150
- temperature=0.2,
151
- stop=["</s>", "```"],
 
152
  echo=False
153
  )
154
- text = output["choices"][0]["text"].strip()
155
- return text
156
  except Exception as e:
157
  self._log(f"❌ 生成失敗: {e}", "ERROR")
158
  return f"生成失敗: {e}"
159
 
160
  def process_question(self, question: str) -> Tuple[str, str]:
 
 
 
 
 
 
161
  self.log_history = []
162
- self._log(f"⏰ 問題: {question}")
163
 
 
 
 
 
 
 
164
  self._log("🔍 尋找相似範例...")
165
  examples = self.find_most_similar(question, FEW_SHOT_EXAMPLES_COUNT)
166
 
 
167
  self._log("📝 建立 Prompt...")
168
  prompt = self._build_prompt(question, examples)
 
169
 
170
- self._log("🧠 開始生成...")
 
171
  response = self.huggingface_api_call(prompt)
172
 
 
173
  sql = parse_sql_from_response(response)
174
  if sql:
175
- self._log("✅ 成功解析 SQL")
176
- return sql, "生成成功"
177
  else:
178
  self._log("❌ 未能解析 SQL", "ERROR")
179
- return f"原始回應:\n{response}", "生成失敗"
 
 
 
 
180
 
181
  # ==================== Gradio 介面 ====================
182
  text_to_sql_system = TextToSQLSystem()
@@ -184,24 +324,51 @@ text_to_sql_system = TextToSQLSystem()
184
  def process_query(q: str):
185
  if not q.strip():
186
  return "", "等待輸入", "請輸入問題"
 
187
  sql, status = text_to_sql_system.process_question(q)
188
- logs = "\n".join(text_to_sql_system.log_history)
 
189
  return sql, status, logs
190
 
191
- with gr.Blocks(theme=gr.themes.Soft(), title="Text-to-SQL Assistant (GGUF)") as demo:
192
- gr.Markdown("# 📊 Text-to-SQL Assistant (GGUF)")
 
 
 
 
 
 
 
 
 
 
 
193
  with gr.Row():
194
  with gr.Column(scale=2):
195
- inp = gr.Textbox(lines=3, label="💬 問題")
196
- btn = gr.Button("🚀 生成 SQL")
197
  status = gr.Textbox(label="狀態", interactive=False)
 
198
  with gr.Column(scale=3):
199
- sql_out = gr.Code(label="🤖 SQL", language="sql")
200
- with gr.Accordion("日誌", open=False):
201
- logs = gr.Textbox(lines=15, label="處理日誌", interactive=False)
 
 
 
 
 
 
 
 
202
 
 
203
  btn.click(process_query, inputs=[inp], outputs=[sql_out, status, logs])
204
  inp.submit(process_query, inputs=[inp], outputs=[sql_out, status, logs])
205
 
206
  if __name__ == "__main__":
207
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
10
  from huggingface_hub import hf_hub_download
11
  from llama_cpp import Llama
12
  from typing import List, Dict, Tuple, Optional
13
+ import faiss
14
+ from functools import lru_cache
15
 
16
  # ==================== 配置區 ====================
17
  DATASET_REPO_ID = "Paul720810/Text-to-SQL-Softline"
18
  GGUF_REPO_ID = "Paul720810/gguf-models"
19
  GGUF_FILENAME = "qwen2.5-coder-1.5b-sql-finetuned.q4_k_m.gguf"
20
 
21
+ FEW_SHOT_EXAMPLES_COUNT = 1 # 只使用1个最相关的范例
22
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
23
 
24
  print("=" * 60)
25
+ print("🤖 Text-to-SQL (GGUF) 極速版系統啟動中...")
26
  print(f"📊 數據集: {DATASET_REPO_ID}")
27
+ print(f"🤖 GGUF 模型: {GGUF_REPO_ID}/{GGUF_FILENAME}")
28
  print(f"💻 設備: {DEVICE}")
29
  print("=" * 60)
30
 
 
36
  return f"[{get_current_time()}] [{level.upper()}] {message}"
37
 
38
def parse_sql_from_response(response_text: str) -> Optional[str]:
    """Extract a SQL statement from a model response.

    Tries, in order:
      1. a fenced ```sql ... ``` code block,
      2. a complete ``SELECT ...;`` statement,
      3. an unterminated ``SELECT ...`` (a ';' is appended).

    Returns the stripped SQL text, or None when nothing SQL-like is found.
    """
    # 1. Fenced code block. Fix: tolerate "```SQL" casing and trailing
    #    spaces/tabs after the language tag (the previous pattern required
    #    an exact lowercase "```sql\n").
    match = re.search(r"```sql[ \t]*\n(.*?)```", response_text, re.DOTALL | re.IGNORECASE)
    if match:
        return match.group(1).strip()

    # 2. A complete SELECT statement terminated by a semicolon.
    match = re.search(r"(SELECT .*?;)", response_text, re.DOTALL | re.IGNORECASE)
    if match:
        return match.group(1).strip()

    # 3. A SELECT without a terminator: stop at a blank line, a closing
    #    fence, or end of text, and append the missing ';'.
    match = re.search(r"(SELECT .*?)(?=\n\n|\n```|$)", response_text, re.DOTALL | re.IGNORECASE)
    if match:
        sql = match.group(1).strip()
        if not sql.endswith(';'):
            sql += ';'
        return sql

    return None
59
 
60
# ==================== Text-to-SQL core class ====================
class TextToSQLSystem:
    """Retrieval-augmented Text-to-SQL system running on a local GGUF model.

    On construction it loads three independent components in parallel
    threads: the SQLite schema JSON, a sentence-embedding model plus a
    FAISS index over the training questions, and the llama.cpp GGUF model.
    """

    def __init__(self, embed_model='all-MiniLM-L6-v2'):
        self.log_history = []
        self._log("初始化極速系統...")
        # Per-instance answer cache: question -> (sql_or_error, status).
        self.query_cache = {}
        # Fix: remember the requested embedding model instead of silently
        # ignoring the constructor argument.
        self.embed_model_name = embed_model

        import threading
        self.schema = {}
        self.model = None
        self.dataset = None
        self.corpus_embeddings = None
        self.faiss_index = None
        self.llm = None

        # The three loaders touch disjoint attributes, so they can run
        # concurrently to cut startup time.
        loaders = [
            threading.Thread(target=self._load_schema),
            threading.Thread(target=self._load_embedding_model),
            threading.Thread(target=self._load_gguf_model),
        ]
        for t in loaders:
            t.start()
        for t in loaders:
            t.join()

        self._log("✅ 所有組件載入完成")

    def _log(self, message: str, level: str = "INFO"):
        """Record a message in the log history and echo it to stdout."""
        # NOTE: also called from the loader threads; list.append is atomic
        # under the GIL, so no extra locking is needed here.
        line = format_log(message, level)
        self.log_history.append(line)
        print(line)

    def _load_schema(self):
        """Download and parse the database schema JSON from the dataset repo."""
        try:
            schema_path = hf_hub_download(
                repo_id=DATASET_REPO_ID,
                filename="sqlite_schema_FULL.json",
                repo_type="dataset"
            )
            with open(schema_path, "r", encoding="utf-8") as f:
                self.schema = json.load(f)
            self._log("✅ 數據庫結構載入完成")
        except Exception as e:
            self._log(f"❌ 載入 schema 失敗: {e}", "ERROR")

    def _load_embedding_model(self):
        """Load the retrieval model, encode the corpus, build a FAISS index."""
        try:
            # Fix: honour the ``embed_model`` constructor argument
            # (previously hard-coded to 'all-MiniLM-L6-v2').
            self.model = SentenceTransformer(self.embed_model_name, device=DEVICE)
            dataset = load_dataset(DATASET_REPO_ID, data_files="training_data.jsonl", split="train")
            self.dataset = dataset
            corpus = [item['messages'][0]['content'] for item in dataset]
            self._log(f"正在編碼 {len(corpus)} 個問題...")

            embeddings = self.model.encode(corpus, convert_to_tensor=True, device=DEVICE)
            self.corpus_embeddings = embeddings

            # FAISS expects float32. IndexFlatIP scores by inner product, so
            # the vectors are L2-normalized first to turn the scores into
            # true cosine similarities (fix: raw inner product is biased by
            # vector length and is not a proper similarity ranking).
            embeddings_np = embeddings.cpu().numpy().astype('float32')
            faiss.normalize_L2(embeddings_np)
            self.faiss_index = faiss.IndexFlatIP(embeddings_np.shape[1])
            self.faiss_index.add(embeddings_np)

            self._log("✅ FAISS 向量索引建立完成")
        except Exception as e:
            self._log(f"❌ 載入檢索模型失敗: {e}", "ERROR")

    def _load_gguf_model(self):
        """Download and initialise the GGUF model via llama.cpp."""
        try:
            model_path = hf_hub_download(
                repo_id=GGUF_REPO_ID,
                filename=GGUF_FILENAME,
                repo_type="dataset"
            )
            self.llm = Llama(
                model_path=model_path,
                n_ctx=1024,               # small context window keeps inference fast
                n_threads=os.cpu_count(),  # use all CPU cores
                n_batch=512,
                n_gpu_layers=0,            # CPU-only inference
                verbose=False
            )
            self._log("✅ GGUF 模型載入完成")
        except Exception as e:
            self._log(f"❌ 載入 GGUF 模型失敗: {e}", "ERROR")

    def _identify_relevant_tables(self, question: str) -> List[str]:
        """Heuristically pick tables relevant to the question via keywords."""
        question_lower = question.lower()
        relevant_tables = []

        # Keyword -> table map (mixed Chinese/English trigger words).
        keyword_to_table = {
            'jobtimeline': ['報告', '完成', '時間', '日期', 'month', 'year', '月', '年', '報告', '完成時間'],
            'tsr53sampledescription': ['客戶', '買家', '申請', '評級', 'rating', 'client', '申請方', '買方'],
            'tsr53invoice': ['金額', '發票', '價格', '費用', 'amount', 'price', '錢', '成本'],
            'jobeventslog': ['事件', '操作', '用戶', 'event', 'action', '操作記錄'],
            'calendardays': ['工作日', '假期', 'workday', 'holiday', '日期', '放假']
        }

        for table, keywords in keyword_to_table.items():
            if table in self.schema and any(keyword in question_lower for keyword in keywords):
                relevant_tables.append(table)

        # Fall back to the two most commonly used tables when nothing matches.
        return relevant_tables[:2] if relevant_tables else ['jobtimeline', 'tsr53sampledescription']

    def _format_relevant_schema(self, table_names: List[str]) -> str:
        """Render a compact schema description for the given tables only."""
        if not self.schema:
            return "無數據庫結構信息"

        formatted = "相關表結構:\n"
        for table in table_names:
            if table in self.schema:
                formatted += f"## {table}\n"
                for col in self.schema[table][:6]:  # cap at 6 columns to keep the prompt short
                    col_desc = col.get('description', '')
                    formatted += f"- {col['name']} ({col['type']})"
                    if col_desc:
                        formatted += f" # {col_desc}"
                    formatted += "\n"
                formatted += "\n"

        return formatted

    def find_most_similar(self, question: str, top_k: int) -> List[Dict]:
        """Retrieve up to ``top_k`` distinct similar training examples.

        Fix: the previous ``@lru_cache`` decorator is removed — caching a
        bound method keys on and keeps ``self`` alive, and it is redundant
        because ``process_question`` already caches final results.
        """
        if self.faiss_index is None or self.dataset is None:
            return []

        try:
            q_emb = self.model.encode(question, convert_to_tensor=True, device=DEVICE)
            q_emb_np = q_emb.cpu().numpy().astype('float32').reshape(1, -1)
            # Normalize so inner-product scores match the index (cosine).
            faiss.normalize_L2(q_emb_np)

            # Over-fetch slightly so duplicate questions can be skipped below.
            distances, indices = self.faiss_index.search(q_emb_np, min(top_k + 2, len(self.dataset)))

            results = []
            seen_questions = set()

            for rank, idx in enumerate(indices[0]):
                if len(results) >= top_k:
                    break
                if idx < 0:
                    continue  # FAISS pads missing results with -1

                item = self.dataset[int(idx)]
                q_content = item['messages'][0]['content']
                a_content = item['messages'][1]['content']

                # Strip the instruction boilerplate to get the bare question.
                clean_q = re.sub(r"以下是一個SQL查詢任務:\s*指令:\s*", "", q_content).strip()
                if clean_q in seen_questions:
                    continue

                seen_questions.add(clean_q)
                sql = parse_sql_from_response(a_content) or "無法解析範例SQL"

                results.append({
                    "similarity": float(distances[0][rank]),
                    "question": clean_q,
                    "sql": sql
                })

            return results

        except Exception as e:
            self._log(f"❌ 檢索失敗: {e}", "ERROR")
            return []

    def _build_prompt(self, user_q: str, examples: List[Dict]) -> str:
        """Assemble a compact prompt: instruction + relevant schema + one example."""
        relevant_tables = self._identify_relevant_tables(user_q)
        schema_str = self._format_relevant_schema(relevant_tables)

        system_instruction = "生成SQL查詢。只輸出```sql...```內容。確保SQL語法正確。"

        # Include only the single best example to keep the prompt short.
        ex_str = ""
        if examples:
            best_example = examples[0]
            ex_str = f"參考範例:\n問題: {best_example['question']}\nSQL: ```sql\n{best_example['sql']}\n```\n\n"

        prompt = f"{system_instruction}\n{schema_str}\n{ex_str}問題: {user_q}\nSQL:"

        # Drop the example when the prompt would exceed the context budget.
        if len(prompt) > 1500:
            self._log("⚠️ 提示詞過長,進行精簡")
            prompt = f"{system_instruction}\n{schema_str}\n問題: {user_q}\nSQL:"

        return prompt

    def huggingface_api_call(self, prompt: str) -> str:
        """Run the GGUF model on ``prompt`` and return the raw generated text."""
        if self.llm is None:
            return "模型未載入"

        try:
            # Hard character cap as a last defence against overflowing the
            # 1024-token context window (rough heuristic, not token-exact).
            if len(prompt) > 1800:
                prompt = prompt[:1800] + "..."

            output = self.llm(
                prompt,
                max_tokens=256,
                temperature=0.1,
                top_p=0.9,
                stop=["</s>", "```", ";", "\n\n"],
                echo=False
            )
            return output["choices"][0]["text"].strip()

        except Exception as e:
            self._log(f"❌ 生成失敗: {e}", "ERROR")
            return f"生成失敗: {e}"

    def process_question(self, question: str) -> Tuple[str, str]:
        """Answer a user question; returns ``(sql_or_error, status)``."""
        self.log_history = []

        # Fix: truncate BEFORE the cache lookup. Previously the lookup used
        # the full question while the result was stored under the truncated
        # one, so overlong questions could never hit the cache.
        if len(question) > 200:
            self._log("⚠️ 問題過長,進行精簡")
            question = question[:200]

        if question in self.query_cache:
            self._log("⚡ 使用緩存結果")
            return self.query_cache[question]

        self._log(f"⏰ 處理問題: {question}")

        self._log("🔍 尋找相似範例...")
        examples = self.find_most_similar(question, FEW_SHOT_EXAMPLES_COUNT)

        self._log("📝 建立 Prompt...")
        prompt = self._build_prompt(question, examples)
        self._log(f"📏 Prompt 長度: {len(prompt)} 字符")

        self._log("🧠 開始生成 SQL...")
        response = self.huggingface_api_call(prompt)

        sql = parse_sql_from_response(response)
        if sql:
            self._log("✅ SQL 生成成功")
            result = (sql, "生成成功")
        else:
            self._log("❌ 未能解析 SQL", "ERROR")
            result = (f"無法解析SQL。原始回應:\n{response}", "生成失敗")

        # Cache the final outcome for repeat queries.
        self.query_cache[question] = result
        return result
320
 
321
# ==================== Gradio interface ====================
text_to_sql_system = TextToSQLSystem()

def process_query(q: str):
    """Gradio callback: returns (sql, status, recent log tail)."""
    if not q.strip():
        return "", "等待輸入", "請輸入問題"

    generated_sql, run_status = text_to_sql_system.process_question(q)
    # Show only the last 10 log lines so the panel stays readable.
    recent_logs = "\n".join(text_to_sql_system.log_history[-10:])

    return generated_sql, run_status, recent_logs

# Canned questions offered below the input box.
examples = [
    "2024年每月完成多少份報告?",
    "統計各種評級(Pass/Fail)的分布情況",
    "找出總金額最高的10個工作單",
    "哪些客戶的工作單數量最多?",
    "A組昨天完成了多少個測試項目?"
]

with gr.Blocks(theme=gr.themes.Soft(), title="Text-to-SQL 極速助手") as demo:
    gr.Markdown("# ⚡ Text-to-SQL 極速助手 (GGUF)")
    gr.Markdown("輸入自然語言問題,自動生成SQL查詢")

    with gr.Row():
        with gr.Column(scale=2):
            question_box = gr.Textbox(lines=3, label="💬 您的問題", placeholder="例如:2024年每月完成多少份報告?")
            generate_btn = gr.Button("🚀 生成 SQL", variant="primary")
            status_box = gr.Textbox(label="狀態", interactive=False)

        with gr.Column(scale=3):
            sql_output = gr.Code(label="🤖 生成的 SQL", language="sql", lines=8)

    with gr.Accordion("📋 處理日誌", open=False):
        log_box = gr.Textbox(lines=8, label="日誌", interactive=False)

    # Clickable sample questions.
    gr.Examples(
        examples=examples,
        inputs=question_box,
        label="💡 點擊試用範例問題"
    )

    # Both the button and pressing Enter trigger the same pipeline.
    generate_btn.click(process_query, inputs=[question_box], outputs=[sql_output, status_box, log_box])
    question_box.submit(process_query, inputs=[question_box], outputs=[sql_output, status_box, log_box])

if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )