sofzcc commited on
Commit
0ed57dc
·
verified ·
1 Parent(s): bb87ddf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +228 -108
app.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  import re
3
  import json
4
  from pathlib import Path
5
- from typing import List, Dict, Tuple
6
 
7
  import numpy as np
8
  import faiss
@@ -28,6 +28,7 @@ HEADING_RE = re.compile(r"^(#{1,6})\s+(.*)$", re.MULTILINE)
28
 
29
  # ----------- Load Markdown -----------
30
  def read_markdown_files(kb_dir: Path) -> List[Dict]:
 
31
  docs = []
32
  for md_path in sorted(kb_dir.glob("*.md")):
33
  text = md_path.read_text(encoding="utf-8", errors="ignore")
@@ -35,10 +36,19 @@ def read_markdown_files(kb_dir: Path) -> List[Dict]:
35
  m = re.search(r"^#\s+(.*)$", text, flags=re.MULTILINE)
36
  if m:
37
  title = m.group(1).strip()
38
- docs.append({"filepath": str(md_path), "filename": md_path.name, "title": title, "text": text})
 
 
 
 
 
39
  return docs
40
 
41
- def chunk_markdown(doc: Dict, chunk_chars: int = 1200, overlap: int = 150) -> List[Dict]:
 
 
 
 
42
  text = doc["text"]
43
  sections = re.split(r"(?=^##\s+|\n##\s+|\n###\s+|^###\s+)", text, flags=re.MULTILINE)
44
  if len(sections) == 1:
@@ -47,16 +57,19 @@ def chunk_markdown(doc: Dict, chunk_chars: int = 1200, overlap: int = 150) -> Li
47
  chunks = []
48
  for sec in sections:
49
  sec = sec.strip()
50
- if not sec:
51
  continue
 
52
  heading_match = HEADING_RE.search(sec)
53
  section_heading = heading_match.group(2).strip() if heading_match else doc["title"]
54
 
 
55
  start = 0
56
  while start < len(sec):
57
  end = min(start + chunk_chars, len(sec))
58
  chunk_text = sec[start:end].strip()
59
- if chunk_text:
 
60
  chunks.append({
61
  "doc_title": doc["title"],
62
  "filename": doc["filename"],
@@ -64,9 +77,11 @@ def chunk_markdown(doc: Dict, chunk_chars: int = 1200, overlap: int = 150) -> Li
64
  "section": section_heading,
65
  "content": chunk_text
66
  })
 
67
  if end == len(sec):
68
  break
69
  start = max(0, end - overlap)
 
70
  return chunks
71
 
72
  # ----------- KB Index -----------
@@ -75,13 +90,20 @@ class KBIndex:
75
  self.embedder = SentenceTransformer(EMBEDDING_MODEL_NAME)
76
  self.reader_tokenizer = AutoTokenizer.from_pretrained(READER_MODEL_NAME)
77
  self.reader_model = AutoModelForQuestionAnswering.from_pretrained(READER_MODEL_NAME)
78
- self.reader = pipeline("question-answering", model=self.reader_model, tokenizer=self.reader_tokenizer)
 
 
 
 
 
 
79
 
80
  self.index = None
81
  self.embeddings = None
82
  self.metadata = []
83
 
84
  def build(self, kb_dir: Path):
 
85
  docs = read_markdown_files(kb_dir)
86
  if not docs:
87
  raise RuntimeError(f"No markdown files found in {kb_dir.resolve()}")
@@ -89,11 +111,17 @@ class KBIndex:
89
  all_chunks = []
90
  for d in docs:
91
  all_chunks.extend(chunk_markdown(d))
92
- texts = [c["content"] for c in all_chunks]
93
- if not texts:
94
  raise RuntimeError("No content chunks generated from KB.")
95
 
96
- embeddings = self.embedder.encode(texts, batch_size=32, convert_to_numpy=True, show_progress_bar=False)
 
 
 
 
 
 
97
  faiss.normalize_L2(embeddings)
98
 
99
  dim = embeddings.shape[1]
@@ -109,7 +137,8 @@ class KBIndex:
109
  json.dump(self.metadata, f, ensure_ascii=False, indent=2)
110
  faiss.write_index(index, str(FAISS_PATH))
111
 
112
- def load(self):
 
113
  if not (EMBEDDINGS_PATH.exists() and METADATA_PATH.exists() and FAISS_PATH.exists()):
114
  return False
115
  self.embeddings = np.load(EMBEDDINGS_PATH)
@@ -118,108 +147,149 @@ class KBIndex:
118
  self.index = faiss.read_index(str(FAISS_PATH))
119
  return True
120
 
121
- def retrieve(self, query: str, top_k: int = 4) -> List[Tuple[int, float]]:
 
122
  q_emb = self.embedder.encode([query], convert_to_numpy=True)
123
  faiss.normalize_L2(q_emb)
124
  D, I = self.index.search(q_emb, top_k)
125
  return list(zip(I[0].tolist(), D[0].tolist()))
126
 
127
- def answer(self, question: str, retrieved: List[Tuple[int, float]]):
128
- best = {"text": None, "score": -1e9, "meta": None, "sim": 0.0}
 
 
 
 
 
129
  for idx, sim in retrieved:
130
  meta = self.metadata[idx]
131
  ctx = meta["content"]
 
132
  try:
133
  out = self.reader(question=question, context=ctx)
134
- except Exception:
 
 
 
 
 
 
 
 
 
 
135
  continue
136
- score = float(out.get("score", 0.0))
137
- if score > best["score"]:
138
- best = {"text": out.get("answer", "").strip(), "score": score, "meta": meta, "sim": float(sim)}
139
- if not best["text"]:
140
- return None, 0.0, []
 
 
 
 
141
  citations = []
142
  seen = set()
143
- for idx, _ in retrieved[:2]:
144
  m = self.metadata[idx]
145
  key = (m["filename"], m["section"])
146
  if key in seen:
147
  continue
148
  seen.add(key)
149
- citations.append({"title": m["doc_title"], "filename": m["filename"], "section": m["section"]})
150
- return best["text"], best["score"], citations
151
-
 
 
 
 
 
 
 
152
  kb = KBIndex()
153
 
154
  def ensure_index():
155
- # Build on first run in Space; load if cached
156
  if not kb.load():
157
- kb.build(KB_DIR)
 
 
 
 
158
  ensure_index()
159
 
160
  # ----------- Guardrails -----------
161
- LOW_CONF_THRESHOLD = 0.20
162
- LOW_SIM_THRESHOLD = 0.30
163
-
164
- HELPFUL_SUGGESTIONS = [
165
- ("Connect WhatsApp", "How do I connect my WhatsApp number?"),
166
- ("Reset Password", "I can't sign in / forgot my password"),
167
- ("First Automation", "How do I create my first automation?"),
168
- ("Billing & Invoices", "How do I download invoices for billing?"),
169
- ("Fix Instagram Connect", "Why can't I connect Instagram?")
170
  ]
171
 
172
  def format_citations(citations: List[Dict]) -> str:
 
173
  if not citations:
174
  return ""
175
- return "\n".join([f"β€’ **{c['title']}** β€” _{c['section']}_ (`{c['filename']}`)" for c in citations])
 
 
 
176
 
177
- def respond(user_msg, history):
 
178
  user_msg = (user_msg or "").strip()
 
179
  if not user_msg:
180
- return "How can I help? Try: **Connect WhatsApp** or **Reset password**."
181
 
182
- retrieved = kb.retrieve(user_msg, top_k=4)
 
 
183
  if not retrieved:
184
- return "I couldn't find anything yet. Try rephrasing or pick a quick action below."
185
-
186
- span, score, citations = kb.answer(user_msg, retrieved)
187
- if not span:
188
- suggestions = "\n".join([f"- {c['title']} β€” _{c['section']}_" for c in citations]) or "- Try a different query."
189
- return f"I'm not fully sure. Here are the closest matches:\n\n{suggestions}"
 
 
 
 
 
 
 
190
 
191
- best_sim = max([s for _, s in retrieved]) if retrieved else 0.0
192
- low_conf = (score < LOW_CONF_THRESHOLD) or (best_sim < LOW_SIM_THRESHOLD)
193
  citations_md = format_citations(citations)
194
- base_answer = span if len(span) > 3 else "I found a relevant section. Opening the steps in the cited article."
195
-
196
- if low_conf:
197
  return (
198
- f"{base_answer}\n\n---\n**I may be uncertain.** Here are relevant articles:\n{citations_md}\n\n"
199
- f"If this doesn't solve it, ask me to *escalate to human support*."
 
 
 
 
 
 
 
 
 
200
  )
201
 
202
- return f"{base_answer}\n\n---\n**Sources:**\n{citations_md}\n_Tip: Say **show full steps** for more detail._"
203
-
204
- def quick_intent(label):
205
- for l, q in HELPFUL_SUGGESTIONS:
206
- if l == label:
207
- return q
208
- return ""
209
-
210
- def rebuild_index():
211
- kb.build(KB_DIR)
212
- return gr.update(value="✅ Index rebuilt from KB.")
213
-
214
- # ----------- Gradio UI -----------
215
- def process_message(user_input, history):
216
- """
217
- history is a list of dicts: [{"role":"user","content":...}, {"role":"assistant","content":...}, ...]
218
- We return updated history and a cleared textbox.
219
- """
220
  user_input = (user_input or "").strip()
221
  if not user_input:
222
  return history, gr.update(value="")
 
223
  reply = respond(user_input, history or [])
224
  new_history = (history or []) + [
225
  {"role": "user", "content": user_input},
@@ -227,52 +297,102 @@ def process_message(user_input, history):
227
  ]
228
  return new_history, gr.update(value="")
229
 
230
- def process_quick(label, history):
231
- # turn a quick button into a user message
232
- q = quick_intent(label)
233
- return process_message(q, history)
 
 
 
 
 
 
 
 
 
 
234
 
235
- with gr.Blocks(title="Self-Service KB Assistant", fill_height=True) as demo:
 
 
 
 
 
 
 
 
 
 
236
  gr.Markdown(
237
  """
238
- # 🧩 Self-Service Knowledge Assistant
239
- Ask about setup, automations, billing, or troubleshooting.
240
- The assistant answers **only from the Knowledge Base** and cites the articles it used.
241
-
242
- **Quick actions:** Use a button below to try a common task.
243
  """
244
  )
245
-
 
246
  with gr.Row():
247
- chat = gr.Chatbot(height=420, show_copy_button=True, type="messages") # ✅ modern format
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  with gr.Row():
249
- txt = gr.Textbox(placeholder="e.g., How do I connect WhatsApp?", scale=10)
250
- send = gr.Button("Send", variant="primary")
251
-
252
- with gr.Row():
253
- # Quick action buttons
254
- btn_whatsapp = gr.Button("Connect WhatsApp")
255
- btn_reset = gr.Button("Reset Password")
256
- btn_first = gr.Button("First Automation")
257
- btn_billing = gr.Button("Billing & Invoices")
258
- btn_ig = gr.Button("Fix Instagram Connect")
259
-
260
- with gr.Accordion("Admin", open=False):
261
- gr.Markdown("Rebuild the search index after changing files in `/kb`.")
262
- rebuild = gr.Button("Rebuild Index")
263
- status = gr.Markdown("")
264
-
265
- # Wire up events
 
266
  send.click(process_message, inputs=[txt, chat], outputs=[chat, txt])
267
- txt.submit(process_message, inputs=[txt, chat], outputs=[chat, txt]) # Enter key
268
-
269
- btn_whatsapp.click(process_quick, inputs=[gr.State("Connect WhatsApp"), chat], outputs=[chat, txt])
270
- btn_reset.click(process_quick, inputs=[gr.State("Reset Password"), chat], outputs=[chat, txt])
271
- btn_first.click(process_quick, inputs=[gr.State("First Automation"), chat], outputs=[chat, txt])
272
- btn_billing.click(process_quick, inputs=[gr.State("Billing & Invoices"), chat], outputs=[chat, txt])
273
- btn_ig.click(process_quick, inputs=[gr.State("Fix Instagram Connect"), chat], outputs=[chat, txt])
274
-
275
- rebuild.click(lambda: (kb.build(KB_DIR), "βœ… Index rebuilt from KB.")[1], outputs=status)
 
 
 
 
 
 
 
 
 
 
 
 
276
 
277
  if __name__ == "__main__":
278
- demo.launch()
 
2
  import re
3
  import json
4
  from pathlib import Path
5
+ from typing import List, Dict, Tuple, Optional
6
 
7
  import numpy as np
8
  import faiss
 
28
 
29
  # ----------- Load Markdown -----------
30
  def read_markdown_files(kb_dir: Path) -> List[Dict]:
31
+ """Read all markdown files from the knowledge base directory."""
32
  docs = []
33
  for md_path in sorted(kb_dir.glob("*.md")):
34
  text = md_path.read_text(encoding="utf-8", errors="ignore")
 
36
  m = re.search(r"^#\s+(.*)$", text, flags=re.MULTILINE)
37
  if m:
38
  title = m.group(1).strip()
39
+ docs.append({
40
+ "filepath": str(md_path),
41
+ "filename": md_path.name,
42
+ "title": title,
43
+ "text": text
44
+ })
45
  return docs
46
 
47
+ def chunk_markdown(doc: Dict, chunk_chars: int = 800, overlap: int = 200) -> List[Dict]:
48
+ """
49
+ Split markdown document into overlapping chunks for better retrieval.
50
+ Reduced chunk size and increased overlap for more precise matching.
51
+ """
52
  text = doc["text"]
53
  sections = re.split(r"(?=^##\s+|\n##\s+|\n###\s+|^###\s+)", text, flags=re.MULTILINE)
54
  if len(sections) == 1:
 
57
  chunks = []
58
  for sec in sections:
59
  sec = sec.strip()
60
+ if not sec or len(sec) < 50: # Skip very short sections
61
  continue
62
+
63
  heading_match = HEADING_RE.search(sec)
64
  section_heading = heading_match.group(2).strip() if heading_match else doc["title"]
65
 
66
+ # Better chunking logic
67
  start = 0
68
  while start < len(sec):
69
  end = min(start + chunk_chars, len(sec))
70
  chunk_text = sec[start:end].strip()
71
+
72
+ if len(chunk_text) > 50: # Only keep substantial chunks
73
  chunks.append({
74
  "doc_title": doc["title"],
75
  "filename": doc["filename"],
 
77
  "section": section_heading,
78
  "content": chunk_text
79
  })
80
+
81
  if end == len(sec):
82
  break
83
  start = max(0, end - overlap)
84
+
85
  return chunks
86
 
87
  # ----------- KB Index -----------
 
90
  self.embedder = SentenceTransformer(EMBEDDING_MODEL_NAME)
91
  self.reader_tokenizer = AutoTokenizer.from_pretrained(READER_MODEL_NAME)
92
  self.reader_model = AutoModelForQuestionAnswering.from_pretrained(READER_MODEL_NAME)
93
+ self.reader = pipeline(
94
+ "question-answering",
95
+ model=self.reader_model,
96
+ tokenizer=self.reader_tokenizer,
97
+ max_answer_len=200,
98
+ handle_impossible_answer=True
99
+ )
100
 
101
  self.index = None
102
  self.embeddings = None
103
  self.metadata = []
104
 
105
  def build(self, kb_dir: Path):
106
+ """Build the FAISS index from markdown files."""
107
  docs = read_markdown_files(kb_dir)
108
  if not docs:
109
  raise RuntimeError(f"No markdown files found in {kb_dir.resolve()}")
 
111
  all_chunks = []
112
  for d in docs:
113
  all_chunks.extend(chunk_markdown(d))
114
+
115
+ if not all_chunks:
116
  raise RuntimeError("No content chunks generated from KB.")
117
 
118
+ texts = [c["content"] for c in all_chunks]
119
+ embeddings = self.embedder.encode(
120
+ texts,
121
+ batch_size=32,
122
+ convert_to_numpy=True,
123
+ show_progress_bar=True
124
+ )
125
  faiss.normalize_L2(embeddings)
126
 
127
  dim = embeddings.shape[1]
 
137
  json.dump(self.metadata, f, ensure_ascii=False, indent=2)
138
  faiss.write_index(index, str(FAISS_PATH))
139
 
140
+ def load(self) -> bool:
141
+ """Load pre-built index from disk."""
142
  if not (EMBEDDINGS_PATH.exists() and METADATA_PATH.exists() and FAISS_PATH.exists()):
143
  return False
144
  self.embeddings = np.load(EMBEDDINGS_PATH)
 
147
  self.index = faiss.read_index(str(FAISS_PATH))
148
  return True
149
 
150
+ def retrieve(self, query: str, top_k: int = 6) -> List[Tuple[int, float]]:
151
+ """Retrieve top-k most similar chunks for a query."""
152
  q_emb = self.embedder.encode([query], convert_to_numpy=True)
153
  faiss.normalize_L2(q_emb)
154
  D, I = self.index.search(q_emb, top_k)
155
  return list(zip(I[0].tolist(), D[0].tolist()))
156
 
157
+ def answer(self, question: str, retrieved: List[Tuple[int, float]]) -> Tuple[Optional[str], float, List[Dict], float]:
158
+ """
159
+ Extract answer from retrieved chunks using QA model.
160
+ Returns: (answer_text, qa_score, citations, best_similarity)
161
+ """
162
+ candidates = []
163
+
164
  for idx, sim in retrieved:
165
  meta = self.metadata[idx]
166
  ctx = meta["content"]
167
+
168
  try:
169
  out = self.reader(question=question, context=ctx)
170
+ score = float(out.get("score", 0.0))
171
+ answer_text = out.get("answer", "").strip()
172
+
173
+ if answer_text and len(answer_text) > 5:
174
+ candidates.append({
175
+ "text": answer_text,
176
+ "score": score,
177
+ "meta": meta,
178
+ "sim": float(sim)
179
+ })
180
+ except Exception as e:
181
  continue
182
+
183
+ if not candidates:
184
+ return None, 0.0, [], max([s for _, s in retrieved]) if retrieved else 0.0
185
+
186
+ # Sort by combined score (QA score + similarity)
187
+ candidates.sort(key=lambda x: x["score"] * 0.7 + x["sim"] * 0.3, reverse=True)
188
+ best = candidates[0]
189
+
190
+ # Generate citations from top retrieved chunks
191
  citations = []
192
  seen = set()
193
+ for idx, _ in retrieved[:3]:
194
  m = self.metadata[idx]
195
  key = (m["filename"], m["section"])
196
  if key in seen:
197
  continue
198
  seen.add(key)
199
+ citations.append({
200
+ "title": m["doc_title"],
201
+ "filename": m["filename"],
202
+ "section": m["section"]
203
+ })
204
+
205
+ best_sim = max([s for _, s in retrieved]) if retrieved else 0.0
206
+ return best["text"], best["score"], citations, best_sim
207
+
208
+ # Initialize KB
209
  kb = KBIndex()
210
 
211
  def ensure_index():
212
+ """Build index on first run or load from cache."""
213
  if not kb.load():
214
+ if KB_DIR.exists():
215
+ kb.build(KB_DIR)
216
+ else:
217
+ print(f"Warning: KB directory {KB_DIR} not found. Please create it and add markdown files.")
218
+
219
  ensure_index()
220
 
221
  # ----------- Guardrails -----------
222
+ CONFIDENCE_THRESHOLD = 0.25
223
+ SIMILARITY_THRESHOLD = 0.35
224
+
225
+ QUICK_ACTIONS = [
226
+ ("🔗 Connect WhatsApp", "How do I connect my WhatsApp number?"),
227
+ ("🔑 Reset Password", "I can't sign in / forgot my password"),
228
+ ("⚡ First Automation", "How do I create my first automation?"),
229
+ ("💳 Billing & Invoices", "How do I download invoices for billing?"),
230
+ ("📸 Fix Instagram", "Why can't I connect Instagram?")
231
  ]
232
 
233
  def format_citations(citations: List[Dict]) -> str:
234
+ """Format citations as markdown list."""
235
  if not citations:
236
  return ""
237
+ lines = []
238
+ for c in citations:
239
+ lines.append(f"• **{c['title']}** — _{c['section']}_")
240
+ return "\n".join(lines)
241
 
242
+ def respond(user_msg: str, history: List) -> str:
243
+ """Generate response to user query using RAG pipeline."""
244
  user_msg = (user_msg or "").strip()
245
+
246
  if not user_msg:
247
+ return "👋 How can I help? Ask me anything about the knowledge base, or use a quick action button below."
248
 
249
+ # Retrieve relevant chunks
250
+ retrieved = kb.retrieve(user_msg, top_k=6)
251
+
252
  if not retrieved:
253
+ return "❌ I couldn't find any relevant information. Please try rephrasing your question or contact support."
254
+
255
+ # Extract answer using QA model
256
+ answer, qa_score, citations, best_sim = kb.answer(user_msg, retrieved)
257
+
258
+ if not answer:
259
+ # Fallback: show closest matches
260
+ citations_md = format_citations(citations)
261
+ return (
262
+ f"🤔 I couldn't extract a specific answer, but here are the most relevant sections:\n\n"
263
+ f"{citations_md}\n\n"
264
+ f"💡 Try rephrasing your question or ask me to show more details."
265
+ )
266
 
267
+ # Check confidence
268
+ low_confidence = (qa_score < CONFIDENCE_THRESHOLD) or (best_sim < SIMILARITY_THRESHOLD)
269
  citations_md = format_citations(citations)
270
+
271
+ # Format response based on confidence
272
+ if low_confidence:
273
  return (
274
+ f"⚠️ **Answer (Low Confidence):**\n{answer}\n\n"
275
+ f"---\n"
276
+ f"📚 **Related Sources:**\n{citations_md}\n\n"
277
+ f"💬 *If this doesn't help, please say \"escalate to support\" for human assistance.*"
278
+ )
279
+ else:
280
+ return (
281
+ f"✅ **Answer:**\n{answer}\n\n"
282
+ f"---\n"
283
+ f"📚 **Sources:**\n{citations_md}\n\n"
284
+ f"💡 *Say \"show more details\" to see the full context.*"
285
  )
286
 
287
+ def process_message(user_input: str, history: List) -> Tuple[List, Dict]:
288
+ """Process user message and return updated chat history."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
289
  user_input = (user_input or "").strip()
290
  if not user_input:
291
  return history, gr.update(value="")
292
+
293
  reply = respond(user_input, history or [])
294
  new_history = (history or []) + [
295
  {"role": "user", "content": user_input},
 
297
  ]
298
  return new_history, gr.update(value="")
299
 
300
+ def process_quick(label: str, history: List) -> Tuple[List, Dict]:
301
+ """Process quick action button click."""
302
+ for btn_label, query in QUICK_ACTIONS:
303
+ if label == btn_label:
304
+ return process_message(query, history)
305
+ return history, gr.update(value="")
306
+
307
+ def rebuild_index_handler():
308
+ """Rebuild the search index from KB directory."""
309
+ try:
310
+ kb.build(KB_DIR)
311
+ return "✅ Index rebuilt successfully! Ready to answer questions."
312
+ except Exception as e:
313
+ return f"❌ Error rebuilding index: {str(e)}"
314
 
315
+ # ----------- Gradio UI -----------
316
+ with gr.Blocks(
317
+ title="RAG Knowledge Assistant",
318
+ theme=gr.themes.Soft(primary_hue="blue"),
319
+ css="""
320
+ .contain { max-width: 1200px; margin: auto; }
321
+ .quick-btn { min-width: 180px !important; }
322
+ """
323
+ ) as demo:
324
+
325
+ # Header
326
  gr.Markdown(
327
  """
328
+ # 🤖 RAG Knowledge Assistant
329
+ ### AI-powered Q&A with document retrieval and citation
330
+ Ask questions about your knowledge base, and get answers backed by relevant sources.
 
 
331
  """
332
  )
333
+
334
+ # Main chat interface
335
  with gr.Row():
336
+ with gr.Column(scale=1):
337
+ chat = gr.Chatbot(
338
+ height=500,
339
+ show_copy_button=True,
340
+ type="messages",
341
+ avatar_images=(None, "https://em-content.zobj.net/source/twitter/376/robot_1f916.png")
342
+ )
343
+
344
+ with gr.Row():
345
+ txt = gr.Textbox(
346
+ placeholder="💬 Ask a question (e.g., How do I connect WhatsApp?)",
347
+ scale=9,
348
+ show_label=False,
349
+ container=False
350
+ )
351
+ send = gr.Button("Send", variant="primary", scale=1)
352
+
353
+ # Quick action buttons
354
+ gr.Markdown("### ⚡ Quick Actions")
355
  with gr.Row():
356
+ quick_buttons = []
357
+ for label, _ in QUICK_ACTIONS:
358
+ btn = gr.Button(label, elem_classes="quick-btn", size="sm")
359
+ quick_buttons.append((btn, label))
360
+
361
+ # Admin section
362
+ with gr.Accordion("🔧 Admin Panel", open=False):
363
+ gr.Markdown(
364
+ """
365
+ **Rebuild Index:** Use this after adding or modifying files in the `/kb` directory.
366
+ The system will re-scan all markdown files and update the search index.
367
+ """
368
+ )
369
+ with gr.Row():
370
+ rebuild_btn = gr.Button("🔄 Rebuild Index", variant="secondary")
371
+ status_msg = gr.Markdown("")
372
+
373
+ # Event handlers
374
  send.click(process_message, inputs=[txt, chat], outputs=[chat, txt])
375
+ txt.submit(process_message, inputs=[txt, chat], outputs=[chat, txt])
376
+
377
+ for btn, label in quick_buttons:
378
+ btn.click(
379
+ process_quick,
380
+ inputs=[gr.State(label), chat],
381
+ outputs=[chat, txt]
382
+ )
383
+
384
+ rebuild_btn.click(rebuild_index_handler, outputs=status_msg)
385
+
386
+ # Footer
387
+ gr.Markdown(
388
+ """
389
+ ---
390
+ 💡 **Tips:**
391
+ - Be specific in your questions for better results
392
+ - Check the cited sources for full context
393
+ - Use quick actions for common tasks
394
+ """
395
+ )
396
 
397
  if __name__ == "__main__":
398
+ demo.launch()