KeenWoo committed on
Commit bac9328 · verified · 1 Parent(s): 3fd8776

Update app.py

Files changed (1): app.py (+118 -73)
app.py CHANGED
@@ -2,21 +2,33 @@ import os
 import json
 import shutil
 import gradio as gr
+from datetime import datetime
 from typing import List, Dict, Any, Optional
 
 # --- Agent Imports & Safe Fallbacks ---
 try:
-    from alz_companion.agent import bootstrap_vectorstore, make_rag_chain, answer_query, synthesize_tts, transcribe_audio, detect_tags_from_query
+    from alz_companion.agent import (
+        bootstrap_vectorstore, make_rag_chain, answer_query, synthesize_tts,
+        transcribe_audio, detect_tags_from_query, describe_image, build_or_load_vectorstore
+    )
     from alz_companion.prompts import BEHAVIOUR_TAGS, EMOTION_STYLES
+    from langchain.schema import Document
     AGENT_OK = True
 except Exception as e:
     AGENT_OK = False
+    # Define all fallback functions and classes
     def bootstrap_vectorstore(sample_paths=None, index_path="data/"): return object()
-    def make_rag_chain(vs, **kwargs): return lambda q, **k: {"answer": f"(Demo) You asked: {q}", "sources": []}
+    def build_or_load_vectorstore(docs, index_path, is_personal=False): return object()
+    def make_rag_chain(vs_general, vs_personal, **kwargs): return lambda q, **k: {"answer": f"(Demo) You asked: {q}", "sources": []}
     def answer_query(chain, q, **kwargs): return chain(q, **kwargs)
     def synthesize_tts(text: str, lang: str = "en"): return None
     def transcribe_audio(filepath: str, lang: str = "en"): return "This is a transcribed message."
     def detect_tags_from_query(query: str, behavior_options: list, emotion_options: list): return {"detected_behavior": "None", "detected_emotion": "None"}
+    def describe_image(image_path: str): return "This is a description of an image."
+    class Document:
+        def __init__(self, page_content, metadata):
+            self.page_content = page_content
+            self.metadata = metadata
     BEHAVIOUR_TAGS = {"None": []}
     EMOTION_STYLES = {"None": {}}
     print(f"WARNING: Could not import from alz_companion ({e}). Running in UI-only demo mode.")
@@ -34,9 +46,11 @@ CONFIG = {
 # --- File Management & Vector Store Logic ---
 INDEX_BASE = os.getenv('INDEX_BASE', 'data')
 UPLOADS_BASE = os.path.join(INDEX_BASE, "uploads")
+PERSONAL_INDEX_PATH = os.path.join(INDEX_BASE, "personal_faiss_index")
 os.makedirs(UPLOADS_BASE, exist_ok=True)
 THEME_PATHS = {t: os.path.join(INDEX_BASE, f"faiss_index_{t.replace(' ', '').lower()}") for t in CONFIG["themes"]}
 vectorstores = {}
+personal_vectorstore = None
 
 def canonical_theme(tk: str) -> str: return tk if tk in CONFIG["themes"] else "All"
 def theme_upload_dir(theme: str) -> str:
@@ -83,7 +97,7 @@ def seed_files_into_theme(theme: str):
             man["files"][fname] = bool(enable)
             changed = True
     if changed: save_manifest(theme, man)
-    # this line is not needed -> return changed
+
 def ensure_index(theme='All'):
     theme = canonical_theme(theme)
     if theme in vectorstores: return vectorstores[theme]
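Note: ensure_index() memoizes one store per theme in the module-level vectorstores dict, so repeated calls are cheap — assuming the remainder of the function (not shown in this hunk) stores the freshly built index back into the dict:

    vs = ensure_index("All")           # builds or loads the FAISS index on first call
    assert ensure_index("All") is vs   # later calls return the cached store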
@@ -98,9 +112,55 @@ def collect_settings(*args):
     keys = ["role", "patient_name", "caregiver_name", "tone", "language", "tts_lang", "temperature", "behaviour_tag", "emotion_tag", "active_theme", "tts_on", "debug_mode"]
     return dict(zip(keys, args))
 
+def add_personal_knowledge(text_input, file_input, image_input):
+    global personal_vectorstore
+    if not any([text_input, file_input, image_input]):
+        return "Please provide text, a file, or an image to add."
+    docs_to_add = []
+    if text_input and text_input.strip():
+        docs_to_add.append(Document(page_content=text_input.strip(), metadata={"source": "Text Input"}))
+    if file_input:
+        transcribed_text = transcribe_audio(file_input.name)
+        docs_to_add.append(Document(page_content=transcribed_text, metadata={"source": os.path.basename(file_input.name)}))
+    if image_input:
+        described_text = describe_image(image_input)
+        docs_to_add.append(Document(page_content=described_text, metadata={"source": "Image Input"}))
+    if not docs_to_add:
+        return "No processable content found to add."
+    if personal_vectorstore is None:
+        personal_vectorstore = build_or_load_vectorstore(docs_to_add, PERSONAL_INDEX_PATH, is_personal=True)
+    else:
+        personal_vectorstore.add_documents(docs_to_add)
+    personal_vectorstore.save_local(PERSONAL_INDEX_PATH)
+    return f"Successfully added {len(docs_to_add)} new item(s) to the personal knowledge base."
+
+def save_chat_to_memory(chat_history):
+    global personal_vectorstore
+    if not chat_history:
+        return "Nothing to save."
+    formatted_chat = []
+    for message in chat_history:
+        role = "User" if message["role"] == "user" else "Assistant"
+        content = message["content"].strip()
+        if content.startswith("*(Auto-detected context:"):
+            continue
+        formatted_chat.append(f"{role}: {content}")
+    conversation_text = "\n".join(formatted_chat)
+    if not conversation_text:
+        return "No conversation content to save."
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    doc_to_add = Document(page_content=conversation_text, metadata={"source": f"Conversation saved on {timestamp}"})
+    if personal_vectorstore is None:
+        personal_vectorstore = build_or_load_vectorstore([doc_to_add], PERSONAL_INDEX_PATH, is_personal=True)
+    else:
+        personal_vectorstore.add_documents([doc_to_add])
+    personal_vectorstore.save_local(PERSONAL_INDEX_PATH)
+    print("Saved conversation to long-term memory.")
+    return f"Conversation from {timestamp} saved successfully to long-term memory!"
+
 def chat_fn(user_text, audio_file, settings, chat_history):
+    global personal_vectorstore
     question = (user_text or "").strip()
-
     if audio_file and not question:
         try:
             voice_lang_name = settings.get("tts_lang", "English")
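Note: build_or_load_vectorstore() lives in alz_companion.agent and isn't shown in this diff. Judging from the call sites (a persisted index path, an optional starting batch of docs, and add_documents()/save_local() used afterwards), a minimal FAISS-backed sketch could look like the following — an illustration under assumed APIs (OpenAIEmbeddings as the embedding backend), not the project's actual implementation:

    import os
    from langchain.schema import Document
    from langchain_community.vectorstores import FAISS
    from langchain_openai import OpenAIEmbeddings  # assumed backend

    def build_or_load_vectorstore(docs, index_path, is_personal=False):
        embeddings = OpenAIEmbeddings()
        if os.path.isdir(index_path):
            # Reuse the persisted index and merge in any new docs.
            vs = FAISS.load_local(index_path, embeddings, allow_dangerous_deserialization=True)
            if docs:
                vs.add_documents(docs)
        else:
            # FAISS.from_documents() needs at least one document; seed an
            # empty personal index with a placeholder.
            seed = docs or [Document(page_content="(empty personal index)", metadata={"source": "init"})]
            vs = FAISS.from_documents(seed, embeddings)
        vs.save_local(index_path)
        return vs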
@@ -110,71 +170,43 @@ def chat_fn(user_text, audio_file, settings, chat_history):
             err_msg = f"Audio Error: {e}" if settings.get("debug_mode") else "Sorry, I couldn't understand the audio."
             chat_history.append({"role": "assistant", "content": err_msg})
             return "", None, chat_history
-
     if not question:
         return "", None, chat_history
-
     chat_history.append({"role": "user", "content": question})
-
-    # --- NLU Classification Logic ---
     manual_behavior_tag = settings.get("behaviour_tag")
     manual_emotion_tag = settings.get("emotion_tag")
-
     if manual_behavior_tag not in [None, "None"] or manual_emotion_tag not in [None, "None"]:
         scenario_tag = manual_behavior_tag
         emotion_tag = manual_emotion_tag
-        print("Using manual tags from UI.")
     else:
-        print("No manual tags set, performing auto-detection...")
         behavior_options = CONFIG.get("behavior_tags", [])
         emotion_options = CONFIG.get("emotion_tags", [])
-
         detected_tags = detect_tags_from_query(question, behavior_options=behavior_options, emotion_options=emotion_options)
-
         scenario_tag = detected_tags.get("detected_behavior")
         emotion_tag = detected_tags.get("detected_emotion")
-
         if (scenario_tag and scenario_tag != "None") or (emotion_tag and emotion_tag != "None"):
             detected_msg = f"*(Auto-detected context: Behavior=`{scenario_tag}`, Emotion=`{emotion_tag}`)*"
             chat_history.append({"role": "assistant", "content": detected_msg})
-
-    # --- END OF NLU LOGIC ---
-
     active_theme = settings.get("active_theme", "All")
-    vs = ensure_index(active_theme)
-
+    vs_general = ensure_index(active_theme)
+    if personal_vectorstore is None:
+        personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
     rag_chain_settings = {
-        "role": settings.get("role"),
-        "temperature": settings.get("temperature"),
-        "language": settings.get("language"),
-        "patient_name": settings.get("patient_name"),
-        "caregiver_name": settings.get("caregiver_name"),
-        "tone": settings.get("tone"),
+        "role": settings.get("role"), "temperature": settings.get("temperature"),
+        "language": settings.get("language"), "patient_name": settings.get("patient_name"),
+        "caregiver_name": settings.get("caregiver_name"), "tone": settings.get("tone"),
     }
-
-    chain = make_rag_chain(vs, **rag_chain_settings)
-
+    chain = make_rag_chain(vs_general, personal_vectorstore, **rag_chain_settings)
     if scenario_tag == "None": scenario_tag = None
     if emotion_tag == "None": emotion_tag = None
-
     simple_history = chat_history[:-1]
-
-    response = answer_query(
-        chain,
-        question,
-        chat_history=simple_history,
-        scenario_tag=scenario_tag,
-        emotion_tag=emotion_tag
-    )
-
+    response = answer_query(chain, question, chat_history=simple_history, scenario_tag=scenario_tag, emotion_tag=emotion_tag)
     answer = response.get("answer", "[No answer found]")
     chat_history.append({"role": "assistant", "content": answer})
-
     audio_out = None
     if settings.get("tts_on") and answer:
         tts_lang_code = CONFIG["languages"].get(settings.get("tts_lang"), "en")
         audio_out = synthesize_tts(answer, lang=tts_lang_code)
-
     from gradio import update
     return "", (update(value=audio_out, visible=bool(audio_out))), chat_history
 
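Note: make_rag_chain() now receives two stores — the themed general index plus the personal one. How they are combined is internal to alz_companion.agent; one plausible pattern (purely illustrative, not confirmed by this diff) is to merge the two retrievers before the prompt/LLM stage:

    from langchain.retrievers import EnsembleRetriever  # hypothetical choice

    general_retriever = vs_general.as_retriever(search_kwargs={"k": 4})
    personal_retriever = vs_personal.as_retriever(search_kwargs={"k": 4})
    retriever = EnsembleRetriever(retrievers=[general_retriever, personal_retriever],
                                  weights=[0.5, 0.5])
    # The merged retriever then feeds whatever chain make_rag_chain builds.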
@@ -206,27 +238,9 @@ def auto_setup_on_load(current_theme):
     all_settings = collect_settings("patient", "", "", "warm", "English", "English", 0.7, "None", "None", "All", True, False)
     files_ui, status_msg = refresh_file_list_ui(current_theme)
     return all_settings, files_ui, status_msg
-# In app.py, add this new function
 
-def pre_load_indexes():
-    """Pre-builds the FAISS index for all themes at startup."""
-    print("Pre-loading knowledge base indexes at startup...")
-    for theme in CONFIG["themes"]:
-        print(f" - Loading index for theme: '{theme}'")
-        try:
-            ensure_index(theme)
-            print(f" ...'{theme}' theme loaded successfully.")
-        except Exception as e:
-            print(f" ...Error loading theme '{theme}': {e}")
-    print("All indexes loaded. Application is ready.")
-
-# --- UI Definition (Must be at the end of the file) ----------------
-CSS = """
-.gradio-container { font-size: 14px; }
-#chatbot { min-height: 250px; }
-#audio_out audio { max-height: 20px; }
-#audio_in audio { max-height: 20px; padding: 0; }
-"""
+# --- UI Definition ---
+CSS = ".gradio-container { font-size: 14px; } #chatbot { min-height: 250px; } #audio_out audio { max-height: 40px; } #audio_in audio { max-height: 40px; padding: 0; }"
 
 with gr.Blocks(theme=gr.themes.Soft(), css=CSS) as demo:
     settings_state = gr.State({})
@@ -234,34 +248,45 @@ with gr.Blocks(theme=gr.themes.Soft(), css=CSS) as demo:
     with gr.Tab("Chat"):
         user_text = gr.Textbox(show_label=False, placeholder="Type your message here...")
         audio_in = gr.Audio(sources=["microphone"], type="filepath", label="Voice Input", elem_id="audio_in")
-
         with gr.Row():
             submit_btn = gr.Button("Send", variant="primary")
+            save_btn = gr.Button("Save to Memory")
             clear_btn = gr.Button("Clear")
-
+        chat_status = gr.Markdown()
         audio_out = gr.Audio(label="Response Audio", autoplay=True, visible=True, elem_id="audio_out")
         chatbot = gr.Chatbot(elem_id="chatbot", label="Conversation", type="messages")
 
+    with gr.Tab("Personalize"):
+        gr.Markdown("## Add to Personal Knowledge Base")
+        gr.Markdown("Add personal notes, memories, or descriptions of people and places. You can also upload audio/video notes or images.")
+        with gr.Row():
+            with gr.Column(scale=2):
+                personal_text = gr.Textbox(lines=5, label="Text Input", placeholder="e.g., 'My father's name is John. He loves listening to Frank Sinatra music.'")
+            with gr.Column(scale=1):
+                personal_file = gr.File(label="Upload Audio/Video File")
+                personal_image = gr.Image(type="filepath", label="Upload Image")
+        with gr.Row():
+            personal_add_btn = gr.Button("Add Knowledge to Memory", variant="primary")
+        personal_status = gr.Markdown()
+
     with gr.Tab("Settings"):
         with gr.Group():
             gr.Markdown("## Conversation & Persona Settings")
             with gr.Row():
-                role = gr.Radio(CONFIG["roles"], value="patient", label="Your Role")
+                role = gr.Radio(CONFIG["roles"], value="caregiver", label="Your Role")
                 temperature = gr.Slider(0.0, 1.2, value=0.7, step=0.1, label="Creativity")
                 tone = gr.Dropdown(CONFIG["tones"], value="warm", label="Response Tone")
             with gr.Row():
                 patient_name = gr.Textbox(label="Patient's Name", placeholder="e.g., 'Dad' or 'John'")
                 caregiver_name = gr.Textbox(label="Caregiver's Name", placeholder="e.g., 'me' or 'Jane'")
-                behaviour_tag = gr.Dropdown(CONFIG["behavior_tags"], value="None", label="Behaviour Filter")
-                emotion_tag = gr.Dropdown(CONFIG["emotion_tags"], value="None", label="Emotion Filter")
-
+                behaviour_tag = gr.Dropdown(CONFIG["behavior_tags"], value="None", label="Behaviour Filter (Manual Override)")
+                emotion_tag = gr.Dropdown(CONFIG["emotion_tags"], value="None", label="Emotion Filter (Manual Override)")
         with gr.Accordion("Language, Voice & Debugging", open=False):
             language = gr.Dropdown(list(CONFIG["languages"].keys()), value="English", label="Response Language")
            tts_lang = gr.Dropdown(list(CONFIG["languages"].keys()), value="English", label="Voice Language")
            tts_on = gr.Checkbox(True, label="Enable Voice Response (TTS)")
            debug_mode = gr.Checkbox(False, label="Show Debug Info")
-
-        gr.Markdown("--- \n ## Knowledge Base Management")
+        gr.Markdown("--- \n ## General Knowledge Base Management")
         active_theme = gr.Radio(CONFIG["themes"], value="All", label="Active Knowledge Theme")
         with gr.Row():
             with gr.Column(scale=1):
@@ -281,17 +306,37 @@ with gr.Blocks(theme=gr.themes.Soft(), css=CSS) as demo:
         component.change(fn=collect_settings, inputs=all_settings_components, outputs=settings_state)
 
     submit_btn.click(fn=chat_fn, inputs=[user_text, audio_in, settings_state, chatbot], outputs=[user_text, audio_out, chatbot])
-    clear_btn.click(lambda: (None, None, [], None, ""), outputs=[user_text, audio_out, chatbot, audio_in, user_text])
+    save_btn.click(fn=save_chat_to_memory, inputs=[chatbot], outputs=[chat_status])
+    clear_btn.click(lambda: ("", None, [], None, ""), outputs=[user_text, audio_out, chatbot, audio_in, chat_status])
+
+    personal_add_btn.click(fn=add_personal_knowledge, inputs=[personal_text, personal_file, personal_image], outputs=[personal_status]).then(lambda: (None, None, None), outputs=[personal_text, personal_file, personal_image])
 
     upload_btn.click(upload_knowledge, inputs=[files_in, active_theme], outputs=[mgmt_status]).then(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
     save_files_btn.click(save_file_selection, inputs=[active_theme, files_box], outputs=[mgmt_status])
-    seed_btn.click(seed_files_into_theme, inputs=[active_theme], outputs=[]).then(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
+    seed_btn.click(seed_files_into_theme, inputs=[active_theme]).then(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
     refresh_btn.click(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
     active_theme.change(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
     demo.load(auto_setup_on_load, inputs=[active_theme], outputs=[settings_state, files_box, mgmt_status])
 
-# --- Launch (Must be the last part of the script) ---
+# --- Startup Logic ---
+def pre_load_indexes():
+    global personal_vectorstore
+    print("Pre-loading all knowledge base indexes at startup...")
+    for theme in CONFIG["themes"]:
+        print(f" - Loading general index for theme: '{theme}'")
+        try:
+            ensure_index(theme)
+            print(f" ...'{theme}' theme loaded successfully.")
+        except Exception as e:
+            print(f" ...Error loading theme '{theme}': {e}")
+    print(" - Loading personal knowledge index...")
+    try:
+        personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
+        print(" ...Personal knowledge loaded successfully.")
+    except Exception as e:
+        print(f" ...Error loading personal knowledge: {e}")
+    print("All indexes loaded. Application is ready.")
+
 if __name__ == "__main__":
-    pre_load_indexes() # <-- ADD THIS LINE to pre-load everything
+    pre_load_indexes()
     demo.queue().launch(debug=True)
-
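Note: a quick sanity check of the new memory flow (this path needs the real alz_companion module — the demo fallback's build_or_load_vectorstore() returns a bare object() without add_documents()/save_local()):

    history = [
        {"role": "user", "content": "Dad seemed anxious tonight."},
        {"role": "assistant", "content": "*(Auto-detected context: Behavior=`Anxiety`, Emotion=`Worried`)*"},
        {"role": "assistant", "content": "Try a calm, familiar routine before bed."},
    ]
    print(save_chat_to_memory(history))  # the auto-detected marker is filtered out before saving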