hbchiu commited on
Commit
60c5cb8
·
verified ·
1 Parent(s): 88a845e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -4
app.py CHANGED
@@ -40,7 +40,7 @@ print(f"\n--- Chunk 0 ---\n{chunks[0]}")
40
  print(f"\n--- Chunk 1 ---\n{chunks[1]}")
41
 
42
  embed_model = SentenceTransformer("all-MiniLM-L6-v2")
43
- ##### IS INDEX FILE THE ISSUE
44
  print(f"Loaded FAISS index with {index.ntotal} vectors")
45
  print(f"Loaded {len(chunks)} chunks")
46
 
@@ -53,7 +53,7 @@ def retrieve_rag_context(query, k=5):
53
  print(f" RAG chunk: source={chunk.get('source')} text={chunk['text'][:80]}")
54
  results.append(chunk["text"])
55
  return "\n\n---\n\n".join(results)
56
- #### IS RETRIEVAL RETURNING THE RIGHT CHUNKS
57
  print(f"Retrieved chunks: {[c.get('topic') for c in [chunks[i] for i in indices[0]]]}")
58
 
59
  # ── SYSTEM PROMPTS ─────────────────────────────
@@ -172,10 +172,28 @@ def respond_to_message(message, history, lang="es"):
172
  print(f"LLM error: {e}")
173
  return "Ho sento, no puc generar una resposta en aquest moment." if lang=="ca" else "Lo siento, no puedo generar una respuesta en este momento."
174
 
175
- ##### IS LLM ignoring RAG context?
176
  print(f"Full system prompt length: {len(full_system)} chars")
177
  print(f"RAG context preview: {rag_context[:300]}")
178
- #####
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
 
180
  # ── Pipelines ─────────────────────────────────────────────────
181
  # ── Voice Pipeline with Language Support ─────────────────────────
 
40
  print(f"\n--- Chunk 1 ---\n{chunks[1]}")
41
 
42
  embed_model = SentenceTransformer("all-MiniLM-L6-v2")
43
+ # ── Index file debug ──────────────────────────────────────────
44
  print(f"Loaded FAISS index with {index.ntotal} vectors")
45
  print(f"Loaded {len(chunks)} chunks")
46
 
 
53
  print(f" RAG chunk: source={chunk.get('source')} text={chunk['text'][:80]}")
54
  results.append(chunk["text"])
55
  return "\n\n---\n\n".join(results)
56
+ # ── Retrieval debug ──────────────────────────────────────────
57
  print(f"Retrieved chunks: {[c.get('topic') for c in [chunks[i] for i in indices[0]]]}")
58
 
59
  # ── SYSTEM PROMPTS ─────────────────────────────
 
172
  print(f"LLM error: {e}")
173
  return "Ho sento, no puc generar una resposta en aquest moment." if lang=="ca" else "Lo siento, no puedo generar una respuesta en este momento."
174
 
175
+ # ── RAG debug ──────────────────────────────────────────
176
  print(f"Full system prompt length: {len(full_system)} chars")
177
  print(f"RAG context preview: {rag_context[:300]}")
178
+
179
+
180
+ # ── User Onboarding ─────────────────────────────
181
+ #
182
+ # For new user, initiate introductory conversation
183
+ # Capture user info and preferences
184
+ ### Adapt questions from Zarit Burden Interview, Caregiver Quality of Life Index, COPE inventory
185
+ ### What is their knowledge of AD? How long have you been their caregiver? Self-rate stress level? Etc
186
+ #
187
+ # Capture care recipient info and preferences
188
+ ### Adapt questions from functional staging tool (FAST), Global Deterioration Scale (GDS)
189
+ ### Do they live alone, with caregiver, with someone else? Urban (public transportation) or suburban? (Driving)
190
+ ### Is home smart-device enabled, or is it a possibility? (Fire alarms, elopement alarms, bed alarms, auto-lighting, voice asst)
191
+ #
192
+ # Option to complete by voice or text
193
+ # Store in caregiver profile -> json
194
+ # Dynamic questions using responses to personalize. ("Hi, Maria. It's nice to meet you! Can you tell me more about...")
195
+ # Inject profile into LLM for personalization
196
+
197
 
198
  # ── Pipelines ─────────────────────────────────────────────────
199
  # ── Voice Pipeline with Language Support ─────────────────────────