Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -40,7 +40,7 @@ print(f"\n--- Chunk 0 ---\n{chunks[0]}")
|
|
| 40 |
print(f"\n--- Chunk 1 ---\n{chunks[1]}")
|
| 41 |
|
| 42 |
embed_model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 43 |
-
#
|
| 44 |
print(f"Loaded FAISS index with {index.ntotal} vectors")
|
| 45 |
print(f"Loaded {len(chunks)} chunks")
|
| 46 |
|
|
@@ -53,7 +53,7 @@ def retrieve_rag_context(query, k=5):
|
|
| 53 |
print(f" RAG chunk: source={chunk.get('source')} text={chunk['text'][:80]}")
|
| 54 |
results.append(chunk["text"])
|
| 55 |
return "\n\n---\n\n".join(results)
|
| 56 |
-
#
|
| 57 |
print(f"Retrieved chunks: {[c.get('topic') for c in [chunks[i] for i in indices[0]]]}")
|
| 58 |
|
| 59 |
# ── SYSTEM PROMPTS ─────────────────────────────
|
|
@@ -172,10 +172,28 @@ def respond_to_message(message, history, lang="es"):
|
|
| 172 |
print(f"LLM error: {e}")
|
| 173 |
return "Ho sento, no puc generar una resposta en aquest moment." if lang=="ca" else "Lo siento, no puedo generar una respuesta en este momento."
|
| 174 |
|
| 175 |
-
#
|
| 176 |
print(f"Full system prompt length: {len(full_system)} chars")
|
| 177 |
print(f"RAG context preview: {rag_context[:300]}")
|
| 178 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
|
| 180 |
# ── Pipelines ─────────────────────────────────────────────────
|
| 181 |
# ── Voice Pipeline with Language Support ─────────────────────────
|
|
|
|
| 40 |
print(f"\n--- Chunk 1 ---\n{chunks[1]}")
|
| 41 |
|
| 42 |
embed_model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 43 |
+
# ── Index file debug ──────────────────────────────────────────
|
| 44 |
print(f"Loaded FAISS index with {index.ntotal} vectors")
|
| 45 |
print(f"Loaded {len(chunks)} chunks")
|
| 46 |
|
|
|
|
| 53 |
print(f" RAG chunk: source={chunk.get('source')} text={chunk['text'][:80]}")
|
| 54 |
results.append(chunk["text"])
|
| 55 |
return "\n\n---\n\n".join(results)
|
| 56 |
+
# ── Retrieval debug ──────────────────────────────────────────
|
| 57 |
print(f"Retrieved chunks: {[c.get('topic') for c in [chunks[i] for i in indices[0]]]}")
|
| 58 |
|
| 59 |
# ── SYSTEM PROMPTS ─────────────────────────────
|
|
|
|
| 172 |
print(f"LLM error: {e}")
|
| 173 |
return "Ho sento, no puc generar una resposta en aquest moment." if lang=="ca" else "Lo siento, no puedo generar una respuesta en este momento."
|
| 174 |
|
| 175 |
+
# ── RAG debug ──────────────────────────────────────────
|
| 176 |
print(f"Full system prompt length: {len(full_system)} chars")
|
| 177 |
print(f"RAG context preview: {rag_context[:300]}")
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
# ── User Onboarding ─────────────────────────────
|
| 181 |
+
#
|
| 182 |
+
# For new user, initiate introductory conversation
|
| 183 |
+
# Capture user info and preferences
|
| 184 |
+
### Adapt questions from Zarit Burden Interview, Caregiver Quality of Life Index, COPE inventory
|
| 185 |
+
### What is their knowledge of AD? How long have you been their caregiver? Self-rate stress level? Etc
|
| 186 |
+
#
|
| 187 |
+
# Capture care recipient info and preferences
|
| 188 |
+
### Adapt questions from functional staging tool (FAST), Global Deterioration Scale (GDS)
|
| 189 |
+
### Do they live alone, with caregiver, with someone else? Urban (public transportation) or suburban? (Driving)
|
| 190 |
+
### Is home smart-device enabled, or is it a possibility? (Fire alarms, elopement alarms, bed alarms, auto-lighting, voice assistant)
|
| 191 |
+
#
|
| 192 |
+
# Option to complete by voice or text
|
| 193 |
+
# Store in caregiver profile -> json
|
| 194 |
+
# Dynamic questions using responses to personalize. ("Hi, Maria. It's nice to meet you! Can you tell me more about...")
|
| 195 |
+
# Inject profile into LLM for personalization
|
| 196 |
+
|
| 197 |
|
| 198 |
# ── Pipelines ─────────────────────────────────────────────────
|
| 199 |
# ── Voice Pipeline with Language Support ─────────────────────────
|