Spaces:
Sleeping
Sleeping
Update alz_companion/agent.py
Browse files- alz_companion/agent.py +2 -1
alz_companion/agent.py
CHANGED
|
@@ -762,6 +762,7 @@ def make_rag_chain(vs_general: FAISS, vs_personal: FAISS, *, for_evaluation: boo
|
|
| 762 |
test_temperature = 0.0 # Modify the local variable
|
| 763 |
raw_answer = call_llm(messages, temperature=test_temperature)
|
| 764 |
answer = _clean_surface_text(raw_answer)
|
|
|
|
| 765 |
|
| 766 |
else: # caregiving_scenario
|
| 767 |
# --- MODIFICATION START: Integrate the severity-based logic ---
|
|
@@ -805,7 +806,7 @@ def make_rag_chain(vs_general: FAISS, vs_personal: FAISS, *, for_evaluation: boo
|
|
| 805 |
test_temperature = 0.0 # Modify the local variable
|
| 806 |
raw_answer = call_llm(messages, temperature=test_temperature)
|
| 807 |
answer = _clean_surface_text(raw_answer)
|
| 808 |
-
|
| 809 |
high_risk_scenarios = ["exit_seeking", "wandering", "elopement"]
|
| 810 |
if kwargs.get("scenario_tag") and kwargs["scenario_tag"].lower() in high_risk_scenarios:
|
| 811 |
answer += f"\n\n---\n{RISK_FOOTER}"
|
|
|
|
| 762 |
test_temperature = 0.0 # Modify the local variable
|
| 763 |
raw_answer = call_llm(messages, temperature=test_temperature)
|
| 764 |
answer = _clean_surface_text(raw_answer)
|
| 765 |
+
print(f"[DEBUG] Factual / Sum / Multi LLM Answer: {answer}")
|
| 766 |
|
| 767 |
else: # caregiving_scenario
|
| 768 |
# --- MODIFICATION START: Integrate the severity-based logic ---
|
|
|
|
| 806 |
test_temperature = 0.0 # Modify the local variable
|
| 807 |
raw_answer = call_llm(messages, temperature=test_temperature)
|
| 808 |
answer = _clean_surface_text(raw_answer)
|
| 809 |
+
print(f"[DEBUG] Caregiving Case LLM Answer: {answer}")
|
| 810 |
high_risk_scenarios = ["exit_seeking", "wandering", "elopement"]
|
| 811 |
if kwargs.get("scenario_tag") and kwargs["scenario_tag"].lower() in high_risk_scenarios:
|
| 812 |
answer += f"\n\n---\n{RISK_FOOTER}"
|