KeenWoo committed on
Commit
dae4b4c
·
verified ·
1 Parent(s): 8d15c9a

Update alz_companion/agent.py

Browse files
Files changed (1) hide show
  1. alz_companion/agent.py +2 -2
alz_companion/agent.py CHANGED
@@ -762,7 +762,7 @@ def make_rag_chain(vs_general: FAISS, vs_personal: FAISS, *, for_evaluation: boo
762
  test_temperature = 0.0 # Modify the local variable
763
  raw_answer = call_llm(messages, temperature=test_temperature)
764
  answer = _clean_surface_text(raw_answer)
765
- print("[DEBUG] Factual / Sum / Multi LLM Answer: " {answer})
766
 
767
  else: # caregiving_scenario
768
  # --- MODIFICATION START: Integrate the severity-based logic ---
@@ -806,7 +806,7 @@ def make_rag_chain(vs_general: FAISS, vs_personal: FAISS, *, for_evaluation: boo
806
  test_temperature = 0.0 # Modify the local variable
807
  raw_answer = call_llm(messages, temperature=test_temperature)
808
  answer = _clean_surface_text(raw_answer)
809
- print("[DEBUG] Caregiving Case LLM Answer: " {answer})
810
  high_risk_scenarios = ["exit_seeking", "wandering", "elopement"]
811
  if kwargs.get("scenario_tag") and kwargs["scenario_tag"].lower() in high_risk_scenarios:
812
  answer += f"\n\n---\n{RISK_FOOTER}"
 
762
  test_temperature = 0.0 # Modify the local variable
763
  raw_answer = call_llm(messages, temperature=test_temperature)
764
  answer = _clean_surface_text(raw_answer)
765
+ print("[DEBUG] Factual / Sum / Multi LLM Answer: ", {answer})
766
 
767
  else: # caregiving_scenario
768
  # --- MODIFICATION START: Integrate the severity-based logic ---
 
806
  test_temperature = 0.0 # Modify the local variable
807
  raw_answer = call_llm(messages, temperature=test_temperature)
808
  answer = _clean_surface_text(raw_answer)
809
+ print("[DEBUG] Caregiving Case LLM Answer: ", {answer})
810
  high_risk_scenarios = ["exit_seeking", "wandering", "elopement"]
811
  if kwargs.get("scenario_tag") and kwargs["scenario_tag"].lower() in high_risk_scenarios:
812
  answer += f"\n\n---\n{RISK_FOOTER}"