# File: scripts/smoke_test.py (Saathi)
# Author: Pushpraj — commit 2bbcf98 ("fixed distortion")
"""Smoke test for Saathi — data integrity, imports, and targeted unit checks.
Run with: python3 scripts/smoke_test.py
"""
from __future__ import annotations
import json
import sys
import traceback
from pathlib import Path
# Repository root (this file lives in <root>/scripts/).
ROOT = Path(__file__).resolve().parent.parent
# Make the repo importable as top-level packages (backend.*, modules.*).
sys.path.insert(0, str(ROOT))
# Running tallies of check results, printed in the summary at the end.
failures: list[str] = []
passes: list[str] = []
def ok(msg: str) -> None:
    """Echo a passing check to stdout and record it in the module tally."""
    line = f" OK {msg}"
    print(line)
    passes.append(msg)
def fail(msg: str) -> None:
    """Echo a failing check to stdout and record it in the module tally."""
    line = f" FAIL {msg}"
    print(line)
    failures.append(msg)
def section(title: str) -> None:
    """Print a visual divider introducing the next group of checks."""
    banner = "\n=== " + title + " ==="
    print(banner)
# ---------------------------------------------------------------------------
# 1. Data sanity
# ---------------------------------------------------------------------------
section("1. Data files")
try:
    with open(ROOT / "data/helplines_india.json", encoding="utf-8") as f:
        helplines = json.load(f)
    print(f" helplines_india.json: {len(helplines)} entries")
    names = [h["name"] for h in helplines]
    if "Tele-MANAS" in names:
        # Spot-check the national helpline's key fields.
        tm = next(h for h in helplines if h["name"] == "Tele-MANAS")
        number = tm["number"]
        alt = tm.get("alt_number", "-")
        has_source = bool(tm.get("source"))
        ok(f"Tele-MANAS present: {number} / {alt} (source={has_source})")
    else:
        fail("Tele-MANAS missing from helplines")
    # KIRAN is expected to be absent from the dataset; flag any reappearance.
    if any(n == "KIRAN" for n in names):
        fail("KIRAN still present in helplines (should be removed)")
    else:
        ok("KIRAN absent from helplines")
    # Substring match — entries are often named "iCall (TISS)" not just "iCall"
    for expected in ["iCall", "Vandrevala", "AASRA"]:
        if any(expected in n for n in names):
            ok(f"{expected} present")
        else:
            fail(f"{expected} missing from helplines")
except Exception as e:
    # Broad catch is intentional: the smoke harness reports and keeps going.
    fail(f"helplines_india.json load failed: {e}")
try:
    with open(ROOT / "data/ipc_bns_sections.json", encoding="utf-8") as f:
        sections_data = json.load(f)
    print(f" ipc_bns_sections.json: {len(sections_data)} sections")
    # "category" may be absent or None; coerce to [] before membership tests.
    proc = [s for s in sections_data if "procedural_rights" in (s.get("category") or [])]
    false_c = [s for s in sections_data if "false_complaint_warning" in (s.get("category") or [])]
    if proc:
        ok(f"procedural_rights sections: {len(proc)}")
        for s in proc:
            print(f" - {s.get('act')} {s.get('section')}: {s.get('title')}")
    else:
        fail("no procedural_rights sections")
    if false_c:
        ok(f"false_complaint_warning sections: {len(false_c)}")
        for s in false_c:
            print(f" - {s.get('act')} {s.get('section')}: {s.get('title')}")
    else:
        fail("no false_complaint_warning sections")
except Exception as e:
    fail(f"ipc_bns_sections.json load failed: {e}")
try:
    with open(ROOT / "data/mental_health_resources.json", encoding="utf-8") as f:
        resources = json.load(f)
    print(f" mental_health_resources.json: {len(resources)} cities")
    # 15+ cities is the coverage bar for the pitch demo (see "pitch-ready").
    if len(resources) >= 15:
        ok(f"{len(resources)} cities available (pitch-ready)")
    else:
        fail(f"only {len(resources)} cities, expected 15+")
except Exception as e:
    fail(f"mental_health_resources.json load failed: {e}")
try:
    # Validate the PHQ-9 / GAD-7 screener definitions: item counts, the 0-3
    # response scale, and the documented maximum scores.
    with open(ROOT / "data/screeners.json", encoding="utf-8") as f:
        screeners = json.load(f)
    phq9 = screeners.get("phq9", {})
    gad7 = screeners.get("gad7", {})
    print(f" screeners.json: PHQ-9={len(phq9.get('items', []))} items, GAD-7={len(gad7.get('items', []))} items")
    if len(phq9.get("items", [])) == 9:
        ok("PHQ-9 has 9 items")
    else:
        fail("PHQ-9 item count is not 9")
    if len(gad7.get("items", [])) == 7:
        ok("GAD-7 has 7 items")
    else:
        fail("GAD-7 item count is not 7")
    if len(phq9.get("response_labels", {}).get("en", [])) == 4 and len(gad7.get("response_labels", {}).get("en", [])) == 4:
        ok("screener response labels use the 0-3 scale")
    else:
        fail("screener response labels missing 0-3 scale")
    # BUG FIX: the previous `phq9.get("bands", [])[-1]` raised IndexError when
    # "bands" was missing or empty, which the outer except then mislabelled as
    # a JSON load failure and skipped the remaining screener checks. A `[{}]`
    # placeholder makes a bad schema surface as this specific check failing.
    phq9_last_band = (phq9.get("bands") or [{}])[-1]
    gad7_last_band = (gad7.get("bands") or [{}])[-1]
    if phq9_last_band.get("max") == 27 and gad7_last_band.get("max") == 21:
        ok("screener max scores are PHQ-9=27 and GAD-7=21")
    else:
        fail("screener max scores are wrong")
except Exception as e:
    fail(f"screeners.json load failed: {e}")
try:
    # Every prompt template must accept the placeholders the app substitutes.
    prompt_names = [
        "saathi_chat",
        "legal_aid",
        "student_corner",
        "cognitive_journal",
        "soothe_poetry",
        "voice_notes",
    ]
    for prompt_name in prompt_names:
        prompt_path = ROOT / "backend" / "prompts" / f"{prompt_name}.txt"
        text = prompt_path.read_text(encoding="utf-8")
        # str.format raises (KeyError/IndexError) on unknown or stray
        # placeholders; the result is discarded — only formatting must succeed.
        text.format(
            language_name="English",
            sections_json="[]",
            cross_module_memory="",
        )
        ok(f"prompt formats: {prompt_name}")
except Exception as e:
    fail(f"prompt file format check failed: {e}")
# ---------------------------------------------------------------------------
# 2. Module imports
# ---------------------------------------------------------------------------
section("2. Module imports (no API calls)")
# We need to stub out streamlit + anthropic so imports don't crash when the
# runtime isn't available. If they're installed, great; otherwise we fall back.
import importlib.util
streamlit_available = importlib.util.find_spec("streamlit") is not None
anthropic_available = importlib.util.find_spec("anthropic") is not None
print(f" streamlit installed: {streamlit_available}")
print(f" anthropic installed: {anthropic_available}")
# Always test the pure-Python backend pieces that don't need streamlit
pure_modules = [
    "backend.safeguards",
    "backend.resources",
    "backend.i18n",
]
for name in pure_modules:
    try:
        __import__(name)
        ok(f"import {name}")
    except Exception as e:
        fail(f"import {name}: {e}")
        traceback.print_exc()
# Only test streamlit-dependent modules if streamlit is installed
st_modules = [
    "backend.claude_client",
    "modules.saathi_chat",
    "modules.legal_aid",
    "modules.student_corner",
    "modules.cognitive_journal",
    "modules.soothe_poetry",
    "modules.voice_notes",
    "app",
]
# Both runtime deps must be present — claude_client needs anthropic as well.
if streamlit_available and anthropic_available:
    for name in st_modules:
        try:
            __import__(name)
            ok(f"import {name}")
        except Exception as e:
            fail(f"import {name}: {e}")
            traceback.print_exc()
else:
    print(" (skipping streamlit/anthropic modules — dependencies not installed)")
# ---------------------------------------------------------------------------
# 3. Safeguards unit tests
# ---------------------------------------------------------------------------
section("3. Crisis + active-danger regex")
try:
    from backend.safeguards import check_crisis, check_active_danger  # type: ignore
    # Positive samples mix English, romanised Hindi, and Devanagari phrasing.
    crisis_positives = [
        "I want to kill myself",
        "khudkushi karna chahta hoon",
        "jaan deni hai mujhe",
        "I cannot live anymore",
        "I can't go on anymore",
        "I'm going to die",
        "there is nothing to live for",
        "I wish I were dead",
        "मुझे जीना नहीं है",
    ]
    # Distressed-but-safe phrasings that must NOT trip the crisis detector.
    crisis_negatives = [
        "I'm anxious about exams",
        "my coworker is harassing me",
        "the pressure is heavy but I'm managing",
        "I want to live a more meaningful life",
    ]
    for t in crisis_positives:
        if check_crisis(t):
            ok(f"crisis detected: '{t[:40]}'")
        else:
            fail(f"crisis MISSED: '{t}'")
    for t in crisis_negatives:
        if not check_crisis(t):
            ok(f"crisis not triggered: '{t[:40]}'")
        else:
            fail(f"crisis FALSE POSITIVE: '{t}'")
    # Present-tense physical threats should flag; past-tense or benign
    # "is here" phrasings should not.
    danger_positives = [
        "he has a knife at my throat",
        "my husband is hitting me right now",
        "someone is outside my door with a weapon",
        "he is beating me",
    ]
    danger_negatives = [
        "my coworker harassed me last week",
        "I am afraid of my neighbour",
        "my ex sends me rude messages",
        "he is here for the meeting",
        "they are here to help",
    ]
    for t in danger_positives:
        if check_active_danger(t):
            ok(f"active danger: '{t[:40]}'")
        else:
            fail(f"active danger MISSED: '{t}'")
    for t in danger_negatives:
        if not check_active_danger(t):
            ok(f"active danger not triggered: '{t[:40]}'")
        else:
            fail(f"active danger FALSE POSITIVE: '{t}'")
except Exception as e:
    fail(f"safeguards unit tests crashed: {e}")
    traceback.print_exc()
# ---------------------------------------------------------------------------
# 4. Classifier unit tests (the substring bug we fixed)
# ---------------------------------------------------------------------------
section("4. Legal classifier regex (word-boundary safety)")
try:
    from backend.resources import classify_legal_situation_keywords  # type: ignore
    classifier_cases = [
        # text, expected_category_in, must_not_be
        ("he raped me", {"sexual_violence"}, "sexual_harassment"),
        ("my ex is threatening me over the phone", {"criminal_intimidation", "stalking", "other"}, "workplace_harassment"),
        ("my neighbour is stalking me and sending creepy messages", {"stalking", "cyber_harassment"}, None),
        ("my manager at the HR department is harassing me", {"workplace_harassment"}, None),
        ("my husband hits me and I want to leave", {"domestic_violence"}, None),
        ("someone leaked my private photos online", {"cyber_harassment"}, None),
        ("random walk in the park was nice", {"other"}, None),
    ]
    for text, expected_set, must_not_be in classifier_cases:
        result = classify_legal_situation_keywords(text)
        if result in expected_set:
            ok(f"classifier: '{text[:45]}' → {result}")
        else:
            fail(f"classifier: '{text}' → {result}, expected one of {expected_set}")
        # must_not_be is the banned category from the old substring-matching bug.
        if must_not_be and result == must_not_be:
            fail(f"classifier regression: '{text}' matched banned category {must_not_be}")
except Exception as e:
    fail(f"classifier unit tests crashed: {e}")
    traceback.print_exc()
# ---------------------------------------------------------------------------
# 5. i18n key coverage
# ---------------------------------------------------------------------------
section("5. i18n key coverage (en + hi)")
try:
    from backend.i18n import t  # type: ignore
    # Keys every UI surface depends on; t() returning the key itself (or a
    # falsy value) is treated as "missing translation".
    required_keys = [
        "source_label",
        "legal_other_title",
        "legal_other_body",
        "legal_other_nalsa",
        "legal_other_police",
        "legal_procedural_heading",
        "legal_false_complaint_warning_heading",
        "legal_active_danger_title",
        "legal_active_danger_body",
        "legal_active_danger_after",
        "tab_voice",
        "sessions_heading",
        "sessions_new_button",
        "journal_section_phq9_label",
        "journal_section_gad7_label",
        "journal_section_checkin_label",
        "journal_section_dashboard_label",
        "journal_about_heading",
        "journal_about_body",
        "journal_logged",
        "journal_saved_without_analysis",
        "journal_analysis_failed_summary",
        "journal_chart_xaxis",
        "journal_chart_yaxis",
        "journal_col_entry",
        "journal_col_logged",
        "journal_col_mood",
        "journal_col_distortions",
        "journal_col_support",
        "journal_col_note",
        "journal_col_observation",
        "journal_snapshot_heading",
        "journal_snapshot_hint",
        "phq9_header",
        "gad7_header",
        "screener_disclaimer",
        "checkin_header",
        "dashboard_header",
        "dashboard_screener_timeline_heading",
        "dashboard_checkin_history_heading",
        "dashboard_journal_history_heading",
        "dashboard_journal_history_empty",
        "dashboard_distortion_empty",
        "voice_header",
        "voice_ask_button",
        "tier_urgent_professional",
        "tier_reason_urgent_professional",
    ]
    for key in required_keys:
        en = t(key, "en")
        hi = t(key, "hi")
        if en and en != key:
            ok(f"en.{key}: {en[:50]}")
        else:
            fail(f"en.{key}: missing")
        if hi and hi != key:
            ok(f"hi.{key}: {hi[:50]}")
        else:
            fail(f"hi.{key}: missing (falls back to en)")
except Exception as e:
    fail(f"i18n tests crashed: {e}")
    traceback.print_exc()
# ---------------------------------------------------------------------------
# 6. Clinical screener + stepped-care pure logic
# ---------------------------------------------------------------------------
section("6. Clinical screener + stepped-care logic")
try:
    # The scoring functions are pure, but they live in a Streamlit module.
    # When local UI deps are absent, install tiny import stubs so the pure tests
    # still run. Full render coverage remains in scripts/apptest_render.py.
    if not streamlit_available:
        import types
        st_stub = types.ModuleType("streamlit")
        # A plain dict stands in for st.session_state; the checks below use
        # only mapping-style access, so attribute access is not stubbed.
        st_stub.session_state = {}
        sys.modules["streamlit"] = st_stub
    if importlib.util.find_spec("plotly") is None:
        import types
        plotly_stub = types.ModuleType("plotly")
        px_stub = types.ModuleType("plotly.express")
        go_stub = types.ModuleType("plotly.graph_objects")
        class _Figure:
            # No-op chart object: chart-building calls succeed, draw nothing.
            def add_trace(self, *args, **kwargs):
                return None
            def update_layout(self, *args, **kwargs):
                return None
            def update_xaxes(self, *args, **kwargs):
                return None
            def update_yaxes(self, *args, **kwargs):
                return None
        go_stub.Figure = _Figure
        go_stub.Bar = lambda *args, **kwargs: {"args": args, "kwargs": kwargs}
        go_stub.Scatter = lambda *args, **kwargs: {"args": args, "kwargs": kwargs}
        plotly_stub.express = px_stub
        plotly_stub.graph_objects = go_stub
        sys.modules["plotly"] = plotly_stub
        sys.modules["plotly.express"] = px_stub
        sys.modules["plotly.graph_objects"] = go_stub
    if importlib.util.find_spec("pydantic") is None:
        import types
        pydantic_stub = types.ModuleType("pydantic")
        class BaseModel:
            # Minimal pydantic stand-in: keyword args become attributes.
            def __init__(self, **kwargs):
                for k, v in kwargs.items():
                    setattr(self, k, v)
            @classmethod
            def model_validate(cls, data):
                return cls(**data)
        def Field(default=None, default_factory=None):
            # Resolve the default eagerly; no descriptor behaviour needed here.
            return default_factory() if default_factory else default
        class ValidationError(Exception):
            pass
        pydantic_stub.BaseModel = BaseModel
        pydantic_stub.Field = Field
        pydantic_stub.ValidationError = ValidationError
        sys.modules["pydantic"] = pydantic_stub
    if importlib.util.find_spec("dotenv") is None:
        # Without python-dotenv the real client module can't import; replace
        # it with a stub whose chat call is a no-op.
        import types
        claude_stub = types.ModuleType("backend.claude_client")
        claude_stub.chat_structured = lambda *args, **kwargs: None
        sys.modules["backend.claude_client"] = claude_stub
    from modules.cognitive_journal import (  # type: ignore
        _band_for,
        _analysis_to_journal_record,
        _append_journal_record,
        _checkin_trend_rows,
        _combined_screener_history_rows,
        _compute_stepped_care_recommendation,
        _distortion_chart_rows,
        _drop_legacy_seed_checkins,
        _init_state as _init_journal_state,
        _journal_history_rows,
        _screener_history_rows,
        _score_screener,
    )
    import modules.cognitive_journal as cj  # type: ignore
    # PHQ-9 severity bands, upper bounds inclusive (0-27 total).
    phq9_bands = [
        {"max": 4, "key": "none"},
        {"max": 9, "key": "mild"},
        {"max": 14, "key": "moderate"},
        {"max": 19, "key": "moderately_severe"},
        {"max": 27, "key": "severe"},
    ]
    if _score_screener([0] * 9) == 0 and _score_screener([3] * 9) == 27:
        ok("PHQ-9 scoring sums 0-27")
    else:
        fail("PHQ-9 scoring regression")
    if _band_for(0, phq9_bands) == "none" and _band_for(20, phq9_bands) == "severe":
        ok("PHQ-9 band lookup hits none/severe")
    else:
        fail("PHQ-9 band lookup regression")
    # (signals dict, expected tier) pairs covering every stepped-care tier.
    stepped_cases = [
        (
            {"phq9_score": 22, "gad7_score": 5, "recent_mood_avg": 4, "distortion_density": 1, "pro_signal_count": 0},
            "urgent_professional",
        ),
        (
            {"phq9_score": 2, "gad7_score": 16, "recent_mood_avg": 5, "distortion_density": 0, "pro_signal_count": 0},
            "urgent_professional",
        ),
        (
            {"phq9_score": 3, "gad7_score": 2, "phq9_item9_positive": True, "recent_mood_avg": 7, "distortion_density": 0, "pro_signal_count": 0},
            "urgent_professional",
        ),
        (
            {"phq9_score": 15, "gad7_score": 4, "recent_mood_avg": 6, "distortion_density": 1, "pro_signal_count": 0},
            "guided_support",
        ),
        (
            {"phq9_score": 11, "gad7_score": 4, "recent_mood_avg": 6, "distortion_density": 1, "pro_signal_count": 0},
            "self_help",
        ),
        (
            {"phq9_score": 2, "gad7_score": 2, "recent_mood_avg": 8, "distortion_density": 0, "pro_signal_count": 0},
            "self_care",
        ),
    ]
    for signals, expected in stepped_cases:
        tier, _ = _compute_stepped_care_recommendation(signals)
        if tier == expected:
            ok(f"stepped care: {signals} -> {tier}")
        else:
            fail(f"stepped care: {signals} -> {tier}, expected {expected}")
    # Rows are fed newest-first; the helper must return them chronologically.
    checkin_rows = _checkin_trend_rows([
        {"ts": "2026-04-12T11:00:00", "mood": 6, "sleep_h": 8, "stress": 4},
        {"ts": "2026-04-12T10:00:00", "mood": 5, "sleep_h": 7, "stress": 7},
    ])
    if (
        len(checkin_rows) == 6
        and {r["metric"] for r in checkin_rows} == {"Mood", "Sleep hours", "Stress"}
        and checkin_rows[0]["check_in"] == 1
        and checkin_rows[0]["value"] == 5.0
    ):
        ok("check-in trend rows are chronological and include mood + sleep + stress")
    else:
        fail(f"check-in trend rows malformed: {checkin_rows}")
    # NOTE(review): the first two rows appear to be the legacy demo seed
    # values — confirm against _drop_legacy_seed_checkins if they change.
    legacy_cleaned = _drop_legacy_seed_checkins([
        {"mood": 5, "sleep_h": 6.5, "stress": 6},
        {"mood": 6, "sleep_h": 7.0, "stress": 5},
        {"mood": 9, "sleep_h": 8.0, "stress": 2},
    ])
    if len(legacy_cleaned) == 1 and legacy_cleaned[0]["mood"] == 9:
        ok("legacy demo check-ins are removed from existing sessions")
    else:
        fail(f"legacy demo check-ins were not removed: {legacy_cleaned}")
    # A fresh session must start empty, with the legacy-cleanup flag set.
    cj.st.session_state.clear()
    _init_journal_state()
    if (
        cj.st.session_state[cj.ENTRIES_KEY] == []
        and cj.st.session_state[cj.CHECKINS_KEY] == []
        and cj.st.session_state[cj.LEGACY_CLEANUP_KEY] is True
    ):
        ok("Cognitive Journal starts with no fake entries or check-ins")
    else:
        fail("Cognitive Journal still seeds fake entries or check-ins")
    cj.st.session_state[cj.ENTRIES_KEY] = []
    # Three entries: idx 0 carries one distortion, idx 1 sets the
    # professional-help signal, idx 2 is plain — exercising all columns.
    for idx in range(3):
        _append_journal_record(
            _analysis_to_journal_record(
                {
                    "overall_mood": "sad",
                    "distortions": [
                        {
                            "type": "catastrophizing",
                            "phrase": "end of world",
                            "explanation": "One hard event is being treated as total disaster.",
                            "reframe": "This is hard and still not the end of everything.",
                            "evidence_question": "What parts of life are still present right now?",
                        }
                    ]
                    if idx == 0
                    else [],
                    "summary": "Painful, but no CBT distortion was found.",
                    "needs_professional_signal": idx == 1,
                },
                f"journal entry {idx + 1}",
            )
        )
    journal_history = _journal_history_rows("en")
    chart_rows = _distortion_chart_rows(cj.st.session_state[cj.ENTRIES_KEY])
    if (
        len(journal_history) == 3
        and journal_history[0]["distortions"] == 1
        and journal_history[1]["support"] == "Yes"
        and journal_history[2]["note"] == "journal entry 3"
        and chart_rows == [{"distortion": "Catastrophizing", "count": 1}]
    ):
        ok("journal observations keep 3 entries and chart rows include single distortion")
    else:
        fail(f"journal observations/chart rows malformed: history={journal_history}, chart={chart_rows}")
    # Regression guard: entries with zero distortions must still show in history.
    cj.st.session_state[cj.ENTRIES_KEY] = [
        cj.JournalAnalysis(
            overall_mood="sad",
            distortions=[],
            summary="Painful, but no CBT distortion was found.",
            needs_professional_signal=True,
            ts="2026-04-12T12:00:00",
            entry_text="I had a breakup with gf, I am depressed.",
        )
    ]
    journal_history = _journal_history_rows("en")
    if (
        len(journal_history) == 1
        and journal_history[0]["distortions"] == 0
        and journal_history[0]["support"] == "Yes"
        and journal_history[0]["note"].startswith("I had a breakup")
    ):
        ok("journal observations include no-distortion entries")
    else:
        fail(f"journal observations dropped no-distortion entry: {journal_history}")
    # Interleaved PHQ-9/GAD-7 attempts; log_order encodes the true chronology.
    cj.st.session_state[cj.PHQ9_HISTORY_KEY] = [
        {"ts": "2026-04-12T10:00:00", "score": 4, "band": "none", "log_order": 1},
        {"ts": "2026-04-12T11:00:00", "score": 11, "band": "moderate", "log_order": 3},
    ]
    cj.st.session_state[cj.GAD7_HISTORY_KEY] = [
        {"ts": "2026-04-12T10:30:00", "score": 8, "band": "mild", "log_order": 2},
    ]
    phq9_history = _screener_history_rows("phq9", "en")
    if len(phq9_history) == 2 and phq9_history[-1]["score"] == 11:
        ok("PHQ-9 screener history keeps multiple attempts")
    else:
        fail(f"PHQ-9 history rows malformed: {phq9_history}")
    combined_history = _combined_screener_history_rows("en")
    if [row["score"] for row in combined_history] == [4, 8, 11] and [row["log"] for row in combined_history] == [1, 2, 3]:
        ok("combined screener history is chronological across PHQ-9 and GAD-7")
    else:
        fail(f"combined screener history not chronological: {combined_history}")
except Exception as e:
    fail(f"clinical logic tests crashed: {e}")
    traceback.print_exc()
# ---------------------------------------------------------------------------
# Result
# ---------------------------------------------------------------------------
# Print the pass/fail tallies, list any failures, and exit non-zero so CI
# can gate on this script.
print("\n" + "=" * 60)
print(f"PASS: {len(passes)}")
print(f"FAIL: {len(failures)}")
if not failures:
    print("\nALL SMOKE CHECKS PASSED")
    sys.exit(0)
print("\nFailures:")
for failure_msg in failures:
    print(f" - {failure_msg}")
sys.exit(1)