# NOTE: the "Spaces / Sleeping" lines that preceded this file were a Hugging Face
# Spaces status banner captured by page extraction — not part of the script.
| """Render-time smoke test using Streamlit's AppTest API. | |
| This actually executes `app.main()` the way a real Streamlit session would, | |
| so it catches errors that pure imports miss (missing i18n keys, session_state | |
| crashes, render-time exceptions in module handlers). | |
| We patch out the network-calling chat() function so we don't need an API key. | |
| """ | |
| from __future__ import annotations | |
| import sys | |
| import traceback | |
| import types | |
| from pathlib import Path | |
# Make the repository root importable so `backend.*` and `app` resolve no
# matter which directory this script is launched from.
ROOT = Path(__file__).resolve().parents[1]
sys.path.insert(0, str(ROOT))
| def _fake_chat(*args, **kwargs): | |
| return "(stubbed Claude reply for smoke test)" | |
| def _fake_chat_structured(*args, **kwargs): | |
| # Return a valid empty-ish instance of whatever Pydantic schema is passed. | |
| schema = kwargs.get("schema") | |
| if schema is None and args: | |
| schema = args[0] | |
| try: | |
| return schema(overall_mood="neutral", distortions=[], summary="stubbed journal summary") | |
| except Exception: | |
| return None | |
# Register a stub backend.claude_client before anything in the module tree
# imports it, so the render test never depends on dotenv, provider SDKs,
# API keys, or network access.
_stub = types.ModuleType("backend.claude_client")
_stub_api = {
    "chat": _fake_chat,
    "chat_structured": _fake_chat_structured,
    "get_active_provider_label": lambda: "Smoke-test LLM stub",
}
for _attr, _impl in _stub_api.items():
    setattr(_stub, _attr, _impl)
sys.modules["backend.claude_client"] = _stub
try:
    from streamlit.testing.v1 import AppTest
except Exception as e:
    # Exit code 2 distinguishes "harness unavailable" from render failures (1).
    print("AppTest not available:", e)
    sys.exit(2)

print("=== Streamlit AppTest render ===")

# Collected "Language:Section" tags for every render that failed.
failures: list[str] = []

# (session_state section key, human-readable label) — each gets its own render.
JOURNAL_SECTIONS = [
    ("journal", "Journal"),
    ("phq9", "PHQ-9"),
    ("gad7", "GAD-7"),
    ("checkin", "Daily check-in"),
    ("dashboard", "My patterns"),
]
def run_with_lang(lang_code: str, label: str, journal_section: str, section_label: str) -> None:
    """Render app.py once with a language and journal section preloaded.

    Seeds session_state before the run so every label-lookup path is
    exercised, then reports OK/FAIL and records failures in the module-level
    `failures` list.
    """
    tester = AppTest.from_file(str(ROOT / "app.py"), default_timeout=30)
    tester.session_state["saathi_language"] = lang_code
    tester.session_state["cognitive_journal_section"] = journal_section
    tester.run()

    if not tester.exception:
        print(f" OK [{label} / {section_label}] rendered clean ({len(tester.tabs)} tabs)")
        return

    print(f" FAIL [{label} / {section_label}] rendered with exceptions:")
    for exc in tester.exception:
        # AppTest exception entries usually wrap the error in `.value`.
        print(f" - {getattr(exc, 'value', exc)}")
    failures.append(f"{label}:{section_label}")
# Every supported UI language gets a full pass over every journal section.
LANGUAGES = [
    ("en", "English"),
    ("hi", "Hindi"),
    ("bn", "Bengali"),
    ("ta", "Tamil"),
    ("te", "Telugu"),
    ("mr", "Marathi"),
    ("ur", "Urdu"),
]

for code, label in LANGUAGES:
    for journal_section, section_label in JOURNAL_SECTIONS:
        try:
            run_with_lang(code, label, journal_section, section_label)
        except Exception as e:
            # A crash here means the harness itself blew up, as opposed to an
            # exception surfaced inside the rendered app (handled above).
            print(f" FAIL [{label} / {section_label}] crashed: {e}")
            traceback.print_exc()
            failures.append(f"{label}:{section_label}")
    print()

if not failures:
    print("ALL RENDERS PASSED")
    sys.exit(0)

print(f"FAIL — {len(failures)} render errors: {failures}")
sys.exit(1)