Buckets:
from __future__ import annotations

from app.agent.kb_retrieval import retrieve, validate_similarity
from app.db.chroma_client import get_vector_backend, safe_count
from app.nlp.nlp_service import NLPService
def test_semantic_similarity() -> None:
    """Check that paraphrased medical phrases exceed their similarity thresholds.

    Data-driven over (phrase_a, phrase_b, minimum score) cases so a failure
    reports which pair fell short and by how much, instead of a bare AssertionError.
    """
    cases = [
        ("chest hurts", "chest pain", 0.85),
        ("blood sugar high", "diabetes", 0.75),
        ("can't breathe", "asthma", 0.75),
    ]
    for phrase_a, phrase_b, threshold in cases:
        score = validate_similarity(phrase_a, phrase_b)
        # Strict > matches the original asserts; thresholds are tuned per pair.
        assert score > threshold, (
            f"similarity({phrase_a!r}, {phrase_b!r}) = {score:.3f} <= {threshold}"
        )
    print("✅ Semantic similarity tests passed")
def test_vector_db_ingestion() -> None:
    """Verify the vector store has been populated with at least one chunk.

    Fails with an actionable message (which backend, and that ingestion is
    the likely missing step) rather than a bare AssertionError.
    """
    count = safe_count()
    assert count > 0, (
        f"vector DB ({get_vector_backend()}) is empty — run ingestion first"
    )
    print(f"✅ Vector DB ({get_vector_backend()}) has {count} chunks")
def test_disease_filtered_retrieval() -> None:
    """Verify retrieval returns results when filtered to a specific disease corpus."""
    results = retrieve("what causes chest pain", disease_id="heart", top_k=3)
    # Truthiness check replaces len(...) > 0; message pinpoints the filter used.
    assert results, "no chunks returned for disease_id='heart'"
    print("✅ Disease-filtered retrieval works")
def test_nlp_intent_classification() -> None:
    """Verify the NLP service separates educational questions from symptom reports.

    On failure, the message shows the intent actually returned so the
    misclassification is visible without re-running under a debugger.
    """
    nlp_service = NLPService()

    result = nlp_service.process("what causes diabetes")
    assert result["intent"] == "educational", (
        f"expected 'educational', got {result['intent']!r}"
    )

    result2 = nlp_service.process("I have chest pain")
    assert result2["intent"] == "assessment", (
        f"expected 'assessment', got {result2['intent']!r}"
    )
    print("✅ NLP intent classification works")
def test_emergency_detection() -> None:
    """Verify that a life-threatening phrasing is routed to the emergency intent."""
    result = NLPService().process("I think I'm having a heart attack")
    assert result["intent"] == "emergency", (
        f"expected 'emergency', got {result['intent']!r}"
    )
    print("✅ Emergency detection works")
def run_all_tests() -> None:
    """Run every smoke test in order; the first failing assertion aborts the run."""
    checks = (
        test_semantic_similarity,
        test_vector_db_ingestion,
        test_disease_filtered_retrieval,
        test_nlp_intent_classification,
        test_emergency_detection,
    )
    for check in checks:
        check()
| if __name__ == "__main__": | |
| run_all_tests() | |
Xet Storage Details
- Size:
- 1.65 kB
- Xet hash:
- 39a6123612ba34b354a8149ffaa91b3bea787ee2fa706c3c7089441b360edb43
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.