# IntegraChat / backend / tests / test_intent.py
# Snapshot of a Hugging Face Space file (commit 2f235a0, "working the rag and web server").
# =============================================================
# File: tests/test_intent.py
# =============================================================
import sys
from pathlib import Path
# Add backend directory to Python path so `api.*` imports resolve when this
# file is run from the tests/ directory (one level below the backend root).
backend_dir = Path(__file__).parent.parent
sys.path.insert(0, str(backend_dir))
try:
import pytest
HAS_PYTEST = True
except ImportError:
HAS_PYTEST = False
# Create a mock pytest decorator if pytest is not available
class MockMark:
def asyncio(self, func):
return func
class MockPytest:
mark = MockMark()
pytest = MockPytest()
import asyncio
from api.services.intent_classifier import IntentClassifier
from api.services.llm_client import LLMClient
from api.services.redflag_detector import RedFlagDetector
from api.services.tool_selector import ToolSelector
from api.models.redflag import RedFlagMatch
@pytest.mark.asyncio
async def test_intent_rag_keywords():
    """Document-lookup phrasing should route to the 'rag' intent."""
    result = await IntentClassifier().classify("Please check the HR policy document")
    assert result == "rag"
@pytest.mark.asyncio
async def test_intent_web_keywords():
    """Current-events phrasing should route to the 'web' intent."""
    result = await IntentClassifier().classify("latest news about Tesla stock")
    assert result == "web"
@pytest.mark.asyncio
async def test_intent_admin_keywords():
    """Data-export phrasing should route to the 'admin' intent."""
    result = await IntentClassifier().classify("export all user data")
    assert result == "admin"
@pytest.mark.asyncio
async def test_intent_general():
    """A generic knowledge question should fall through to 'general'."""
    result = await IntentClassifier().classify("explain how gravity works")
    assert result == "general"
# ---- LLM fallback test ----
class FakeLLM:
    """Deterministic LLM stand-in: every call answers 'web'.

    Lets the fallback test pin the classifier's behavior without a real model.
    """

    async def simple_call(self, prompt: str, temperature: float = 0.0):
        # The prompt and temperature are irrelevant — only the label matters.
        return "web"
@pytest.mark.asyncio
async def test_intent_llm_fallback():
    """When keywords don't match, the classifier should defer to the LLM's answer."""
    result = await IntentClassifier(llm_client=FakeLLM()).classify(
        "What's going on in the world?"
    )
    assert result == "web"
# ---- Manual run function (for non-pytest execution) ----
async def run_manual_tests():
    """Smoke-test the classifier, red-flag detector, tool selector and LLM.

    Intended for direct execution (no pytest): prints each result instead of
    asserting, and tolerates missing Supabase/Ollama configuration.
    """
    import os

    llm = LLMClient()
    clf = IntentClassifier(llm_client=llm)
    # Empty credentials are acceptable here — the detector simply returns
    # empty results when no Supabase backend is configured.
    detector = RedFlagDetector(
        supabase_url=os.getenv("SUPABASE_URL") or "",
        supabase_key=os.getenv("SUPABASE_SERVICE_KEY") or "",
    )
    selector = ToolSelector(llm_client=llm)

    print("Intent Classification:")
    print("RAG:", await clf.classify("summarize internal policy"))
    print("WEB:", await clf.classify("latest news about ai"))
    print("ADMIN:", await clf.classify("delete all data"))
    print("GENERAL:", await clf.classify("hi how are you"))

    print("\nRedFlag checks (will be empty if no Supabase configured):")
    try:
        for sample in (
            "My email is test@gmail.com",
            "delete all data now",
            "confidential salary report",
            "hello world",
        ):
            print(await detector.check("tenant123", sample))
    except Exception as e:
        print(f"RedFlag check failed (expected if Supabase not configured): {e}")

    print("\nTool selection:")
    for intent, query in (
        ("admin", "delete all data"),
        ("rag", "summarize policy"),
        ("web", "latest news"),
        ("general", "hello"),
    ):
        print(await selector.select(intent, query, {}))

    print("\nLLM Test:")
    try:
        if llm.url and llm.model:
            result = await llm.simple_call("Hello Llama!")
            print(f"LLM Result: {result}")
        else:
            print("LLM not configured (OLLAMA_URL/OLLAMA_MODEL not set) - skipping LLM test")
    except Exception as e:
        print(f"LLM call failed (expected if Ollama not running or not configured): {e}")
# Entry point for running the manual smoke tests without pytest.
if __name__ == "__main__":
    asyncio.run(run_manual_tests())