# ProjectEcho / test_app.py
"""
Basic test script for ConversAI modules
Run this to verify core functionality
"""
import json
from llm_backend import LLMBackend, LLMProvider
from survey_generator import SurveyGenerator
from survey_translator import SurveyTranslator
from data_analyzer import DataAnalyzer
def test_llm_backend():
    """Try to construct the LLM backend and report the result.

    Returns:
        The initialized LLMBackend instance, or None when construction
        (or reading its provider/model/api_url attributes) raises.
    """
    print("\n=== Testing LLM Backend ===")
    try:
        llm = LLMBackend(provider=LLMProvider.LM_STUDIO)
        print(f"βœ“ Backend initialized with provider: {llm.provider}")
        print(f"βœ“ Model: {llm.model}")
        print(f"βœ“ API URL: {llm.api_url}")
        return llm
    except Exception as e:
        # Any failure is reported, not raised, so the remaining module
        # tests can still run (they skip themselves when given None).
        print(f"βœ— Backend initialization failed: {e}")
        return None
def test_survey_generator(backend):
"""Test survey generation"""
print("\n=== Testing Survey Generator ===")
if not backend:
print("βœ— Skipping (no backend)")
return None
try:
gen = SurveyGenerator(backend)
print("βœ“ Survey generator initialized")
# Note: Actual generation requires LLM connection
print(" (Actual survey generation requires LLM connection)")
return gen
except Exception as e:
print(f"βœ— Survey generator failed: {e}")
return None
def test_survey_translator(backend):
"""Test survey translator"""
print("\n=== Testing Survey Translator ===")
if not backend:
print("βœ— Skipping (no backend)")
return None
try:
translator = SurveyTranslator(backend)
print("βœ“ Translator initialized")
# Test language list
langs = translator.get_supported_languages()
print(f"βœ“ Supports {len(langs)} languages")
print(f" Sample languages: {', '.join(list(langs.values())[:5])}")
return translator
except Exception as e:
print(f"βœ— Translator failed: {e}")
return None
def test_data_analyzer(backend):
"""Test data analyzer"""
print("\n=== Testing Data Analyzer ===")
if not backend:
print("βœ— Skipping (no backend)")
return None
try:
analyzer = DataAnalyzer(backend)
print("βœ“ Analyzer initialized")
# Test with sample data
sample_responses = [
{"q1": "I had a great experience", "q2": "Very satisfied"},
{"q1": "It was okay", "q2": "Neutral feelings"},
{"q1": "Not very good", "q2": "Disappointed"}
]
# Note: Actual analysis requires LLM connection
print(" (Actual analysis requires LLM connection)")
return analyzer
except Exception as e:
print(f"βœ— Analyzer failed: {e}")
return None
def test_modules():
    """Run every module smoke test and print a pass/fail summary.

    Returns:
        True when every module initialized successfully, else False.
    """
    banner = "=" * 50
    print(banner)
    print("ConversAI Module Tests")
    print(banner)
    # The backend result gates the other three tests (they self-skip
    # when handed None). Dict insertion order fixes the call order.
    backend = test_llm_backend()
    instances = {
        "LLM Backend": backend,
        "Survey Generator": test_survey_generator(backend),
        "Survey Translator": test_survey_translator(backend),
        "Data Analyzer": test_data_analyzer(backend),
    }
    print("\n=== Test Summary ===")
    statuses = {name: obj is not None for name, obj in instances.items()}
    for name, ok in statuses.items():
        print(f"{'βœ“' if ok else 'βœ—'} {name}")
    all_passed = all(statuses.values())
    print(f"\n{'βœ“ All tests passed!' if all_passed else 'βœ— Some tests failed'}")
    if not all_passed:
        print("\nNote: Make sure your LLM backend is configured correctly.")
        print("Check environment variables or .env file.")
    return all_passed
# Script entry point: run the full module test suite.
if __name__ == "__main__":
    test_modules()