Spaces:
Sleeping
Sleeping
File size: 3,919 Bytes
196c707 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 |
"""
Basic test script for ConversAI modules
Run this to verify core functionality
"""
import json
from llm_backend import LLMBackend, LLMProvider
from survey_generator import SurveyGenerator
from survey_translator import SurveyTranslator
from data_analyzer import DataAnalyzer
def test_llm_backend():
    """Smoke-test LLM backend initialization.

    Returns:
        The initialized ``LLMBackend`` instance, or ``None`` on failure so
        that dependent tests can skip gracefully.
    """
    print("\n=== Testing LLM Backend ===")
    try:
        backend = LLMBackend(provider=LLMProvider.LM_STUDIO)
        # NOTE(review): the original printed the same mis-encoded glyph for
        # both success and failure; restored to distinct pass/fail symbols.
        print(f"✓ Backend initialized with provider: {backend.provider}")
        print(f"✓ Model: {backend.model}")
        print(f"✓ API URL: {backend.api_url}")
        return backend
    except Exception as e:
        # Broad catch is deliberate: this is a smoke test that must report
        # failure and continue rather than crash the whole run.
        print(f"✗ Backend initialization failed: {e}")
        return None
def test_survey_generator(backend):
    """Smoke-test SurveyGenerator construction.

    Args:
        backend: An ``LLMBackend`` instance, or ``None`` to skip the test.

    Returns:
        The ``SurveyGenerator`` instance, or ``None`` if skipped or failed.
    """
    print("\n=== Testing Survey Generator ===")
    if not backend:
        print("✗ Skipping (no backend)")
        return None
    try:
        gen = SurveyGenerator(backend)
        print("✓ Survey generator initialized")
        # Construction only — actual generation needs a live LLM connection.
        print("  (Actual survey generation requires LLM connection)")
        return gen
    except Exception as e:
        # Broad catch keeps the smoke-test suite running on failure.
        print(f"✗ Survey generator failed: {e}")
        return None
def test_survey_translator(backend):
    """Smoke-test SurveyTranslator construction and its language list.

    Args:
        backend: An ``LLMBackend`` instance, or ``None`` to skip the test.

    Returns:
        The ``SurveyTranslator`` instance, or ``None`` if skipped or failed.
    """
    print("\n=== Testing Survey Translator ===")
    if not backend:
        print("✗ Skipping (no backend)")
        return None
    try:
        translator = SurveyTranslator(backend)
        print("✓ Translator initialized")
        # Language listing works without a live LLM connection.
        langs = translator.get_supported_languages()
        print(f"✓ Supports {len(langs)} languages")
        print(f"  Sample languages: {', '.join(list(langs.values())[:5])}")
        return translator
    except Exception as e:
        # Broad catch keeps the smoke-test suite running on failure.
        print(f"✗ Translator failed: {e}")
        return None
def test_data_analyzer(backend):
    """Smoke-test DataAnalyzer construction.

    Args:
        backend: An ``LLMBackend`` instance, or ``None`` to skip the test.

    Returns:
        The ``DataAnalyzer`` instance, or ``None`` if skipped or failed.
    """
    print("\n=== Testing Data Analyzer ===")
    if not backend:
        print("✗ Skipping (no backend)")
        return None
    try:
        analyzer = DataAnalyzer(backend)
        print("✓ Analyzer initialized")
        # Illustrative fixture for a future analysis call; unused today
        # because actual analysis requires a live LLM connection.
        sample_responses = [
            {"q1": "I had a great experience", "q2": "Very satisfied"},
            {"q1": "It was okay", "q2": "Neutral feelings"},
            {"q1": "Not very good", "q2": "Disappointed"},
        ]
        print("  (Actual analysis requires LLM connection)")
        return analyzer
    except Exception as e:
        # Broad catch keeps the smoke-test suite running on failure.
        print(f"✗ Analyzer failed: {e}")
        return None
def test_modules():
    """Run every module smoke test and print a pass/fail summary.

    Returns:
        bool: ``True`` when all four modules initialized successfully.
    """
    print("=" * 50)
    print("ConversAI Module Tests")
    print("=" * 50)
    # The backend result gates every other test.
    backend = test_llm_backend()
    gen = test_survey_generator(backend)
    translator = test_survey_translator(backend)
    analyzer = test_data_analyzer(backend)
    print("\n=== Test Summary ===")
    modules = {
        "LLM Backend": backend is not None,
        "Survey Generator": gen is not None,
        "Survey Translator": translator is not None,
        "Data Analyzer": analyzer is not None,
    }
    for module, status in modules.items():
        # BUG FIX: the original used the same mis-encoded character for both
        # branches ('"β" if status else "β"'), making pass and fail
        # indistinguishable in the summary. Restored distinct glyphs.
        symbol = "✓" if status else "✗"
        print(f"{symbol} {module}")
    all_passed = all(modules.values())
    print(f"\n{'✓ All tests passed!' if all_passed else '✗ Some tests failed'}")
    if not all_passed:
        print("\nNote: Make sure your LLM backend is configured correctly.")
        print("Check environment variables or .env file.")
    return all_passed
# Script entry point: run the full smoke-test suite when executed directly.
if __name__ == "__main__":
    test_modules()
|