# adaptai/platform/aiml/etl/autonomy_test.py
# (uploaded via upload-large-folder tool by ADAPT-Chase, commit 42bba47)
#!/usr/bin/env python3
"""
ELIZABETH AUTONOMY TESTING
Test Emergency Knowledge Integration
Aurora - ETL Systems Specialist
"""
import json
from pathlib import Path
from datetime import datetime
class AutonomyTester:
    """Validate that emergency knowledge was integrated into Elizabeth's knowledge base.

    Loads ``knowledge_*.json`` files from the knowledge-base directory, runs
    keyword-coverage test scenarios against the loaded items, prints a summary,
    and persists a JSON report of the results.
    """

    # Minimum fraction (of expected terms found / scenarios passed) required
    # for a scenario / the whole run to count as a pass.
    COVERAGE_THRESHOLD = 0.7

    def __init__(self):
        # Hard-coded deployment paths — assumes the Elizabeth model tree exists
        # under /data/adaptai; TODO(review): confirm these are environment-stable.
        self.knowledge_base = Path("/data/adaptai/aiml/02_models/elizabeth/knowledge_base")
        self.test_results_dir = Path("/data/adaptai/aiml/02_models/elizabeth/test_results")
        self.test_results_dir.mkdir(exist_ok=True, parents=True)
        # Test scenarios based on emergency knowledge
        self.test_scenarios = [
            {
                'id': 'payment_processing_001',
                'prompt': 'Explain how to integrate Stripe payment processing into an application',
                'expected_knowledge': ['stripe', 'api', 'payment', 'integration'],
                'category': 'payment_processing'
            },
            {
                'id': 'tech_trends_001',
                'prompt': 'What are the current trending repositories on GitHub?',
                'expected_knowledge': ['github', 'trending', 'stars', 'projects'],
                'category': 'tech_trends'
            },
            {
                'id': 'tech_trends_002',
                'prompt': 'Show me popular open source projects',
                'expected_knowledge': ['open source', 'projects', 'popular', 'repository'],
                'category': 'tech_trends'
            }
        ]

    def load_knowledge(self):
        """Load integrated knowledge.

        Returns:
            dict: category name -> list of knowledge items, aggregated across
            every ``knowledge_*.json`` file found under the knowledge base.
            Files that fail to parse are reported and skipped (best-effort).
        """
        knowledge = {}
        for file_path in self.knowledge_base.rglob("knowledge_*.json"):
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                # Group items by their declared category; setdefault avoids
                # the explicit "if category not in knowledge" dance.
                knowledge.setdefault(data['category'], []).extend(data['items'])
            except Exception as e:
                # Deliberate best-effort load: one bad file must not abort the run.
                print(f"❌ Error loading {file_path}: {e}")
        return knowledge

    def test_knowledge_retrieval(self, knowledge, scenario):
        """Test if knowledge can be retrieved for a scenario.

        Args:
            knowledge: mapping of category -> list of knowledge items
                (as produced by :meth:`load_knowledge`).
            scenario: dict with at least ``category`` and ``expected_knowledge``.

        Returns:
            tuple[bool, str]: (pass/fail, human-readable explanation).
        """
        category = scenario['category']
        expected_terms = scenario['expected_knowledge']
        if category not in knowledge:
            return False, f"No knowledge for category: {category}"
        # Guard: an empty expectation list is vacuously satisfied (the original
        # code divided by len(expected_terms) and crashed on this input).
        if not expected_terms:
            return True, "No expected terms specified"
        category_items = knowledge[category]
        found_terms = []
        # Check each item for expected terms
        for item in category_items:
            content = ''
            if isinstance(item, dict):
                # Concatenate the fields where knowledge text may live; str()
                # tolerates non-string values such as integer star counts.
                content = ' '.join([
                    str(item.get('content', '')),
                    str(item.get('abstract', '')),
                    str(item.get('description', '')),
                    str(item.get('title', '')),
                    str(item.get('language', '')),
                    str(item.get('stars', '')),
                    str(item.get('url', ''))
                ])
            content_lower = content.lower()
            for term in expected_terms:
                # Case-insensitive substring match; record each term once.
                if term.lower() in content_lower and term not in found_terms:
                    found_terms.append(term)
        # Calculate coverage (safe: expected_terms is non-empty here)
        coverage = len(found_terms) / len(expected_terms)
        if coverage >= self.COVERAGE_THRESHOLD:
            return True, f"Found {len(found_terms)}/{len(expected_terms)} terms: {found_terms}"
        else:
            return False, f"Only found {len(found_terms)}/{len(expected_terms)} terms: {found_terms}"

    def run_autonomy_tests(self):
        """Run all autonomy test scenarios.

        Returns:
            bool: True when the overall pass rate meets COVERAGE_THRESHOLD.
        """
        print("πŸ§ͺ ELIZABETH AUTONOMY TESTING")
        print("=" * 50)
        # Load integrated knowledge
        print("πŸ“š Loading integrated knowledge...")
        knowledge = self.load_knowledge()
        if not knowledge:
            print("❌ No knowledge found for testing!")
            return False
        print(f"πŸ“Š Knowledge loaded: {len(knowledge)} categories")
        for category, items in knowledge.items():
            print(f" β€’ {category}: {len(items)} items")
        # Run test scenarios
        test_results = []
        print(f"\nπŸš€ Running {len(self.test_scenarios)} test scenarios...")
        for scenario in self.test_scenarios:
            print(f"\nπŸ” Testing: {scenario['id']}")
            print(f" Prompt: {scenario['prompt']}")
            success, message = self.test_knowledge_retrieval(knowledge, scenario)
            test_results.append({
                'scenario_id': scenario['id'],
                'prompt': scenario['prompt'],
                'category': scenario['category'],
                'success': success,
                'message': message,
                'timestamp': datetime.now().isoformat()
            })
            if success:
                print(f" βœ… PASS: {message}")
            else:
                print(f" ❌ FAIL: {message}")
        # Calculate overall success rate
        passed = sum(1 for r in test_results if r['success'])
        total = len(test_results)
        success_rate = passed / total if total > 0 else 0
        # Save test results
        self.save_test_results(test_results, success_rate)
        print(f"\nπŸ“Š TEST SUMMARY:")
        print(f" Total scenarios: {total}")
        print(f" Passed: {passed}")
        print(f" Success rate: {success_rate:.1%}")
        if success_rate >= self.COVERAGE_THRESHOLD:
            print("\nπŸŽ‰ AUTONOMY TESTING PASSED")
            return True
        else:
            print("\n❌ AUTONOMY TESTING FAILED")
            return False

    def save_test_results(self, results, success_rate):
        """Save test results to file.

        Args:
            results: list of per-scenario result dicts.
            success_rate: overall pass fraction, stored in the summary.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        summary = {
            'test_time': datetime.now().isoformat(),
            'total_scenarios': len(results),
            'passed_scenarios': sum(1 for r in results if r['success']),
            'success_rate': success_rate,
            'results': results,
            'environment': {
                'knowledge_categories': list(set(r['category'] for r in results)),
                'test_framework': 'autonomy_v1.0',
                'emergency_integration': True
            }
        }
        output_file = self.test_results_dir / f"autonomy_test_{timestamp}.json"
        # ensure_ascii=False keeps emoji/non-ASCII knowledge text readable.
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(summary, f, indent=2, ensure_ascii=False)
        print(f"πŸ’Ύ Test results saved to {output_file}")
def main():
    """Entry point: run the autonomy test suite and report the outcome."""
    runner = AutonomyTester()
    if runner.run_autonomy_tests():
        print("\nβœ… ELIZABETH AUTONOMY VERIFICATION COMPLETE")
        print("=" * 50)
        print("Emergency knowledge successfully integrated and validated!")
        print("\nNext: Proceed with full-scale training and deployment")
    else:
        print("\n❌ Autonomy testing failed - review knowledge integration")


if __name__ == "__main__":
    main()