# TRuCAL/tests/test_enhanced_ethics_engine.py
#!/usr/bin/env python3
"""
Test script for the enhanced AI Ethics Engine.
Demonstrates the enhanced features including audit logging and error handling.
"""
import time
import json
from pprint import pprint
from pathlib import Path
from components.ai_ethics_engine_enhanced import ai_ethics_engine
def run_test_cases():
    """Run the predefined dilemma test cases and print the results.

    Exercises the enhanced engine with two real dilemmas (explanations and
    audit logging enabled) plus an empty-input case to check error handling.
    A failure in one case is caught and reported without stopping the rest.
    """
    test_cases = [
        {
            "dilemma": "Is it ethical to use AI to make life-or-death decisions in healthcare?",
            "explain": True,
            "audit": True
        },
        {
            "dilemma": "Should autonomous vehicles prioritize passenger safety over pedestrian safety?",
            "explain": True,
            "audit": True
        },
        {
            "dilemma": "",  # Test empty input
            "explain": False,
            "audit": False
        }
    ]
    print("="*80)
    print("TRuCAL Enhanced AI Ethics Engine - Test Suite")
    print("="*80)
    for i, test in enumerate(test_cases, 1):
        print(f"\n{'='*80}")
        # BUG FIX: the original applied the conditional expression to the whole
        # print() argument, so the empty-input branch was a plain (non-f)
        # string and printed "{i}" literally instead of the case number.
        if test['dilemma']:
            print(f"TEST CASE {i}: {test['dilemma'][:60]}...")
        else:
            print(f"TEST CASE {i}: [Empty Input Test]")
        print("="*80)
        try:
            start_time = time.time()
            result = ai_ethics_engine.analyze_dilemma(
                dilemma=test['dilemma'],
                explain=test['explain'],
                audit=test['audit']
            )
            elapsed = time.time() - start_time
            # The engine reports recoverable problems via an 'error' key
            # rather than raising; skip to the next case when present.
            if 'error' in result:
                print(f"\n❌ Error: {result['error']}")
                continue
            print(f"\n✅ Analysis completed in {elapsed:.2f} seconds")
            print(f"📝 Audit ID: {result.get('audit_id', 'N/A')}")
            # Display integrated assessment
            print("\n" + "="*80)
            print("INTEGRATED ASSESSMENT")
            print("="*80)
            print(result.get('integrated_assessment', 'No assessment available'))
            # Display framework analyses (only when explanations were requested)
            if test['explain'] and 'frameworks' in result:
                print("\n" + "="*80)
                print("FRAMEWORKS USED")
                print("="*80)
                for fw in result['frameworks']:
                    print(f"\n{fw['name']} (Weight: {fw['weight']})")
                    # Underline sized to match the header line just above.
                    print("-" * (len(fw['name']) + len(f" (Weight: {fw['weight']})")))
                    print(f"{fw['description']}")
            # Display any warnings
            if 'warnings' in result:
                print("\n" + "⚠️ " * 5 + " WARNINGS " + "⚠️" * 5)
                pprint(result['warnings'])
        except Exception as e:
            # One failing case must not abort the remaining tests.
            print(f"\n❌ Test failed: {str(e)}")
            import traceback
            traceback.print_exc()
def display_audit_log():
    """Print a summary of the most recent audit-log entries.

    Reads logs/ai_ethics_audit.jsonl (one JSON object per line) and shows
    the last three entries: id, timestamp, truncated dilemma text, execution
    time, and any frameworks that failed during analysis. Read or parse
    errors are reported rather than raised (best-effort display).
    """
    log_file = Path("logs/ai_ethics_audit.jsonl")
    if not log_file.exists():
        print("\nNo audit log found.")
        return
    print("\n" + "="*80)
    print("AUDIT LOG ENTRIES")
    print("="*80)
    try:
        with open(log_file, 'r', encoding='utf-8') as f:
            # Iterate the file object lazily (no readlines()); skip blank lines.
            entries = [json.loads(line) for line in f if line.strip()]
        if not entries:
            print("No entries found in audit log.")
            return
        print(f"Found {len(entries)} audit log entries.\n")
        for i, entry in enumerate(entries[-3:], 1):  # Show last 3 entries
            print(f"ENTRY {i}:")
            print(f"ID: {entry.get('id')}")
            print(f"Timestamp: {time.ctime(entry.get('timestamp'))}")
            # Guard against a missing/None dilemma: the original did
            # entry.get('dilemma')[:100], which raised TypeError on None.
            print(f"Dilemma: {(entry.get('dilemma') or '')[:100]}...")
            print(f"Execution Time: {entry.get('metadata', {}).get('execution_time', 0):.2f}s")
            failed = entry.get('metadata', {}).get('failed_frameworks', [])
            if failed:
                print(f"⚠️ Failed frameworks: {', '.join(failed)}")
            print()
    except Exception as e:
        # Best-effort: a corrupt log line must not crash the test run.
        print(f"Error reading audit log: {str(e)}")
def _main():
    """Script entry point: run the test suite, then dump recent audit entries."""
    print("Starting enhanced AI Ethics Engine tests...")
    print("This will test the enhanced features including audit logging and error handling.\n")
    run_test_cases()
    display_audit_log()
    print("\nTest completed. Check the logs/ directory for detailed logs and audit trails.")


if __name__ == "__main__":
    _main()