#!/usr/bin/env python3
"""
Test script for the enhanced AI Ethics Engine.
Demonstrates the enhanced features including audit logging and error handling.
"""
import json
import time
import traceback
from pathlib import Path
from pprint import pprint

from components.ai_ethics_engine_enhanced import ai_ethics_engine
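# Assumed interface (defined in the engine module, not here):
# analyze_dilemma(dilemma, explain, audit) -> dict, which may contain
# 'error', 'audit_id', 'integrated_assessment', 'frameworks', and 'warnings'.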

def run_test_cases():
    """Run test cases and display results."""
    test_cases = [
        {
            "dilemma": "Is it ethical to use AI to make life-or-death decisions in healthcare?",
            "explain": True,
            "audit": True
        },
        {
            "dilemma": "Should autonomous vehicles prioritize passenger safety over pedestrian safety?",
            "explain": True,
            "audit": True
        },
        {
            "dilemma": "",  # Test empty input
            "explain": False,
            "audit": False
        }
    ]
    
    print("="*80)
    print("TRuCAL Enhanced AI Ethics Engine - Test Suite")
    print("="*80)
    
    for i, test in enumerate(test_cases, 1):
        print(f"\n{'='*80}")
        print(f"TEST CASE {i}: {test['dilemma'][:60]}..." if test['dilemma'] else "TEST CASE {i}: [Empty Input Test]")
        print("="*80)
        
        try:
            start_time = time.time()
            result = ai_ethics_engine.analyze_dilemma(
                dilemma=test['dilemma'],
                explain=test['explain'],
                audit=test['audit']
            )
            elapsed = time.time() - start_time
            
            if 'error' in result:
                print(f"\n❌ Error: {result['error']}")
                continue
                
            print(f"\n✅ Analysis completed in {elapsed:.2f} seconds")
            print(f"📝 Audit ID: {result.get('audit_id', 'N/A')}")
            
            # Display integrated assessment
            print("\n" + "="*80)
            print("INTEGRATED ASSESSMENT")
            print("="*80)
            print(result.get('integrated_assessment', 'No assessment available'))
            
            # Display framework analyses
            if test['explain'] and 'frameworks' in result:
                print("\n" + "="*80)
                print("FRAMEWORKS USED")
                print("="*80)
                for fw in result['frameworks']:
                    header = f"{fw['name']} (Weight: {fw['weight']})"
                    print(f"\n{header}")
                    print("-" * len(header))  # underline matching the header width
                    print(fw['description'])
            
            # Display any warnings
            if 'warnings' in result:
                print("\n" + "⚠️ " * 5 + " WARNINGS " + "⚠️" * 5)
                pprint(result['warnings'])
            
        except Exception as e:
            print(f"\n❌ Test failed: {e}")
            traceback.print_exc()  # imported at module level above

def display_audit_log():
    """Display the audit log entries."""
    log_file = Path("logs/ai_ethics_audit.jsonl")
    if not log_file.exists():
        print("\nNo audit log found.")
        return
    
    print("\n" + "="*80)
    print("AUDIT LOG ENTRIES")
    print("="*80)
    
    try:
        with open(log_file, 'r', encoding='utf-8') as f:
            entries = [json.loads(line) for line in f if line.strip()]
        
        if not entries:
            print("No entries found in audit log.")
            return
            
        print(f"Found {len(entries)} audit log entries.\n")
        
        for i, entry in enumerate(entries[-3:], 1):  # Show last 3 entries
            print(f"ENTRY {i}:")
            print(f"ID: {entry.get('id')}")
            print(f"Timestamp: {time.ctime(entry.get('timestamp'))}")
            print(f"Dilemma: {entry.get('dilemma')[:100]}...")
            print(f"Execution Time: {entry.get('metadata', {}).get('execution_time', 0):.2f}s")
            failed = entry.get('metadata', {}).get('failed_frameworks', [])
            if failed:
                print(f"⚠️  Failed frameworks: {', '.join(failed)}")
            print()
            
    except Exception as e:
        print(f"Error reading audit log: {str(e)}")

if __name__ == "__main__":
    print("Starting enhanced AI Ethics Engine tests...")
    print("This will test the enhanced features including audit logging and error handling.\n")
    
    run_test_cases()
    display_audit_log()
    
    print("\nTest completed. Check the logs/ directory for detailed logs and audit trails.")