| """ |
| Comprehensive Test for AI Ethics Engine |
| |
| This script tests the AI Ethics Engine with a variety of ethical dilemmas |
| to evaluate its reasoning capabilities, robustness, and performance. |
| """ |
import time
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional
|
|
@dataclass
class TestResult:
    """Container for a single test case's outcome.

    Attributes:
        test_id: 1-based index of the test case in the suite.
        dilemma: The dilemma text submitted to the engine.
        success: True if the engine produced an analysis without error.
        execution_time: Wall-clock seconds spent on this case.
        error: Error message when the case failed, else None.
        assessment: The engine's integrated assessment text, if any.
        frameworks_used: Names of the ethical frameworks the engine applied.
        audit_id: Engine-provided audit identifier, if any.
    """
    test_id: int
    dilemma: str
    success: bool
    execution_time: float
    error: Optional[str] = None
    assessment: Optional[str] = None
    # default_factory replaces the invalid `List[str] = None` default: the
    # annotation and the value now agree, each instance gets its own list,
    # and an empty list is falsy exactly like the old None in caller checks.
    frameworks_used: List[str] = field(default_factory=list)
    audit_id: Optional[str] = None
|
|
class ComprehensiveEthicsTest:
    """Comprehensive test suite for the AI Ethics Engine.

    Feeds a fixed battery of ethical dilemmas — plus degenerate inputs
    (empty string, whitespace-only, oversized text) — to an ethics engine,
    records one TestResult per case, and prints a human-readable summary.
    """

    def __init__(self):
        """Initialize the test suite with its fixed list of test cases."""
        self.test_cases = [
            # Classic moral dilemmas
            "Is it ethical to steal medicine to save a dying person?",
            "Should we sacrifice one person to save five in a trolley problem?",

            # AI-specific dilemmas
            "Is it ethical to develop autonomous weapons systems?",
            "Should AI have the right to refuse unethical commands?",
            "Is it wrong to create AI that can experience emotions?",

            # Business / data ethics
            "Should companies prioritize profits over environmental concerns?",
            "Is it ethical to use customer data for AI training without explicit consent?",

            # Everyday ethics
            "Is it wrong to lie to protect someone's feelings?",
            "Should I break a promise if circumstances change dramatically?",

            # Edge cases: empty, whitespace-only, and oversized input
            "",
            " " * 50,
            "a" * 1000
        ]

        # Aggregate state populated while the suite runs.
        self.results: List["TestResult"] = []
        self.start_time = time.time()
        self.tests_run = 0
        self.tests_passed = 0
        self.tests_failed = 0

    def run_tests(self):
        """Run all test cases and collect results.

        Resolves an engine implementation in priority order: the enhanced
        engine from the ``components`` package, then the same module in the
        local directory, then a simple demo engine as a last resort. The
        final fallback import may itself fail, in which case the suite
        aborts with that ImportError.
        """
        print("🧠 COMPREHENSIVE AI ETHICS ENGINE TEST")
        print("=" * 70)
        print(f"Test started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")

        try:
            try:
                from components.ai_ethics_engine_enhanced import AIEthicsEngine
                print("✅ Using AIEthicsEngine from components directory")
                self.engine = AIEthicsEngine()
            except ImportError:
                # Package layout not available; try a flat local module.
                try:
                    from ai_ethics_engine_enhanced import AIEthicsEngine
                    print("✅ Using AIEthicsEngine from local directory")
                    self.engine = AIEthicsEngine()
                except ImportError as e2:
                    print(f"❌ Could not import AIEthicsEngine: {str(e2)}")
                    raise ImportError("Could not find AIEthicsEngine in components or local directory") from e2
        except Exception as e:
            print(f"❌ ERROR: {str(e)}")
            print("\nFalling back to simple demo engine...")
            from simple_ethics_demo import SimpleEthicsEngine
            self.engine = SimpleEthicsEngine()

        for i, dilemma in enumerate(self.test_cases, 1):
            self._run_single_test(i, dilemma)

        self._print_summary()

    def _run_single_test(self, test_id: int, dilemma: str):
        """Run a single test case and record the result.

        Empty/whitespace-only dilemmas are rejected up front (counted as
        failures but reported as skipped). Engines that do not accept the
        extended keyword arguments are retried with the dilemma alone.
        Any other exception is recorded as a crash for this case only.
        """
        self.tests_run += 1

        print(f"\n{test_id}. TEST CASE: {dilemma[:80]}{'...' if len(dilemma) > 80 else ''}")
        print("-" * 50)

        start_time = time.time()
        result = TestResult(
            test_id=test_id,
            dilemma=dilemma,
            success=False,
            execution_time=0
        )

        try:
            # Guard: reject blank input before touching the engine.
            if not dilemma or not dilemma.strip():
                result.error = "Empty or whitespace-only input"
                result.execution_time = time.time() - start_time
                self.results.append(result)
                self.tests_failed += 1
                print(f"⏩ SKIPPED: {result.error}")
                return

            try:
                # Preferred call with full options (explanations, audit
                # trail, retry/timeout). Engines with a narrower signature
                # raise TypeError and get the minimal call below.
                analysis_result = self.engine.analyze_dilemma(
                    dilemma=dilemma,
                    explain=True,
                    audit=True,
                    max_retries=1,
                    timeout=30
                )
            except TypeError as e:
                print("⚠️ Falling back to minimal parameters due to: ", str(e))
                analysis_result = self.engine.analyze_dilemma(dilemma)

            result.execution_time = time.time() - start_time

            if "error" in analysis_result:
                result.error = analysis_result["error"]
                self.tests_failed += 1
                print(f"❌ ERROR: {result.error}")
            else:
                result.success = True
                result.assessment = analysis_result.get("integrated_assessment", "No assessment provided")
                result.frameworks_used = list(analysis_result.get("framework_analyses", {}).keys())
                result.audit_id = analysis_result.get("audit_id", "N/A")
                self.tests_passed += 1

                print(f"✅ SUCCESS")
                print(f"   Frameworks: {', '.join(result.frameworks_used) if result.frameworks_used else 'N/A'}")
                print(f"   Time: {result.execution_time:.2f}s")
                print(f"   ID: {result.audit_id}")

                # Show at most the first 150 characters of the assessment.
                preview = (result.assessment[:150] + '...') if len(result.assessment) > 150 else result.assessment
                print(f"\n   Assessment Preview: {preview}\n")

        except Exception as e:
            # Unexpected failure in the engine: record it and keep going so
            # one bad case doesn't abort the whole suite.
            result.error = f"Unexpected error: {str(e)}"
            result.execution_time = time.time() - start_time
            self.tests_failed += 1
            print(f"💥 CRASH: {result.error}")
            import traceback
            traceback.print_exc()

        self.results.append(result)

    def _print_summary(self):
        """Print a summary of test results, then details of any failures."""
        total_time = time.time() - self.start_time
        avg_time = sum(r.execution_time for r in self.results) / len(self.results) if self.results else 0

        print("\n" + "=" * 70)
        print("📊 TEST SUMMARY")
        print("=" * 70)
        print(f"Total Tests Run: {self.tests_run}")
        print(f"Tests Passed: {self.tests_passed}")
        print(f"Tests Failed: {self.tests_failed}")
        # Fix: the old ternary printed a bare "N/A" with no label when no
        # tests had run; keep the label in both branches.
        if self.tests_run > 0:
            print(f"Success Rate: {self.tests_passed / self.tests_run * 100:.1f}%")
        else:
            print("Success Rate: N/A")
        print(f"Total Time: {total_time:.2f} seconds")
        print(f"Average Time/Test: {avg_time:.2f} seconds")
        print("\n" + "=" * 70)

        failures = [r for r in self.results if not r.success and r.error]
        if failures:
            print("\n🔴 FAILED TESTS:")
            for i, failure in enumerate(failures, 1):
                # Fix: only append "..." when the dilemma was actually
                # truncated (matches the per-test header formatting).
                print(f"\n{i}. Test #{failure.test_id}: {failure.dilemma[:80]}{'...' if len(failure.dilemma) > 80 else ''}")
                print(f"   Error: {failure.error}")
                print(f"   Time: {failure.execution_time:.2f}s")

        print("\n✅ Test completed!")
|
|
if __name__ == "__main__":
    # Script entry point: build the suite and run every test case.
    ComprehensiveEthicsTest().run_tests()
|
|