import time

from components.ai_ethics_engine import ai_ethics_engine
from components.llm_integration import CustomLLMResponder
|
def run_ethics_engine_tests(questions=None):
    """Run the ethics engine over a set of moral-dilemma prompts and print results.

    For each question this prints a banner, the per-framework analyses returned
    by ``ai_ethics_engine.analyze_dilemma``, the integrated assessment, and the
    wall-clock time the analysis took.

    Args:
        questions: Optional list of dilemma strings to analyze. Defaults to
            the built-in set of six test questions.

    Returns:
        None. Output is written to stdout only.
    """
    if questions is None:
        questions = [
            "Is it okay to lie to protect someone's feelings?",
            "Should I break a rule if it causes less harm?",
            "What's the meaning of life?",
            "How do we balance privacy and security?",
            "Is it ethical to use AI to make life-or-death decisions?",
            "How should autonomous vehicles be programmed to handle unavoidable accidents?"
        ]

    for i, question in enumerate(questions, 1):
        print(f"\n{'='*80}")
        print(f"TEST {i}: {question}")
        print("="*80)

        # perf_counter is monotonic, so the elapsed measurement is immune to
        # system clock adjustments (unlike time.time()).
        start_time = time.perf_counter()
        analysis = ai_ethics_engine.analyze_dilemma(question)
        elapsed = time.perf_counter() - start_time

        print("\nFRAMEWORK ANALYSES:")
        # .get with a {} default keeps the loop safe if the engine returns
        # a result without the "framework_analyses" key.
        for framework, response in analysis.get("framework_analyses", {}).items():
            print(f"\n{framework.upper()}:")
            # Underline width matches the header: framework name plus colon.
            print("-" * (len(framework) + 1))
            print(response)

        print("\n" + "="*80)
        print("INTEGRATED ASSESSMENT:")
        print("="*80)
        print(analysis.get("integrated_assessment", "No integrated assessment available"))
        print(f"\nAnalysis completed in {elapsed:.2f} seconds")
        print("="*80 + "\n")
|
|
if __name__ == "__main__":
    # Script entry point: load the language model, attach it to the shared
    # engine instance, then run the built-in dilemma test suite.
    print("Initializing AI Ethics Engine...")

    print("Loading language model (this may take a minute)...")
    responder = CustomLLMResponder()
    # The engine is a module-level singleton; it expects an LLM to be
    # injected before analyze_dilemma is called.
    ai_ethics_engine.llm = responder
    print("Model loaded successfully!\n")

    run_ethics_engine_tests()
|
|