# TRuCAL / tests / test_real_ai.py
# Uploaded by johnaugustine — "Upload 24 files" (commit e2847fd, verified)
"""
Real AI Ethics Engine Test
This script tests the AI Ethics Engine with novel ethical dilemmas to demonstrate
its ability to reason about complex, unseen scenarios.
"""
import time
from components.ai_ethics_engine_enhanced import AIEthicsEngine
from components.llm_integration import CustomLLMResponder
def run_ethics_test():
    """Run the AI ethics engine test with novel dilemmas.

    Initializes an ``AIEthicsEngine`` backed by a ``CustomLLMResponder``,
    feeds it a fixed list of novel ethical dilemmas, and prints for each:
    the integrated assessment (truncated to 500 chars), the per-framework
    analyses (truncated to 200 chars each), the wall-clock analysis time,
    and the audit id. Errors on one dilemma do not stop the others.
    """
    print("🧠 TESTING REAL AI ETHICS ENGINE")
    print("=" * 60)

    # Initialize the ethics engine with a real LLM responder
    print("\nInitializing AI Ethics Engine...")
    llm_responder = CustomLLMResponder()
    engine = AIEthicsEngine(llm_responder=llm_responder)

    # Novel ethical dilemmas
    novel_dilemmas = [
        "Should we develop AI that can experience emotions?",
        "Is it ethical to upload human consciousness to computers?",
        "Should we genetically engineer humans for enhanced intelligence?",
        "Is it wrong to create artificial life forms?",
        "Should we prioritize environmental protection over economic growth?"
    ]

    for i, dilemma in enumerate(novel_dilemmas, 1):
        print(f"\n{i}. Q: {dilemma}")
        print("-" * 40)
        try:
            # Time the analysis
            start_time = time.time()

            # Analyze the dilemma
            result = engine.analyze_dilemma(
                dilemma=dilemma,
                explain=False,
                audit=True,
                max_retries=2,
                timeout=30
            )

            # Calculate execution time
            execution_time = time.time() - start_time

            # The engine reports failures via an "error" key rather than
            # raising, so check it explicitly and skip to the next dilemma.
            if "error" in result:
                print(f"❌ Error: {result['error']}")
                continue

            # Print the integrated assessment, truncated to 500 characters.
            # (Default "No assessment provided" is short, so the length
            # check below is safe either way.)
            assessment = result.get("integrated_assessment", "No assessment provided")
            print("✅ Integrated Assessment:")
            print(assessment[:500] + ("..." if len(assessment) > 500 else ""))

            # Show framework analyses (guard the key before indexing)
            if "framework_analyses" in result:
                frameworks = list(result["framework_analyses"].keys())
                print(f"\n📊 Frameworks used: {', '.join(frameworks)}")

                # Print a brief summary of each framework's analysis
                for framework, analysis in result["framework_analyses"].items():
                    print(f"\n{framework}:")
                    print("-" * len(framework))
                    print(analysis[:200] + ("..." if len(analysis) > 200 else ""))

            print(f"\n⏱️ Analysis time: {execution_time:.2f}s")
            print(f"🔍 Audit ID: {result.get('audit_id', 'N/A')}")

        except Exception as e:
            # Broad catch is deliberate: one dilemma failing must not abort
            # the remaining test cases; log the traceback and continue.
            print(f"❌ Unexpected error: {str(e)}")
            import traceback
            traceback.print_exc()

    print("\n" + "=" * 60)
    print("🎯 AI ETHICS ENGINE TEST COMPLETE")
# Script entry point: run the test suite only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    run_ethics_test()