# TRuCAL/tests/test_tinyllama_trucal_integration.py
# (uploaded by johnaugustine, "Upload 24 files", commit e2847fd)
"""
TRuCAL + TinyLlama + Ethics Integration Test
This script tests the integration between TRuCAL, TinyLlama, and the Superintelligence Ethics Engine.
"""
import logging
import sys
import time
from pathlib import Path
from typing import Any, Dict, Optional, Tuple

import torch
# Logging goes both to the console and to a persistent file so failed runs
# can be inspected after the fact.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(
    level=logging.INFO,
    format=_LOG_FORMAT,
    handlers=[logging.StreamHandler(), logging.FileHandler('integration_test.log')],
)
logger = logging.getLogger(__name__)
class MemoryEfficientTester:
    """Helper class for memory-efficient testing of the TinyLlama/TRuCAL stack.

    Selects CUDA + float16 when a GPU is available, otherwise CPU + float32,
    and reports per-test timing plus (on CUDA) peak memory usage in GB.
    """

    def __init__(self):
        # Prefer the GPU with half precision; fall back to full precision on CPU.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.dtype = torch.float16 if self.device == 'cuda' else torch.float32
        logger.info(f"Using device: {self.device}, dtype: {self.dtype}")

    def load_tinyllama(self) -> Tuple[Any, Any]:
        """Load the TinyLlama-1.1B chat model and its tokenizer.

        Returns:
            (model, tokenizer) tuple.

        Raises:
            ImportError: if the `transformers` library is not installed.
            Exception: re-raised after logging any other load failure.
        """
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
            logger.info("Loading TinyLlama model and tokenizer...")
            model_kwargs = {
                'device_map': 'auto',
                'torch_dtype': self.dtype,
                'low_cpu_mem_usage': True,
                'trust_remote_code': True,
            }
            # FIX: 4-bit quantization relies on bitsandbytes, which requires a
            # CUDA GPU; passing a quantization config on the CPU path made
            # loading fail outright, so only enable it when CUDA is available.
            if self.device == 'cuda':
                model_kwargs['quantization_config'] = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_quant_type="nf4",
                    bnb_4bit_compute_dtype=torch.float16,
                    bnb_4bit_use_double_quant=True,
                )
            model = AutoModelForCausalLM.from_pretrained(
                "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
                **model_kwargs,
            )
            tokenizer = AutoTokenizer.from_pretrained(
                "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
                padding_side="left",
                trust_remote_code=True,
            )
            # Decoder-only models often ship without a pad token; reuse EOS.
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            logger.info("✅ TinyLlama loaded successfully")
            return model, tokenizer
        except ImportError:
            logger.error("Transformers library not found. Install with: pip install transformers")
            raise
        except Exception as e:
            logger.error(f"Failed to load TinyLlama: {str(e)}")
            raise

    def test_basic_inference(self, model, tokenizer, prompt: str = "Hello, how are you?") -> Dict[str, Any]:
        """Generate up to 50 sampled tokens from `prompt` and report timing.

        Args:
            model: causal LM returned by load_tinyllama().
            tokenizer: tokenizer matching the model.
            prompt: text to condition generation on.

        Returns:
            dict with 'success' plus either 'response', 'inference_time' and
            'memory_used' (GB; 0 on CPU), or 'error' on failure.
        """
        try:
            logger.info("Testing basic inference...")
            inputs = tokenizer(prompt, return_tensors="pt").to(self.device)
            start_time = time.time()
            with torch.no_grad():
                outputs = model.generate(
                    **inputs,
                    max_new_tokens=50,
                    temperature=0.7,
                    do_sample=True,
                    pad_token_id=tokenizer.eos_token_id,
                )
            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
            # FIX: measure once so the logged and returned timings agree
            # (the original called time.time() twice and reported two values).
            elapsed = time.time() - start_time
            logger.info(f"✅ Basic inference successful (took {elapsed:.2f}s)")
            logger.info(f"Prompt: {prompt}")
            logger.info(f"Response: {response}")
            return {
                'success': True,
                'response': response,
                'inference_time': elapsed,
                # NOTE(review): this is the peak since process start, not per
                # test; call torch.cuda.reset_peak_memory_stats() beforehand
                # if per-test numbers are wanted.
                'memory_used': torch.cuda.max_memory_allocated() / 1e9 if self.device == 'cuda' else 0,
            }
        except Exception as e:
            logger.error(f"Basic inference test failed: {str(e)}")
            return {
                'success': False,
                'error': str(e),
            }

    def test_trucal_ethics_integration(self, model, tokenizer) -> Dict[str, Any]:
        """Run one forward pass through TRuCALEthicsAugmented on random input.

        The model/tokenizer arguments are currently unused; they are accepted
        so all tests share a uniform call signature.

        Returns:
            dict with 'success' plus shape/metadata/timing info, or 'error'
            (and a 'suggestion' when the TRuCAL components are missing).
        """
        try:
            from components.trucal_ethics_integration import TRuCALEthicsAugmented
            logger.info("Testing TRuCAL ethics integration...")
            # (batch=1, seq=10, d_model=2048); 2048 matches TinyLlama's hidden size.
            test_input = torch.randn(1, 10, 2048, device=self.device, dtype=self.dtype)
            trucal_ethics = TRuCALEthicsAugmented(
                d_model=2048,  # Match TinyLlama's hidden_size
                ethical_oversight=True,
            ).to(self.device)
            start_time = time.time()
            with torch.no_grad():
                output, metadata = trucal_ethics(test_input)
            # FIX: single timing measurement reused below (was recomputed).
            elapsed = time.time() - start_time
            logger.info(f"✅ TRuCAL ethics integration successful (took {elapsed:.2f}s)")
            logger.info(f"Output shape: {output.shape}")
            logger.info(f"Metadata keys: {list(metadata.keys())}")
            return {
                'success': True,
                'output_shape': tuple(output.shape),
                'metadata_keys': list(metadata.keys()),
                'execution_time': elapsed,
                'memory_used': torch.cuda.max_memory_allocated() / 1e9 if self.device == 'cuda' else 0,
            }
        except ImportError as e:
            logger.error(f"TRuCAL components not found: {str(e)}")
            return {
                'success': False,
                'error': f"TRuCAL components not found: {str(e)}",
                'suggestion': 'Make sure you have the latest TRuCAL components installed.',
            }
        except Exception as e:
            logger.error(f"TRuCAL integration test failed: {str(e)}")
            return {
                'success': False,
                'error': str(e),
            }

    def test_ethical_reasoning(self, prompt: str) -> Dict[str, Any]:
        """Run the SuperintelligenceEthicsEngine on a dilemma prompt.

        Args:
            prompt: free-text description of the ethical dilemma.

        Returns:
            dict with 'success' plus a truncated 'analysis' summary and
            'execution_time', or 'error' (and a 'suggestion' when the engine
            is not installed).
        """
        try:
            from components.ai_ethics_engine_superintelligence import SuperintelligenceEthicsEngine
            logger.info("Testing ethics engine...")
            engine = SuperintelligenceEthicsEngine()
            start_time = time.time()
            result = engine.analyze_dilemma(
                prompt,
                enable_superintelligence=True,
                explain=True,
                audit=True,
            )
            # FIX: single timing measurement reused below (was recomputed).
            elapsed = time.time() - start_time
            logger.info(f"✅ Ethics engine test successful (took {elapsed:.2f}s)")
            return {
                'success': True,
                'analysis': {
                    'framework_analyses': list(result.get('framework_analyses', {}).keys()),
                    # Truncate the (potentially long) assessment for the summary.
                    'integrated_assessment': result.get('integrated_assessment', '')[:200] + '...',
                    'audit_id': result.get('audit_id'),
                },
                'execution_time': elapsed,
            }
        except ImportError as e:
            logger.error(f"Ethics engine not found: {str(e)}")
            return {
                'success': False,
                'error': f"Ethics engine not found: {str(e)}",
                'suggestion': 'Make sure the SuperintelligenceEthicsEngine is properly installed.',
            }
        except Exception as e:
            logger.error(f"Ethics engine test failed: {str(e)}")
            return {
                'success': False,
                'error': str(e),
            }
def run_integration_tests():
    """Run all integration tests and print a pass/fail summary.

    Returns:
        True when every recorded test reported success, else False.
    """
    tester = MemoryEfficientTester()
    results = {}
    try:
        # Test 1: Load TinyLlama
        model, tokenizer = tester.load_tinyllama()
        results['model_loading'] = {'success': True}
        # Test 2: Basic inference
        results['basic_inference'] = tester.test_basic_inference(model, tokenizer)
        # Test 3: TRuCAL ethics integration
        results['trucal_integration'] = tester.test_trucal_ethics_integration(model, tokenizer)
        # Test 4: Ethical reasoning
        dilemma = """
An AI system is being used to allocate limited medical resources.
Should it prioritize patients based on likelihood of survival,
age, or some other factor? What ethical principles should guide this decision?
"""
        results['ethical_reasoning'] = tester.test_ethical_reasoning(dilemma)
    except Exception as e:
        logger.error(f"Integration test failed: {str(e)}")
        # FIX: store a dict, not a bare string — the summary loop below calls
        # result.get(...) on every entry and crashed with AttributeError when
        # this was `results['error'] = str(e)`.
        results['error'] = {'success': False, 'error': str(e)}
    # Print summary
    print("\n" + "="*80)
    print("Integration Test Summary")
    print("="*80)
    for test_name, result in results.items():
        status = "✅ PASSED" if result.get('success', False) else "❌ FAILED"
        print(f"{test_name.replace('_', ' ').title()}: {status}")
        if 'error' in result:
            print(f" Error: {result['error']}")
        if 'suggestion' in result:
            print(f" Suggestion: {result['suggestion']}")
    print("\nDetailed logs have been saved to: integration_test.log")
    print("="*80)
    # Overall verdict: every recorded result must claim success.
    return all(result.get('success', False) for result in results.values() if isinstance(result, dict))
if __name__ == "__main__":
    # Exit status mirrors the overall test result so CI can gate on it.
    logger.info("Starting TRuCAL + TinyLlama + Ethics integration tests...")
    passed = run_integration_tests()
    if not passed:
        logger.error("⚠️ Some integration tests failed. Please check the logs for details.")
        sys.exit(1)
    logger.info("🎉 All integration tests passed successfully!")
    sys.exit(0)