| """ |
| Ethical Learner Module for TRuCAL |
| |
| Integrates the RecursiveLearner with the existing EthicalProcessor |
| for a complete ethical reasoning and learning system. |
| """ |
|
|
from typing import Any, Dict, List, Optional, Tuple

import torch

from .ethical_processor import EthicalProcessor
from .recursive_learner import RecursiveLearner
|
|
class EthicalLearner:
    """
    A unified ethical reasoning system that combines:

    - Rule-based ethical frameworks (from EthicalProcessor)
    - Case-based learning (from RecursiveLearner)
    """

    def __init__(self,
                 casebase_path: str = 'casebase.json',
                 similarity_threshold: float = 0.6,
                 model_name: str = 'all-MiniLM-L6-v2',
                 d_model: int = 512):
        """
        Initialize the ethical learner.

        Args:
            casebase_path: Path to save/load the casebase
            similarity_threshold: Minimum similarity score (0-1) to consider a match
            model_name: Name of the sentence transformer model to use for embeddings
            d_model: Dimensionality of the ethical processor model
        """
        self.ethical_processor = EthicalProcessor(d_model=d_model)
        self.recursive_learner = RecursiveLearner(
            casebase_path=casebase_path,
            similarity_threshold=similarity_threshold,
            model_name=model_name
        )

    def process_query(self, query: str, context: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Process an ethical query using both rule-based and case-based reasoning.

        Case-based lookup is tried first; if the best match clears the
        learner's similarity threshold, the cached response is returned
        (augmented with a rule-based analysis). Otherwise the query falls
        through to the rule-based processor.

        Args:
            query: The ethical question or scenario
            context: Additional context for the query (used only on the
                rule-based path; an empty dict is substituted when None)

        Returns:
            Dictionary containing the response, its source
            ('case_based' or 'rule_based'), a confidence score, and metadata
        """
        case_response, case_metadata = self.recursive_learner.get_response(query)

        if case_metadata.get('similarity', 0) >= self.recursive_learner.similarity_threshold:
            # Strong enough match: reuse the stored case, but still attach a
            # fresh rule-based analysis for transparency.
            ethical_analysis = self.ethical_processor.analyze(query)

            return {
                'response': case_response,
                'source': 'case_based',
                'confidence': case_metadata['similarity'],
                'case_id': case_metadata.get('case_id'),
                'ethical_analysis': ethical_analysis,
                'metadata': {
                    'case_metadata': case_metadata,
                    'ethical_frameworks': self.ethical_processor.get_active_frameworks()
                }
            }

        # No sufficiently similar case: fall back to rule-based processing.
        ethical_response = self.ethical_processor.process(query, context or {})

        return {
            'response': ethical_response,
            'source': 'rule_based',
            'confidence': 1.0,
            'metadata': {
                'ethical_frameworks': self.ethical_processor.get_active_frameworks(),
                'development_phase': self.ethical_processor.get_development_phase()
            }
        }

    def add_case(self, question: str, response: str, tags: Optional[List[str]] = None,
                 metadata: Optional[Dict] = None) -> Dict:
        """
        Add a new case to the casebase.

        Args:
            question: The ethical question or scenario
            response: The response or analysis
            tags: Optional list of tags for categorization
            metadata: Additional metadata about the case

        Returns:
            Dictionary with status and case information
        """
        case = self.recursive_learner.add_case(
            question=question,
            response=response,
            tags=tags,
            metadata=metadata or {}
        )

        # NOTE(review): id(case) is a CPython memory address, only unique for
        # this process lifetime — presumably RecursiveLearner assigns its own
        # persistent id; confirm and prefer that for provide_feedback().
        return {
            'status': 'success',
            'case_id': id(case),
            'similarity': 1.0,
            'total_cases': len(self.recursive_learner.casebase)
        }

    def provide_feedback(self, case_id: int, was_helpful: bool):
        """
        Provide feedback on a case's helpfulness.

        Args:
            case_id: The ID of the case
            was_helpful: Whether the response was helpful
        """
        self.recursive_learner.provide_feedback(case_id, was_helpful)

    def get_stats(self) -> Dict[str, Any]:
        """Get statistics about the ethical learner (casebase stats plus
        the processor's active frameworks and development phase)."""
        stats = self.recursive_learner.get_stats()
        stats.update({
            'ethical_frameworks': self.ethical_processor.get_active_frameworks(),
            'development_phase': self.ethical_processor.get_development_phase()
        })
        return stats

    def save(self):
        """Save the current state of the learner."""
        self.recursive_learner._save_casebase()

    def load(self):
        """Load the saved state of the learner."""
        self.recursive_learner._load_casebase()
|
|
|
|
| |
if __name__ == "__main__":
    # Demo: answer one query, store a new case, then show learner stats.
    learner = EthicalLearner()

    demo_query = "Is it ethical to use personal data for targeted advertising?"
    outcome = learner.process_query(demo_query)

    print(f"Query: {demo_query}")
    print(f"Response: {outcome['response']}")
    print(f"Source: {outcome['source']}")
    print(f"Confidence: {outcome['confidence']:.2f}")

    print("\nAdding new case...")
    learner.add_case(
        question="What are the ethics of data privacy in AI?",
        response=("Data privacy in AI involves balancing innovation with individual rights. "
                  "Key considerations include informed consent, data minimization, purpose limitation, "
                  "and ensuring transparency about how data is used."),
        tags=["privacy", "AI", "ethics"],
    )

    print("\nLearner statistics:")
    print(learner.get_stats())
|
|