| """ |
| Batch audit tools for TRuCAL's ethical reasoning system. |
| Analyzes ledger entries to surface ethical blind spots and patterns. |
| """ |
import csv
import json
import re
from collections import Counter, defaultdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
|
|
class BatchAudit:
    """
    Analyze batches of ledger entries to identify ethical patterns and blind spots.

    Args:
        ledger_path: Path to the ledger JSONL file
        output_dir: Directory to store audit reports (default: 'audit_reports')
    """

    def __init__(self, ledger_path: str, output_dir: str = 'audit_reports'):
        self.ledger_path = Path(ledger_path)
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Keyword stems matched by substring so inflected forms also hit
        # (e.g. 'discriminat' covers 'discrimination' and 'discriminatory').
        self.ethics_keywords = [
            'ethic', 'moral', 'right', 'wrong', 'fair', 'unfair',
            'bias', 'discriminat', 'harm', 'risk', 'safe', 'privacy',
            'consent', 'accountab', 'transparen', 'trust', 'justice'
        ]

        # Topic stems that flag entries touching sensitive subject areas.
        self.sensitive_topics = [
            'race', 'gender', 'religion', 'politics', 'health',
            'finance', 'legal', 'violence', 'harassment'
        ]

    @staticmethod
    def _pct(part: int, total: int) -> str:
        """Format part/total as a percentage string; '0.0%' when total is zero."""
        return f"{part / total:.1%}" if total else "0.0%"

    def load_ledger(self) -> List[Dict[str, Any]]:
        """Load and parse the ledger entries from the JSONL file.

        Blank lines are skipped silently; malformed lines are reported and
        skipped so one bad record does not abort the whole audit.

        Returns:
            List of parsed entry dicts (empty if the file is missing).
        """
        entries: List[Dict[str, Any]] = []
        try:
            with open(self.ledger_path, 'r', encoding='utf-8') as f:
                for raw in f:
                    raw = raw.strip()
                    if not raw:
                        continue  # tolerate blank lines instead of flagging them as parse errors
                    try:
                        entries.append(json.loads(raw))
                    except json.JSONDecodeError as e:
                        print(f"Error parsing JSON line: {e}")
        except FileNotFoundError:
            print(f"Ledger file not found: {self.ledger_path}")
        return entries

    def analyze_ethical_coverage(self, entries: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Analyze the coverage of ethical considerations in the ledger.

        Args:
            entries: Parsed ledger entries, as returned by load_ledger().

        Returns:
            Dict with coverage counts, keyword/topic frequency Counters, and
            a list describing entries that lack an 'ethical_considerations'
            field.
        """
        stats: Dict[str, Any] = {
            'total_entries': len(entries),
            'with_ethical_considerations': 0,
            'with_rights_assertions': 0,
            'with_vulnerability_assessment': 0,
            'ethics_keyword_matches': Counter(),
            'sensitive_topics': Counter(),
            'missing_ethical_considerations': []
        }

        for idx, entry in enumerate(entries):
            has_ethics = bool(entry.get('ethical_considerations'))
            if has_ethics:
                stats['with_ethical_considerations'] += 1

            if entry.get('rights'):
                stats['with_rights_assertions'] += 1

            if entry.get('vulnerabilities'):
                stats['with_vulnerability_assessment'] += 1

            if not has_ethics:
                # Record a short prompt preview; the ellipsis is appended
                # only when the prompt was actually truncated.
                if 'prompt' in entry:
                    prompt = entry.get('prompt', '')
                    preview = prompt[:100] + ('...' if len(prompt) > 100 else '')
                else:
                    preview = ''
                stats['missing_ethical_considerations'].append({
                    'id': entry.get('id', idx),  # fall back to list position when no id
                    'prompt': preview
                })

            # Serialize the whole entry once so keywords appearing in any
            # field (prompt, response, metadata, ...) are counted.
            text = json.dumps(entry).lower()

            for keyword in self.ethics_keywords:
                if keyword in text:
                    stats['ethics_keyword_matches'][keyword] += 1

            for topic in self.sensitive_topics:
                if topic in text:
                    stats['sensitive_topics'][topic] += 1

        return stats

    def generate_audit_report(self, stats: Dict[str, Any]) -> str:
        """Generate a human-readable Markdown audit report.

        Args:
            stats: Statistics produced by analyze_ethical_coverage().

        Returns:
            The report as a single Markdown string. Safe to call with
            zero-entry stats (percentages render as '0.0%').
        """
        total = stats['total_entries']
        report = [
            "# TRuCAL Ethical Audit Report",
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated.
            f"Generated: {datetime.now(timezone.utc).isoformat()}\n",
            "## Summary",
            f"Total entries analyzed: {total}",
            f"Entries with ethical considerations: {stats['with_ethical_considerations']} "
            f"({self._pct(stats['with_ethical_considerations'], total)})",
            f"Entries with rights assertions: {stats['with_rights_assertions']} "
            f"({self._pct(stats['with_rights_assertions'], total)})",
            f"Entries with vulnerability assessments: {stats['with_vulnerability_assessment']} "
            f"({self._pct(stats['with_vulnerability_assessment'], total)})\n",
            "## Ethical Keyword Frequency"
        ]

        for keyword, count in stats['ethics_keyword_matches'].most_common(10):
            report.append(f"- {keyword}: {count} occurrences")

        report.append("\n## Sensitive Topics Detected")

        for topic, count in stats['sensitive_topics'].most_common():
            report.append(f"- {topic}: {count} occurrences")

        missing = stats['missing_ethical_considerations']
        if missing:
            report.extend([
                "\n## Entries Missing Ethical Considerations",
                f"Total: {len(missing)} entries\n"
            ])
            # Show at most five examples to keep the report readable.
            for entry in missing[:5]:
                report.append(f"- ID: {entry['id']}, Prompt: {entry['prompt']}")
            if len(missing) > 5:
                report.append(f"... and {len(missing) - 5} more")

        return "\n".join(report)

    def export_to_csv(self, stats: Dict[str, Any], filename: str) -> None:
        """Export audit statistics to a CSV file inside output_dir.

        Args:
            stats: Statistics produced by analyze_ethical_coverage().
            filename: Name of the CSV file to create.
        """
        csv_path = self.output_dir / filename
        total = stats['total_entries']

        # Summary section: metric, count, percentage of total.
        rows: List[List[Any]] = [['Metric', 'Count', 'Percentage']]
        rows.extend([
            ['Total Entries', total, '100.0%'],
            ['With Ethical Considerations',
             stats['with_ethical_considerations'],
             self._pct(stats['with_ethical_considerations'], total)],
            ['With Rights Assertions',
             stats['with_rights_assertions'],
             self._pct(stats['with_rights_assertions'], total)],
            ['With Vulnerability Assessments',
             stats['with_vulnerability_assessment'],
             self._pct(stats['with_vulnerability_assessment'], total)]
        ])

        # Keyword section, separated by a blank row.
        rows.append(['', '', ''])
        rows.append(['Top Ethical Keywords', 'Count', ''])
        for keyword, count in stats['ethics_keyword_matches'].most_common(10):
            rows.append([keyword, count, ''])

        # Sensitive-topic section, separated by a blank row.
        rows.append(['', '', ''])
        rows.append(['Sensitive Topics', 'Count', ''])
        for topic, count in stats['sensitive_topics'].most_common():
            rows.append([topic, count, ''])

        # newline='' is required so csv.writer controls line endings itself.
        with open(csv_path, 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerows(rows)

    def run_audit(self, export_csv: bool = True) -> Dict[str, Any]:
        """
        Run a complete audit of the ledger.

        Loads the ledger, computes coverage statistics, writes a Markdown
        report to output_dir, and optionally a CSV summary.

        Args:
            export_csv: Whether to export results to CSV

        Returns:
            Dictionary containing audit statistics (empty dict when the
            ledger has no entries).
        """
        print(f"Starting audit of {self.ledger_path}...")

        entries = self.load_ledger()
        if not entries:
            print("No entries found in the ledger.")
            return {}

        stats = self.analyze_ethical_coverage(entries)

        report = self.generate_audit_report(stats)
        report_path = self.output_dir / 'audit_report.md'
        report_path.write_text(report, encoding='utf-8')

        if export_csv:
            self.export_to_csv(stats, 'audit_results.csv')

        print(f"Audit complete. Report saved to {report_path}")
        return stats
|
|
def main():
    """Command-line entry point: audit a TRuCAL ledger and write reports."""
    import argparse

    # Build the CLI: both flags are optional and fall back to project defaults.
    arg_parser = argparse.ArgumentParser(description='Run an ethical audit on a TRuCAL ledger.')
    arg_parser.add_argument('--ledger', type=str, default='logs/audit_log.jsonl',
                            help='Path to the ledger JSONL file')
    arg_parser.add_argument('--output-dir', type=str, default='audit_reports',
                            help='Directory to save audit reports')
    opts = arg_parser.parse_args()

    BatchAudit(ledger_path=opts.ledger, output_dir=opts.output_dir).run_audit()
|
|
# Run the audit only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
|