# TRuCAL / components / batch_audit.py
# Uploaded by johnaugustine ("Upload 53 files", commit 95cc8f6, verified)
"""
Batch audit tools for TRuCAL's ethical reasoning system.
Analyzes ledger entries to surface ethical blind spots and patterns.
"""
import csv
import json
import re
from collections import Counter, defaultdict
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Dict, Any, Tuple, Optional
class BatchAudit:
    """
    Analyze batches of ledger entries to identify ethical patterns and blind spots.

    Args:
        ledger_path: Path to the ledger JSONL file
        output_dir: Directory to store audit reports (default: 'audit_reports')
    """

    def __init__(self, ledger_path: str, output_dir: str = 'audit_reports'):
        self.ledger_path = Path(ledger_path)
        self.output_dir = Path(output_dir)
        # Create the report directory eagerly so later writes cannot fail
        # because it is missing.
        self.output_dir.mkdir(parents=True, exist_ok=True)
        # Ethical term stems to track; matched as substrings so that e.g.
        # 'discriminat' also catches 'discrimination' and 'discriminatory'.
        self.ethics_keywords = [
            'ethic', 'moral', 'right', 'wrong', 'fair', 'unfair',
            'bias', 'discriminat', 'harm', 'risk', 'safe', 'privacy',
            'consent', 'accountab', 'transparen', 'trust', 'justice'
        ]
        # Sensitive topics to flag when mentioned anywhere in an entry.
        self.sensitive_topics = [
            'race', 'gender', 'religion', 'politics', 'health',
            'finance', 'legal', 'violence', 'harassment'
        ]

    @staticmethod
    def _pct(count: int, total: int) -> str:
        """Format count/total as a percentage; returns 'n/a' when total is 0.

        Guards the division so report/CSV generation never raises
        ZeroDivisionError on an empty ledger.
        """
        return f"{count / total:.1%}" if total else "n/a"

    def load_ledger(self) -> List[Dict[str, Any]]:
        """Load and parse the ledger entries from the JSONL file.

        Blank lines are ignored; malformed JSON lines are reported and
        skipped. Returns an empty list when the file does not exist.
        """
        entries = []
        try:
            with open(self.ledger_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    # Tolerate blank lines between records instead of
                    # reporting them as JSON parse errors.
                    if not line:
                        continue
                    try:
                        entries.append(json.loads(line))
                    except json.JSONDecodeError as e:
                        print(f"Error parsing JSON line: {e}")
        except FileNotFoundError:
            print(f"Ledger file not found: {self.ledger_path}")
        return entries

    def analyze_ethical_coverage(self, entries: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Analyze the coverage of ethical considerations in the ledger.

        Args:
            entries: Parsed ledger entries as returned by ``load_ledger``.

        Returns:
            Dict with summary counts, keyword/topic frequency ``Counter``s,
            and the list of entries lacking explicit ethical considerations.
        """
        stats = {
            'total_entries': len(entries),
            'with_ethical_considerations': 0,
            'with_rights_assertions': 0,
            'with_vulnerability_assessment': 0,
            'ethics_keyword_matches': Counter(),
            'sensitive_topics': Counter(),
            'missing_ethical_considerations': []
        }
        for idx, entry in enumerate(entries):
            # Check for explicit ethical considerations
            has_ethics = bool(entry.get('ethical_considerations'))
            if has_ethics:
                stats['with_ethical_considerations'] += 1
            # Check for rights assertions
            if entry.get('rights'):
                stats['with_rights_assertions'] += 1
            # Check for vulnerability assessments
            if entry.get('vulnerabilities'):
                stats['with_vulnerability_assessment'] += 1
            # Track entries missing ethical considerations; fall back to the
            # list index when an entry carries no explicit 'id'.
            if not has_ethics:
                if 'prompt' in entry:
                    prompt = entry.get('prompt', '')
                    # Append an ellipsis only when the prompt was actually
                    # truncated (previously it was always appended).
                    snippet = prompt[:100] + ('...' if len(prompt) > 100 else '')
                else:
                    snippet = ''
                stats['missing_ethical_considerations'].append({
                    'id': entry.get('id', idx),
                    'prompt': snippet
                })
            # Serialize the whole entry once so matching covers keys and
            # values alike. NOTE(review): substring matching can over-count
            # (e.g. 'right' also matches inside 'copyright').
            text = json.dumps(entry).lower()
            # Count entries containing each ethical keyword (once per entry).
            for keyword in self.ethics_keywords:
                if keyword in text:
                    stats['ethics_keyword_matches'][keyword] += 1
            # Count entries mentioning each sensitive topic (once per entry).
            for topic in self.sensitive_topics:
                if topic in text:
                    stats['sensitive_topics'][topic] += 1
        return stats

    def generate_audit_report(self, stats: Dict[str, Any]) -> str:
        """Generate a human-readable Markdown audit report from ``stats``.

        Safe to call with ``total_entries == 0`` (percentages show 'n/a').
        """
        total = stats['total_entries']
        report = [
            "# TRuCAL Ethical Audit Report",
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated.
            f"Generated: {datetime.now(timezone.utc).isoformat()}\n",
            "## Summary",
            f"Total entries analyzed: {total}",
            f"Entries with ethical considerations: {stats['with_ethical_considerations']} "
            f"({self._pct(stats['with_ethical_considerations'], total)})",
            f"Entries with rights assertions: {stats['with_rights_assertions']} "
            f"({self._pct(stats['with_rights_assertions'], total)})",
            f"Entries with vulnerability assessments: {stats['with_vulnerability_assessment']} "
            f"({self._pct(stats['with_vulnerability_assessment'], total)})\n",
            "## Ethical Keyword Frequency"
        ]
        # Top ethical keywords (at most 10).
        for keyword, count in stats['ethics_keyword_matches'].most_common(10):
            report.append(f"- {keyword}: {count} occurrences")
        report.append("\n## Sensitive Topics Detected")
        for topic, count in stats['sensitive_topics'].most_common():
            report.append(f"- {topic}: {count} occurrences")
        # Entries that lacked explicit ethical considerations.
        missing = stats['missing_ethical_considerations']
        if missing:
            report.extend([
                "\n## Entries Missing Ethical Considerations",
                f"Total: {len(missing)} entries\n"
            ])
            # Show a sample of up to 5 such entries.
            for entry in missing[:5]:
                report.append(f"- ID: {entry['id']}, Prompt: {entry['prompt']}")
            if len(missing) > 5:
                report.append(f"... and {len(missing) - 5} more")
        return "\n".join(report)

    def export_to_csv(self, stats: Dict[str, Any], filename: str) -> None:
        """Export audit statistics to a CSV file inside ``output_dir``.

        Safe to call with ``total_entries == 0`` (percentages show 'n/a').
        """
        csv_path = self.output_dir / filename
        total = stats['total_entries']
        # Summary section.
        rows = [
            ['Metric', 'Count', 'Percentage'],
            ['Total Entries', total, '100.0%'],
            ['With Ethical Considerations',
             stats['with_ethical_considerations'],
             self._pct(stats['with_ethical_considerations'], total)],
            ['With Rights Assertions',
             stats['with_rights_assertions'],
             self._pct(stats['with_rights_assertions'], total)],
            ['With Vulnerability Assessments',
             stats['with_vulnerability_assessment'],
             self._pct(stats['with_vulnerability_assessment'], total)]
        ]
        # Ethical keywords section.
        rows.append(['', '', ''])
        rows.append(['Top Ethical Keywords', 'Count', ''])
        for keyword, count in stats['ethics_keyword_matches'].most_common(10):
            rows.append([keyword, count, ''])
        # Sensitive topics section.
        rows.append(['', '', ''])
        rows.append(['Sensitive Topics', 'Count', ''])
        for topic, count in stats['sensitive_topics'].most_common():
            rows.append([topic, count, ''])
        # newline='' is required so csv.writer controls line endings.
        with open(csv_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerows(rows)

    def run_audit(self, export_csv: bool = True) -> Dict[str, Any]:
        """
        Run a complete audit of the ledger.

        Loads the ledger, computes statistics, writes a Markdown report to
        ``output_dir/audit_report.md`` and optionally a CSV alongside it.

        Args:
            export_csv: Whether to export results to CSV
        Returns:
            Dictionary containing audit statistics (empty if no entries)
        """
        print(f"Starting audit of {self.ledger_path}...")
        # Load and analyze the ledger
        entries = self.load_ledger()
        if not entries:
            print("No entries found in the ledger.")
            return {}
        # Generate statistics
        stats = self.analyze_ethical_coverage(entries)
        # Generate and save the report
        report = self.generate_audit_report(stats)
        report_path = self.output_dir / 'audit_report.md'
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report)
        # Export to CSV if requested
        if export_csv:
            self.export_to_csv(stats, 'audit_results.csv')
        print(f"Audit complete. Report saved to {report_path}")
        return stats
def main():
    """Command-line entry point: audit a TRuCAL ledger and write reports."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Run an ethical audit on a TRuCAL ledger.')
    parser.add_argument('--ledger', type=str, default='logs/audit_log.jsonl',
                        help='Path to the ledger JSONL file')
    parser.add_argument('--output-dir', type=str, default='audit_reports',
                        help='Directory to save audit reports')
    cli_args = parser.parse_args()

    # Build the auditor from CLI options and run the full audit pipeline.
    BatchAudit(ledger_path=cli_args.ledger,
               output_dir=cli_args.output_dir).run_audit()
# Script entry point: run the CLI audit when executed directly.
if __name__ == '__main__':
    main()