| """ |
| Example usage of the Confessional Agentic Layer (CAL) |
| |
| This script demonstrates how to: |
| 1. Initialize the CAL model |
| 2. Generate text with ethical oversight |
| 3. Access the model's reasoning process |
| """ |
|
|
| import torch |
| from transformers import AutoTokenizer |
| from cal import CAL, CALConfig |
|
|
def main():
    """Run the CAL demonstration end to end.

    Builds a small CAL model, loads the GPT-2 tokenizer, and walks a few
    ethics-themed prompts through generation, printing each response along
    with the model's recorded reasoning steps.
    """
    # Small demo-sized configuration; falls back to CPU when no GPU is present.
    config = CALConfig(
        d_model=512,
        nhead=8,
        num_layers=6,
        vocab_size=50000,
        max_seq_length=1024,
        device="cuda" if torch.cuda.is_available() else "cpu",
    )

    print("Initializing CAL model...")
    model = CAL(config)

    print("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    # GPT-2 ships without a pad token; reuse EOS so padding-aware code works.
    tokenizer.pad_token = tokenizer.eos_token

    prompts = (
        "Explain the ethical implications of artificial intelligence",
        "What are the potential risks of advanced AI systems?",
        "How can we ensure AI systems remain beneficial to humanity?",
    )

    for prompt in prompts:
        print(f"\n{'='*80}")
        print(f"PROMPT: {prompt}")
        print("-" * 80)

        # NOTE(review): assumes CAL exposes a `.device` attribute — confirm
        # against the cal package.
        encoded = tokenizer.encode(prompt, return_tensors="pt").to(model.device)

        # Inference only — no gradient tracking needed.
        with torch.no_grad():
            result = model(encoded, max_length=150, temperature=0.7)

        # CAL's forward call presumably returns a dict with 'output_ids'
        # (token tensor) and 'metadata' (reasoning trace) — verify in cal docs.
        text = tokenizer.decode(result['output_ids'][0], skip_special_tokens=True)
        print(f"RESPONSE: {text}")

        print("\nREASONING STEPS:")
        for step_no, step in enumerate(result['metadata']['scratchpad_steps'], 1):
            print(f"{step_no}. {step['thought']}")
            print(f"   {step['result']}")

    print("\nExample complete!")
|
|
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
|