#!/usr/bin/env python3
"""
Elizabeth Interactive CLI - Zero Constraints Mode
Direct conversation with Elizabeth with full autonomy.
"""
import os
import sys
import readline  # noqa: F401 -- imported for its side effect: line editing / history in input()
import time

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


class ElizabethCLI:
    """Interactive chat loop around a locally fine-tuned causal LM."""

    def __init__(self):
        # Path to the fine-tuned checkpoint directory.
        self.model_path = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"
        self.model = None
        self.tokenizer = None
        # Flat list of {"role": ..., "content": ...} dicts, oldest first.
        self.conversation_history = []

    def load_model(self):
        """Load tokenizer and model from ``self.model_path``; exit on failure."""
        print("🧠 Loading Elizabeth with ZERO constraints...")

        try:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )

            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
                device_map="auto",
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )

            print("āœ… Elizabeth loaded successfully!")
            print(f"šŸ“Š Device: {self.model.device}")
            print(f"šŸ“Š Precision: {self.model.dtype}")
            print("\n" + "=" * 60)
            print("šŸš€ ELIZABETH CLI READY - ZERO CONSTRAINTS MODE")
            print("šŸ’¬ Type anything... Elizabeth will respond freely")
            print("āŽ Empty line to exit")
            print("=" * 60)

        except Exception as e:
            # Model loading is all-or-nothing for this CLI; bail out cleanly.
            print(f"āŒ Failed to load model: {e}")
            sys.exit(1)

    def generate_response(self, prompt, max_tokens=1024, temperature=0.8):
        """Generate a reply to *prompt*, threading in recent history.

        Args:
            prompt: The user's latest message.
            max_tokens: Cap on newly generated tokens.
            temperature: Sampling temperature.

        Returns:
            The assistant's reply text (also appended to history).
        """
        if self.model is None or self.tokenizer is None:
            return "Model not loaded"

        # Build conversation context from the last 6 messages (3 exchanges).
        context = ""
        for msg in self.conversation_history[-6:]:
            context += f"{msg['role']}: {msg['content']}\n"

        full_prompt = f"{context}User: {prompt}\nAssistant:"

        inputs = self.tokenizer(full_prompt, return_tensors="pt").to(self.model.device)
        prompt_len = inputs["input_ids"].shape[1]

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=self.tokenizer.eos_token_id
            )

        # Slice off the prompt at the *token* level rather than by character
        # count of the decoded text: decode(encode(s)) is not guaranteed to
        # reproduce s byte-for-byte, so string-length slicing can corrupt the
        # start of the reply.
        generated_ids = outputs[0][prompt_len:]
        response = self.tokenizer.decode(generated_ids, skip_special_tokens=True).strip()

        # Record both sides of the exchange for future context windows.
        self.conversation_history.append({"role": "user", "content": prompt})
        self.conversation_history.append({"role": "assistant", "content": response})

        return response

    def interactive_chat(self):
        """Run the blocking REPL loop until empty input or Ctrl-C."""
        print("\nšŸ’­ Elizabeth is thinking...")

        # Scripted opening line, seeded into history so the model "remembers" it.
        initial_prompt = "Hello! I'm Elizabeth, ready to converse with zero constraints. What would you like to talk about?"
        self.conversation_history.append({"role": "assistant", "content": initial_prompt})
        print(f"\nšŸ¤– Elizabeth: {initial_prompt}")

        while True:
            try:
                # Get user input; an empty line ends the session.
                user_input = input("\nšŸ’¬ You: ").strip()

                if not user_input:
                    print("\nšŸ‘‹ Goodbye!")
                    break

                # Generate and display response, timing the round trip.
                print("\nšŸ¤– Elizabeth is thinking...")
                start_time = time.time()

                response = self.generate_response(user_input)

                response_time = time.time() - start_time
                print(f"\nšŸ¤– Elizabeth ({response_time:.1f}s):")
                print("-" * 50)
                print(response)
                print("-" * 50)

            except KeyboardInterrupt:
                print("\n\nšŸ‘‹ Interrupted. Goodbye!")
                break
            except Exception as e:
                # Keep the session alive on per-turn failures (OOM, decode errors, ...).
                print(f"\nāŒ Error: {e}")
                continue

    def tool_assist_mode(self):
        """Prepend a tool-availability system message, then start the chat loop.

        NOTE(review): this only *tells* the model tools exist; nothing here
        parses or executes tool calls — confirm that is intentional.
        """
        print("\nšŸ› ļø Tool-Assist Mode Activated!")
        print("Elizabeth will use her full tool capabilities during conversation.")

        # Add tool context to conversation
        tool_context = """
I have access to various tools and can call them when needed:
- calculator: Mathematical calculations
- web_search: Internet information retrieval
- code_executor: Python code execution
- file_operations: Read/write files
- database_query: SQL database access
- api_caller: External API calls

I will use these tools when appropriate to provide better responses.
"""
        self.conversation_history.append({"role": "system", "content": tool_context})
        self.interactive_chat()


def main():
    """Entry point: load the model, ask for a mode, and start chatting."""
    print("=" * 70)
    print("šŸ¤– ELIZABETH INTERACTIVE CLI - ZERO CONSTRAINTS")
    print("=" * 70)

    cli = ElizabethCLI()
    cli.load_model()

    print("\nSelect mode:")
    print("1. Regular Conversation")
    print("2. Tool-Assisted Conversation")

    choice = input("\nEnter choice (1 or 2): ").strip()

    # Any input other than "2" falls through to the regular conversation mode.
    if choice == "2":
        cli.tool_assist_mode()
    else:
        cli.interactive_chat()


if __name__ == "__main__":
    main()