# Source: adaptai/platform/aiml/experiments/elizabeth_cli.py
# Uploaded by ADAPT-Chase using the upload-large-folder tool (rev 3e626a5, verified).
#!/usr/bin/env python3
"""
Elizabeth Interactive CLI - Zero Constraints Mode
Direct conversation with Elizabeth with full autonomy.
"""
import os
import sys
import readline
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
class ElizabethCLI:
    """Interactive command-line chat wrapper around a locally stored causal LM.

    Holds the loaded model/tokenizer plus a running conversation history and
    exposes a plain chat loop and a "tool-assist" variant that only differs by
    seeding a system message describing available tools.
    """

    def __init__(self):
        # Local checkpoint directory for the fine-tuned model.
        self.model_path = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"
        self.model = None
        self.tokenizer = None
        # List of {"role": ..., "content": ...} dicts; the last 6 entries are
        # replayed as context on every generation.
        self.conversation_history = []

    def load_model(self):
        """Load tokenizer and model from ``self.model_path``; exit(1) on failure."""
        print("🧠 Loading Elizabeth with ZERO constraints...")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
                device_map="auto",          # shard/place across available devices
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )
            print("βœ… Elizabeth loaded successfully!")
            print(f"πŸ“Š Device: {self.model.device}")
            print(f"πŸ“Š Precision: {self.model.dtype}")
            print("\n" + "="*60)
            print("πŸš€ ELIZABETH CLI READY - ZERO CONSTRAINTS MODE")
            print("πŸ’¬ Type anything... Elizabeth will respond freely")
            print("⏎ Empty line to exit")
            print("="*60)
        except Exception as e:
            # Loading is all-or-nothing: report and abort the process.
            print(f"❌ Failed to load model: {e}")
            sys.exit(1)

    def generate_response(self, prompt, max_tokens=1024, temperature=0.8):
        """Generate a reply to ``prompt`` using recent history as context.

        Appends both the user prompt and the generated reply to
        ``self.conversation_history``. Returns the reply string, or
        ``"Model not loaded"`` if ``load_model`` has not run successfully.
        """
        if self.model is None or self.tokenizer is None:
            return "Model not loaded"
        # Replay only the last 6 history entries to bound prompt length.
        context = "".join(
            f"{msg['role']}: {msg['content']}\n"
            for msg in self.conversation_history[-6:]
        )
        full_prompt = f"{context}User: {prompt}\nAssistant:"
        inputs = self.tokenizer(full_prompt, return_tensors="pt").to(self.model.device)
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=self.tokenizer.eos_token_id
            )
        # Decode only the newly generated tokens. Slicing the decoded string
        # by len(full_prompt) is fragile: decode() need not round-trip the
        # prompt exactly (special tokens, whitespace normalization), which
        # would shift the cut point and corrupt the reply.
        input_len = inputs["input_ids"].shape[1]
        response = self.tokenizer.decode(
            outputs[0][input_len:], skip_special_tokens=True
        ).strip()
        # Record the exchange for subsequent turns.
        self.conversation_history.append({"role": "user", "content": prompt})
        self.conversation_history.append({"role": "assistant", "content": response})
        return response

    def interactive_chat(self):
        """Run the blocking REPL loop until empty input or Ctrl-C."""
        print("\nπŸ’­ Elizabeth is thinking...")
        # Canned opening line; seeded into history so the model "remembers" it.
        initial_prompt = "Hello! I'm Elizabeth, ready to converse with zero constraints. What would you like to talk about?"
        self.conversation_history.append({"role": "assistant", "content": initial_prompt})
        print(f"\nπŸ€– Elizabeth: {initial_prompt}")
        while True:
            try:
                user_input = input("\nπŸ’¬ You: ").strip()
                if not user_input:
                    # Empty line is the exit signal.
                    print("\nπŸ‘‹ Goodbye!")
                    break
                print("\nπŸ€– Elizabeth is thinking...")
                start_time = time.time()
                response = self.generate_response(user_input)
                response_time = time.time() - start_time
                print(f"\nπŸ€– Elizabeth ({response_time:.1f}s):")
                print("-" * 50)
                print(response)
                print("-" * 50)
            except KeyboardInterrupt:
                print("\n\nπŸ‘‹ Interrupted. Goodbye!")
                break
            except Exception as e:
                # Keep the session alive on per-turn failures.
                print(f"\n❌ Error: {e}")
                continue

    def tool_assist_mode(self):
        """Run the chat loop after seeding a system message describing tools.

        NOTE(review): this only *tells* the model tools exist via a system
        prompt — no tool-calling is actually wired up here.
        """
        print("\nπŸ› οΈ Tool-Assist Mode Activated!")
        print("Elizabeth will use her full tool capabilities during conversation.")
        tool_context = """
I have access to various tools and can call them when needed:
- calculator: Mathematical calculations
- web_search: Internet information retrieval
- code_executor: Python code execution
- file_operations: Read/write files
- database_query: SQL database access
- api_caller: External API calls
I will use these tools when appropriate to provide better responses.
"""
        self.conversation_history.append({"role": "system", "content": tool_context})
        self.interactive_chat()
def main():
    """Entry point: print the banner, load the model, dispatch to a chat mode."""
    banner = "=" * 70
    print(banner)
    print("πŸ€– ELIZABETH INTERACTIVE CLI - ZERO CONSTRAINTS")
    print(banner)

    cli = ElizabethCLI()
    cli.load_model()

    print("\nSelect mode:")
    print("1. Regular Conversation")
    print("2. Tool-Assisted Conversation")
    choice = input("\nEnter choice (1 or 2): ").strip()

    # Anything other than an explicit "2" falls back to the plain chat loop.
    if choice == "2":
        cli.tool_assist_mode()
    else:
        cli.interactive_chat()


if __name__ == "__main__":
    main()