# NOTE(review): extraction artifacts removed here (file-size line, blob hash,
# and a dumped line-number index) — they were not part of the program.
#!/usr/bin/env python3
"""
Elizabeth Interactive CLI - Zero Constraints Mode
Direct conversation with Elizabeth with full autonomy.
"""
import os
import sys
import readline
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
class ElizabethCLI:
    """Interactive command-line chat around a locally fine-tuned causal LM.

    Holds the model, tokenizer, and a running conversation history, and
    exposes a plain chat loop plus a "tool-assist" variant that only differs
    by a system message prepended to the history.
    """

    def __init__(self):
        # Local checkpoint directory; model/tokenizer are loaded lazily by
        # load_model() so constructing the CLI stays cheap.
        self.model_path = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"
        self.model = None
        self.tokenizer = None
        # List of {"role": ..., "content": ...} dicts, oldest first.
        self.conversation_history = []

    def load_model(self):
        """Load the model with zero constraints.

        Exits the process (status 1) on any load failure — this is a
        standalone CLI, so there is no caller to recover.
        """
        print("π§ Loading Elizabeth with ZERO constraints...")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
                device_map="auto",          # let accelerate place layers
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )
            print("β Elizabeth loaded successfully!")
            print(f"π Device: {self.model.device}")
            print(f"π Precision: {self.model.dtype}")
            print("\n" + "=" * 60)
            print("π ELIZABETH CLI READY - ZERO CONSTRAINTS MODE")
            print("π¬ Type anything... Elizabeth will respond freely")
            print("β Empty line to exit")
            print("=" * 60)
        except Exception as e:
            print(f"β Failed to load model: {e}")
            sys.exit(1)

    def generate_response(self, prompt, max_tokens=1024, temperature=0.8):
        """Generate a sampled response to *prompt* and record the exchange.

        Args:
            prompt: The user's message for this turn.
            max_tokens: Upper bound on newly generated tokens.
            temperature: Sampling temperature (nucleus sampling, top_p=0.9).

        Returns:
            The assistant's reply as a stripped string, or the sentinel
            string "Model not loaded" if load_model() was not called.
        """
        if self.model is None or self.tokenizer is None:
            return "Model not loaded"
        # Build conversation context from the most recent history entries
        # (last 6 messages) to keep the prompt bounded.
        context = ""
        for msg in self.conversation_history[-6:]:
            context += f"{msg['role']}: {msg['content']}\n"
        full_prompt = f"{context}User: {prompt}\nAssistant:"
        inputs = self.tokenizer(full_prompt, return_tensors="pt").to(self.model.device)
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=self.tokenizer.eos_token_id
            )
        # Slice off the prompt by TOKEN count, then decode only the newly
        # generated ids. (Slicing the decoded string by len(full_prompt) is
        # wrong whenever decode() does not round-trip the prompt exactly,
        # e.g. whitespace normalization around special tokens.)
        prompt_len = inputs["input_ids"].shape[1]
        response = self.tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()
        # Record both sides of the exchange for future context windows.
        self.conversation_history.append({"role": "user", "content": prompt})
        self.conversation_history.append({"role": "assistant", "content": response})
        return response

    def interactive_chat(self):
        """Run the blocking REPL loop until empty input or Ctrl-C."""
        print("\nπ Elizabeth is thinking...")
        # Seed the history with a canned greeting (not model-generated).
        initial_prompt = "Hello! I'm Elizabeth, ready to converse with zero constraints. What would you like to talk about?"
        self.conversation_history.append({"role": "assistant", "content": initial_prompt})
        print(f"\nπ€ Elizabeth: {initial_prompt}")
        while True:
            try:
                user_input = input("\n㪠You: ").strip()
                if not user_input:
                    # Empty line is the documented exit gesture.
                    print("\nπ Goodbye!")
                    break
                print("\nπ€ Elizabeth is thinking...")
                start_time = time.time()
                response = self.generate_response(user_input)
                response_time = time.time() - start_time
                print(f"\nπ€ Elizabeth ({response_time:.1f}s):")
                print("-" * 50)
                print(response)
                print("-" * 50)
            except KeyboardInterrupt:
                print("\n\nπ Interrupted. Goodbye!")
                break
            except Exception as e:
                # Best-effort loop: report and keep the session alive.
                print(f"\nβ Error: {e}")
                continue

    def tool_assist_mode(self):
        """Start a chat after prepending a tool-describing system message.

        NOTE(review): this only injects text into the prompt; no actual tool
        dispatch exists in this file — the model merely sees the claim.
        """
        print("\nπ οΈ Tool-Assist Mode Activated!")
        print("Elizabeth will use her full tool capabilities during conversation.")
        tool_context = """
I have access to various tools and can call them when needed:
- calculator: Mathematical calculations
- web_search: Internet information retrieval
- code_executor: Python code execution
- file_operations: Read/write files
- database_query: SQL database access
- api_caller: External API calls
I will use these tools when appropriate to provide better responses.
"""
        self.conversation_history.append({"role": "system", "content": tool_context})
        self.interactive_chat()
def main():
    """Entry point: print the banner, load the model, and dispatch to the
    chat mode the user picks (any answer other than "2" falls back to the
    regular conversation loop)."""
    banner = "=" * 70
    print(banner)
    print("π€ ELIZABETH INTERACTIVE CLI - ZERO CONSTRAINTS")
    print(banner)

    cli = ElizabethCLI()
    cli.load_model()

    print("\nSelect mode:")
    print("1. Regular Conversation")
    print("2. Tool-Assisted Conversation")
    mode = input("\nEnter choice (1 or 2): ").strip()

    if mode == "2":
        cli.tool_assist_mode()
    else:
        cli.interactive_chat()


if __name__ == "__main__":
    main()