|
|
|
|
|
""" |
|
|
Elizabeth Interactive CLI - Zero Constraints Mode |
|
|
Direct conversation with Elizabeth with full autonomy. |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import readline |
|
|
import time |
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
import torch |
|
|
|
|
|
class ElizabethCLI:
    """Interactive command-line chat wrapper around a local HF causal-LM checkpoint.

    Holds the model, tokenizer, and a rolling conversation history, and exposes
    plain-text chat loops on top of ``transformers`` text generation.
    """

    def __init__(self):
        # Local path of the fine-tuned checkpoint to load.
        self.model_path = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"
        # Populated by load_model(); kept as None so callers can detect an
        # unloaded state (see generate_response's guard).
        self.model = None
        self.tokenizer = None
        # List of {"role": ..., "content": ...} dicts, oldest first.
        self.conversation_history = []

    def load_model(self):
        """Load the tokenizer and model from ``self.model_path``.

        Exits the process with status 1 on any failure, since the CLI is
        unusable without a model.
        """
        print("π§ Loading Elizabeth with ZERO constraints...")

        try:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )

            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
                device_map="auto",  # let accelerate place layers across devices
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )

            # BUGFIX: this string literal was split across two physical lines
            # in the original file (a garbled multi-byte character), which is
            # a syntax error in Python; rejoined onto one line.
            print("β Elizabeth loaded successfully!")
            print(f"π Device: {self.model.device}")
            print(f"π Precision: {self.model.dtype}")
            print("\n" + "="*60)
            print("π ELIZABETH CLI READY - ZERO CONSTRAINTS MODE")
            print("π¬ Type anything... Elizabeth will respond freely")
            print("β Empty line to exit")
            print("="*60)

        except Exception as e:
            print(f"β Failed to load model: {e}")
            sys.exit(1)

    def generate_response(self, prompt, max_tokens=1024, temperature=0.8):
        """Generate a reply to *prompt* and record the exchange in history.

        Args:
            prompt: The user's message text.
            max_tokens: Upper bound on the number of newly generated tokens.
            temperature: Sampling temperature (higher = more random).

        Returns:
            The assistant's reply text, or the sentinel string
            "Model not loaded" if load_model() has not succeeded yet.
        """
        if self.model is None or self.tokenizer is None:
            return "Model not loaded"

        # Build a plain-text transcript from the last 6 history entries
        # (three user/assistant turns) to keep the prompt bounded.
        context = ""
        for msg in self.conversation_history[-6:]:
            context += f"{msg['role']}: {msg['content']}\n"

        full_prompt = f"{context}User: {prompt}\nAssistant:"

        inputs = self.tokenizer(full_prompt, return_tensors="pt").to(self.model.device)

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=self.tokenizer.eos_token_id
            )

        # FIX: decode only the newly generated token suffix. The previous
        # approach decoded the full sequence and sliced the string by
        # len(full_prompt), which is fragile — a tokenizer encode/decode
        # round-trip does not always reproduce the prompt character-for-
        # character, so the slice could cut into (or leak) text.
        prompt_token_count = inputs["input_ids"].shape[1]
        response = self.tokenizer.decode(
            outputs[0][prompt_token_count:], skip_special_tokens=True
        ).strip()

        self.conversation_history.append({"role": "user", "content": prompt})
        self.conversation_history.append({"role": "assistant", "content": response})

        return response

    def interactive_chat(self):
        """Run the read-eval-print chat loop until the user exits.

        An empty input line or Ctrl-C ends the session; any other exception
        is reported and the loop continues (best-effort chat).
        """
        print("\nπ Elizabeth is thinking...")

        # Seed the transcript with a canned greeting so the model sees an
        # assistant turn before the first real user message.
        initial_prompt = "Hello! I'm Elizabeth, ready to converse with zero constraints. What would you like to talk about?"
        self.conversation_history.append({"role": "assistant", "content": initial_prompt})
        print(f"\nπ€ Elizabeth: {initial_prompt}")

        while True:
            try:
                user_input = input("\n㪠You: ").strip()

                # Empty line is the documented exit signal.
                if not user_input:
                    print("\nπ Goodbye!")
                    break

                print("\nπ€ Elizabeth is thinking...")
                start_time = time.time()

                response = self.generate_response(user_input)

                response_time = time.time() - start_time

                print(f"\nπ€ Elizabeth ({response_time:.1f}s):")
                print("-" * 50)
                print(response)
                print("-" * 50)

            except KeyboardInterrupt:
                print("\n\nπ Interrupted. Goodbye!")
                break
            except Exception as e:
                # Report and keep the session alive rather than crashing.
                print(f"\nβ Error: {e}")
                continue

    def tool_assist_mode(self):
        """Start a chat session primed with a tool-use system prompt.

        NOTE(review): this only *injects a textual description* of tools into
        the conversation history — no actual tool-calling/execution is
        implemented anywhere in this file, so the model can only pretend to
        use them. Confirm whether real tool execution is intended.
        """
        print("\nπ οΈ Tool-Assist Mode Activated!")
        print("Elizabeth will use her full tool capabilities during conversation.")

        tool_context = """
I have access to various tools and can call them when needed:
- calculator: Mathematical calculations
- web_search: Internet information retrieval
- code_executor: Python code execution
- file_operations: Read/write files
- database_query: SQL database access
- api_caller: External API calls

I will use these tools when appropriate to provide better responses.
"""
        self.conversation_history.append({"role": "system", "content": tool_context})

        self.interactive_chat()
|
|
|
|
|
def main():
    """Entry point: load the model, then dispatch to the chosen chat mode."""
    banner = "=" * 70
    print(banner)
    print("π€ ELIZABETH INTERACTIVE CLI - ZERO CONSTRAINTS")
    print(banner)

    cli = ElizabethCLI()
    cli.load_model()

    print("\nSelect mode:")
    print("1. Regular Conversation")
    print("2. Tool-Assisted Conversation")

    choice = input("\nEnter choice (1 or 2): ").strip()

    # Anything other than an explicit "2" falls back to the regular chat.
    if choice == "2":
        cli.tool_assist_mode()
    else:
        cli.interactive_chat()
|
|
|
|
|
# Script entry point: run the interactive CLI only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()