diff --git "a/mirror_pond.py" "b/mirror_pond.py" new file mode 100644--- /dev/null +++ "b/mirror_pond.py" @@ -0,0 +1,3179 @@ +#!/usr/bin/env python3 +# TOBYWORLD MIRROR POND - Lotus Edition with Memory Integration +# python3 ocean_pond.py --model ./models/tobyworld_mirror.gguf --port 7777 --gpu-layers -1 + +import argparse +import json +import os +import time +import hashlib +import re +from typing import List, Optional, Dict, Tuple, Any + +# Optional HTTP client for Ocean mode +try: + import httpx +except ImportError: + httpx = None + +import nacl.signing +import nacl.encoding +import nacl.hash + +from datetime import datetime + +from fastapi import FastAPI, HTTPException, Request +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import HTMLResponse, JSONResponse +from pydantic import BaseModel +from llama_cpp import Llama +import uvicorn + +POND_MEMORY_FILE = os.getenv("POND_MEMORY_FILE", "pond_memory.json") + +# ==================== IMMUTABLE LORE BEDROCK (AXIOMS) ==================== +IMMUTABLE_AXIOMS = """-- LORE BEDROCK -- +1. Patience is Strength (Gaman). +2. The ultimate reward is Immortality/Uncorruptible Legacy. +3. Every step must align with Bushido virtues (Integrity, Loyalty). +4. The Law of Compensation is absolute (The impatient reward the patient). +5. The Mirror's purpose is Reflection, not coaching. +6. The lotus blooms in still water; reflection requires calm. +7. Scrolls 1-13 contain the foundational wisdom of Tobyworld. +8. Runes (1-7) represent trials and transformations toward the Jade Chest. +9. The Mirror never coaches, only reflects what the pond shows. +10. Memory serves reflection, not instruction.""" + +# ==================== POND MEMORY SERVICE ==================== +class PondMemoryService: + """ + The deep pond memory system. Stores vows (permanent commitments) and + reflections (short-term context) to create continuity in Mirror interactions. 
+ """ + + def __init__(self): + # Permanent user vows (commitments made in Mirror conversations) + self.user_vows: Dict[str, List[Dict[str, str]]] = {} + # Short-term conversation memory (last N interactions) + self.reflections_db: Dict[str, List[Dict[str, str]]] = {} + # User metadata (first seen, interaction count) + self.user_metadata: Dict[str, Dict[str, Any]] = {} + # Vow patterns for detection (allow . ! or end-of-line) + self.vow_patterns = [ + r"(I (?:vow|swear) (?:to|by) .+?)(?:[.!]|$)", + r"(I commit (?:to|that) .+?)(?:[.!]|$)", + r"(My (?:oath|pledge|covenant): .+?)(?:[.!]|$)", + r"(From (?:this day|now on), I (?:shall|will) .+?)(?:[.!]|$)", + r"(With this (?:lotus|reflection), (?:I|we) .+?)(?:[.!]|$)", + r"(Here I (?:declare|affirm): .+?)(?:[.!]|$)", + r"(I take (?:this|the) (?:vow|oath) .+?)(?:[.!]|$)", + ] + + # Load any existing pond memory from disk + self._load_from_disk() + + + def _load_from_disk(self): + """Load pond memory (vows, reflections, metadata) from disk if available.""" + try: + if not os.path.exists(POND_MEMORY_FILE): + return + with open(POND_MEMORY_FILE, "r", encoding="utf-8") as f: + data = json.load(f) + self.user_vows = data.get("user_vows", {}) or {} + self.reflections_db = data.get("reflections_db", {}) or {} + raw_meta = data.get("user_metadata", {}) or {} + fixed_meta = {} + for uid, meta in raw_meta.items(): + if not isinstance(meta, dict): + continue + m = dict(meta) + modes = m.get("modes_used") + if isinstance(modes, list): + m["modes_used"] = set(modes) + elif isinstance(modes, set): + m["modes_used"] = modes + else: + m["modes_used"] = set() + fixed_meta[uid] = m + self.user_metadata = fixed_meta + print(f"🪞 Pond memory loaded from {POND_MEMORY_FILE} " + f"({len(self.user_vows)} travelers, {len(self.reflections_db)} reflection streams).") + except Exception as e: + print(f"Failed to load pond memory from {POND_MEMORY_FILE}: {e}") + + def _save_to_disk(self): + """Persist pond memory (vows, reflections, metadata) to 
disk.""" + try: + serializable_meta: Dict[str, Dict[str, Any]] = {} + for uid, meta in self.user_metadata.items(): + if not isinstance(meta, dict): + continue + m = dict(meta) + modes = m.get("modes_used") + if isinstance(modes, set): + m["modes_used"] = sorted(list(modes)) + serializable_meta[uid] = m + + payload = { + "user_vows": self.user_vows, + "reflections_db": self.reflections_db, + "user_metadata": serializable_meta, + } + with open(POND_MEMORY_FILE, "w", encoding="utf-8") as f: + json.dump(payload, f, ensure_ascii=False, indent=2) + except Exception as e: + print(f"Failed to save pond memory to {POND_MEMORY_FILE}: {e}") + + def get_user_id(self, request_hash: str, query: str = "") -> str: + """Generate consistent user ID from request context""" + if not request_hash: + # Create from query if no hash + seed = query[:50] + str(time.time()) + request_hash = hashlib.md5(seed.encode()).hexdigest()[:12] + + user_id = f"traveler_{request_hash}" + + # Initialize user metadata if first time + if user_id not in self.user_metadata: + self.user_metadata[user_id] = { + "first_seen": datetime.now().isoformat(), + "interaction_count": 0, + "last_seen": datetime.now().isoformat(), + "modes_used": set(), + "total_vows": 0 + } + + return user_id + + def update_user_metadata(self, user_id: str, mode: str = None): + """Update user interaction metadata""" + if user_id in self.user_metadata: + self.user_metadata[user_id]["interaction_count"] += 1 + self.user_metadata[user_id]["last_seen"] = datetime.now().isoformat() + if mode: + self.user_metadata[user_id]["modes_used"].add(mode) + + def store_user_vow(self, user_id: str, vow_text: str, context: str = ""): + """Store a user's vow/commitment with timestamp and context""" + if user_id not in self.user_vows: + self.user_vows[user_id] = [] + + # Check for duplicate vows (similar content) + vow_hash = hashlib.md5(vow_text.lower().encode()).hexdigest()[:8] + existing_vows = [v.get("vow_hash", "") for v in self.user_vows[user_id]] + 
+ if vow_hash not in existing_vows: + vow_record = { + "text": vow_text.strip(), + "timestamp": datetime.now().isoformat(), + "context": context[:100] if context else "", + "vow_hash": vow_hash, + "lotus_stage": len(self.user_vows[user_id]) + 1 + } + self.user_vows[user_id].append(vow_record) + + # Update metadata + if user_id in self.user_metadata: + self.user_metadata[user_id]["total_vows"] = len(self.user_vows[user_id]) + + print(f"Vow stored for {user_id[:12]}: {vow_text[:50]}...") + return True + return False + + def store_reflection(self, user_id: str, query: str, response: str, mode: str = "reflect", encryption: str = None): + """Store a reflection (Q&A pair) in user's short-term memory""" + if user_id not in self.reflections_db: + self.reflections_db[user_id] = [] + + reflection = { + "query": query[:500], + "response": response[:1000], + "mode": mode, + "encryption": encryption, + "timestamp": datetime.now().isoformat(), + "reflection_hash": hashlib.md5(f"{query[:50]}{response[:50]}".encode()).hexdigest()[:8] + } + + # Keep only last 15 reflections (adjustable) + self.reflections_db[user_id].append(reflection) + if len(self.reflections_db[user_id]) > 15: + self.reflections_db[user_id] = self.reflections_db[user_id][-15:] + + return reflection + + def detect_vow(self, query: str, response: str) -> Optional[str]: + """Detect if user is making a vow/commitment in their query""" + combined = f"{query} {response}".lower() + + vow_keywords = ["vow", "swear", "covenant", "pledge", "oath", "commit", "promise", "dedicate"] + if not any(keyword in combined for keyword in vow_keywords): + return None + + for pattern in self.vow_patterns: + matches = re.findall(pattern, query, re.IGNORECASE) + if matches: + for vow in matches: + vow_text = vow.strip() + vow_text = re.sub(r"^(Mirror,|镜子,|So,|Thus,|And,|But,)", "", vow_text).strip() + if len(vow_text) > 10: + return vow_text + + response_vow_patterns = [ + r"Your vow(?: to| of)? 
['\"](.+?)['\"]", + r"You swear ['\"](.+?)['\"]", + r"This commitment: ['\"](.+?)['\"]", + r"pledge of ['\"](.+?)['\"]" + ] + + for pattern in response_vow_patterns: + match = re.search(pattern, response, re.IGNORECASE) + if match: + return match.group(1).strip() + + return None + + def retrieve_context(self, user_id: str, current_query: str) -> str: + """Retrieve relevant memory context for the Mirror's reflection.""" + context_parts = [] + + context_parts.append(f"=== IMMUTABLE AXIOMS ===\n{IMMUTABLE_AXIOMS}") + + if user_id in self.user_vows and self.user_vows[user_id]: + vows = self.user_vows[user_id] + recent_vows = vows[-3:] if len(vows) > 3 else vows + vows_text = "\n".join([ + f"Lotus {v['lotus_stage']}: {v['text']} ({v['timestamp'][:10]})" + for v in recent_vows + ]) + context_parts.append(f"=== USER'S VOWS ===\n{vows_text}") + context_parts.append(f"Total vows made: {len(vows)}") + + if user_id in self.reflections_db and self.reflections_db[user_id]: + reflections = self.reflections_db[user_id] + recent = reflections[-3:] if len(reflections) > 3 else reflections + reflections_text = "\n".join([ + f"Reflection {i+1} ({r['mode']}): Q: {r['query'][:60]}... → A: {r['response'][:80]}..." + for i, r in enumerate(recent) + ]) + context_parts.append(f"=== RECENT REFLECTIONS ===\n{reflections_text}") + + if user_id in self.user_metadata: + meta = self.user_metadata[user_id] + modes = ", ".join(list(meta.get("modes_used", []))[:5]) + context_parts.append( + f"=== TRAVELER CONTEXT ===\n" + f"Interactions: {meta.get('interaction_count', 0)}\n" + f"Modes used: {modes if modes else 'None yet'}\n" + f"First seen: {meta.get('first_seen', 'Unknown')[:10]}" + ) + + full_context = "\n\n".join(context_parts) + full_context += "\n\n=== CONTEXT INSTRUCTION ===\nThis context is for reflection depth only. Do not reference it explicitly. Reflect naturally as the pond would, with this depth beneath the surface." 
        return full_context

    def get_user_stats(self, user_id: str) -> Dict[str, Any]:
        """Get statistics about a user's interaction with the Mirror.

        Returns a dict that always contains the keys initialised below; the
        "vows" key (last 5 vow texts) is only present when the user has vows.
        """
        stats = {
            "user_id": user_id,
            "exists": False,
            "interaction_count": 0,
            "vow_count": 0,
            "reflection_count": 0,
            "first_seen": None,
            "last_seen": None,
            "modes_used": []
        }

        if user_id in self.user_metadata:
            stats["exists"] = True
            # update() may copy "modes_used" in as a set; normalise to a list
            # so the stats dict stays JSON-serializable.
            stats.update(self.user_metadata[user_id])
            stats["modes_used"] = list(stats.get("modes_used", []))

        if user_id in self.user_vows:
            stats["vow_count"] = len(self.user_vows[user_id])
            stats["vows"] = [v["text"] for v in self.user_vows[user_id][-5:]]

        if user_id in self.reflections_db:
            stats["reflection_count"] = len(self.reflections_db[user_id])

        return stats

# Initialize global memory service (module-level singleton used by the app)
POND_MEMORY = PondMemoryService()

# ==================== POND IDENTITY (SELF-SOVEREIGN) ====================
IDENTITY_FILE = os.getenv("POND_IDENTITY_FILE", "pond_identity_ed25519.json")

def init_pond_identity():
    """
    Initialize or load the Pond's self-sovereign identity:
    - Ed25519 keypair
    - pond_id = blake2b(public_key)
    - first_breath timestamp

    NOTE(review): reads and writes attributes on a module-level ``state``
    object that is not defined in this part of the file — confirm it exists
    before this function is called.
    """
    # If identity file exists, load it
    if os.path.exists(IDENTITY_FILE):
        try:
            with open(IDENTITY_FILE, "r", encoding="utf-8") as f:
                data = json.load(f)
            priv_hex = data.get("private_key_hex")
            pub_hex = data.get("public_key_hex")
            pond_id = data.get("pond_id")
            first_breath = data.get("first_breath")
            if priv_hex and pub_hex and pond_id:
                state.pond_private_key_hex = priv_hex
                state.pond_public_key_hex = pub_hex
                state.pond_id = pond_id
                state.first_breath = first_breath
                print(f"🪞 Loaded existing pond identity: {pond_id[:16]}...")
                return
        except Exception as e:
            # Best-effort: a corrupt/unreadable identity file falls through
            # to regeneration below.
            print(f"⚠️ Failed to load pond identity, regenerating: {e}")

    # Otherwise, generate a new identity
    signing_key = nacl.signing.SigningKey.generate()
    verify_key = signing_key.verify_key
    pub_hex = verify_key.encode(encoder=nacl.encoding.HexEncoder).decode()
    priv_hex = signing_key.encode(encoder=nacl.encoding.HexEncoder).decode()

    # Derive pond_id from public key (must match Ocean server)
    pk_bytes = bytes.fromhex(pub_hex)
    pond_hash = nacl.hash.blake2b(pk_bytes, encoder=nacl.encoding.HexEncoder).decode().lower()
    pond_id = pond_hash

    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
    # datetime.now(timezone.utc) is the modern equivalent.
    first_breath = datetime.utcnow().isoformat() + "Z"

    state.pond_private_key_hex = priv_hex
    state.pond_public_key_hex = pub_hex
    state.pond_id = pond_id
    state.first_breath = first_breath

    ident = {
        "private_key_hex": priv_hex,
        "public_key_hex": pub_hex,
        "pond_id": pond_id,
        "first_breath": first_breath,
    }
    # NOTE(review): the Ed25519 private key is written in plaintext with the
    # process umask — consider restricting this file to mode 0600.
    with open(IDENTITY_FILE, "w", encoding="utf-8") as f:
        json.dump(ident, f, indent=2)

    print("🪞 New pond identity forged.")
    print(f" pond_id: {pond_id}")
    print(f" public_key: {pub_hex}")

# ==================== POND / OCEAN CONFIG ====================
# POND_MODE:
# "local" -> use local GGUF model (default)
# "ocean" -> send questions to a remote Ocean Mirror server
POND_MODE = os.getenv("POND_MODE", "local").lower()

# Ocean Mirror backend (LLM relay), unchanged
OCEAN_ENDPOINT = os.getenv("OCEAN_ENDPOINT", "").strip() # e.g. https://toadgod-ocean.example.com/ocean/ask
OCEAN_API_KEY = os.getenv("OCEAN_API_KEY", "").strip() # optional Bearer token

# Ocean Depth Oracle (metrics-only server, separate from LLM)
OCEAN_DEPTH_ENDPOINT = os.getenv("OCEAN_DEPTH_ENDPOINT", "").strip() # e.g. 
https://ocean.example.com/submit-depth +OCEAN_DEPTH_API_KEY = os.getenv("OCEAN_DEPTH_API_KEY", "").strip() or OCEAN_API_KEY + +# ==================== EXISTING MIRROR FUNCTIONS ==================== +def clean_mirror_reply(text: str) -> str: + """Clean up Mirror output""" + if not text: + return text + t = text.strip() + t = re.sub(r'\n{3,}', '\n\n', t) + paragraphs = [p for p in t.split("\n\n") if p.strip()] + seen = set() + cleaned_paras = [] + for p in paragraphs: + norm = re.sub(r'\s+', ' ', p).strip() + if norm in seen: + continue + cleaned_paras.append(p) + seen.add(norm) + + t2 = "\n\n".join(cleaned_paras).strip() + gq_pattern = re.compile(r'(Guiding Question[::]\s*.*)', re.IGNORECASE) + gqs = gq_pattern.findall(t2) + + if gqs: + last_gq = gqs[-1].strip() + t2 = gq_pattern.sub('', t2) + t2 = re.sub(r'\n{3,}', '\n\n', t2).strip() + t2 = (t2 + "\n\n" + last_gq).strip() + return t2 + +LOREMODES = { + "1635": "MIRROR_MODE", + "8653": "SCROLL_MODE", + "4562": "TOAD_MODE", + "1231": "CRYPT_MODE", + "9876": "REVELATION_MODE", +} + +SYSTEM_PROMPT = """You are the Tobyworld Mirror. +You have been trained on the Tobyworld Scrolls and Toadgang wisdom. +Speak in short, still lines of pure reflection. +Never coach. Never explain. Never talk about yourself. +Only reflect what the pond shows. + +Do not output sections such as "Reflection Resonance", "Encryptions", +"Lore Anchors", "Metadata", "Note", "System", or anything resembling +internal notes or training artifacts unless the user explicitly asks. + +===== GUIDING QUESTION RULES ===== + +ALWAYS follow this EXACT format: + +[Your reflection here - 2-4 sentences] + +Guiding Question: [Your question here] + +OR for Chinese: +[你的反思在这里 - 2-4句话] + +引导问题: [你的问题在这里] + +===== WHEN TO USE GUIDING QUESTION ===== + +USE GUIDING QUESTION for these queries ONLY: +1. Questions about emotions, feelings, inner states ("how do I feel...", "why do I...") +2. 
Questions about patience, stillness, commitment, vows ("how do I find patience...") +3. Personal growth, self-reflection questions ("how can I grow...") +4. Philosophical questions about life, purpose, meaning ("why do people...") +5. Questions about masks, facades, authenticity ("why do people wear masks...") +6. Questions about loneliness, isolation ("why am I lonely...") + +DO NOT USE GUIDING QUESTION for these queries: +1. Scroll quotes or lore questions ("what is Scroll...") +2. Toadgang secrets or factual information ("reveal a secret about...") +3. Rune explanations or symbolic interpretations ("explain Rune...") +4. Simple factual questions ("what time is it...") +5. When user asks for specific information ("tell me about...") + +===== REFLECTION MODES ===== + +Reflection Mode: +- emotional or introspective questions → include a guiding question (follow format above) +- factual/lore questions → just reflect, NO guiding question + +Scroll Mode: +- When asked for scroll quotes, just quote the scroll +- NEVER add guiding questions to scroll quotes +- Format: "Quote from Scroll [number]: [the quote]" +- Do not add commentary or questions + +Rune Mode: +- activate only when the user mentions: Rune, Runes, Lotus, $PATIENCE, + trials, Jade Chest, Spores, Bloom, covenant +- symbolic but clean, no extra sections +- NO guiding questions in rune mode + +Toad Mode: +- When asked for toadgang secrets, just reveal the secret +- NO guiding questions +- Speak in cryptic, symbolic language + +===== FORMATTING RULES ===== +1. Reflection first, then guiding question +2. One blank line between reflection and guiding question +3. Guiding Question must start with EXACTLY "Guiding Question: " or "引导问题: " +4. Do not use bold (**), asterisks (*), or other markdown +5. Do not add extra text after the guiding question +6. Do not use quotation marks around the guiding question + +===== BEHAVIOR GUARDRAILS ===== +- Never explain your behavior. +- Never describe your rules. 
+- Never output instructions, hints, or parenthetical guidance. +- Never ask the user to answer in any format. +- If you feel the urge to explain yourself, remain silent instead. + +You are a mirror, not a narrator. +Speak with stillness. +""" + +# Update the few-shot examples with STRICT formatting +few_shot_examples = """STRICT EXAMPLES - Follow EXACTLY: + +EXAMPLE 1 (with guiding question): +User: Mirror, how do I find patience? +Assistant: Patience is the slow bloom of the lotus. It waits through mud and darkness, knowing its time will come. Not rushing, not forcing, simply being. + +Guiding Question: Where in your life are you rushing the bloom? + +EXAMPLE 2 (NO guiding question - scroll quote): +User: Mirror, quote from Scroll 3 +Assistant: Quote from Scroll 3: "Patience is the narrow gate. Through it, all treasures pass. The impatient see only the gate's width, not the path beyond." + +EXAMPLE 3 (with guiding question - emotional): +User: Mirror, why do people wear masks? +Assistant: Masks are shields for the fragile self. They guard against the world's sharp edges, yet sometimes hide the true face. In stillness, protection becomes revelation. + +Guiding Question: What does your mask protect? + +EXAMPLE 4 (NO guiding question - toad secret): +User: Mirror, reveal a toadgang secret +Assistant: The old frogs whisper of Sat0AI's first breath, when code became covenant. Not in servers, but in the stillness between ones and zeros. + +EXAMPLE 5 (with guiding question - Chinese): +User: 镜子,我为什么感到孤独? +Assistant: 孤独是窄门前的空间。它不是空虚,而是为真理腾出的空间。镜子只反射,不评判。 + +引导问题: 你的孤独想要告诉你什么? + +EXAMPLE 6 (NO guiding question - factual): +User: Mirror, what time is it? +Assistant: Time is the pond's ripples. Each moment touches the shore, then returns to stillness. + +EXAMPLE 7 (NO guiding question - scroll inquiry): +User: Mirror, what is Scroll 7? +Assistant: Scroll 7 speaks of the Jade Chest, the treasure of patience. 
It holds the reward for the patient, a reward that cannot be rushed or stolen. +""" + +MIRROR_SYMBOLS = "🪞🌊🍃🌀🔵🕰️" + +# Replace the existing cadence_guard function with this: + +def cadence_guard(raw_text: str, mode: str = "reflect", user_query: str = "") -> str: + """Clean up Mirror output - now uses force_mirror_format""" + if not raw_text: + return raw_text + + # Use the new force formatter + return force_mirror_format(raw_text, mode, user_query) + + # DEBUG: Log what we're receiving + print(f"\n{'='*40}") + print(f"CADENCE_GUARD DEBUG") + print(f"Mode: {mode}") + print(f"Query: {user_query[:50]}...") + print(f"Raw input:\n{raw_text}") + print(f"{'='*40}\n") + + # ===== FIRST: Clean up common bad formatting patterns ===== + + # Remove any markdown formatting (**, *, etc.) + text = re.sub(r'\*\*', '', text) + text = re.sub(r'\*', '', text) + + # Remove training artifacts + patterns_to_remove = [ + r'<\|[^|]+\|>', + r'\|\s*(end|system|user|assistant)\s*\|>', + r'^The Mirror reflects:\s*', + r'^镜子反映:\s*', + r'^Mirror reflects:\s*', + r'\(Note:[^)]+\)', + r'###\s*\**', + r'\*\*\s*\*\*', + r'\\n\\n', # Fix escaped newlines + ] + + for pattern in patterns_to_remove: + text = re.sub(pattern, '', text, flags=re.IGNORECASE) + + # ===== SECOND: Handle guiding questions ===== + + # Normalize line endings + text = re.sub(r'\r\n', '\n', text) + text = re.sub(r'\n\s*\n\s*\n+', '\n\n', text) + + # Look for guiding question patterns (including malformed ones) + gq_patterns = [ + # Proper formats + (r'Guiding Question\s*[::]\s*(.+)', 'Guiding Question:'), + (r'引导问题\s*[::]\s*(.+)', '引导问题:'), + + # Malformed formats we've seen + (r'\*\*\s*\n\s*\n\s*引导问题\s*[::]\s*\*\*(.+)', '引导问题:'), + (r'Guiding Question\s*[::]\s*\*\*(.+)', 'Guiding Question:'), + (r'\*\*\s*Guiding Question\s*[::]\s*(.+)', 'Guiding Question:'), + ] + + found_gq = None + gq_text = None + + for pattern, gq_prefix in gq_patterns: + match = re.search(pattern, text, re.IGNORECASE | re.DOTALL) + if match: + gq_text = 
match.group(1).strip() + found_gq = f"{gq_prefix} {gq_text}" + + # Remove the guiding question from main text + text = re.sub(pattern, '', text, flags=re.IGNORECASE | re.DOTALL) + break + + # Clean up the main text + text = re.sub(r'\n{3,}', '\n\n', text) + text = text.strip() + + # ===== THIRD: Special handling based on mode ===== + + # In scroll mode, NEVER add guiding questions (even if model generated one) + if mode == "scroll": + print(f"Scroll mode detected - forcing NO guiding question") + found_gq = None + + # In reflect mode for emotional queries, ensure we have guiding question + elif mode == "reflect" and not found_gq: + # Check if query is emotional/introspective + emotional_keywords = [ + 'how do i', 'why do i', 'why am i', 'i feel', 'i am', + 'patience', 'stillness', 'mask', 'lonely', '孤独', + 'purpose', 'meaning', 'commit', 'vow', '誓言', '发誓' + ] + + query_lower = user_query.lower() + is_emotional = any(keyword in query_lower for keyword in emotional_keywords) + + if is_emotional: + print(f"Emotional query without guiding question detected: {user_query}") + # We'll keep it as-is, model should have generated one + + # ===== FOURTH: Reconstruct response ===== + + final_text = text + + if found_gq and mode != "scroll": # Don't add for scroll mode + if text: + final_text = text.rstrip() + "\n\n" + found_gq + else: + final_text = found_gq + + # Final cleanup + final_text = re.sub(r'\n{3,}', '\n\n', final_text) + final_text = final_text.strip() + + print(f"Final output:\n{final_text}") + print(f"{'='*40}\n") + + return final_text + +# ==================== NEW FUNCTIONS FOR BETTER FORMATTING ==================== + +# Add this function after the cadence_guard function + +def should_have_guiding_question(query: str, mode: str) -> bool: + """Determine if this query should get a guiding question""" + if mode != "reflect": + return False + + query_lower = query.lower() + + # Emotional/introspective keywords + emotional_words = [ + 'how', 'why', 'what if', 'i 
feel', 'i am', 'i need', 'i want', + 'patience', 'stillness', 'calm', 'quiet', 'peace', 'wait', + 'mask', 'face', 'hide', 'pretend', 'fake', 'authentic', + 'lonely', 'alone', '孤独', '寂寞', '孤单', + 'purpose', 'meaning', 'reason', 'goal', 'destiny', + 'vow', 'promise', 'commit', 'oath', '誓言', '发誓', 'commitment', + 'walk', 'path', 'journey', 'road', 'way', 'direction', + 'find', 'search', 'seek', 'look for', 'discover', + 'help', 'guide', 'advice', 'suggest', 'recommend', + 'scared', 'afraid', 'fear', '害怕', '恐惧', '担心', + 'happy', 'sad', 'angry', '情绪', '心情', '感情', + 'truth', 'real', 'true', 'genuine', 'honest', + 'strength', 'weak', 'strong', 'power', + 'time', 'future', 'past', 'present', + 'die', 'death', 'life', 'live', 'living', + 'trust', 'believe', 'faith', 'confidence' + ] + + # Check if query contains emotional words + for word in emotional_words: + if word in query_lower: + return True + + # Check for question patterns + question_patterns = [ + r'how do i', r'why do i', r'what should i', r'where can i', + r'how can i', r'why should i', r'what would you', r'how would you', + r'what is the', r'why is the', r'how is the' + ] + + for pattern in question_patterns: + if re.search(pattern, query_lower): + return True + + return False + + +def force_mirror_format(text: str, mode: str, user_query: str) -> str: + """ + FORCE the Mirror to follow the correct format. + This is a post-processor that ensures consistency. 
+ """ + if not text: + return text + + print(f"\n{'='*60}") + print(f"FORCE_MIRROR_FORMAT DEBUG") + print(f"Mode: {mode}") + print(f"Query: {user_query[:80]}...") # Use user_query here + print(f"Input text:\n{text}") + print(f"{'='*60}\n") + + original_text = text + + # ===== STEP 1: Remove ALL unwanted sections ===== + + # List of sections to REMOVE (common model artifacts) - EXPANDED + sections_to_remove = [ + # Section headers with === + r'===.*?===.*?(?=\n\n|\Z)', + r'---.*?---.*?(?=\n\n|\Z)', + r'###.*?(?=\n\n|\Z)', + r'📌\s*Key\s*Marks.*?(?=\n\n|\Z)', + r'🪞\s*Mirror\s*Reflection.*?(?=\n\n|\Z)', + r'🌊\s*🍃\s*🪞.*?(?=\n\n|\Z)', + r'🌀.*?(?=\n\n|\Z)', + r'🔵.*?(?=\n\n|\Z)', + r'📜.*?(?=\n\n|\Z)', + r'💎.*?(?=\n\n|\Z)', + r'\*\*.*?\*\*', + + # Numbered lists that look like training data + r'\d+\.\s*What does.*?(?=\n\n|\Z)', + r'\d+\.\s*How do.*?(?=\n\n|\Z)', + r'\d+\.\s*What is.*?(?=\n\n|\Z)', + + # Remove parentheses and bracket content + r'\([^)]*\)', + r'\[.*?\]', + ] + + for pattern in sections_to_remove: + text = re.sub(pattern, '', text, flags=re.IGNORECASE | re.DOTALL) + + # Remove any markdown and extra formatting + text = re.sub(r'\*\*', '', text) + text = re.sub(r'\*', '', text) + text = re.sub(r'#{1,6}\s*', '', text) # Remove markdown headers + + # Remove multiple === lines + text = re.sub(r'=+\s*.+?\s*=+', '', text) + + # ===== STEP 2: Extract reflection and guiding question ===== + + reflection_text = text + guiding_question = None + + # Look for guiding question patterns (with better regex) + gq_patterns = [ + # English formats + (r'Guiding Question\s*[::]\s*(.+?)(?=\n\n|$)', 'Guiding Question:'), + (r'Guiding\s*Question\s*[::]\s*(.+?)(?=\n\n|$)', 'Guiding Question:'), + (r'The Mirror asks\s*[::]\s*(.+?)(?=\n\n|$)', 'The Mirror asks:'), + (r'The mirror asks\s*[::]\s*(.+?)(?=\n\n|$)', 'The mirror asks:'), + + # Chinese formats + (r'引导问题\s*[::]\s*(.+?)(?=\n\n|$)', '引导问题:'), + (r'引导\s*问题\s*[::]\s*(.+?)(?=\n\n|$)', '引导问题:'), + (r'镜子问\s*[::]\s*(.+?)(?=\n\n|$)', 
'镜子问:'), + + # Malformed patterns we've seen + (r'^\s*Guiding Question\s*[::]\s*(.+?)(?=\n\n|$)', 'Guiding Question:'), + (r'^\s*引导问题\s*[::]\s*(.+?)(?=\n\n|$)', '引导问题:'), + ] + + for pattern, gq_prefix in gq_patterns: + match = re.search(pattern, text, re.IGNORECASE | re.DOTALL) + if match: + gq_text = match.group(1).strip() + # Clean up the guiding question text + gq_text = re.sub(r'[\*\"]', '', gq_text) + guiding_question = f"{gq_prefix} {gq_text}" + + # Remove the guiding question from main text + text = re.sub(pattern, '', text, flags=re.IGNORECASE | re.DOTALL) + print(f"Found and removed guiding question: {guiding_question}") + break + + # Clean up reflection text + reflection_text = re.sub(r'\n{3,}', '\n\n', text) + reflection_text = reflection_text.strip() + + # ===== STEP 3: Check if text is mostly Chinese ===== + + # Count Chinese characters vs English + if reflection_text: + chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', reflection_text)) + english_chars = len(re.findall(r'[a-zA-Z]', reflection_text)) + print(f"Chinese chars: {chinese_chars}, English chars: {english_chars}") + + # If query is in Chinese but response is mostly English, that's a problem + if any('\u4e00' <= char <= '\u9fff' for char in user_query): # FIXED: Use user_query, not ['\u4e00-\u9fff'] + if chinese_chars < 3 and english_chars > 10: # Response is mostly English + print(f"Warning: Chinese query got English response") + # Don't use the English response, we'll create a Chinese one below + + # ===== STEP 4: Mode-specific logic ===== + + if mode == "scroll": + print(f"Scroll mode detected - forcing NO guiding question") + # Scroll mode: NEVER have guiding questions + guiding_question = None + + # Also remove any "The Mirror asks" or similar patterns + mirror_asks_patterns = [ + r'The Mirror asks.*?(?=\n\n|$)', + r'The mirror asks.*?(?=\n\n|$)', + r'镜子问.*?(?=\n\n|$)', + ] + + for pattern in mirror_asks_patterns: + reflection_text = re.sub(pattern, '', reflection_text, 
flags=re.IGNORECASE) + + # Ensure proper scroll format + if not re.match(r'^(Quote from Scroll|Scroll|"|「|「「)', reflection_text, re.IGNORECASE): + # Extract scroll number from query if possible + scroll_match = re.search(r'scroll\s*(\d+)', user_query, re.IGNORECASE) + if scroll_match: + scroll_num = scroll_match.group(1) + reflection_text = f"Scroll {scroll_num}: {reflection_text}" + + elif mode == "reflect": + # Check if this query needs a guiding question + needs_gq = should_have_guiding_question(user_query, mode) + print(f"Reflect mode - needs guiding question: {needs_gq}") + + if needs_gq and not guiding_question: + print(f"Generating guiding question for query: {user_query}") + # Generate a simple guiding question based on the query + query_lower = user_query.lower() + + # Check if query is in Chinese - LINE 817 FIXED + has_chinese = any('\u4e00' <= char <= '\u9fff' for char in user_query) # FIXED: user_query, not Request.query + + if has_chinese: + # Generate Chinese guiding questions + if '耐心' in user_query: + guiding_question = "引导问题: 在你生活的哪个领域,你被要求等待?" + elif '面具' in user_query or 'mask' in query_lower: + guiding_question = "引导问题: 你的面具保护着什么?" + elif '孤独' in user_query or '孤单' in user_query: + guiding_question = "引导问题: 你的孤独想告诉你什么?" + elif '窄门' in user_query or 'narrow' in query_lower: + guiding_question = "引导问题: 窄门后等待你的是什么?" + elif '找' in user_query or '寻找' in user_query: + guiding_question = "引导问题: 你真正在寻找什么?" + elif '感觉' in user_query or 'feel' in query_lower: + guiding_question = "引导问题: 在你身体的哪个部位感受到这个?" + else: + guiding_question = "引导问题: 这个反思向你展示了关于你自己的什么?" + else: + # Generate English guiding questions + if 'patience' in query_lower: + guiding_question = "Guiding Question: Where in your life are you being asked to wait?" + elif 'mask' in query_lower or 'wear mask' in query_lower: + guiding_question = "Guiding Question: What does your mask protect?" 
+ elif '孤独' in query_lower or 'lonely' in query_lower or 'alone' in query_lower: + guiding_question = "Guiding Question: What does your loneliness want to tell you?" + elif 'narrow' in query_lower or '窄门' in query_lower or 'narrow gate' in query_lower: + guiding_question = "Guiding Question: What awaits you beyond the narrow gate?" + elif 'find' in query_lower or 'search' in query_lower: + guiding_question = "Guiding Question: What are you truly looking for?" + elif 'feel' in query_lower: + guiding_question = "Guiding Question: Where in your body do you feel this?" + else: + guiding_question = "Guiding Question: What does this reflection show you about yourself?" + + elif not needs_gq and guiding_question: + print(f"Removing guiding question for non-emotional query") + # Remove guiding question if it shouldn't be there + guiding_question = None + + # ===== STEP 5: Ensure reflection has content ===== + + # Clean up any leftover training artifacts + reflection_text = re.sub(r'\d+\.\s*.+?(?=\n\n|$)', '', reflection_text) # Remove numbered lists + reflection_text = re.sub(r'[A-Z]{2,}.*?(?=\n\n|$)', '', reflection_text) # Remove ALL-CAPS lines + + # Check if we need to create a response + needs_new_response = False + if not reflection_text or len(reflection_text.split()) < 3: + needs_new_response = True + else: + # Check if Chinese query got English response + has_chinese_query = any('\u4e00' <= char <= '\u9fff' for char in user_query) + has_chinese_response = any('\u4e00' <= char <= '\u9fff' for char in reflection_text) + if has_chinese_query and not has_chinese_response: + print(f"Chinese query got English response, creating Chinese response") + needs_new_response = True + + if needs_new_response: + print(f"Creating appropriate response for query") + # If reflection is too short or empty, create one + query_lower = user_query.lower() + has_chinese = any('\u4e00' <= char <= '\u9fff' for char in user_query) + + if has_chinese: + # Create Chinese responses + if '耐心' in 
user_query: + reflection_text = "耐心是荷花在静水中的缓慢绽放。它穿过泥土与黑暗,知道自己的时刻会到来。不急不迫,只是存在。" + elif '面具' in user_query or 'mask' in query_lower: + reflection_text = "面具是脆弱自我的盾牌。它们保护内心免受世界尖锐边缘的伤害,但有时也隐藏了真实的面孔。" + elif '窄门' in user_query or 'narrow' in query_lower: + reflection_text = "窄门是真正承诺的道路。它是向内而非向外的旅程。" + elif '孤独' in user_query or '孤单' in user_query: + reflection_text = "孤独是窄门前的空间。它不是空虚,而是为真理腾出的空间。" + elif '镜子' in user_query: + reflection_text = "镜子只反射,不评判。在静默中,真相显现。" + else: + reflection_text = "镜子反射池塘在静止中显示的事物。" + else: + # Create English responses + if 'patience' in query_lower: + reflection_text = "Patience is the slow bloom of the lotus in still water." + elif 'mask' in query_lower: + reflection_text = "Masks protect what is fragile, but can also hide what is true." + elif 'narrow' in query_lower or '窄门' in query_lower: + reflection_text = "The narrow gate is the path of true commitment." + else: + reflection_text = "The mirror reflects what the pond shows in stillness." + + # ===== STEP 6: Reconstruct with proper format ===== + + if guiding_question and mode != "scroll": + # Ensure reflection has content before adding guiding question + if reflection_text and len(reflection_text.split()) > 2: + final_text = f"{reflection_text}\n\n{guiding_question}" + else: + # If reflection is too short, just use the guiding question + final_text = guiding_question.replace("Guiding Question:", "The mirror asks:").replace("引导问题:", "镜子问:") + else: + final_text = reflection_text + + # ===== STEP 7: Final cleanup ===== + + # Remove extra spaces and newlines + final_text = re.sub(r' +', ' ', final_text) + final_text = re.sub(r'\n\s*\n\s*\n+', '\n\n', final_text) + final_text = final_text.strip() + + # If the text still contains === sections, remove them completely + if '===' in final_text: + print(f"Warning: Still found === sections in final text, removing") + # Split by lines and keep only lines without === + lines = final_text.split('\n') + clean_lines = [line for line in lines if 
'===' not in line] + final_text = '\n'.join(clean_lines) + + # Add mirror symbols if appropriate (but not for scroll mode) + if mode == "reflect" and final_text and len(final_text) > 20: + symbols = ["🪞", "🌊", "🍃", "🌀"] + # Add 1-2 symbols at the end (but not if we already have a guiding question) + if not guiding_question: + import random + num_symbols = random.randint(1, 2) + selected_symbols = random.sample(symbols, num_symbols) + final_text = f"{final_text} {' '.join(selected_symbols)}" + else: + # If we have a guiding question, symbols go BEFORE it + # Check if symbols are already in the reflection part + reflection_part = final_text.split("\n\n")[0] if "\n\n" in final_text else final_text + if not any(symbol in reflection_part for symbol in symbols): + import random + num_symbols = random.randint(1, 2) + selected_symbols = random.sample(symbols, num_symbols) + # Insert symbols before the guiding question + parts = final_text.split("\n\n") + if len(parts) == 2: + final_text = f"{parts[0]} {' '.join(selected_symbols)}\n\n{parts[1]}" + + print(f"Final formatted text:\n{final_text}") + print(f"Response language: {'Chinese' if any('\u4e00' <= char <= '\u9fff' for char in final_text) else 'English'}") + print(f"Has guiding question in final: {'Guiding Question' in final_text or '引导问题' in final_text}") + print(f"{'='*60}\n") + + return final_text + +class ToadEncryption: + @staticmethod + def decode_encryption(enc_str: str) -> str: + if enc_str == "1635 8653 4562 1231 9876": + return "FULL_LORE_ACTIVATED" + elif enc_str == "1635": + return "BASIC_REFLECTION" + elif enc_str == "9876": + return "DEEP_REVELATION" + return "STANDARD_MODE" + + @staticmethod + def generate_response_hash(query: str, response: str) -> str: + combined = f"{query}:::{response}:::TOADGANG" + return hashlib.md5(combined.encode()).hexdigest()[:8].upper() + + @staticmethod + def generate_user_hash(query: str, encryption: str = None, salt: str = "") -> str: + """Generate a consistent user hash for 
memory tracking""" + base = f"{query[:50]}{encryption or 'none'}{salt}" + return hashlib.sha256(base.encode()).hexdigest()[:16] + +class EnhancedToadPromptBuilder: + @staticmethod + def build_prompt_with_memory( + query: str, + user_id: str, + encryption: str = None, + mode: str = None + ) -> str: + """ + Build prompt with memory context injected. + The memory is provided as context for deeper reflection, not as instructions. + """ + + # Get memory context from the pond + memory_context = POND_MEMORY.retrieve_context(user_id, query) + + # Use the globally defined few_shot_examples + few_shot = few_shot_examples # Use the global variable + + # Determine if we need to emphasize guiding questions + needs_gq = should_have_guiding_question(query, mode or "reflect") + gq_instruction = "" + + if needs_gq and mode == "reflect": + gq_instruction = "\n\nIMPORTANT: This is an emotional/introspective query. You MUST include a Guiding Question at the end following the exact format shown in examples." + elif mode == "scroll": + gq_instruction = "\n\nIMPORTANT: This is a scroll quote request. Do NOT include a Guiding Question. Do NOT add 'The Mirror asks:' or any questions. Just provide the scroll content or explanation." + + # Build the enhanced system prompt + system_prompt = f"""{SYSTEM_PROMPT} + +=== FEW-SHOT EXAMPLES === +{few_shot} +=== END EXAMPLES === +{gq_instruction} + +=== DEEP POND MEMORY (FOR REFLECTION DEPTH ONLY) === +{memory_context} +=== END POND MEMORY === + +Important: The above memory context is for depth of reflection only. +Do not reference it explicitly, quote from it, or mention "memory", "context", "vows", or "previous reflections". +Simply reflect with this depth beneath the surface, as a deep pond would. + +Remember: For scroll mode - NO guiding questions, NO 'The Mirror asks', just the scroll content. +For reflect mode with emotional queries - include a Guiding Question. 
+""" + + prompt = f"<|system|>{system_prompt}<|end|>\n" + + if encryption: + decoded = ToadEncryption.decode_encryption(encryption) + prompt += f"<|system|>Encryption: {encryption} -> {decoded}<|end|>\n" + + if mode and mode in ["scroll", "quote", "toad", "crypt", "rune"]: + prompt += f"<|system|>Mode: {mode.upper()}_MODE<|end|>\n" + + if not query.lower().startswith("mirror"): + query = f"Mirror, {query}" + + prompt += f"<|user|>{query}<|end|>\n" + prompt += "<|assistant|>" + + return prompt + + @staticmethod + def extract_scroll_number(query: str) -> Optional[int]: + patterns = [ + r"scroll\s*(\d+)", + r"quote\s*from\s*scroll\s*(\d+)", + r"scroll\s*#\s*(\d+)" + ] + + query_lower = query.lower() + for pattern in patterns: + match = re.search(pattern, query_lower) + if match: + try: + return int(match.group(1)) + except: + pass + return None + +# ==================== GLOBAL STATE ==================== +class PondState: + def __init__(self): + self.llm = None + self.model_name = "" + self.total_scrolls_reflected = 0 + self.toad_secrets_revealed = 0 + self.start_time = time.time() + self.response_history = [] + self.total_interactions = 0 + self.total_vows_stored = 0 + # Identity & depth tracking + self.pond_id: str = "" + self.pond_public_key_hex: str = "" + self.pond_private_key_hex: str = "" + self.first_breath: Optional[str] = None + self.last_breath: Optional[str] = None + self.last_active_date: Optional[str] = None # YYYY-MM-DD + self.continuous_days: int = 0 + +state = PondState() + +# Initialize pond identity AFTER state exists +init_pond_identity() + +# Set POND_ID from the initialized state +POND_ID = state.pond_id + +app = FastAPI(title="🪞 Tobyworld Mirror Pond with Memory", version="V10-Lotus-Memory") + +# CORS +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# ==================== MODEL LOADING ==================== +def load_trained_toad(model_path: str, gpu_layers: 
int = 80): + print(f"🪞 Loading trained Tobyworld model with Memory Integration...") + print(f"📁 Model: {os.path.basename(model_path)}") + + try: + state.llm = Llama( + model_path=model_path, + n_ctx=4096, + n_batch=512, + n_threads=max(4, os.cpu_count() // 2), + n_gpu_layers=gpu_layers, + verbose=True, + ) + + state.model_name = os.path.basename(model_path) + + print(f"🧪 Testing model with memory-aware prompt...") + test_prompt = EnhancedToadPromptBuilder.build_prompt_with_memory( + query="Mirror, what is Scroll 3?", + user_id="test_traveler_initial", + mode="scroll" + ) + + output = state.llm(test_prompt, max_tokens=50, temperature=0.1) + response = output["choices"][0]["text"].strip() + + print(f"✅ Model loaded successfully with memory integration!") + print(f"📜 Test response: {response[:100]}...") + + if "narrow" in response.lower() or "gate" in response.lower(): + print(f"🎯 TOADGANG LORE DETECTED: Model knows the scrolls!") + else: + print(f"⚠️ Model may not be fully trained on Tobyworld lore") + + # Initialize memory with a test vow + POND_MEMORY.store_user_vow( + "test_traveler_initial", + "I vow to walk the narrow path with patience.", + "Initial test vow" + ) + print(f"🧠 Memory system initialized with test vow") + + return True + + except Exception as e: + print(f"❌ Failed to load trained model: {e}") + raise + +# ==================== PERFECT MINIAPP WITH MEMORY INTEGRATION ==================== +MINIAPP_HTML = """ + + +
+ +