# LOGOS-SPCW-Matroska / logos/mhc_router.py
# Protocol 26: Configured mHC Hyper-Connections (Alpha/Beta) logic
# (GitHub Copilot; commit 8759f2e)
"""
logos/mhc_router.py - Recursive Manifold Engine (Router Logic)
Protocol 25: Recursive Manifold Engine (RLM) w/ Harmonic Convergence
"""
import logging
import time
import os
import sys
import requests
# Hack: Allow running this script directly for testing
# (prepends the package root so 'logos.*' imports resolve without installation)
if __name__ == "__main__":
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from logos.elements import ManifoldState as AtomicState, Token, Model, Atom, Handoff, Tool

# Set up Logging — module-level logger used by all router functions below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("LOGOS_Router")
# --- MANIFOLD CONSTRAINT CONFIGURATION ---
# When FORCE_SINGLE_MODEL is set, every shell routes to UNIFIED_MODEL_ID
# regardless of its per-shell default model.
FORCE_SINGLE_MODEL = True
UNIFIED_MODEL_ID = "dolphin-x1-8b"


def _shell_entry(default_model, rate_limit):
    """Build one shell config entry, honoring the single-model override."""
    chosen = UNIFIED_MODEL_ID if FORCE_SINGLE_MODEL else default_model
    return {"model": chosen, "rate_limit": rate_limit}


SHELL_CONFIG = {
    "INNER_SHELL": _shell_entry("dolphin-x1-8b", 60),
    "PRIME_CHANNEL": _shell_entry("essentialai/rnj-1", 20),
    "OUTER_SHELL": _shell_entry("google/gemma-3-4b", 10),
}

# Token-bucket bookkeeping, one bucket per shell, seeded from the rate limits.
RATE_LIMITS = {
    shell_name: {"tokens": cfg["rate_limit"], "last_update": 0}
    for shell_name, cfg in SHELL_CONFIG.items()
}
# --- TOOLS ---
def search_tool(query):
    """Mock external-search tool: return a canned result string for *query*."""
    return "[SEARCH RESULTS] Found relevant documentation for: {}".format(query)
# Registry of callable tools, keyed by the Handoff target name used in
# execute_recursive_manifold (TOOL_MAP.get(handoff_atom.target)).
TOOL_MAP = {
    "Search": Tool("Search", search_tool, "Locates external knowledge to reduce entropy.")
}
# --- CORE FUNCTIONS ---
# Nibbles counted as "high entropy" in the heat analysis. bytes.hex() emits
# lowercase digits only, so the uppercase variants ('B','D','E','F') in the
# original set were dead entries and have been dropped (behavior-identical).
_HIGH_ENTROPY_NIBBLES = frozenset("7bdef")


def calculate_manifold_constraint(text, is_vision=False):
    """
    Determine the routing 'Shell' based on Prime Topology
    (Manifold Constrained Hyper-Connection).

    Args:
        text: Prompt text to analyze (expected to be a str).
        is_vision: If True, route straight to the OUTER_SHELL.

    Returns:
        tuple: (shell_name, heat_score), where shell_name is a SHELL_CONFIG
        key and heat_score is in [0.0, 1.0] (0.5 for empty input).
    """
    if is_vision:
        return "OUTER_SHELL", 1.0

    # Keyword Harmonic Override: explicit domain keywords short-circuit
    # the heat analysis entirely.
    text_lower = text.lower()
    if any(k in text_lower for k in ("prime", "modulo", "math", "proof")):
        return "PRIME_CHANNEL", 1.0
    if any(k in text_lower for k in ("def ", "class ", "import ", "code", "script")):
        return "INNER_SHELL", 1.0

    # Prime Topology Routing (Heat Code Analysis): score the density of
    # high-entropy hex nibbles in the UTF-8 encoding of the text.
    # [FIX] The original bare `except:` fallback (str(text).encode(...)) would
    # re-raise for the only reachable failure here — a UnicodeEncodeError on
    # lone surrogates — so we catch that case explicitly and degrade gracefully.
    try:
        hex_str = text.encode('utf-8').hex()
    except UnicodeEncodeError:
        hex_str = text.encode('utf-8', errors='replace').hex()

    total_nibbles = len(hex_str)
    if total_nibbles == 0:
        return "INNER_SHELL", 0.5

    high_entropy_count = sum(1 for c in hex_str if c in _HIGH_ENTROPY_NIBBLES)
    heat_score = high_entropy_count / total_nibbles

    if heat_score > 0.4:
        return "OUTER_SHELL", heat_score
    elif heat_score > 0.2:
        return "PRIME_CHANNEL", heat_score
    else:
        return "INNER_SHELL", heat_score
def execute_recursive_manifold(prompt, target_model_id, max_recursion=3, is_vision=False):
    """
    Execute the prompt through the Manifold-Constrained Hyper-Graph.

    Iterates until Harmonic Convergence (state.stabilize()) or max_recursion
    is reached, performing a tool handoff when heat exceeds the threshold.

    Args:
        prompt: User prompt to process.
        target_model_id: Model id sent to the chat-completions endpoint.
        max_recursion: Maximum number of refinement iterations.
        is_vision: Routes directly to the OUTER_SHELL when True.

    Returns:
        tuple: (final_content, trajectory, state) — the last generated text,
        a list of per-iteration diagnostic dicts, and the AtomicState buffer.
    """
    # 1. Initialize State Buffer ( The Periodic Table )
    state = AtomicState()
    initial_token = Token(prompt, source="user")
    initial_token.domain = "Prompt"
    state.inject(initial_token)
    last_atom_id = initial_token.id  # Track causality
    trajectory = []
    ENTROPY_HANDOFF_THRESHOLD = 0.6  # [TUNED] Lowered from 0.8 to increase Tool Usage

    logger.info(f"🌀 [RLM] Initiating Recursive Manifold Loop (Max Depth: {max_recursion})")

    # [OPTIMIZATION] STATIC SYSTEM PROMPT for KV Cache Reuse
    # By keeping the system prompt identical across iterations, we allow the engine
    # to reuse the computed KV states for the system block.
    STATIC_SYSTEM_PROMPT = (
        "You are the LOGOS Recursive Manifold Node (RLM). "
        "You exist within a self-correcting loop. "
        "Adhere strictly to the injected [DIRECTIVE] at the end of the user prompt."
    )

    current_content = prompt
    iteration = 0
    while iteration < max_recursion:
        iteration += 1

        # 2. Calculate Manifold Constraints
        shell, heat_score = calculate_manifold_constraint(current_content, is_vision)
        # Update State Stress (Continuum Mechanics)
        state.stress = max(0, heat_score - 0.5) if shell == "OUTER_SHELL" else 0.1

        # [GEOMETRY CHECK]
        logger.info(f" ► Iteration {iteration}: Shell={shell}")
        logger.info(f" [GEOMETRY] Heat={heat_score:.4f} (Hex Density)")
        logger.info(f" [NUMBER] Resonance={state.resonance_product}")
        logger.info(f" [PHYSICS] Stress={state.stress:.4f}")

        trajectory.append({
            "iter": iteration,
            "shell": shell,
            "heat": heat_score,
            "resonance": state.resonance_product,
            "elements": [str(a) for a in state.atoms[-3:]]
        })

        # 3. Harmonic Convergence Check
        if state.stabilize() and iteration > 1:
            logger.info(f" ✅ Harmonic Convergence Reached at Iteration {iteration}.")
            break

        # 4. Atomic Handoff (Self-Correction / Tool Usage)
        # We encourage tool use if stress is high OR if we are just starting
        # (Prime Interplay) and the prompt itself mentions search.
        if heat_score > ENTROPY_HANDOFF_THRESHOLD or (iteration == 1 and "search" in current_content.lower()):
            logger.info(f" ⚠️ CRITICAL HEAT ({heat_score:.2f}) -> Initiating ATOMIC HANDOFF to 'Search'")
            handoff_atom = Handoff("Search", reason=f"High Entropy detected ({heat_score:.2f})")
            state.inject(handoff_atom, parent_id=last_atom_id, relation="triggers")

            target_tool = TOOL_MAP.get(handoff_atom.target)
            if target_tool:
                # [BOOST] Increase context window for tools
                result_token = target_tool.execute(current_content[:150])
                result_token.domain = "External_Knowledge"
                state.inject(result_token, parent_id=handoff_atom.id, relation="resolved_by")
                # [CACHE OPTIMIZATION] Append Context instead of Prepend
                # This preserves the prefix of 'current_content', allowing KV cache
                # to reuse tokens 0..N. We append the new knowledge.
                current_content = f"{current_content}\n\n[TOOL_RESULT]: {result_token.content}"
                last_atom_id = result_token.id

        # 5. mHC: Dynamic Hyper-Connection (Stabilization)
        # Parametrization: Alpha (Residual) vs Beta (Change) derived from Heat.
        # High Heat = High Instability = Increase Alpha (Stick to what you know).
        # Low Heat = Stability = Increase Beta (Allow exploration).
        alpha = min(0.9, heat_score)  # Dampening factor
        beta = 1.0 - alpha
        logger.info(f" ⚖️ [mHC] Hyper-Connection: alpha={alpha:.2f} (Residual), beta={beta:.2f} (New)")

        # If heat is extremely high, ground the model via a "Heavy Residual"
        # connection: inject a Stabilizer Atom and damp the temperature.
        stabilizing = alpha > 0.7
        if stabilizing:
            logger.info(" ⚓ STABILIZING: Reinforcing Residual Connection (Original Context)")
            # NOTE(review): the relation string below keeps its original
            # (misspelled) value 'stablized_by_mHC...' because downstream
            # consumers may match on it exactly — confirm before correcting.
            stabilizer = Atom("St", "mHC_Stabilizer", domain="Control")
            state.inject(stabilizer, parent_id=last_atom_id, relation="stablized_by_mHC:High Dissonance Damping")

        # 6. Hyper-Connection Routing & Context Expansion
        if shell == "OUTER_SHELL":
            # Creative / Divergent
            meta_instruction = "Analyze entropy. Expand geometric implications. Think divergently. Use rich vocabulary."
            target_temp = 0.9
        else:
            # Logic / Convergent
            meta_instruction = "Refine into singular conclusion. Minimize ambiguity. Critique previous steps."
            target_temp = 0.4

        # [FIX] The stabilizer temperature (0.1) was previously assigned BEFORE
        # the shell-based selection above and therefore always overwritten
        # (dead store). Apply it after shell routing so it actually takes effect,
        # matching the stated intent: "If unstable, reduce temperature further
        # than normal".
        if stabilizing:
            target_temp = 0.1

        # 7. Execute Expert (Agent Interplay)
        try:
            # [BOOST] Force Multi-Agent "Critique" if we are deep in recursion
            if iteration > 1 and shell == "OUTER_SHELL":
                meta_instruction += " CRITIQUE the previous iteration. checking for logical consistency."

            model_atom = Model(target_model_id)
            # [CACHE OPTIMIZATION] Dynamic Directive Moved to User Prompt Postfix
            # System Prompt remains static. Directive is appended.
            final_user_content = f"{current_content}\n\n[DIRECTIVE]: {meta_instruction}"
            messages = [
                {"role": "system", "content": STATIC_SYSTEM_PROMPT},
                {"role": "user", "content": final_user_content}
            ]
            payload = {
                "model": target_model_id,
                "messages": messages,
                "temperature": target_temp,
                "max_tokens": -1,  # LM Studio convention: -1 = no explicit cap
                "stream": False
            }
            lm_endpoint = os.environ.get("LOGOS_LLM_ENDPOINT", "http://localhost:1234/v1")
            # Simple synchronous call for now (can upgrade to aiohttp later)
            response = requests.post(f"{lm_endpoint}/chat/completions", json=payload, timeout=60)

            if response.status_code == 200:
                result = response.json()
                new_content = result['choices'][0]['message']['content']
                output_token = Token(new_content, source=target_model_id)
                output_token.domain = "Inference"
                state.inject(output_token, parent_id=last_atom_id, relation="generated_by")
                current_content = new_content
                last_atom_id = output_token.id
            else:
                logger.error(f" ❌ Expert Failed: {response.status_code}")
                break
        except Exception as e:
            logger.error(f" ❌ RLM Error: {e}")
            break

    return current_content, trajectory, state
if __name__ == "__main__":
    print("--- LOGOS PHYSICS ENGINE TEST ---")
    print("Injecting High-Entropy Prompt to trigger Geometry Checks...")

    # Test Prompt: High heat (hex codes), mixed domain keywords
    test_prompt = "Calculate the Prime Resonance of a Quantum Field 0x7F 0xFF in Python."

    # Mock LLM Endpoint for test (won't actually call out if we don't have a
    # server, or will fail gracefully). We mainly want to see the LOGS
    # generated by 'execute_recursive_manifold'.
    try:
        final, traj, final_state_obj = execute_recursive_manifold(test_prompt, "dolphin-x1-8b", max_recursion=2)
        print("\n[Trajectory Summary]")
        # [FIX] Loop variable renamed from PascalCase 'Step' to snake_case
        # 'step' per PEP 8 (PascalCase is reserved for classes).
        for step in traj:
            print(f"Iter {step['iter']}: Heat={step['heat']:.2f} | Resonance={step.get('resonance', 'N/A')}")
    except Exception as e:
        print(f"Physics Test Complete (stopped at network): {e}")