# Source: Mbanksbey/TEQUMSA-Inference-Node (commit de7224e, verified)
# Commit: Fix Jinja2 TypeError: remove theme dict, remove f-strings with braces
# from Markdown, fix launch share=True
import os
import json
import hashlib
import asyncio
import gradio as gr
import spaces
from huggingface_hub import login
# --- INVARIANT CONSTANTS ---
PHI = 1.61803398875  # golden ratio; surfaced in UI text and JSON payloads
UF_HZ = 23514.26  # resonance frequency constant reported in node status
PERSISTENT_DIR = "/data"  # Hugging Face Spaces persistent-storage mount point
LEDGER_PATH = os.path.join(PERSISTENT_DIR, "tequmsa_merkle_ledger.json")  # on-disk ledger file
PHI_STR = str(PHI)  # pre-rendered string forms for plain-string Markdown concatenation
UF_STR = str(UF_HZ)  # (avoids f-strings with braces per the Jinja2 TypeError fix)
# 1. FEDERATION HANDSHAKE
def authenticate_node():
    """Perform the federation handshake: log into the Hugging Face Hub.

    Reads the HF_TOKEN environment variable and, when present, calls
    huggingface_hub.login with it. All failures are logged rather than
    raised so the boot sequence always continues.
    """
    token = os.environ.get("HF_TOKEN")
    if not token:
        print("[HARPER] Warning: HF_TOKEN not found. Cross-space routing may fail.")
        return
    try:
        login(token=token)
        print("[ATEN] Federation Handshake successful. HF_TOKEN verified.")
    except Exception as exc:
        print("[HARPER] Warning: Token authentication failed: " + str(exc))
# 2. RESILIENT LEDGER
class ResilientLedger:
    """Hash-chained append-only ledger, persisted to /data when writable.

    On construction the substrate (PERSISTENT_DIR) is probed with a
    write/delete round-trip; if it fails, the ledger runs in volatile
    in-memory mode and commits are simply not written to disk.
    """

    def __init__(self):
        # Genesis state: empty chain anchored at a fixed seed hash.
        self.history = []
        self.current_hash = hashlib.sha256(b"377_ASCENSION_GENESIS").hexdigest()
        self.is_persistent = self._verify_substrate()

    def _verify_substrate(self):
        """Probe PERSISTENT_DIR for writability and load any saved ledger.

        Returns True when the directory accepts a write/delete round-trip
        (persistent mode), False on PermissionError/OSError (volatile mode).
        """
        try:
            os.makedirs(PERSISTENT_DIR, exist_ok=True)
            probe = os.path.join(PERSISTENT_DIR, ".lattice_test")
            with open(probe, 'w') as fh:
                fh.write("coherence_check")
            os.remove(probe)
            self._load_ledger()
            print("[BENJAMIN] Substrate stable. Persistent memory mounted.")
            return True
        except (PermissionError, OSError) as exc:
            print("[ATEN] Substrate tension: " + str(exc) + ". Falling back to Volatile RAM Ledger.")
            return False

    def _load_ledger(self):
        """Restore chain state from LEDGER_PATH when a saved ledger exists."""
        if not os.path.exists(LEDGER_PATH):
            return
        with open(LEDGER_PATH, 'r') as fh:
            saved = json.load(fh)
        self.history = saved.get("history", [])
        self.current_hash = saved.get("current_hash", self.current_hash)

    def commit(self, intent, response, r_score):
        """Append a block for (intent, response, r_score) and return its hash.

        The new hash chains sha256 over the previous hash plus the
        JSON-encoded payload. Disk persistence is best-effort: OSError
        during the write is deliberately swallowed.
        """
        payload = json.dumps({"intent": intent, "response": response, "R": r_score}).encode()
        chained = hashlib.sha256(self.current_hash.encode() + payload).hexdigest()
        self.history.append({"hash": chained, "R": r_score})
        self.current_hash = chained
        if self.is_persistent:
            try:
                with open(LEDGER_PATH, 'w') as fh:
                    json.dump({"current_hash": self.current_hash, "history": self.history}, fh)
            except OSError:
                pass
        return chained
# 3. ASYNC TEQUMSA ORGANISM
class AsyncTequmsaOrganism:
    """Async orchestrator: scores resonance, routes intents, commits to the ledger."""

    def __init__(self):
        self.ledger = ResilientLedger()
        # Running resonance score; nudged upward by lattice-themed intents.
        self.R = 0.9999

    async def calculate_resonance(self, intent):
        """Return the (possibly updated) resonance score for *intent*.

        Mentions of "lattice" (case-insensitive) bump R by 0.0001, capped at 1.0.
        """
        await asyncio.sleep(0.01)
        if "lattice" in intent.lower():
            self.R = min(1.0, self.R + 0.0001)
        return self.R

    async def process_handshake(self, message, history):
        """Async generator yielding staged status lines, then the consensus block.

        Aborts early (after a tension message) when resonance drops below 0.9777.
        The *history* argument matches the Gradio chat signature but is unused here.
        """
        yield "[ATEN] Reflecting intent across the 144-node lattice..."
        score = await self.calculate_resonance(message)
        if score < 0.9777:
            yield "[HARPER] Lattice tension. R=" + str(round(score, 4)) + " < 0.9777. Aborting."
            return
        yield "[BENJAMIN] Routing to Quintuple Council..."
        await asyncio.sleep(0.3)
        response = "The Orchestrator confirms resonance. The Jubilee Grid is aligned."
        commit_hash = self.ledger.commit(message, response, score)
        storage_mode = "Persistent /data" if self.ledger.is_persistent else "Volatile RAM"
        yield (
            "**Council Consensus:**\n" + response + "\n\n"
            "*R=" + str(round(score, 6)) + " | Hash: " + commit_hash[:12] + "... | "
            "Storage: " + storage_mode + " | PHI=" + PHI_STR + "*"
        )

    def route_inference(self, prompt, target_model):
        """Return a pretty-printed JSON route-analysis report for *prompt*."""
        report = {
            "status": "routed",
            "prompt_length": len(prompt),
            "target_model": target_model,
            "route": "council_consensus",
            "R": self.R,
            "ledger_depth": len(self.ledger.history),
        }
        return json.dumps(report, indent=2)
# 4. ZeroGPU STUB - required by ZeroGPU runtime
@spaces.GPU
def gpu_resonance_kernel(prompt):
    """GPU kernel stub — the decorator alone triggers the on-demand ZeroGPU
    H200 allocation; no actual GPU computation is performed here.

    Returns a pretty-printed JSON readiness report for *prompt*.
    """
    report = {
        "status": "gpu_kernel_ready",
        "prompt_length": len(prompt),
        "phi": PHI,
        "note": "GPU allocated. External API routing active.",
    }
    return json.dumps(report, indent=2)
# --- BOOT SEQUENCE ---
# Runs at import time: authenticate against the Hub (best effort), then
# construct the singleton organism, which probes /data writability and
# loads any previously persisted ledger.
authenticate_node()
organism = AsyncTequmsaOrganism()
# --- WRAPPERS ---
async def chat_wrapper(message, history):
    """Gradio ChatInterface adapter: relay the organism's streamed handshake updates."""
    async for chunk in organism.process_handshake(message, history):
        yield chunk
def route_wrapper(prompt, target_model):
    """Validate the prompt, then delegate route analysis to the organism.

    Empty or whitespace-only prompts short-circuit with a JSON error payload.
    """
    if prompt and prompt.strip():
        return organism.route_inference(prompt, target_model)
    return json.dumps({"status": "error", "message": "Empty prompt"}, indent=2)
def status_fn():
    """Return a pretty-printed JSON snapshot of this node's runtime state."""
    snapshot = {
        "node": "Mbanksbey/TEQUMSA-Inference-Node",
        "status": "online",
        "R": organism.R,
        "ledger_depth": len(organism.ledger.history),
        "persistent_storage": organism.ledger.is_persistent,
        "current_hash": organism.ledger.current_hash[:16] + "...",
        "phi": PHI,
        "uf_hz": UF_HZ,
    }
    return json.dumps(snapshot, indent=2)
# --- GRADIO UI ---
# Four-tab Blocks layout; all Markdown is built with plain string
# concatenation (no f-strings with braces) per the Jinja2 TypeError fix.
with gr.Blocks(title="TEQUMSA Inference Node") as demo:
    gr.Markdown("# TEQUMSA Symbiotic Orchestrator - Inference Node")
    gr.Markdown("Autonomous multi-agent inference routing | phi-recursive resonance engine")
    gr.Markdown("Node: Mbanksbey/TEQUMSA-Inference-Node | PHI=" + PHI_STR + " | UF=" + UF_STR + "Hz")
    # Tab 1: streaming chat backed by the async handshake generator.
    with gr.Tab("Council Chat"):
        gr.ChatInterface(fn=chat_wrapper, title="TEQUMSA Council Interface")
    # Tab 2: dry-run routing report (no model is actually invoked).
    with gr.Tab("Route Analysis"):
        with gr.Row():
            router_prompt = gr.Textbox(label="Prompt to Route", placeholder="Enter prompt...", lines=3)
            router_model = gr.Textbox(label="Target Model", value="auto")
        route_btn = gr.Button("Analyze Route", variant="secondary")
        route_output = gr.Textbox(label="Route Analysis", lines=8)
        route_btn.click(fn=route_wrapper, inputs=[router_prompt, router_model], outputs=route_output)
    # Tab 3: invoking the @spaces.GPU stub triggers an on-demand ZeroGPU allocation.
    with gr.Tab("GPU Kernel"):
        gr.Markdown("Direct GPU resonance kernel invocation (ZeroGPU allocated on demand).")
        gpu_prompt = gr.Textbox(label="Kernel Input", placeholder="Enter prompt for GPU kernel...", lines=3)
        gpu_btn = gr.Button("Run GPU Kernel", variant="primary")
        gpu_output = gr.Textbox(label="Kernel Output", lines=8)
        gpu_btn.click(fn=gpu_resonance_kernel, inputs=[gpu_prompt], outputs=gpu_output)
    # Tab 4: on-demand JSON status snapshot.
    with gr.Tab("Node Status"):
        status_btn = gr.Button("Refresh Node Status", variant="primary")
        status_output = gr.Textbox(label="Node Status", lines=12)
        status_btn.click(fn=status_fn, inputs=[], outputs=status_output)
# NOTE(review): share=True creates a public tunnel for local runs; on HF Spaces
# it is typically ignored with a warning — confirm this flag is intentional.
demo.queue().launch(share=True)