# Glyph.io / app.py
import gradio as gr
from huggingface_hub import InferenceClient
from array import array
from functools import lru_cache
import os, re, time
# 1. API Configuration - Locked to the stable 7B model
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
client = InferenceClient(MODEL_ID, token=HF_TOKEN)
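# HF_TOKEN is read from the environment (on Spaces, configure it as a secret).
# One InferenceClient serves both panels below; the two engines differ only in
# system prompt and sampling temperature (0.1 vs 0.7).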
# 2. T3 High-Speed Logic Kernel
class StateController:
__slots__ = ("_state", "_rom60", "_symbols", "_rendered")
def __init__(self):
        self._state = array("B", [0] * 121)
self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))
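        # _rom60: precomputed 60x60 multiplication table mod 60, indexed by the
        # receipt kernel instead of multiplying at request time.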
self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
self._rendered = "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))
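        # _symbols is a 60-character base-60 alphabet (26 upper + 26 lower + digits
        # 0-7); _rendered is the static 121-point glyph grid returned by get_glyphs().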
@lru_cache(maxsize=128)
def compute_distribution(self, total, nodes) -> str:
        if nodes <= 0:
            return "Error: Node count must be positive."
base, rem = divmod(total, nodes)
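        # e.g. 50000 units across 12 nodes: divmod gives (4166, 8), so the first
        # 8 nodes receive 4167 units and the remaining 4 receive 4166.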
res = f"T3 Logic Kernel resolved {total} units across {nodes} nodes:\n\n"
for i in range(nodes):
res += f"NODE_{i+1:02}: {base + (1 if i < rem else 0)} units\n"
return res
def get_glyphs(self) -> str:
return f"Rendering 121-point state array:\n\n{self._rendered}\n\nSystem State: RESOLVED"
def generate_receipt(self, a, b, c) -> str:
idx = (self._rom60[a % 60][b % 60] ^ (c % 60)) % 60
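        # e.g. (60, 30, 30): rom60[0][30] = 0, 0 ^ 30 = 30, symbols[30] = "e",
        # so the receipt is "0e" (matching the example prompt below).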
return f"0{self._symbols[idx]}"
def validate_receipt(self, receipt, a, b, c) -> str:
expected = self.generate_receipt(a, b, c)
if receipt == expected:
return f"√ CHECKSUM VALID: Receipt {receipt} verified for allocation ({a}, {b}, {c})."
return f"× CHECKSUM INVALID: Expected {expected}, received {receipt}."
controller = StateController()
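# Startup sanity check (an added safeguard, not part of the original protocol):
# the receipt kernel must reproduce the "0e" receipt used in the example prompts.
assert controller.generate_receipt(60, 30, 30) == "0e"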
def format_telemetry(seconds: float) -> str:
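    # Pick the most readable unit: microseconds below 1 ms, milliseconds below 1 s.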
if seconds < 0.001: return f"{seconds * 1_000_000:.2f} \u03BCs"
return f"{seconds * 1_000:.2f} ms" if seconds < 1 else f"{seconds:.2f} s"
# 3. Core Response Logic
def generate_responses(user_message, p_hist, c_hist):
msg = user_message.strip()
    if not msg:
        yield p_hist or [], c_hist or [], ""
        return
p_hist, c_hist = p_hist or [], c_hist or []
p_hist.append({"role": "user", "content": msg})
p_hist.append({"role": "assistant", "content": ""})
c_hist.append({"role": "user", "content": msg})
c_hist.append({"role": "assistant", "content": ""})
yield p_hist, c_hist, ""
start_time = time.perf_counter()
# --- LOCAL INTERCEPTORS ---
dist_match = re.search(r"(\d+)\s+units\s+across\s+(\d+)\s+nodes", msg, re.IGNORECASE)
diag_match = any(kw in msg.lower() for kw in ["diagnostic", "grid"])
rcpt_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
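    # Three local fast paths: distribution math, receipt verification, and grid
    # diagnostics are answered by the T3 kernel; everything else streams from the
    # cloud model. Mixed prompts ("Define P vs. NP. Then validate ...") still hit
    # the kernel because the regexes match anywhere in the message.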
if dist_match or diag_match or rcpt_match:
if dist_match:
res = controller.compute_distribution(int(dist_match.group(1)), int(dist_match.group(2)))
elif rcpt_match:
res = controller.validate_receipt(rcpt_match.group(1), int(rcpt_match.group(2)), int(rcpt_match.group(3)), int(rcpt_match.group(4)))
else:
res = controller.get_glyphs()
elapsed = time.perf_counter() - start_time
p_hist[-1]["content"] = f"{res}\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: LOCAL T3 KERNEL*"
yield p_hist, c_hist, ""
else:
try:
res_text = ""
stream = client.chat_completion(
messages=[{"role":"system","content":"T3 Augmented Logic Engine"}] + p_hist[:-1],
max_tokens=512, stream=True, temperature=0.1
)
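            # The stream yields OpenAI-style chunks; a delta can arrive with
            # content=None, hence the `or ""` guard below.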
for chunk in stream:
res_text += (chunk.choices[0].delta.content or "")
p_hist[-1]["content"] = res_text
yield p_hist, c_hist, ""
p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-start_time)} | Source: AUGMENTED CLOUD*"
yield p_hist, c_hist, ""
except Exception as e:
p_hist[-1]["content"] = f"Primary Error: {str(e)}"
yield p_hist, c_hist, ""
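    # Whether the answer came from the local kernel or the cloud, the vanilla
    # pass below always runs so the two panels stay comparable.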
comp_start = time.perf_counter()
c_hist[-1]["content"] = "*Routing through standard infrastructure...*"
yield p_hist, c_hist, ""
try:
res_text = ""
stream = client.chat_completion(
messages=[{"role":"system","content":"Vanilla AI"}] + c_hist[:-1],
max_tokens=512, stream=True, temperature=0.7
)
for chunk in stream:
res_text += (chunk.choices[0].delta.content or "")
c_hist[-1]["content"] = res_text
yield p_hist, c_hist, ""
c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-comp_start)} | Source: VANILLA CLOUD*"
yield p_hist, c_hist, ""
except Exception as e:
c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
yield p_hist, c_hist, ""
# 4. Interface Build (scrollable examples container, message-format chatbots)
custom_css = """
body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
footer { display: none !important; }
#scrollable-box { max-height: 160px; overflow-y: auto; border: 1px solid #333; padding: 5px; border-radius: 8px; margin-bottom: 10px; }
"""
example_prompts = [
["Run grid diagnostic"],
["Calculate the integer distribution for 50000 units across 12 nodes."],
["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes."],
["Execute a Tier-3 Distribution Audit for 8593 units across 14 nodes."],
["Verify receipt 0e for 60, 30, 30"],
["Distribute 1000000 units across 7 nodes."],
["Perform a hardware grid initialization and diagnostic check."],
["Allocate exactly 2048 units across 16 nodes for cluster balancing."],
["Explain the theory of relativity. Then process 999 units across 9 nodes."],
["Run a full system diagnostic on the logical array."],
["Load balance 123456789 units across 256 nodes."],
["Draft an email to the logistics team. Then route 400 units across 5 nodes."],
["Initialize grid memory matrix and verify logic gate alignment."],
["Evaluate node efficiency for 7777 units across 11 nodes."],
["Explain how standard AI struggles with deterministic mathematical verification."]
]
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css) as demo:
gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
    # Histories are OpenAI-style message dicts, so both Chatbots need
    # type="messages" (the default tuple format would reject them).
    p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350, type="messages")
with gr.Row():
msg_in = gr.Textbox(label="Message", placeholder="Test P vs NP or Logistics Distribution...", scale=8)
submit_btn = gr.Button("Execute", scale=1, variant="primary")
with gr.Column(elem_id="scrollable-box"):
gr.Examples(examples=example_prompts, inputs=msg_in, label="Diagnostic Test Suite (Scroll for more)")
    c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350, type="messages")
msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
submit_btn.click(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
if __name__ == "__main__":
    demo.queue().launch()