import gradio as gr
from huggingface_hub import InferenceClient
from array import array
from functools import lru_cache
import os, re, time
# 1. API Configuration - Upgraded to the massive 72B Model
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_ID = "Qwen/Qwen2.5-72B-Instruct"
client = InferenceClient(MODEL_ID, token=HF_TOKEN)
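# HF_TOKEN should be provided as a Space secret / environment variable; with token=None
# the client still works but falls back to unauthenticated, heavily rate-limited calls.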
# 2. T3 High-Speed Logic Kernel
class StateController:
    """Deterministic local kernel: integer distribution, glyph rendering, mod-60 receipts."""
    __slots__ = ("_state", "_rom60", "_symbols", "_rendered")

    def __init__(self):
        self._state = array("B", [0] * 121)  # 121-point state array
        self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))  # mod-60 multiplication ROM
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"  # 60-symbol alphabet
        self._rendered = "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))

    @lru_cache(maxsize=128)  # caching on a method retains a reference to self; acceptable here, since one controller lives for the app's lifetime
    def compute_distribution(self, total, nodes) -> str:
        if nodes <= 0: return "Error: Node count must be positive."
        base, rem = divmod(total, nodes)
        res = f"T3 Logic Kernel resolved {total} units across {nodes} nodes:\n\n"
        for i in range(nodes):
            res += f"NODE_{i+1:02}: {base + (1 if i < rem else 0)} units\n"
        return res

    def get_glyphs(self) -> str:
        return f"Rendering 121-point state array:\n\n{self._rendered}\n\nSystem State: RESOLVED"

    def generate_receipt(self, a, b, c) -> str:
        idx = (self._rom60[a % 60][b % 60] ^ (c % 60)) % 60
        return f"0{self._symbols[idx]}"

    def validate_receipt(self, receipt, a, b, c) -> str:
        expected = self.generate_receipt(a, b, c)
        if receipt == expected:
            return f"√ CHECKSUM VALID: Receipt {receipt} verified for allocation ({a}, {b}, {c})."
        return f"× CHECKSUM INVALID: Expected {expected}, received {receipt}."
controller = StateController()
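# Worked examples (illustrative comments, traced by hand through the kernel above):
#   controller.compute_distribution(10, 3)
#     -> divmod(10, 3) = (3, 1), so the lone remainder unit lands on the first node:
#        NODE_01: 4 units, NODE_02: 3 units, NODE_03: 3 units
#   controller.generate_receipt(60, 30, 30)
#     -> rom60[0][30] = 0; 0 ^ 30 = 30; symbols[30] = "e" -> "0e"
#        (the receipt checked by the "Verify receipt 0e" example prompt below)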
# THE UPGRADE: Dynamic Telemetry Formatting
def format_telemetry(seconds: float) -> str:
    if seconds < 0.001: return f"{seconds * 1_000_000:.2f} \u03BCs"
    return f"{seconds * 1_000:.2f} ms" if seconds < 1 else f"{seconds:.2f} s"
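# Threshold sketch (hand-checked against the branches above):
#   format_telemetry(0.0000005) -> "0.50 μs"
#   format_telemetry(0.005)     -> "5.00 ms"
#   format_telemetry(2.5)       -> "2.50 s"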
# 3. Core Response Logic
def generate_responses(user_message, p_hist, c_hist):
    msg = user_message.strip()
    if not msg: yield p_hist or [], c_hist or [], ""; return
    p_hist, c_hist = p_hist or [], c_hist or []
    p_hist.append({"role": "user", "content": msg})
    p_hist.append({"role": "assistant", "content": ""})
    c_hist.append({"role": "user", "content": msg})
    c_hist.append({"role": "assistant", "content": ""})
    yield p_hist, c_hist, ""
    start_time = time.perf_counter()
    # --- LOCAL INTERCEPTORS ---
    dist_match = re.search(r"(\d+)\s+units\s+across\s+(\d+)\s+nodes", msg, re.IGNORECASE)
    diag_match = any(kw in msg.lower() for kw in ["diagnostic", "grid"])
    rcpt_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
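    # e.g. "Distribute 1000000 units across 7 nodes." -> dist_match groups ("1000000", "7")
    #      "Verify receipt 0e for 60, 30, 30"          -> rcpt_match groups ("0e", "60", "30", "30")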
    if dist_match or diag_match or rcpt_match:
        if dist_match:
            res = controller.compute_distribution(int(dist_match.group(1)), int(dist_match.group(2)))
        elif rcpt_match:
            res = controller.validate_receipt(rcpt_match.group(1), int(rcpt_match.group(2)), int(rcpt_match.group(3)), int(rcpt_match.group(4)))
        else:
            res = controller.get_glyphs()
        elapsed = time.perf_counter() - start_time
        p_hist[-1]["content"] = f"{res}\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: LOCAL T3 KERNEL*"
        yield p_hist, c_hist, ""
    else:
        try:
            res_text = ""
            stream = client.chat_completion(
                messages=[{"role": "system", "content": "T3 Augmented Logic Engine"}] + p_hist[:-1],
                max_tokens=512, stream=True, temperature=0.1
            )
            for chunk in stream:
                res_text += (chunk.choices[0].delta.content or "")
                p_hist[-1]["content"] = res_text
                yield p_hist, c_hist, ""
            p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-start_time)} | Source: AUGMENTED CLOUD*"
            yield p_hist, c_hist, ""
        except Exception as e:
            p_hist[-1]["content"] = f"Primary Error: {str(e)}"
            yield p_hist, c_hist, ""
    comp_start = time.perf_counter()
    c_hist[-1]["content"] = "*Routing through heavy 72B infrastructure...*"
    yield p_hist, c_hist, ""
    try:
        res_text = ""
        stream = client.chat_completion(
            messages=[{"role": "system", "content": "Vanilla AI"}] + c_hist[:-1],
            max_tokens=512, stream=True, temperature=0.7
        )
        for chunk in stream:
            res_text += (chunk.choices[0].delta.content or "")
            c_hist[-1]["content"] = res_text
            yield p_hist, c_hist, ""
        c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-comp_start)} | Source: VANILLA CLOUD*"
        yield p_hist, c_hist, ""
    except Exception as e:
        c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
        yield p_hist, c_hist, ""
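# Each yield emits a (p_hist, c_hist, textbox_value) triple, matching the three Gradio
# outputs wired up below, so both chat panes stream in place while the input stays cleared.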
# 4. Interface Build (With Scrollable Examples)
custom_css = """
body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
footer { display: none !important; }
#scrollable-box { max-height: 160px; overflow-y: auto; border: 1px solid #333; padding: 5px; border-radius: 8px; margin-bottom: 10px; }
"""
example_prompts = [
    ["Run grid diagnostic"],
    ["Calculate the integer distribution for 50000 units across 12 nodes."],
    ["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes."],
    ["Execute a Tier-3 Distribution Audit for 8593 units across 14 nodes."],
    ["Verify receipt 0e for 60, 30, 30"],
    ["Distribute 1000000 units across 7 nodes."],
    ["Perform a hardware grid initialization and diagnostic check."],
    ["Allocate exactly 2048 units across 16 nodes for cluster balancing."],
    ["Explain the theory of relativity. Then process 999 units across 9 nodes."],
    ["Run a full system diagnostic on the logical array."],
    ["Load balance 123456789 units across 256 nodes."],
    ["Draft an email to the logistics team. Then route 400 units across 5 nodes."],
    ["Initialize grid memory matrix and verify logic gate alignment."],
    ["Evaluate node efficiency for 7777 units across 11 nodes."],
    ["Explain how standard AI struggles with deterministic mathematical verification."]
]
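# Prompts matching the interceptor patterns ("units across N nodes", "diagnostic"/"grid",
# "verify receipt ...") resolve in the local kernel; the rest go to the cloud model.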
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css) as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
    p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", type="messages", height=350)
    with gr.Row():
        msg_in = gr.Textbox(label="Message", placeholder="Test P vs NP or Logistics Distribution...", scale=8)
        submit_btn = gr.Button("Execute", scale=1, variant="primary")
    with gr.Column(elem_id="scrollable-box"):
        gr.Examples(examples=example_prompts, inputs=msg_in, label="Diagnostic Test Suite (Scroll for more)")
    c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 72B (Standard Infrastructure)", type="messages", height=350)
    msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
    submit_btn.click(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
if __name__ == "__main__":
    demo.queue().launch()