import gradio as gr
from huggingface_hub import InferenceClient
from array import array
import os
import re
import time
# Securely retrieve the token from Space secrets.
# NOTE(review): os.getenv returns None when the secret is absent; the client
# below would then call the API unauthenticated — confirm HF_TOKEN is set.
HF_TOKEN = os.getenv("HF_TOKEN")
# Initialize the inference client (remote chat-completion backend).
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
class StateController:
    """Holds the 121-node state grid plus the base-60 checksum ROM.

    NODE_120 (index 120 of the state array) acts as a validation flag that
    validate_receipt() toggles on success/failure.
    """

    __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")

    def __init__(self):
        # 121 unsigned bytes, all zero; slot 120 is the NODE_120 flag.
        self._state = array("B", bytes(121))
        self._metric = 60
        self._batch = 10
        self._reg = {}
        self._rendered = self._build_render()
        # Pre-compute the 60x60 lookup ROM ((i*j) mod 60) for O(1) verification.
        rows = []
        for i in range(60):
            rows.append(tuple((i * j) % 60 for j in range(60)))
        self._rom60 = tuple(rows)
        # 60-character alphanumeric array for cross-platform rendering.
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"

    def _build_render(self) -> str:
        """Construct the fixed 121-point visualization once during initialization."""
        cells = []
        for idx in range(121):
            if idx == 120:
                cells.append(" [NODE_120] ")
            elif idx % 10 == 0:
                cells.append("<")
            else:
                cells.append(".")
        return "".join(cells)

    def diagnostic(self) -> str:
        """Execute diagnostic sequence and return the formatted status report."""
        # Seed the first 51 slots with a repeating 0..9 pattern.
        for slot in range(51):
            self._state[slot] = slot % self._batch
        self._reg.clear()
        self._reg["STATUS"] = "RESOLVED"
        report = (
            "Diagnostic sequence initiated.\n\n"
            "Grid initialized: 5 active blocks.\n\n"
            "Rendering 121-point array:\n"
            f"{self._rendered}\n\n"
            "Executing state resolution:\n"
            "System resolved. State array reset to zero."
        )
        return report

    def generate_receipt(self, a: int, b: int, c: int) -> str:
        """Calculates the base-60 checksum for any 3-node allocation."""
        product_mod = self._rom60[a % 60][b % 60]
        index = (product_mod ^ (c % 60)) % 60
        return "0" + self._symbols[index]

    def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
        """Routes through NODE_120 to verify the checksum mathematically."""
        expected_receipt = self.generate_receipt(a, b, c)
        if receipt != expected_receipt:
            self._state[120] = 0  # Deactivate NODE_120
            return f"[NODE_120: INACTIVE] Verification Failed. Expected receipt {expected_receipt}, received {receipt}."
        self._state[120] = 1  # Activate NODE_120
        return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches allocation ({a}, {b}, {c})."
# Global singleton instance for resource reuse — shared by every request this
# process handles (mutable state; no locking is performed anywhere in the class).
controller = StateController()
# System prompt prepended to every inference request.
_SYSTEM_PROMPT = (
    "You are a logic-focused inference engine. "
    "Utilize strict state-hold memory and parallel integer blocks. "
    "Provide direct, technical, and accurate responses."
)
SYSTEM_MSG = {"role": "system", "content": _SYSTEM_PROMPT}
def generate_response(message: str, history: list):
    """Chat entry point: serve local deterministic commands, else stream the model.

    Two commands bypass the inference API entirely:
      - "run grid diagnostic"            -> StateController.diagnostic()
      - "verify receipt XX for a, b, c"  -> StateController.validate_receipt()

    Yields partial strings (Gradio streaming); the final yield carries a
    telemetry footer.
    """
    start_time = time.perf_counter()
    normalized = message.strip()

    # Hardware diagnostic intercept (command is case-insensitive).
    if normalized.lower() == "run grid diagnostic":
        output = controller.diagnostic()
        elapsed_time = time.perf_counter() - start_time
        yield f"{output}\n\n---\n*Telemetry: Compute Time {elapsed_time:.4f}s | Source: Local Engine*"
        return

    # Deterministic checksum intercept (bypasses AI completely).
    # BUG FIX: match against the original-case message. The old code matched
    # the lowercased text, which mangled the receipt code itself, so receipts
    # containing uppercase symbols (checksum indices 0-25) could never
    # validate. re.IGNORECASE keeps the command keywords case-insensitive.
    verify_match = re.match(
        r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)",
        normalized,
        re.IGNORECASE,
    )
    if verify_match:
        receipt = verify_match.group(1)
        a = int(verify_match.group(2))
        b = int(verify_match.group(3))
        c = int(verify_match.group(4))
        output = controller.validate_receipt(receipt, a, b, c)
        elapsed_time = time.perf_counter() - start_time
        yield f"{output}\n\n---\n*Telemetry: Compute Time {elapsed_time:.6f}s | Source: Local ROM Math*"
        return

    # Build the message list for inference.
    # NOTE(review): assumes tuple-style history [(user, assistant), ...];
    # confirm the ChatInterface is not using the "messages" format.
    messages = [SYSTEM_MSG]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})
    try:
        # Stream text generation chunk-by-chunk from the Inference API.
        stream = client.chat_completion(
            messages,
            max_tokens=1024,
            stream=True,
            temperature=0.15,
        )
        partial_response = ""
        for chunk in stream:
            token = chunk.choices[0].delta.content or ""
            partial_response += token
            yield partial_response
        # Post-processing telemetry after the stream completes.
        elapsed_time = time.perf_counter() - start_time
        word_count = len(partial_response.split())
        est_speed = word_count / elapsed_time if elapsed_time > 0 else 0
        # Append telemetry footer to the final response.
        yield partial_response + (
            f"\n\n---\n*Telemetry: Compute Time {elapsed_time:.2f}s | "
            f"Est. Speed: {est_speed:.2f} words/sec | Source: Inference API*"
        )
    except Exception as exc:
        # Surface API/auth failures in the chat UI instead of crashing the worker.
        yield f"System Error: {str(exc)}. Verify API token and permissions."
# Dark-theme CSS overrides for the Gradio UI (hides the footer, recolors the
# page background and chat bubbles).
custom_css = """
body, .gradio-container { background-color: #0b0f19 !important; }
footer {display: none !important}
.message.user { background-color: #1e293b !important; border: 1px solid #3b82f6 !important; }
.message.bot { background-color: #0f172a !important; color: #60a5fa !important; }
"""
# BUG FIX: `theme` and `css` are gr.Blocks() constructor arguments;
# Blocks.launch() does not accept them, so passing them to launch() raised a
# TypeError at startup (the Space's "Runtime error"). Moved them here.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), css=custom_css) as demo:
    gr.Markdown("# Glyph.io Logic Interface")
    gr.ChatInterface(
        fn=generate_response,
        description="Inference layer utilizing state-hold logic and deterministic ROM verification.",
        examples=[
            "Run grid diagnostic",
            "Calculate the integer distribution for 120 units across 3 nodes.",
            "Verify receipt 0e for 60, 30, 30",
        ],
        cache_examples=False,
    )

if __name__ == "__main__":
    # queue() enables generator streaming for the chat handler.
    demo.queue().launch(ssr_mode=False)