Spaces: Running (gradio 6.0)
app.py (CHANGED)
@@ -1,140 +1,166 @@

Removed in this commit (the previous revision had 140 lines; deleted lines were collapsed in the diff view and are reproduced only where recoverable, truncated fragments left as-is):

-MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
-client = InferenceClient(MODEL_ID, token=HF_TOKEN)
-__slots__ = ("_state", "
-res_text += (chunk.choices[0].delta.content or "")
-p_hist[-1]["content"] = res_text
-yield p_hist, c_hist, ""
-p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-start_time)} | Source: AUGMENTED CLOUD*"
-yield p_hist, c_hist, ""
-except Exception as e:
-p_hist[-1]["content"] = f"Primary Error: {str(e)}"
-yield p_hist, c_hist, ""
-comp_start = time.perf_counter()
-c_hist[-1]["content"] = "*Routing...*"
-yield p_hist, c_hist, ""
-max_tokens=512, stream=True, temperature=0.7
-)
-# 4. Interface Build (With True Scrollable CSS)
-body, .gradio-container { background-color: #110c08 !important;
-footer {
-example_prompts = [
-    ["Run grid diagnostic"],
-    ["Calculate the integer distribution for 50000 units across 12 nodes."],
-    ["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes."],
-    ["Execute a Tier-3 Distribution Audit for 8593 units across 14 nodes."],
-    ["Distribute 1000000 units across 7 nodes."]
-]
-gr.Examples(
-    examples=example_prompts,
-    inputs=msg_in,
-    elem_classes="scrollable-examples",
-    label="Diagnostic Test Suite"
-)
-c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)
-submit_btn.click(generate_responses, [

New revision of app.py (166 lines; "+" marks lines added in this commit):

 import gradio as gr
 from huggingface_hub import InferenceClient
 from array import array
+import os
+import re
+import time
 
+# Securely retrieve the token from your Space's secrets
 HF_TOKEN = os.getenv("HF_TOKEN")
 
+# Initialize BOTH engines with the exact same base model
+client_primary = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
+client_competitor = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
+
 class StateController:
+    __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")
+
     def __init__(self):
         self._state = array("B", [0]) * 121
+        self._metric = 60
+        self._batch = 10
+        self._reg = {}
+        self._rendered = self._build_render()
+
+        self._rom60 = tuple(
+            tuple((i * j) % 60 for j in range(60))
+            for i in range(60)
+        )
+        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
+
+    def _build_render(self) -> str:
+        return "".join(
+            " [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".")
+            for i in range(121)
+        )
+
+    def diagnostic(self) -> str:
+        for i in range(51):
+            self._state[i] = i % self._batch
+        self._reg.clear()
+        self._reg["STATUS"] = "RESOLVED"
+        return (
+            "Diagnostic sequence initiated.\n\n"
+            "Grid initialized: 5 active blocks.\n\n"
+            "Rendering 121-point array:\n"
+            f"{self._rendered}\n\n"
+            "Executing state resolution:\n"
+            "System resolved. State array reset to zero."
+        )
+
+    def generate_receipt(self, a: int, b: int, c: int) -> str:
+        rom_val = self._rom60[a % 60][b % 60]
+        checksum_index = (rom_val ^ (c % 60)) % 60
+        return f"0{self._symbols[checksum_index]}"
+
+    def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
+        expected_receipt = self.generate_receipt(a, b, c)
+        if receipt == expected_receipt:
+            self._state[120] = 1
+            return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches allocation ({a}, {b}, {c})."
+        else:
+            self._state[120] = 0
+            return f"[NODE_120: INACTIVE] Verification Failed. Expected receipt {expected_receipt}, received {receipt}."
 
 controller = StateController()
 
+PRIMARY_SYSTEM_MSG = {"role": "system", "content": "You are a logic-focused inference engine. Utilize strict state-hold memory and parallel integer blocks."}
+COMPETITOR_SYSTEM_MSG = {"role": "system", "content": "You are a standard helpful AI assistant."}
+
+def generate_responses(user_message: str, primary_history: list, competitor_history: list):
+    clean_message = user_message.strip()
+    if not clean_message:
+        yield primary_history, competitor_history, ""
+        return
+
+    # Update histories with new dictionary format for Gradio 6
+    primary_history.append({"role": "user", "content": clean_message})
+    primary_history.append({"role": "assistant", "content": ""})
+    competitor_history.append({"role": "user", "content": clean_message})
+    competitor_history.append({"role": "assistant", "content": ""})
+    yield primary_history, competitor_history, ""
 
     start_time = time.perf_counter()
 
+    # Hardware diagnostic intercept
+    if clean_message.lower() == "run grid diagnostic":
+        output = controller.diagnostic()
+        elapsed_time = time.perf_counter() - start_time
+        primary_history[-1]["content"] = f"{output}\n\n---\n*Telemetry: {elapsed_time:.4f}s | Source: Local Engine*"
+        competitor_history[-1]["content"] = "Hardware diagnostics not supported by generic models."
+        yield primary_history, competitor_history, ""
+        return
+
+    # Deterministic Checksum Intercept
+    verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", clean_message, re.IGNORECASE)
+    if verify_match:
+        receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
+        output = controller.validate_receipt(receipt, a, b, c)
+        elapsed_time = time.perf_counter() - start_time
+        primary_history[-1]["content"] = f"{output}\n\n---\n*Telemetry: {elapsed_time:.6f}s | Source: Local ROM Math*"
+        competitor_history[-1]["content"] = "Deterministic verification not supported by standard LLMs."
+        yield primary_history, competitor_history, ""
+        return
+
+    # STREAM 1: Primary Engine
+    try:
+        msgs = [PRIMARY_SYSTEM_MSG] + primary_history[:-1]
+        primary_response = ""
+        stream = client_primary.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.15)
+        for chunk in stream:
+            primary_response += (chunk.choices[0].delta.content or "")
+            primary_history[-1]["content"] = primary_response
+            yield primary_history, competitor_history, ""
 
+        primary_time = time.perf_counter() - start_time
+        primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {primary_time:.2f}s | Source: Augmented Kernel*"
+        yield primary_history, competitor_history, ""
+    except Exception as e:
+        primary_history[-1]["content"] = f"Error: {str(e)}"
+        yield primary_history, competitor_history, ""
+
+    # STREAM 2: Competitor Engine
+    competitor_start = time.perf_counter()
+    competitor_history[-1]["content"] = "*Connecting to vanilla infrastructure...*"
+    yield primary_history, competitor_history, ""
 
     try:
+        msgs = [COMPETITOR_SYSTEM_MSG] + competitor_history[:-1]
+        competitor_response = ""
+        stream = client_competitor.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.7)
         for chunk in stream:
+            competitor_response += (chunk.choices[0].delta.content or "")
+            competitor_history[-1]["content"] = competitor_response
+            yield primary_history, competitor_history, ""
+
+        competitor_time = time.perf_counter() - competitor_start
+        competitor_history[-1]["content"] += f"\n\n---\n*Telemetry: {competitor_time:.2f}s | Source: Vanilla Qwen*"
+        yield primary_history, competitor_history, ""
     except Exception as e:
+        competitor_history[-1]["content"] = f"Error: {str(e)}"
+        yield primary_history, competitor_history, ""
 
 custom_css = """
+body, .gradio-container { background-color: #110c08 !important; }
+footer {display: none !important}
+.message-row { gap: 10px !important; }
 """
 
 with gr.Blocks() as demo:
     gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
+
+    primary_chat = gr.Chatbot(label="Augmented Logic Kernel", height=320, type="messages")
 
     with gr.Row():
+        msg_input = gr.Textbox(label="Message", placeholder="Enter logic task...", scale=8)
         submit_btn = gr.Button("Execute", scale=1, variant="primary")
+
+    gr.Examples(examples=["Calculate the integer distribution for 120 units across 3 nodes.", "Run grid diagnostic"], inputs=msg_input)
 
+    competitor_chat = gr.Chatbot(label="Vanilla Qwen 2.5", height=320, type="messages")
 
+    msg_input.submit(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])
+    submit_btn.click(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])
 
 if __name__ == "__main__":
     demo.queue().launch(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css)
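
For reference, the deterministic checksum path added in this commit can be exercised without the UI. The sketch below is an illustration, not part of the commit; the allocation triple (120, 3, 40) is arbitrary, and it assumes the file above is saved as app.py with gradio and huggingface_hub installed (importing app builds the Blocks UI but does not launch it, thanks to the __name__ guard).

# Minimal sketch: drive the checksum round trip from a Python shell.
from app import controller

a, b, c = 120, 3, 40                               # arbitrary allocation triple for illustration
receipt = controller.generate_receipt(a, b, c)     # two characters: "0" plus one symbol from _symbols
print(receipt)

# A chat message in this shape is caught by the "verify receipt" regex in
# generate_responses and answered locally instead of being sent to the model:
print(f"verify receipt {receipt} for {a}, {b}, {c}")

# Validating with the matching triple sets state index 120 and reports NODE_120 as ACTIVE.
print(controller.validate_receipt(receipt, a, b, c))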