spanofzero committed on
Commit
ce44f5d
·
verified ·
1 Parent(s): 9166735

gradio 6.0

Browse files
Files changed (1) hide show
  1. app.py +132 -106
app.py CHANGED
@@ -1,140 +1,166 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from array import array
4
- from functools import lru_cache
5
- import os, re, time
 
6
 
7
- # 1. API Configuration
8
# API configuration: the token comes from the environment (HF Space secret),
# so it is never hard-coded in the repo. May be None when unset.
HF_TOKEN = os.getenv("HF_TOKEN")
# Single shared inference client; both response streams in this version
# go through the same endpoint.
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
client = InferenceClient(MODEL_ID, token=HF_TOKEN)
11
 
12
- # 2. T3 High-Speed Logic Kernel
 
 
 
13
class StateController:
    """Local deterministic kernel used to answer diagnostic/distribution
    prompts without calling the LLM backend.

    Holds a fixed 121-cell byte state array, a precomputed mod-60
    multiplication table, a symbol alphabet, and a pre-rendered ASCII grid.
    """

    __slots__ = ("_state", "_rom60", "_symbols", "_rendered")

    def __init__(self):
        # 121 unsigned-byte cells, all zero.
        self._state = array("B", [0]) * 121
        # 60x60 multiplication table mod 60, computed once at start-up.
        self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
        # Static ASCII rendering of the 121-point grid.
        self._rendered = "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))

    # BUGFIX: this was previously an @lru_cache-decorated *instance* method.
    # That keys the cache on `self` and keeps every instance alive for the
    # cache's lifetime (ruff B019). The computation never touches instance
    # state, so cache it as a staticmethod instead; calls through an
    # instance (controller.compute_distribution(...)) still work unchanged.
    @staticmethod
    @lru_cache(maxsize=128)
    def compute_distribution(total, nodes) -> str:
        """Evenly split `total` units across `nodes` nodes.

        The remainder is distributed one extra unit at a time to the
        lowest-numbered nodes. Returns a formatted report string, or an
        error string when `nodes` is not positive.
        """
        if nodes <= 0:
            return "Error: Node count must be positive."
        base, rem = divmod(total, nodes)
        header = f"T3 Logic Kernel resolved {total} units across {nodes} nodes:\n\n"
        # str.join instead of quadratic `+=` concatenation in a loop.
        lines = [f"NODE_{i+1:02}: {base + (1 if i < rem else 0)} units\n" for i in range(nodes)]
        return header + "".join(lines)

    def get_glyphs(self) -> str:
        """Return the pre-rendered 121-point grid with a status footer."""
        return f"Rendering 121-point state array:\n\n{self._rendered}\n\nSystem State: RESOLVED"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
# Module-level singleton: one kernel instance serves every request.
controller = StateController()
34
 
35
def format_telemetry(seconds: float) -> str:
    """Render an elapsed duration with an appropriate unit (μs, ms, or s)."""
    # Guard clauses from smallest to largest magnitude.
    if seconds < 0.001:
        return f"{seconds * 1_000_000:.2f} \u03BCs"
    if seconds < 1:
        return f"{seconds * 1_000:.2f} ms"
    return f"{seconds:.2f} s"
38
-
39
- # 3. Core Response Logic
40
def generate_responses(user_message, p_hist, c_hist):
    """Stream paired replies for both chat panes.

    Yields (p_hist, c_hist, textbox_value) tuples so Gradio can update the
    primary chatbot, the competitor chatbot, and clear the input box.
    """
    msg = user_message.strip()
    # Empty input: echo current state (defaulting None to []) and stop.
    if not msg: yield p_hist or [], c_hist or [], ""; return

    # Guard against Gradio passing None for a never-touched Chatbot.
    p_hist, c_hist = p_hist or [], c_hist or []
    # Append the user turn plus an empty assistant placeholder to both panes.
    p_hist.append({"role": "user", "content": msg})
    p_hist.append({"role": "assistant", "content": ""})
    c_hist.append({"role": "user", "content": msg})
    c_hist.append({"role": "assistant", "content": ""})
    yield p_hist, c_hist, ""

    start_time = time.perf_counter()

    # --- LOCAL INTERCEPTORS ---
    # "<N> units across <M> nodes" triggers the local distribution kernel;
    # "diagnostic"/"grid" anywhere in the message triggers the glyph render.
    dist_match = re.search(r"(?P<units>\d{1,9})\s+units\s+across\s+(?P<nodes>\d{1,4})\s+nodes", msg, re.I)
    diag_match = any(kw in msg.lower() for kw in ["diagnostic", "grid"])

    if dist_match or diag_match:
        if dist_match:
            res = controller.compute_distribution(int(dist_match.group("units")), int(dist_match.group("nodes")))
        else:
            res = controller.get_glyphs()

        elapsed = time.perf_counter() - start_time
        p_hist[-1]["content"] = f"{res}\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: LOCAL T3 KERNEL*"
        yield p_hist, c_hist, ""
    else:
        # Primary engine: stream tokens into the last assistant message.
        try:
            res_text = ""
            stream = client.chat_completion(
                messages=[{"role":"system","content":"Logic Engine"}] + p_hist[:-1],
                max_tokens=512, stream=True, temperature=0.1
            )
            for chunk in stream:
                res_text += (chunk.choices[0].delta.content or "")
                p_hist[-1]["content"] = res_text
                yield p_hist, c_hist, ""
            p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-start_time)} | Source: AUGMENTED CLOUD*"
            yield p_hist, c_hist, ""
        except Exception as e:
            p_hist[-1]["content"] = f"Primary Error: {str(e)}"
            yield p_hist, c_hist, ""

    # NOTE(review): the competitor stream below runs on BOTH paths —
    # including after a local intercept — so the right pane always gets a
    # cloud answer for comparison. Confirm this is intended.
    comp_start = time.perf_counter()
    c_hist[-1]["content"] = "*Routing...*"
    yield p_hist, c_hist, ""

    try:
        res_text = ""
        stream = client.chat_completion(
            messages=[{"role":"system","content":"Standard AI"}] + c_hist[:-1],
            max_tokens=512, stream=True, temperature=0.7
        )
        for chunk in stream:
            res_text += (chunk.choices[0].delta.content or "")
            c_hist[-1]["content"] = res_text
            yield p_hist, c_hist, ""
        c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-comp_start)} | Source: VANILLA CLOUD*"
        yield p_hist, c_hist, ""
    except Exception as e:
        c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
        yield p_hist, c_hist, ""
102
 
103
- # 4. Interface Build (With True Scrollable CSS)
104
# Dark theme, hidden footer, and a scrollable examples panel (applied via
# the elem_classes on gr.Examples below).
custom_css = """
body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
footer { display: none !important; }
.scrollable-examples { max-height: 140px; overflow-y: auto; }
"""

# Canned prompts exercising both local intercepts and cloud routing.
example_prompts = [
    ["Run grid diagnostic"],
    ["Calculate the integer distribution for 50000 units across 12 nodes."],
    ["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes."],
    ["Execute a Tier-3 Distribution Audit for 8593 units across 14 nodes."],
    ["Distribute 1000000 units across 7 nodes."]
]
117
-
118
# Two-pane benchmark UI: augmented engine on top, vanilla engine below.
with gr.Blocks() as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
    p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350)

    with gr.Row():
        msg_in = gr.Textbox(label="Message", placeholder="Test P vs NP or Logistics Distribution...", scale=8)
        submit_btn = gr.Button("Execute", scale=1, variant="primary")

    # Removed pagination, added physical scroll via CSS class
    gr.Examples(
        examples=example_prompts,
        inputs=msg_in,
        elem_classes="scrollable-examples",
        label="Diagnostic Test Suite"
    )

    c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)

    # Enter key and the button both invoke the same streaming generator.
    msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
    submit_btn.click(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])

if __name__ == "__main__":
    # NOTE(review): `theme` and `css` look like gr.Blocks() constructor
    # arguments, not launch() arguments — verify against the installed
    # Gradio version; launch() may reject these kwargs.
    demo.queue().launch(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from array import array
4
+ import os
5
+ import re
6
+ import time
7
 
8
# Securely retrieve the token from your Space's secrets (may be None when unset)
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize BOTH engines with the exact same base model, so any observed
# difference between the two panes comes from prompting/intercepts alone.
client_primary = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
client_competitor = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
14
+
15
class StateController:
    """Deterministic local 'hardware' kernel.

    Holds a 121-cell byte state array (cell 120 doubles as the checksum
    flag), a precomputed mod-60 multiplication ROM, and a symbol alphabet
    used to mint/verify two-character receipts — so diagnostic and checksum
    prompts can be answered locally instead of being routed to the LLM.
    """

    __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")

    def __init__(self):
        # 121 unsigned-byte cells, all zero; cell 120 is the receipt flag.
        self._state = array("B", [0]) * 121
        # Config constants. _batch drives diagnostic(); _metric is currently
        # unused by the ROM below (which hard-codes 60) — kept for interface
        # stability.
        self._metric = 60
        self._batch = 10
        self._reg = {}
        self._rendered = self._build_render()

        # 60x60 multiplication table mod 60, computed once at start-up.
        self._rom60 = tuple(
            tuple((i * j) % 60 for j in range(60))
            for i in range(60)
        )
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"

    def _build_render(self) -> str:
        """Static ASCII rendering of the 121-point grid."""
        return "".join(
            " [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".")
            for i in range(121)
        )

    def diagnostic(self) -> str:
        """Run the grid self-test and return a human-readable report."""
        for i in range(51):
            self._state[i] = i % self._batch
        self._reg.clear()
        self._reg["STATUS"] = "RESOLVED"
        # BUGFIX: the report below claims "State array reset to zero", but
        # the cells written above were previously left dirty. Zero exactly
        # the cells this routine touched (cell 120 belongs to
        # validate_receipt and is deliberately left alone).
        for i in range(51):
            self._state[i] = 0
        return (
            "Diagnostic sequence initiated.\n\n"
            "Grid initialized: 5 active blocks.\n\n"
            "Rendering 121-point array:\n"
            f"{self._rendered}\n\n"
            "Executing state resolution:\n"
            "System resolved. State array reset to zero."
        )

    def generate_receipt(self, a: int, b: int, c: int) -> str:
        """Mint the deterministic two-character receipt for allocation (a, b, c)."""
        rom_val = self._rom60[a % 60][b % 60]
        checksum_index = (rom_val ^ (c % 60)) % 60
        return f"0{self._symbols[checksum_index]}"

    def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
        """Check `receipt` against the expected checksum.

        Side effect: sets state cell 120 to 1 on success, 0 on failure.
        """
        expected_receipt = self.generate_receipt(a, b, c)
        if receipt == expected_receipt:
            self._state[120] = 1
            return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches allocation ({a}, {b}, {c})."
        else:
            self._state[120] = 0
            return f"[NODE_120: INACTIVE] Verification Failed. Expected receipt {expected_receipt}, received {receipt}."
64
 
65
# Module-level singleton: one kernel instance serves every request.
controller = StateController()

# System prompts for the two competing engines (same base model, different
# framing).
PRIMARY_SYSTEM_MSG = {
    "role": "system",
    "content": "You are a logic-focused inference engine. Utilize strict state-hold memory and parallel integer blocks.",
}
COMPETITOR_SYSTEM_MSG = {
    "role": "system",
    "content": "You are a standard helpful AI assistant.",
}
69
+
70
def generate_responses(user_message: str, primary_history: list, competitor_history: list):
    """Stream paired replies for the benchmark UI.

    Yields `(primary_history, competitor_history, textbox_value)` tuples so
    Gradio can update both chatbots and clear the input box. Deterministic
    "run grid diagnostic" / "verify receipt ..." prompts are intercepted and
    answered locally by `controller`; everything else is streamed from the
    two InferenceClients in sequence (primary first, then competitor).
    """
    # BUGFIX: the previous revision guarded against None histories with
    # `or []`; that guard was dropped here. Gradio can pass None for a
    # never-touched Chatbot, which would crash .append below — restore it.
    primary_history = primary_history or []
    competitor_history = competitor_history or []

    clean_message = user_message.strip()
    if not clean_message:
        yield primary_history, competitor_history, ""
        return

    # Update histories with new dictionary format for Gradio 6
    primary_history.append({"role": "user", "content": clean_message})
    primary_history.append({"role": "assistant", "content": ""})
    competitor_history.append({"role": "user", "content": clean_message})
    competitor_history.append({"role": "assistant", "content": ""})
    yield primary_history, competitor_history, ""

    start_time = time.perf_counter()

    # Hardware diagnostic intercept: exact-match command answered locally.
    if clean_message.lower() == "run grid diagnostic":
        output = controller.diagnostic()
        elapsed_time = time.perf_counter() - start_time
        primary_history[-1]["content"] = f"{output}\n\n---\n*Telemetry: {elapsed_time:.4f}s | Source: Local Engine*"
        competitor_history[-1]["content"] = "Hardware diagnostics not supported by generic models."
        yield primary_history, competitor_history, ""
        return

    # Deterministic checksum intercept: "verify receipt XY for a, b, c".
    verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", clean_message, re.IGNORECASE)
    if verify_match:
        receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
        output = controller.validate_receipt(receipt, a, b, c)
        elapsed_time = time.perf_counter() - start_time
        primary_history[-1]["content"] = f"{output}\n\n---\n*Telemetry: {elapsed_time:.6f}s | Source: Local ROM Math*"
        competitor_history[-1]["content"] = "Deterministic verification not supported by standard LLMs."
        yield primary_history, competitor_history, ""
        return

    # STREAM 1: Primary Engine — tokens accumulate in the last assistant turn.
    try:
        msgs = [PRIMARY_SYSTEM_MSG] + primary_history[:-1]
        primary_response = ""
        stream = client_primary.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.15)
        for chunk in stream:
            primary_response += (chunk.choices[0].delta.content or "")
            primary_history[-1]["content"] = primary_response
            yield primary_history, competitor_history, ""

        primary_time = time.perf_counter() - start_time
        primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {primary_time:.2f}s | Source: Augmented Kernel*"
        yield primary_history, competitor_history, ""
    except Exception as e:
        primary_history[-1]["content"] = f"Error: {str(e)}"
        yield primary_history, competitor_history, ""

    # STREAM 2: Competitor Engine — runs after the primary stream completes.
    competitor_start = time.perf_counter()
    competitor_history[-1]["content"] = "*Connecting to vanilla infrastructure...*"
    yield primary_history, competitor_history, ""

    try:
        msgs = [COMPETITOR_SYSTEM_MSG] + competitor_history[:-1]
        competitor_response = ""
        stream = client_competitor.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.7)
        for chunk in stream:
            competitor_response += (chunk.choices[0].delta.content or "")
            competitor_history[-1]["content"] = competitor_response
            yield primary_history, competitor_history, ""

        competitor_time = time.perf_counter() - competitor_start
        competitor_history[-1]["content"] += f"\n\n---\n*Telemetry: {competitor_time:.2f}s | Source: Vanilla Qwen*"
        yield primary_history, competitor_history, ""
    except Exception as e:
        competitor_history[-1]["content"] = f"Error: {str(e)}"
        yield primary_history, competitor_history, ""
142
 
 
143
# Dark theme, hidden footer, and spacing between chat message rows.
custom_css = """
body, .gradio-container { background-color: #110c08 !important; }
footer {display: none !important}
.message-row { gap: 10px !important; }
"""
148
 
 
 
 
 
 
 
 
 
149
# Build the two-pane benchmark UI.
# BUGFIX: `theme` and `css` are gr.Blocks() constructor arguments, not
# launch() arguments — launch() does not accept them, so the custom CSS and
# theme were never applied. Moved to the constructor.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css) as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")

    # Top pane: the locally-augmented engine (messages-format history).
    primary_chat = gr.Chatbot(label="Augmented Logic Kernel", height=320, type="messages")

    with gr.Row():
        msg_input = gr.Textbox(label="Message", placeholder="Enter logic task...", scale=8)
        submit_btn = gr.Button("Execute", scale=1, variant="primary")

    gr.Examples(examples=["Calculate the integer distribution for 120 units across 3 nodes.", "Run grid diagnostic"], inputs=msg_input)

    # Bottom pane: the un-augmented baseline model for comparison.
    competitor_chat = gr.Chatbot(label="Vanilla Qwen 2.5", height=320, type="messages")

    # Enter key and the button both invoke the same streaming generator.
    msg_input.submit(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])
    submit_btn.click(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])

if __name__ == "__main__":
    demo.queue().launch()