spanofzero committed on
Commit
e559eba
·
verified ·
1 Parent(s): 5f8596a

Milliseconds vs seconds

Browse files
Files changed (1) hide show
  1. app.py +28 -33
app.py CHANGED
@@ -12,14 +12,13 @@ MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
12
  client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
13
  client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
14
 
15
- # 2. THE RESTORED T3 KERNEL (Local Deterministic Logic)
16
  class StateController:
17
  __slots__ = ("_state", "_rom60", "_symbols", "_rendered")
18
  def __init__(self):
19
  self._state = array("B", [0]) * 121
20
  self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))
21
  self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
22
- # THE GLYPHS: Restoring the visual grid
23
  self._rendered = "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))
24
 
25
  def compute_distribution(self, total, nodes) -> str:
@@ -32,24 +31,29 @@ class StateController:
32
  def get_glyphs(self) -> str:
33
  return f"Rendering 121-point state array:\n\n{self._rendered}\n\nSystem State: RESOLVED"
34
 
35
- def generate_receipt(self, a, b, c) -> str:
36
- idx = (self._rom60[a % 60][b % 60] ^ (c % 60)) % 60
37
- return f"0{self._symbols[idx]}"
38
-
39
  def validate_receipt(self, receipt, a, b, c) -> str:
40
- expected = self.generate_receipt(a, b, c)
 
41
  if receipt == expected:
42
  return f"√ CHECKSUM VALID: Receipt {receipt} verified for allocation ({a}, {b}, {c})."
43
  return f"× CHECKSUM INVALID: Expected {expected}, received {receipt}."
44
 
45
  controller = StateController()
46
 
47
- # 3. Core Response Logic (The Local Interceptor)
 
 
 
 
 
 
 
 
 
48
  def generate_responses(user_message, p_hist, c_hist):
49
  msg = user_message.strip()
50
- if not msg: yield p_hist, c_hist, ""; return
51
 
52
- # Correct Gradio 6.5.1 History initialization
53
  p_hist = p_hist or []
54
  c_hist = c_hist or []
55
  p_hist.append({"role": "user", "content": msg})
@@ -60,13 +64,12 @@ def generate_responses(user_message, p_hist, c_hist):
60
 
61
  start_time = time.perf_counter()
62
 
63
- # --- THE T3 INTERCEPTORS (100x SPEED BOOST) ---
64
  dist_match = re.search(r"(\d+)\s+units\s+across\s+(\d+)\s+nodes", msg, re.IGNORECASE)
65
  diag_match = "diagnostic" in msg.lower() or "grid" in msg.lower()
66
  rcpt_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
67
 
68
  if dist_match or diag_match or rcpt_match:
69
- # LOCAL CPU EXECUTION - Bypasses Cloud API
70
  if dist_match:
71
  res = controller.compute_distribution(int(dist_match.group(1)), int(dist_match.group(2)))
72
  elif rcpt_match:
@@ -75,10 +78,10 @@ def generate_responses(user_message, p_hist, c_hist):
75
  res = controller.get_glyphs()
76
 
77
  elapsed = time.perf_counter() - start_time
78
- p_hist[-1]["content"] = f"{res}\n\n---\n*Telemetry: {elapsed:.6f}s | Source: LOCAL T3 KERNEL*"
79
  yield p_hist, c_hist, ""
80
  else:
81
- # Cloud API Fallback for non-math text
82
  try:
83
  stream = client_primary.chat_completion(messages=[{"role":"system","content":"T3 Augmented Logic Engine"}] + p_hist[:-1], max_tokens=512, stream=True, temperature=0.1)
84
  res_text = ""
@@ -86,10 +89,12 @@ def generate_responses(user_message, p_hist, c_hist):
86
  res_text += (chunk.choices[0].delta.content or "")
87
  p_hist[-1]["content"] = res_text
88
  yield p_hist, c_hist, ""
89
- p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-start_time:.2f}s | Source: AUGMENTED CLOUD*"
 
 
90
  except Exception as e:
91
  p_hist[-1]["content"] = f"Primary Error: {str(e)}"
92
- yield p_hist, c_hist, ""
93
 
94
  # --- COMPETITOR ENGINE (VANILLA CLOUD) ---
95
  comp_start = time.perf_counter()
@@ -103,37 +108,27 @@ def generate_responses(user_message, p_hist, c_hist):
103
  res_text += (chunk.choices[0].delta.content or "")
104
  c_hist[-1]["content"] = res_text
105
  yield p_hist, c_hist, ""
106
- c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-comp_start:.2f}s | Source: VANILLA CLOUD*"
 
 
107
  except Exception as e:
108
  c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
109
-
110
- yield p_hist, c_hist, ""
111
 
112
- # 4. Interface Styling (Amber/Orange)
113
- custom_css = """
114
- body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
115
- footer { display: none !important; }
116
- .message-row { gap: 15px !important; }
117
- """
118
 
119
  with gr.Blocks() as demo:
120
  gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
121
-
122
- # Restoring the Augmented Output window
123
  p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350)
124
-
125
  with gr.Row():
126
- msg_in = gr.Textbox(label="Message", placeholder="Test math, distribution, or receipt validation...", scale=8)
127
  btn = gr.Button("Execute", scale=1, variant="primary")
128
-
129
  gr.Examples(examples=["Run grid diagnostic", "Calculate distribution for 120 units across 3 nodes.", "Verify receipt 0e for 60, 30, 30"], inputs=msg_in)
130
-
131
- # Restoring the Vanilla Output window
132
  c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)
133
 
134
  msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
135
  btn.click(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
136
 
137
  if __name__ == "__main__":
138
- # Theme and CSS passed correctly to launch() to prevent version warnings
139
  demo.queue().launch(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css)
 
12
  client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
13
  client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
14
 
15
+ # 2. THE T3 KERNEL (Local Deterministic Logic)
16
  class StateController:
17
  __slots__ = ("_state", "_rom60", "_symbols", "_rendered")
18
  def __init__(self):
19
  self._state = array("B", [0]) * 121
20
  self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))
21
  self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
 
22
  self._rendered = "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))
23
 
24
  def compute_distribution(self, total, nodes) -> str:
 
31
  def get_glyphs(self) -> str:
32
  return f"Rendering 121-point state array:\n\n{self._rendered}\n\nSystem State: RESOLVED"
33
 
 
 
 
 
34
  def validate_receipt(self, receipt, a, b, c) -> str:
35
+ idx = (self._rom60[a % 60][b % 60] ^ (c % 60)) % 60
36
+ expected = f"0{self._symbols[idx]}"
37
  if receipt == expected:
38
  return f"√ CHECKSUM VALID: Receipt {receipt} verified for allocation ({a}, {b}, {c})."
39
  return f"× CHECKSUM INVALID: Expected {expected}, received {receipt}."
40
 
41
  controller = StateController()
42
 
43
# 3. Telemetry formatter: pick an auto-scaled unit for elapsed wall time.
def format_telemetry(seconds: float) -> str:
    """Render *seconds* as a duration string with two decimal places.

    Durations under 1 ms are shown in microseconds, under 1 s in
    milliseconds, and everything else in whole seconds.
    """
    if seconds < 0.001:
        return f"{seconds * 1_000_000:.2f} \u03bcs"
    if seconds < 1:
        return f"{seconds * 1_000:.2f} ms"
    return f"{seconds:.2f} s"
51
+
52
+ # 4. Core Response Logic
53
  def generate_responses(user_message, p_hist, c_hist):
54
  msg = user_message.strip()
55
+ if not msg: yield p_hist or [], c_hist or [], ""; return
56
 
 
57
  p_hist = p_hist or []
58
  c_hist = c_hist or []
59
  p_hist.append({"role": "user", "content": msg})
 
64
 
65
  start_time = time.perf_counter()
66
 
67
+ # --- THE T3 INTERCEPTORS (Microsecond Local Path) ---
68
  dist_match = re.search(r"(\d+)\s+units\s+across\s+(\d+)\s+nodes", msg, re.IGNORECASE)
69
  diag_match = "diagnostic" in msg.lower() or "grid" in msg.lower()
70
  rcpt_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
71
 
72
  if dist_match or diag_match or rcpt_match:
 
73
  if dist_match:
74
  res = controller.compute_distribution(int(dist_match.group(1)), int(dist_match.group(2)))
75
  elif rcpt_match:
 
78
  res = controller.get_glyphs()
79
 
80
  elapsed = time.perf_counter() - start_time
81
+ p_hist[-1]["content"] = f"{res}\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: LOCAL T3 KERNEL*"
82
  yield p_hist, c_hist, ""
83
  else:
84
+ # Cloud API Fallback
85
  try:
86
  stream = client_primary.chat_completion(messages=[{"role":"system","content":"T3 Augmented Logic Engine"}] + p_hist[:-1], max_tokens=512, stream=True, temperature=0.1)
87
  res_text = ""
 
89
  res_text += (chunk.choices[0].delta.content or "")
90
  p_hist[-1]["content"] = res_text
91
  yield p_hist, c_hist, ""
92
+ elapsed = time.perf_counter() - start_time
93
+ p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: AUGMENTED CLOUD*"
94
+ yield p_hist, c_hist, ""
95
  except Exception as e:
96
  p_hist[-1]["content"] = f"Primary Error: {str(e)}"
97
+ yield p_hist, c_hist, ""
98
 
99
  # --- COMPETITOR ENGINE (VANILLA CLOUD) ---
100
  comp_start = time.perf_counter()
 
108
  res_text += (chunk.choices[0].delta.content or "")
109
  c_hist[-1]["content"] = res_text
110
  yield p_hist, c_hist, ""
111
+ elapsed = time.perf_counter() - comp_start
112
+ c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: VANILLA CLOUD*"
113
+ yield p_hist, c_hist, ""
114
  except Exception as e:
115
  c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
116
+ yield p_hist, c_hist, ""
 
117
 
118
# 5. Interface styling: dark amber palette; hides the default Gradio footer.
custom_css = "body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; } footer { display: none !important; }"
 
 
 
 
120
 
121
# --- UI layout: two stacked Chatbot panels driven by one textbox/button. ---
with gr.Blocks() as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
    # Top panel: output of the augmented (local-interceptor) engine.
    p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350)
    with gr.Row():
        msg_in = gr.Textbox(label="Message", placeholder="Test math/logic tasks...", scale=8)
        btn = gr.Button("Execute", scale=1, variant="primary")
    gr.Examples(examples=["Run grid diagnostic", "Calculate distribution for 120 units across 3 nodes.", "Verify receipt 0e for 60, 30, 30"], inputs=msg_in)
    # Bottom panel: output of the plain cloud model, for comparison.
    c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)

    # Enter key and button both invoke the same generator; its third output
    # ("") clears the textbox after each submission.
    msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
    btn.click(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])

if __name__ == "__main__":
    # Theme and CSS are passed to launch(); queue() is enabled so the
    # streaming generator callbacks can yield incremental updates.
    demo.queue().launch(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css)