spanofzero committed
Commit addd714 · verified · 1 Parent(s): e559eba

Update app.py

Files changed (1)
  1. app.py +34 -58
app.py CHANGED
@@ -1,18 +1,15 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
+ from huggingface_hub import AsyncInferenceClient
  from array import array
- import os
- import re
- import time
+ from functools import lru_cache
+ import os, re, time, asyncio
 
- # 1. API Configuration
+ # 1. API Configuration - Switched to 1.5B for 4x faster cloud responses
  HF_TOKEN = os.getenv("HF_TOKEN")
- MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
+ MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"  # Faster, leaner, demo-optimized
+ client = AsyncInferenceClient(MODEL_ID, token=HF_TOKEN)
 
- client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
- client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
-
- # 2. THE T3 KERNEL (Local Deterministic Logic)
+ # 2. T3 High-Speed Logic Kernel (Local CPU Execution)
  class StateController:
      __slots__ = ("_state", "_rom60", "_symbols", "_rendered")
      def __init__(self):
@@ -21,7 +18,9 @@ class StateController:
          self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
          self._rendered = "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))
 
+     @lru_cache(maxsize=128)  # Memoize repeated distribution queries
      def compute_distribution(self, total, nodes) -> str:
+         if nodes <= 0: return "Error: Node count must be positive."
          base, rem = divmod(total, nodes)
          res = f"T3 Logic Kernel resolved {total} units across {nodes} nodes:\n\n"
          for i in range(nodes):
@@ -31,49 +30,32 @@ class StateController:
      def get_glyphs(self) -> str:
          return f"Rendering 121-point state array:\n\n{self._rendered}\n\nSystem State: RESOLVED"
 
-     def validate_receipt(self, receipt, a, b, c) -> str:
-         idx = (self._rom60[a % 60][b % 60] ^ (c % 60)) % 60
-         expected = f"0{self._symbols[idx]}"
-         if receipt == expected:
-             return f"√ CHECKSUM VALID: Receipt {receipt} verified for allocation ({a}, {b}, {c})."
-         return f"× CHECKSUM INVALID: Expected {expected}, received {receipt}."
-
  controller = StateController()
 
- # 3. Telemetry Formatter (The Visual Upgrade)
  def format_telemetry(seconds: float) -> str:
-     if seconds < 0.001:
-         return f"{seconds * 1_000_000:.2f} \u03BCs"  # Microseconds
-     elif seconds < 1:
-         return f"{seconds * 1_000:.2f} ms"  # Milliseconds
-     else:
-         return f"{seconds:.2f} s"  # Seconds
+     if seconds < 0.001: return f"{seconds * 1_000_000:.2f} \u03BCs"
+     return f"{seconds * 1_000:.2f} ms" if seconds < 1 else f"{seconds:.2f} s"
 
- # 4. Core Response Logic
- def generate_responses(user_message, p_hist, c_hist):
+ # 3. Hardened Dual-Response Logic (Async Optimized)
+ async def generate_responses(user_message, p_hist, c_hist):
      msg = user_message.strip()
      if not msg: yield p_hist or [], c_hist or [], ""; return
 
-     p_hist = p_hist or []
-     c_hist = c_hist or []
-     p_hist.append({"role": "user", "content": msg})
-     p_hist.append({"role": "assistant", "content": ""})
-     c_hist.append({"role": "user", "content": msg})
-     c_hist.append({"role": "assistant", "content": ""})
+     p_hist, c_hist = p_hist or [], c_hist or []
+     p_hist.append({"role": "user", "content": msg}); p_hist.append({"role": "assistant", "content": ""})
+     c_hist.append({"role": "user", "content": msg}); c_hist.append({"role": "assistant", "content": ""})
      yield p_hist, c_hist, ""
 
      start_time = time.perf_counter()
 
-     # --- THE T3 INTERCEPTORS (Microsecond Local Path) ---
-     dist_match = re.search(r"(\d+)\s+units\s+across\s+(\d+)\s+nodes", msg, re.IGNORECASE)
-     diag_match = "diagnostic" in msg.lower() or "grid" in msg.lower()
-     rcpt_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
+     # --- HARDENED LOCAL INTERCEPTORS ---
+     # Named groups and bounded digit runs keep the pattern strict
+     dist_match = re.search(r"(?P<units>\d{1,9})\s+units\s+across\s+(?P<nodes>\d{1,4})\s+nodes", msg, re.I)
+     diag_match = any(kw in msg.lower() for kw in ["diagnostic", "grid"])
 
-     if dist_match or diag_match or rcpt_match:
+     if dist_match or diag_match:
          if dist_match:
-             res = controller.compute_distribution(int(dist_match.group(1)), int(dist_match.group(2)))
-         elif rcpt_match:
-             res = controller.validate_receipt(rcpt_match.group(1), int(rcpt_match.group(2)), int(rcpt_match.group(3)), int(rcpt_match.group(4)))
+             res = controller.compute_distribution(int(dist_match.group("units")), int(dist_match.group("nodes")))
          else:
              res = controller.get_glyphs()
 
@@ -81,50 +63,44 @@ def generate_responses(user_message, p_hist, c_hist):
          p_hist[-1]["content"] = f"{res}\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: LOCAL T3 KERNEL*"
          yield p_hist, c_hist, ""
      else:
-         # Cloud API Fallback
+         # ASYNC PRIMARY STREAM
          try:
-             stream = client_primary.chat_completion(messages=[{"role":"system","content":"T3 Augmented Logic Engine"}] + p_hist[:-1], max_tokens=512, stream=True, temperature=0.1)
              res_text = ""
-             for chunk in stream:
+             async for chunk in await client.chat_completion(messages=[{"role":"system","content":"Logic Engine"}] + p_hist[:-1], max_tokens=512, stream=True, temperature=0.1):
                  res_text += (chunk.choices[0].delta.content or "")
                  p_hist[-1]["content"] = res_text
                  yield p_hist, c_hist, ""
-             elapsed = time.perf_counter() - start_time
-             p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: AUGMENTED CLOUD*"
-             yield p_hist, c_hist, ""
+             p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-start_time)} | Source: AUGMENTED CLOUD*"
          except Exception as e:
              p_hist[-1]["content"] = f"Primary Error: {str(e)}"
-             yield p_hist, c_hist, ""
+             yield p_hist, c_hist, ""
 
-         # --- COMPETITOR ENGINE (VANILLA CLOUD) ---
+         # ASYNC VANILLA STREAM
          comp_start = time.perf_counter()
-         c_hist[-1]["content"] = "*Routing through standard infrastructure...*"
+         c_hist[-1]["content"] = "*Routing...*"
         yield p_hist, c_hist, ""
 
         try:
-             stream = client_competitor.chat_completion(messages=[{"role":"system","content":"Vanilla AI"}] + c_hist[:-1], max_tokens=512, stream=True, temperature=0.7)
             res_text = ""
-             for chunk in stream:
+             async for chunk in await client.chat_completion(messages=[{"role":"system","content":"Standard AI"}] + c_hist[:-1], max_tokens=512, stream=True, temperature=0.7):
                 res_text += (chunk.choices[0].delta.content or "")
                 c_hist[-1]["content"] = res_text
                 yield p_hist, c_hist, ""
-             elapsed = time.perf_counter() - comp_start
-             c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: VANILLA CLOUD*"
-             yield p_hist, c_hist, ""
+             c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-comp_start)} | Source: VANILLA CLOUD*"
         except Exception as e:
             c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
-             yield p_hist, c_hist, ""
+             yield p_hist, c_hist, ""
 
- # 5. Interface Styling
+ # 4. Interface Styling
  custom_css = "body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; } footer { display: none !important; }"
 
  with gr.Blocks() as demo:
      gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
      p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350)
      with gr.Row():
-         msg_in = gr.Textbox(label="Message", placeholder="Test math/logic tasks...", scale=8)
+         msg_in = gr.Textbox(label="Message", placeholder="Test P vs NP or Logistics Distribution...", scale=8)
          btn = gr.Button("Execute", scale=1, variant="primary")
-     gr.Examples(examples=["Run grid diagnostic", "Calculate distribution for 120 units across 3 nodes.", "Verify receipt 0e for 60, 30, 30"], inputs=msg_in)
+     gr.Examples(examples=["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes.", "Run grid diagnostic"], inputs=msg_in)
      c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)
 
      msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
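
The headline change is collapsing the two synchronous `InferenceClient` instances into a single `AsyncInferenceClient` and turning `generate_responses` into an async generator, which Gradio accepts as an event handler. One caveat: `AsyncInferenceClient.chat_completion` is a coroutine, so with `stream=True` it must be awaited before it can be iterated, hence the `await` in the streaming loops above. A minimal sketch of that streaming path in isolation, assuming a valid `HF_TOKEN` and that the model is reachable through the Inference API:

```python
import asyncio, os
from huggingface_hub import AsyncInferenceClient

async def main():
    client = AsyncInferenceClient("Qwen/Qwen2.5-1.5B-Instruct", token=os.getenv("HF_TOKEN"))
    # chat_completion is a coroutine; awaiting it with stream=True
    # yields an async iterable of delta chunks
    stream = await client.chat_completion(
        messages=[{"role": "user", "content": "Reply in five words."}],
        max_tokens=32,
        stream=True,
    )
    async for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="", flush=True)

asyncio.run(main())
```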
 
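`@lru_cache` on a bound method keys the cache on `(self, total, nodes)`, so the singleton `controller` is also held in the cache for the life of the process; harmless here, but worth knowing before the class grows more instances. A small sketch of the memoization effect, pure stdlib; the `calls` counter is illustrative only, and the per-node allocation shown is the usual divmod split (the actual loop body falls outside the diff's context):

```python
from functools import lru_cache

class Kernel:
    calls = 0  # illustrative counter, not part of app.py

    @lru_cache(maxsize=128)
    def compute(self, total, nodes):
        Kernel.calls += 1
        base, rem = divmod(total, nodes)
        # assumed allocation: the first `rem` nodes absorb the remainder
        return [base + 1 if i < rem else base for i in range(nodes)]

k = Kernel()
print(k.compute(121, 3))  # [41, 40, 40] -> first call computes
print(k.compute(121, 3))  # identical result, served from the cache
print(Kernel.calls)       # 1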
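The hardened interceptor pattern is stricter than it looks: it requires the literal words `units across ... nodes` with plain digit runs. Note that the new bundled example prompt says "120-unit distribution", which this pattern does not match, so that example exercises the cloud stream rather than the local kernel. A quick check:

```python
import re

PATTERN = re.compile(
    r"(?P<units>\d{1,9})\s+units\s+across\s+(?P<nodes>\d{1,4})\s+nodes", re.I
)

hit = PATTERN.search("Calculate distribution for 120 units across 3 nodes.")
print(hit.group("units"), hit.group("nodes"))  # 120 3 -> local T3 kernel path

miss = PATTERN.search(
    "Define P vs. NP. Then validate a 120-unit distribution across 3 nodes."
)
print(miss)  # None -> falls through to the cloud path
```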
 
 
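Lastly, the compacted `format_telemetry` preserves the three-tier unit scaling of the old version in two lines. Its thresholds, checked in isolation:

```python
def format_telemetry(seconds: float) -> str:
    if seconds < 0.001: return f"{seconds * 1_000_000:.2f} \u03BCs"
    return f"{seconds * 1_000:.2f} ms" if seconds < 1 else f"{seconds:.2f} s"

print(format_telemetry(0.0000042))  # 4.20 μs  (local kernel scale)
print(format_telemetry(0.087))      # 87.00 ms
print(format_telemetry(1.5))        # 1.50 s   (cloud round-trip scale)
```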