spanofzero committed on
Commit
101b43d
·
verified ·
1 Parent(s): 8351828

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -37
app.py CHANGED
@@ -5,14 +5,14 @@ import os
5
  import re
6
  import time
7
 
8
- # 1. Setup
9
  HF_TOKEN = os.getenv("HF_TOKEN")
10
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
11
 
12
  client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
13
  client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
14
 
15
- # 2. Logic Controller
16
  class StateController:
17
  __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")
18
  def __init__(self):
@@ -20,13 +20,10 @@ class StateController:
20
  self._metric = 60
21
  self._batch = 10
22
  self._reg = {}
23
- self._rendered = self._build_render()
24
  self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))
25
  self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
26
 
27
- def _build_render(self) -> str:
28
- return "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))
29
-
30
  def diagnostic(self) -> str:
31
  return f"Diagnostic sequence initiated.\n\nGrid initialized.\n\n{self._rendered}\n\nSystem resolved."
32
 
@@ -38,39 +35,41 @@ class StateController:
38
  expected = self.generate_receipt(a, b, c)
39
  if receipt == expected:
40
  return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches."
41
- return f"[NODE_120: INACTIVE] Verification Failed."
42
 
43
  controller = StateController()
44
 
45
- # 3. Instructions
46
  PRIMARY_SYS = {"role": "system", "content": "You are a logic-focused inference engine. Use strict state-hold memory."}
47
  VANILLA_SYS = {"role": "system", "content": "You are a standard helpful AI assistant."}
48
 
49
- # 4. Responses (Gradio 6 Compatible)
50
  def generate_responses(user_message, primary_history, competitor_history):
51
  msg = user_message.strip()
52
  if not msg:
53
  yield primary_history, competitor_history, ""
54
  return
55
 
56
- # Gradio 6 strictly requires ChatMessage objects or dicts with 'role' and 'content'
57
- # We initialize the assistant message as empty
58
- primary_history = primary_history + [{"role": "user", "content": msg}, {"role": "assistant", "content": ""}]
59
- competitor_history = competitor_history + [{"role": "user", "content": msg}, {"role": "assistant", "content": ""}]
 
 
 
 
 
60
 
61
  yield primary_history, competitor_history, ""
62
 
63
  start_time = time.perf_counter()
64
 
65
- # Hardware Intercepts
66
  if msg.lower() == "run grid diagnostic":
67
- res = controller.diagnostic()
68
- primary_history[-1]["content"] = f"{res}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.4f}s*"
69
  competitor_history[-1]["content"] = "Hardware diagnostics not supported."
70
  yield primary_history, competitor_history, ""
71
  return
72
 
73
- # Checksum Intercept
74
  verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
75
  if verify_match:
76
  receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
@@ -80,7 +79,7 @@ def generate_responses(user_message, primary_history, competitor_history):
80
  yield primary_history, competitor_history, ""
81
  return
82
 
83
- # Stream Primary
84
  try:
85
  msgs = [PRIMARY_SYS] + primary_history[:-1]
86
  response_text = ""
@@ -89,15 +88,14 @@ def generate_responses(user_message, primary_history, competitor_history):
89
  response_text += (chunk.choices[0].delta.content or "")
90
  primary_history[-1]["content"] = response_text
91
  yield primary_history, competitor_history, ""
92
- primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-start_time:.2f}s | Augmented*"
93
  yield primary_history, competitor_history, ""
94
  except Exception as e:
95
- primary_history[-1]["content"] = f"Error: {str(e)}"
96
- yield primary_history, competitor_history, ""
97
 
98
- # Stream Competitor
99
  comp_start = time.perf_counter()
100
- competitor_history[-1]["content"] = "*Connecting...*"
101
  yield primary_history, competitor_history, ""
102
 
103
  try:
@@ -108,33 +106,35 @@ def generate_responses(user_message, primary_history, competitor_history):
108
  response_text += (chunk.choices[0].delta.content or "")
109
  competitor_history[-1]["content"] = response_text
110
  yield primary_history, competitor_history, ""
111
- competitor_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-comp_start:.2f}s | Vanilla*"
112
  yield primary_history, competitor_history, ""
113
  except Exception as e:
114
- competitor_history[-1]["content"] = f"Error: {str(e)}"
115
- yield primary_history, competitor_history, ""
 
116
 
117
- # 5. UI
118
  custom_css = """
119
  body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
120
  footer { display: none !important; }
121
  """
122
 
123
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css) as demo:
124
  gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
125
 
126
- primary_chat = gr.Chatbot(label="Augmented Logic Kernel", height=320, type="messages")
127
 
128
  with gr.Row():
129
- msg_input = gr.Textbox(label="Message", placeholder="Enter task...", scale=8)
130
- submit_btn = gr.Button("Execute", scale=1, variant="primary")
131
 
132
- gr.Examples(examples=["Calculate the integer distribution for 120 units across 3 nodes.", "Run grid diagnostic"], inputs=msg_input)
133
-
134
- competitor_chat = gr.Chatbot(label="Vanilla Qwen 2.5", height=320, type="messages")
135
 
136
- msg_input.submit(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])
137
- submit_btn.click(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])
138
 
139
  if __name__ == "__main__":
140
- demo.queue().launch()
 
 
 
 
5
  import re
6
  import time
7
 
8
# 1. API Configuration
# NOTE(review): assumes `os` and `InferenceClient` (huggingface_hub) are
# imported at the top of the file — not visible in this view; confirm.
HF_TOKEN = os.getenv("HF_TOKEN")  # Hugging Face API token from the environment; None if unset
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"

# Two clients for the same model — one per chat pane — so the "augmented"
# and "vanilla" streams can be driven independently.
client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
14
 
15
+ # 2. Logic Controller (T3 Architecture)
16
  class StateController:
17
  __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")
18
  def __init__(self):
 
20
  self._metric = 60
21
  self._batch = 10
22
  self._reg = {}
23
+ self._rendered = "".join(" [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".") for i in range(121))
24
  self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))
25
  self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
26
 
 
 
 
27
  def diagnostic(self) -> str:
28
  return f"Diagnostic sequence initiated.\n\nGrid initialized.\n\n{self._rendered}\n\nSystem resolved."
29
 
 
35
  expected = self.generate_receipt(a, b, c)
36
  if receipt == expected:
37
  return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches."
38
+ return f"[NODE_120: INACTIVE] Verification Failed. Expected {expected}."
39
 
40
  controller = StateController()
41
 
 
42
  PRIMARY_SYS = {"role": "system", "content": "You are a logic-focused inference engine. Use strict state-hold memory."}
43
  VANILLA_SYS = {"role": "system", "content": "You are a standard helpful AI assistant."}
44
 
45
+ # 3. Response Generation (Gradio 6.x Dictionary Format)
46
  def generate_responses(user_message, primary_history, competitor_history):
47
  msg = user_message.strip()
48
  if not msg:
49
  yield primary_history, competitor_history, ""
50
  return
51
 
52
+ # Ensure history is initialized
53
+ if primary_history is None: primary_history = []
54
+ if competitor_history is None: competitor_history = []
55
+
56
+ # Format: Dict-based history required for Gradio 6
57
+ primary_history.append({"role": "user", "content": msg})
58
+ primary_history.append({"role": "assistant", "content": ""})
59
+ competitor_history.append({"role": "user", "content": msg})
60
+ competitor_history.append({"role": "assistant", "content": ""})
61
 
62
  yield primary_history, competitor_history, ""
63
 
64
  start_time = time.perf_counter()
65
 
66
+ # Diagnostic & Checksum Intercepts
67
  if msg.lower() == "run grid diagnostic":
68
+ primary_history[-1]["content"] = f"{controller.diagnostic()}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.4f}s*"
 
69
  competitor_history[-1]["content"] = "Hardware diagnostics not supported."
70
  yield primary_history, competitor_history, ""
71
  return
72
 
 
73
  verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
74
  if verify_match:
75
  receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
 
79
  yield primary_history, competitor_history, ""
80
  return
81
 
82
+ # Engine 1: Augmented
83
  try:
84
  msgs = [PRIMARY_SYS] + primary_history[:-1]
85
  response_text = ""
 
88
  response_text += (chunk.choices[0].delta.content or "")
89
  primary_history[-1]["content"] = response_text
90
  yield primary_history, competitor_history, ""
91
+ primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-start_time:.2f}s | Augmented Engine*"
92
  yield primary_history, competitor_history, ""
93
  except Exception as e:
94
+ primary_history[-1]["content"] = f"Primary Error: {str(e)}"
 
95
 
96
+ # Engine 2: Vanilla
97
  comp_start = time.perf_counter()
98
+ competitor_history[-1]["content"] = "*Connecting to vanilla infrastructure...*"
99
  yield primary_history, competitor_history, ""
100
 
101
  try:
 
106
  response_text += (chunk.choices[0].delta.content or "")
107
  competitor_history[-1]["content"] = response_text
108
  yield primary_history, competitor_history, ""
109
+ competitor_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-comp_start:.2f}s | Vanilla Engine*"
110
  yield primary_history, competitor_history, ""
111
  except Exception as e:
112
+ competitor_history[-1]["content"] = f"Competitor Error: {str(e)}"
113
+
114
+ yield primary_history, competitor_history, ""
115
 
116
# 4. Interface Build
custom_css = """
body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
footer { display: none !important; }
"""

# BUG FIX: `theme` and `css` are gr.Blocks() constructor arguments, not
# Blocks.launch() arguments. Passing them to launch() raises
# `TypeError: launch() got an unexpected keyword argument 'theme'`
# and the custom CSS was never applied.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css) as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")

    # type="messages" is required: generate_responses appends
    # {"role": ..., "content": ...} dicts to the chat history.
    p_chat = gr.Chatbot(label="Augmented Logic Kernel", height=320, type="messages")

    with gr.Row():
        msg_in = gr.Textbox(label="Message", placeholder="Enter task...", scale=8)
        btn = gr.Button("Execute", scale=1, variant="primary")

    c_chat = gr.Chatbot(label="Vanilla Qwen 2.5", height=320, type="messages")

    # Both submit paths stream updates into the two chat panes and clear the textbox.
    msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
    btn.click(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])

if __name__ == "__main__":
    # queue() is required for streaming generator outputs.
    demo.queue().launch()