spanofzero committed on
Commit
0c10f52
·
verified ·
1 Parent(s): d824a95
Files changed (1) hide show
  1. app.py +56 -81
app.py CHANGED
@@ -5,120 +5,95 @@ import os
5
  import re
6
  import time
7
 
8
# Securely retrieve the token from your Space's secrets.
# NOTE(review): os.getenv returns None when HF_TOKEN is unset — the clients
# below would then run unauthenticated; confirm the Space secret is configured.
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize BOTH engines with the same model so the comparison below is
# purely prompt/temperature-driven, not model-driven.
client_primary = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
client_competitor = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
14
 
 
15
class StateController:
    """Deterministic grid state plus a small ROM-table checksum scheme."""

    __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")

    def __init__(self):
        # 121-slot byte grid, zero-initialized; slot 120 is the "NODE_120" flag.
        self._state = array("B", [0]) * 121
        self._metric = 60
        self._batch = 10
        self._reg = {}
        self._rendered = self._build_render()
        # Precomputed 60x60 modular multiplication table.
        rows = []
        for left in range(60):
            rows.append(tuple((left * right) % 60 for right in range(60)))
        self._rom60 = tuple(rows)
        # 60-symbol alphabet used to encode checksum indices 0..59.
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"

    def _build_render(self) -> str:
        """Render the 121-cell grid as one marker string (every 10th cell is '<')."""
        cells = []
        for pos in range(121):
            if pos == 120:
                cells.append(" [NODE_120] ")
            elif pos % 10 == 0:
                cells.append("<")
            else:
                cells.append(".")
        return "".join(cells)

    def diagnostic(self) -> str:
        """Reseed the first 51 grid slots, mark the registry resolved, report the grid."""
        for slot in range(51):
            self._state[slot] = slot % self._batch
        self._reg.clear()
        self._reg["STATUS"] = "RESOLVED"
        return f"Diagnostic sequence initiated.\n\nGrid initialized.\n\n{self._rendered}\n\nSystem resolved."

    def generate_receipt(self, a: int, b: int, c: int) -> str:
        """Derive a two-character receipt for (a, b, c) via the ROM table."""
        product = self._rom60[a % 60][b % 60]
        index = (product ^ (c % 60)) % 60
        return f"0{self._symbols[index]}"

    def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
        """Compare a receipt against the expected one; sets the NODE_120 state flag."""
        if receipt != self.generate_receipt(a, b, c):
            self._state[120] = 0
            return f"[NODE_120: INACTIVE] Verification Failed."
        self._state[120] = 1
        return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches."
47
 
48
# Single shared controller instance used by the logic intercepts below.
controller = StateController()


# System prompts: the "primary" engine gets a logic persona, the competitor
# gets a plain-assistant persona — same model, different instructions.
PRIMARY_SYS = {"role": "system", "content": "You are a logic-focused inference engine. Use strict state-hold memory."}
VANILLA_SYS = {"role": "system", "content": "You are a standard helpful AI assistant."}
52
 
53
- def generate_responses(message, p_history, c_history):
54
- msg = message.strip()
55
- if not msg: yield p_history, c_history, ""
56
-
57
- # Revert to standard List-of-Lists format
58
- p_history.append([msg, ""])
59
- c_history.append([msg, ""])
60
- yield p_history, c_history, ""
 
 
 
 
 
61
 
62
- start = time.perf_counter()
63
 
64
- # Logic Intercepts
65
  if msg.lower() == "run grid diagnostic":
66
- p_history[-1][1] = f"{controller.diagnostic()}\n\n---\n*Telemetry: {time.perf_counter()-start:.4f}s*"
67
- c_history[-1][1] = "Hardware diagnostics not supported."
68
- yield p_history, c_history, ""
 
69
  return
70
 
71
- # 1. Primary Engine
72
- try:
73
- msgs = [PRIMARY_SYS]
74
- for h in p_history[:-1]:
75
- msgs.extend([{"role": "user", "content": h[0]}, {"role": "assistant", "content": h[1]}])
76
- msgs.append({"role": "user", "content": msg})
77
-
78
- res = ""
79
- for chunk in client_primary.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.1):
80
- res += (chunk.choices[0].delta.content or "")
81
- p_history[-1][1] = res
82
- yield p_history, c_history, ""
83
- p_history[-1][1] += f"\n\n---\n*Telemetry: {time.perf_counter()-start:.2f}s | Augmented*"
84
- except Exception as e:
85
- p_history[-1][1] = f"Error: {str(e)}"
86
 
87
- # 2. Competitor Engine
88
- c_start = time.perf_counter()
89
- c_history[-1][1] = "*Connecting...*"
90
- yield p_history, c_history, ""
91
-
92
  try:
93
- msgs = [VANILLA_SYS]
94
- for h in c_history[:-1]:
95
- msgs.extend([{"role": "user", "content": h[0]}, {"role": "assistant", "content": h[1]}])
96
- msgs.append({"role": "user", "content": msg})
97
-
98
- res = ""
99
- for chunk in client_competitor.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.7):
100
- res += (chunk.choices[0].delta.content or "")
101
- c_history[-1][1] = res
102
- yield p_history, c_history, ""
103
- c_history[-1][1] += f"\n\n---\n*Telemetry: {time.perf_counter()-c_start:.2f}s | Vanilla*"
104
  except Exception as e:
105
- c_history[-1][1] = f"Error: {str(e)}"
106
 
107
- yield p_history, c_history, ""
108
-
109
# Dark backdrop for the benchmark UI; hide the default Gradio footer.
custom_css = "body, .gradio-container { background-color: #110c08 !important; } footer {display: none !important}"

with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css) as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
    # Top pane: the "augmented" engine's chat transcript.
    p_chat = gr.Chatbot(label="Augmented Logic Kernel", height=320)
    with gr.Row():
        txt = gr.Textbox(label="Message", placeholder="Enter task...", scale=8)
        btn = gr.Button("Execute", scale=1, variant="primary")
    gr.Examples(examples=["Calculate the integer distribution for 120 units across 3 nodes.", "Run grid diagnostic"], inputs=txt)
    # Bottom pane: the vanilla baseline's chat transcript.
    c_chat = gr.Chatbot(label="Vanilla Qwen 2.5", height=320)

    # Both Enter-submit and the button drive the same streaming generator;
    # the third output clears the textbox.
    txt.submit(generate_responses, [txt, p_chat, c_chat], [p_chat, c_chat, txt])
    btn.click(generate_responses, [txt, p_chat, c_chat], [p_chat, c_chat, txt])

if __name__ == "__main__":
    demo.queue().launch()
 
5
  import re
6
  import time
7
 
8
# 1. Configuration & Client Initialization
# NOTE(review): HF_TOKEN is None when the Space secret is unset — the clients
# would then run unauthenticated; confirm the secret is configured.
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"

# Same model for both engines; the comparison is prompt/temperature-driven.
client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
 
14
 
15
# 2. Proprietary Logic Controller
class StateController:
    """Deterministic grid state plus a ROM-table receipt/checksum scheme."""

    __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")

    def __init__(self):
        # 121-slot byte grid, zero-initialized.
        self._state = array("B", [0]) * 121
        self._metric = 60
        self._batch = 10
        self._reg = {}
        self._rendered = self._build_render()
        # Precomputed 60x60 modular multiplication table.
        rows = []
        for left in range(60):
            rows.append(tuple((left * right) % 60 for right in range(60)))
        self._rom60 = tuple(rows)
        # 60-symbol alphabet used to encode checksum indices 0..59.
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"

    def _build_render(self) -> str:
        """Render the 121-cell grid as one marker string (every 10th cell is '<')."""
        cells = []
        for pos in range(121):
            if pos == 120:
                cells.append(" [NODE_120] ")
            elif pos % 10 == 0:
                cells.append("<")
            else:
                cells.append(".")
        return "".join(cells)

    def diagnostic(self) -> str:
        """Report the pre-rendered grid; performs no state mutation."""
        return f"Diagnostic sequence initiated.\n\nGrid initialized.\n\n{self._rendered}\n\nSystem resolved."

    def generate_receipt(self, a: int, b: int, c: int) -> str:
        """Derive a two-character receipt for (a, b, c) via the ROM table."""
        product = self._rom60[a % 60][b % 60]
        checksum = (product ^ (c % 60)) % 60
        return f"0{self._symbols[checksum]}"

    def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
        """Compare a receipt against the expected value for (a, b, c)."""
        expected = self.generate_receipt(a, b, c)
        if receipt != expected:
            return f"[NODE_120: INACTIVE] Verification Failed. Expected {expected}."
        return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches allocation ({a}, {b}, {c})."
 
43
 
44
# Single shared controller instance used by the logic intercepts.
controller = StateController()

# 3. System Instructions
# Same model, different personas: logic-focused vs. plain assistant.
PRIMARY_SYS = {"role": "system", "content": "You are a logic-focused inference engine. Use strict state-hold memory."}
VANILLA_SYS = {"role": "system", "content": "You are a standard helpful AI assistant."}
49
 
50
+ # 4. Response Logic
51
+ def generate_responses(user_message, primary_history, competitor_history):
52
+ msg = user_message.strip()
53
+ if not msg:
54
+ yield primary_history, competitor_history, ""
55
+ return
56
+
57
+ # Update history with Dictionary format (Required for Gradio 5+)
58
+ primary_history.append({"role": "user", "content": msg})
59
+ primary_history.append({"role": "assistant", "content": ""})
60
+ competitor_history.append({"role": "user", "content": msg})
61
+ competitor_history.append({"role": "assistant", "content": ""})
62
+ yield primary_history, competitor_history, ""
63
 
64
+ start_time = time.perf_counter()
65
 
66
+ # Intercept: Diagnostic
67
  if msg.lower() == "run grid diagnostic":
68
+ res = controller.diagnostic()
69
+ primary_history[-1]["content"] = f"{res}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.4f}s | Local Kernel*"
70
+ competitor_history[-1]["content"] = "Hardware diagnostics not supported."
71
+ yield primary_history, competitor_history, ""
72
  return
73
 
74
+ # Intercept: Checksum Verify
75
+ verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
76
+ if verify_match:
77
+ receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
78
+ res = controller.validate_receipt(receipt, a, b, c)
79
+ primary_history[-1]["content"] = f"{res}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.6f}s | ROM Math*"
80
+ competitor_history[-1]["content"] = "Deterministic verification not supported."
81
+ yield primary_history, competitor_history, ""
82
+ return
 
 
 
 
 
 
83
 
84
+ # Engine 1: Augmented
 
 
 
 
85
  try:
86
+ msgs = [PRIMARY_SYS] + primary_history[:-1]
87
+ res_text = ""
88
+ for chunk in client_primary.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.1):
89
+ res_text += (chunk.choices[0].delta.content or "")
90
+ primary_history[-1]["content"] = res_text
91
+ yield primary_history, competitor_history, ""
92
+ primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-start_time:.2f}s | Augmented*"
 
 
 
 
93
  except Exception as e:
94
+ primary_history[-1]["content"] = f"Error: {str(e)}"
95
 
96
+ # Engine 2: Vanilla
97
+ comp_start = time.perf_counter()
98
+ competitor_history[-1]["content"] = "*Connecting...*"
99
+ yield primary_history, competitor_history, ""