spanofzero committed on
Commit
8351828
·
verified ·
1 Parent(s): 0c10f52

last chance

Browse files
Files changed (1) hide show
  1. app.py +65 -24
app.py CHANGED
@@ -5,17 +5,16 @@ import os
5
  import re
6
  import time
7
 
8
- # 1. Configuration & Client Initialization
9
  HF_TOKEN = os.getenv("HF_TOKEN")
10
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
11
 
12
  client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
13
  client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
14
 
15
- # 2. Proprietary Logic Controller
16
  class StateController:
17
  __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")
18
-
19
  def __init__(self):
20
  self._state = array("B", [0]) * 121
21
  self._metric = 60
@@ -38,62 +37,104 @@ class StateController:
38
  def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
39
  expected = self.generate_receipt(a, b, c)
40
  if receipt == expected:
41
- return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches allocation ({a}, {b}, {c})."
42
- return f"[NODE_120: INACTIVE] Verification Failed. Expected {expected}."
43
 
44
  controller = StateController()
45
 
46
- # 3. System Instructions
47
  PRIMARY_SYS = {"role": "system", "content": "You are a logic-focused inference engine. Use strict state-hold memory."}
48
  VANILLA_SYS = {"role": "system", "content": "You are a standard helpful AI assistant."}
49
 
50
- # 4. Response Logic
51
  def generate_responses(user_message, primary_history, competitor_history):
52
  msg = user_message.strip()
53
  if not msg:
54
  yield primary_history, competitor_history, ""
55
  return
56
 
57
- # Update history with Dictionary format (Required for Gradio 5+)
58
- primary_history.append({"role": "user", "content": msg})
59
- primary_history.append({"role": "assistant", "content": ""})
60
- competitor_history.append({"role": "user", "content": msg})
61
- competitor_history.append({"role": "assistant", "content": ""})
62
  yield primary_history, competitor_history, ""
63
 
64
  start_time = time.perf_counter()
65
 
66
- # Intercept: Diagnostic
67
  if msg.lower() == "run grid diagnostic":
68
  res = controller.diagnostic()
69
- primary_history[-1]["content"] = f"{res}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.4f}s | Local Kernel*"
70
  competitor_history[-1]["content"] = "Hardware diagnostics not supported."
71
  yield primary_history, competitor_history, ""
72
  return
73
 
74
- # Intercept: Checksum Verify
75
  verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
76
  if verify_match:
77
  receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
78
  res = controller.validate_receipt(receipt, a, b, c)
79
- primary_history[-1]["content"] = f"{res}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.6f}s | ROM Math*"
80
- competitor_history[-1]["content"] = "Deterministic verification not supported."
81
  yield primary_history, competitor_history, ""
82
  return
83
 
84
- # Engine 1: Augmented
85
  try:
86
  msgs = [PRIMARY_SYS] + primary_history[:-1]
87
- res_text = ""
88
- for chunk in client_primary.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.1):
89
- res_text += (chunk.choices[0].delta.content or "")
90
- primary_history[-1]["content"] = res_text
 
91
  yield primary_history, competitor_history, ""
92
  primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-start_time:.2f}s | Augmented*"
 
93
  except Exception as e:
94
  primary_history[-1]["content"] = f"Error: {str(e)}"
95
-
96
- # Engine 2: Vanilla
 
97
  comp_start = time.perf_counter()
98
  competitor_history[-1]["content"] = "*Connecting...*"
99
  yield primary_history, competitor_history, ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  import re
6
  import time
7
 
8
# 1. Setup
# HF_TOKEN is read from the environment (Space secret); MODEL_ID is the single
# checkpoint that both benchmark lanes run against.
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"

# Two separate clients for the same model — presumably to keep the two
# streaming sessions independent (NOTE(review): confirm; one client may suffice).
client_primary = InferenceClient(MODEL_ID, token=HF_TOKEN)
client_competitor = InferenceClient(MODEL_ID, token=HF_TOKEN)
14
 
15
+ # 2. Logic Controller
16
  class StateController:
17
  __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")
 
18
  def __init__(self):
19
  self._state = array("B", [0]) * 121
20
  self._metric = 60
 
37
  def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
38
  expected = self.generate_receipt(a, b, c)
39
  if receipt == expected:
40
+ return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches."
41
+ return f"[NODE_120: INACTIVE] Verification Failed."
42
 
43
# Module-level singleton consulted by the intercept paths in generate_responses.
controller = StateController()

# 3. Instructions — fixed system prompts, one per engine lane.
PRIMARY_SYS = {"role": "system", "content": "You are a logic-focused inference engine. Use strict state-hold memory."}
VANILLA_SYS = {"role": "system", "content": "You are a standard helpful AI assistant."}
48
 
49
# 4. Responses (Gradio 6 Compatible)
def generate_responses(user_message, primary_history, competitor_history):
    """Stream answers from both engines side by side.

    Generator used as a Gradio event handler. Yields
    ``(primary_history, competitor_history, textbox_value)`` tuples so the UI
    can update incrementally; the third element is always ``""`` to clear the
    input box. Histories are lists of ``{"role": ..., "content": ...}`` dicts
    (Gradio "messages" format).

    Two local intercepts ("run grid diagnostic" and "verify receipt ...") are
    answered by the StateController without calling the model.
    """
    msg = user_message.strip()
    if not msg:
        # Nothing to do — re-emit current state unchanged.
        yield primary_history, competitor_history, ""
        return

    # Append the user turn plus an empty assistant placeholder to each lane.
    # Rebinding to new lists (rather than mutating the inputs in place) keeps
    # Gradio's component state consistent across yields.
    primary_history = primary_history + [
        {"role": "user", "content": msg},
        {"role": "assistant", "content": ""},
    ]
    competitor_history = competitor_history + [
        {"role": "user", "content": msg},
        {"role": "assistant", "content": ""},
    ]
    yield primary_history, competitor_history, ""

    start_time = time.perf_counter()

    # Hardware Intercepts — handled locally, never sent to the model.
    if msg.lower() == "run grid diagnostic":
        res = controller.diagnostic()
        primary_history[-1]["content"] = f"{res}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.4f}s*"
        competitor_history[-1]["content"] = "Hardware diagnostics not supported."
        yield primary_history, competitor_history, ""
        return

    # Checksum Intercept — matches "verify receipt XY for a, b, c".
    verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)
    if verify_match:
        receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
        res = controller.validate_receipt(receipt, a, b, c)
        primary_history[-1]["content"] = f"{res}\n\n---\n*Telemetry: {time.perf_counter()-start_time:.6f}s*"
        competitor_history[-1]["content"] = "Verification not supported."
        yield primary_history, competitor_history, ""
        return

    # Stream Primary. The prompt excludes the empty assistant placeholder
    # ([:-1]); the placeholder is filled in as chunks arrive.
    try:
        msgs = [PRIMARY_SYS] + primary_history[:-1]
        response_text = ""
        stream = client_primary.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.1)
        for chunk in stream:
            response_text += (chunk.choices[0].delta.content or "")
            primary_history[-1]["content"] = response_text
            # Yield per chunk so the primary pane streams token-by-token,
            # matching the competitor lane's streaming behaviour.
            yield primary_history, competitor_history, ""
        primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-start_time:.2f}s | Augmented*"
        yield primary_history, competitor_history, ""
    except Exception as e:
        # UI boundary: surface any inference failure in the chat pane instead
        # of crashing the event handler.
        primary_history[-1]["content"] = f"Error: {str(e)}"
        yield primary_history, competitor_history, ""

    # Stream Competitor (timed independently from the primary lane).
    comp_start = time.perf_counter()
    competitor_history[-1]["content"] = "*Connecting...*"
    yield primary_history, competitor_history, ""

    try:
        msgs = [VANILLA_SYS] + competitor_history[:-1]
        response_text = ""
        stream = client_competitor.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.7)
        for chunk in stream:
            response_text += (chunk.choices[0].delta.content or "")
            competitor_history[-1]["content"] = response_text
            yield primary_history, competitor_history, ""
        competitor_history[-1]["content"] += f"\n\n---\n*Telemetry: {time.perf_counter()-comp_start:.2f}s | Vanilla*"
        yield primary_history, competitor_history, ""
    except Exception as e:
        competitor_history[-1]["content"] = f"Error: {str(e)}"
        yield primary_history, competitor_history, ""
116
+
117
# 5. UI
custom_css = """
body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
footer { display: none !important; }
"""

with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css) as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")

    # Top pane: the augmented engine; "messages" type matches the dict-based
    # histories produced by generate_responses.
    primary_chat = gr.Chatbot(label="Augmented Logic Kernel", height=320, type="messages")

    with gr.Row():
        msg_input = gr.Textbox(label="Message", placeholder="Enter task...", scale=8)
        submit_btn = gr.Button("Execute", scale=1, variant="primary")

    gr.Examples(
        examples=[
            "Calculate the integer distribution for 120 units across 3 nodes.",
            "Run grid diagnostic",
        ],
        inputs=msg_input,
    )

    # Bottom pane: the baseline model for side-by-side comparison.
    competitor_chat = gr.Chatbot(label="Vanilla Qwen 2.5", height=320, type="messages")

    # Enter-to-submit and the button share one handler and wiring.
    handler_inputs = [msg_input, primary_chat, competitor_chat]
    handler_outputs = [primary_chat, competitor_chat, msg_input]
    msg_input.submit(generate_responses, handler_inputs, handler_outputs)
    submit_btn.click(generate_responses, handler_inputs, handler_outputs)

if __name__ == "__main__":
    demo.queue().launch()