spanofzero committed on
Commit
e0fa056
·
verified ·
1 Parent(s): 0c23909

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -3
app.py CHANGED
@@ -4,7 +4,7 @@ from array import array
4
  from functools import lru_cache
5
  import os, re, time
6
 
7
- # 1. API Configuration - REVERTED to the fully supported 7B model
8
  HF_TOKEN = os.getenv("HF_TOKEN")
9
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
10
  client = InferenceClient(MODEL_ID, token=HF_TOKEN)
@@ -102,17 +102,33 @@ def generate_responses(user_message, p_hist, c_hist):
102
  c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
103
  yield p_hist, c_hist, ""
104
 
105
- # 4. Interface Build
106
  custom_css = "body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; } footer { display: none !important; }"
107
 
 
 
 
 
 
 
 
 
 
108
  with gr.Blocks() as demo:
109
  gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
110
  p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350)
 
111
  with gr.Row():
112
  msg_in = gr.Textbox(label="Message", placeholder="Test P vs NP or Logistics Distribution...", scale=8)
113
  submit_btn = gr.Button("Execute", scale=1, variant="primary")
114
 
115
- gr.Examples(examples=["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes.", "Run grid diagnostic"], inputs=msg_in)
 
 
 
 
 
 
116
 
117
  c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)
118
 
 
4
  from functools import lru_cache
5
  import os, re, time
6
 
7
+ # 1. API Configuration - Stable 7B Client
8
  HF_TOKEN = os.getenv("HF_TOKEN")
9
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
10
  client = InferenceClient(MODEL_ID, token=HF_TOKEN)
 
102
  c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
103
  yield p_hist, c_hist, ""
104
 
105
+ # 4. Interface Build with Paginated Test Suite
106
  custom_css = "body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; } footer { display: none !important; }"
107
 
108
+ # Expanded list of presentation-ready prompts
109
+ example_prompts = [
110
+ ["Run grid diagnostic"],
111
+ ["Calculate the integer distribution for 50000 units across 12 nodes."],
112
+ ["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes."],
113
+ ["Execute a Tier-3 Distribution Audit for 8593 units across 14 nodes."],
114
+ ["Distribute 1000000 units across 7 nodes."]
115
+ ]
116
+
117
  with gr.Blocks() as demo:
118
  gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
119
  p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350)
120
+
121
  with gr.Row():
122
  msg_in = gr.Textbox(label="Message", placeholder="Test P vs NP or Logistics Distribution...", scale=8)
123
  submit_btn = gr.Button("Execute", scale=1, variant="primary")
124
 
125
+ # New Paginated Examples Section
126
+ gr.Examples(
127
+ examples=example_prompts,
128
+ inputs=msg_in,
129
+ examples_per_page=2, # Creates the "scrollable" pages effect
130
+ label="Diagnostic Test Suite (Click to load)"
131
+ )
132
 
133
  c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)
134