SeaWolf-AI committed on
Commit
2580e39
·
verified ·
1 Parent(s): 0ec7fa3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -30
app.py CHANGED
@@ -1,6 +1,6 @@
1
  """
2
- 🧬 Gemma 4 Playground — Demo Space
3
- Dual model (31B / 26B-A4B) · ZeroGPU · Vision · Thinking Mode
4
  """
5
  import sys
6
  print(f"[BOOT] Python {sys.version}", flush=True)
@@ -42,27 +42,28 @@ from transformers.generation.streamers import TextIteratorStreamer
42
 
43
 
44
  # ══════════════════════════════════════════════════════════════════════════════
45
- # 1. MODEL CONFIG — Gemma 4 Dual Model
46
  # ══════════════════════════════════════════════════════════════════════════════
 
 
 
 
 
 
 
 
47
  MODELS = {
48
- "Gemma-4-31B-it": {
49
- "id": "google/gemma-4-31b-it",
50
- "arch": "Dense", "total": "30.7B", "active": "30.7B",
51
- "ctx": "256K", "vision": True, "audio": False,
52
- "desc": "Dense 31B — 최고 품질, AIME 89.2%, Codeforces 2150",
53
- },
54
- "Gemma-4-26B-A4B-it": {
55
- "id": "google/gemma-4-26B-A4B-it",
56
- "arch": "MoE", "total": "25.2B", "active": "3.8B",
57
- "ctx": "256K", "vision": True, "audio": False,
58
- "desc": "MoE 26B (3.8B active) — 31B의 95% 성능, 추론 ~8배 빠름",
59
  },
60
  }
61
 
62
- DEFAULT_MODEL = "Gemma-4-26B-A4B-it" # MoE가 ZeroGPU에서 더 적합
63
 
64
  PRESETS = {
65
- "general": "You are Gemma 4, a highly capable multimodal AI assistant by Google DeepMind. Think step by step for complex questions.",
66
  "code": "You are an expert software engineer. Write clean, efficient, well-commented code. Explain your approach before writing. Use modern best practices.",
67
  "math": "You are a world-class mathematician. Break problems step-by-step. Show full working. Use LaTeX where helpful.",
68
  "creative": "You are a brilliant creative writer. Be imaginative, vivid, and engaging. Adapt tone and style to the request.",
@@ -317,33 +318,28 @@ footer { display: none !important; }
317
 
318
  def _model_info_html(name):
319
  m = MODELS.get(name, MODELS[DEFAULT_MODEL])
320
- icon = "⚡" if m["arch"] == "MoE" else "🏆"
321
  return (
322
  f'<div class="model-box">'
323
- f'<b>{icon} {name}</b> '
324
- f'<span style="font-size:9px;padding:2px 6px;border-radius:6px;background:rgba(109,40,217,.08);color:#6d28d9;font-weight:700">{m["arch"]}</span><br>'
325
  f'<div class="st">{m["active"]} active / {m["total"]} total · 👁️ Vision · {m["ctx"]} context</div>'
326
  f'<div class="st">{m["desc"]}</div>'
327
  f'<div class="st" style="margin-top:6px">'
328
  f'<a href="https://huggingface.co/{m["id"]}" target="_blank" style="color:#6d28d9;font-weight:700;text-decoration:none">🤗 Model Card ↗</a> · '
329
- f'<a href="https://deepmind.google/models/gemma/gemma-4/" target="_blank" style="color:#059669;font-weight:700;text-decoration:none">🔬 DeepMind ↗</a>'
330
  f'</div></div>'
331
  )
332
 
333
- with gr.Blocks(title="Gemma 4 Playground") as demo:
334
 
335
  with gr.Row():
336
- gr.Markdown("## 💎 Gemma 4 Playground\nGoogle DeepMind · Apache 2.0 · Vision · Thinking")
337
  with gr.Column(scale=0, min_width=120):
338
  gr.LoginButton(size="sm")
339
 
340
  with gr.Row():
341
  # ── Sidebar ──
342
  with gr.Column(scale=0, min_width=280):
343
- model_dd = gr.Dropdown(
344
- choices=list(MODELS.keys()), value=DEFAULT_MODEL, label="Model",
345
- info="⚡MoE=Fast | 🏆Dense=Best quality (전환 시 1-2분)",
346
- )
347
  model_info = gr.HTML(value=_model_info_html(DEFAULT_MODEL))
348
  image_input = gr.Image(label="πŸ‘οΈ Image (Vision)", type="filepath", height=140)
349
  thinking_radio = gr.Radio(["⚡ Fast", "🧠 Thinking"], value="⚡ Fast", label="Mode")
@@ -360,15 +356,16 @@ with gr.Blocks(title="Gemma 4 Playground") as demo:
360
  chatbot = gr.Chatbot(elem_id="chatbot", show_label=False, height=600)
361
  with gr.Row():
362
  chat_input = gr.Textbox(
363
- placeholder="Message Gemma 4…",
364
  show_label=False, scale=7, autofocus=True, lines=1, max_lines=4,
365
  )
366
  send_btn = gr.Button("↑", variant="primary", scale=0, min_width=48, elem_id="send-btn")
367
 
368
  # ── Events ──
369
- model_dd.change(fn=_model_info_html, inputs=[model_dd], outputs=[model_info])
370
  preset_dd.change(fn=lambda k: PRESETS.get(k, PRESETS["general"]), inputs=[preset_dd], outputs=[sys_prompt])
371
 
 
 
372
  def user_msg(msg, hist):
373
  if not msg.strip(): return "", hist
374
  return "", hist + [{"role": "user", "content": msg}]
@@ -381,7 +378,7 @@ with gr.Blocks(title="Gemma 4 Playground") as demo:
381
  hist[-1]["content"] = chunk
382
  yield hist
383
 
384
- ins = [chatbot, thinking_radio, image_input, sys_prompt, max_tok, temp, topp, model_dd]
385
  send_btn.click(user_msg, [chat_input, chatbot], [chat_input, chatbot], queue=False).then(bot_reply, ins, chatbot)
386
  chat_input.submit(user_msg, [chat_input, chatbot], [chat_input, chatbot], queue=False).then(bot_reply, ins, chatbot)
387
  clear_btn.click(lambda: [], None, chatbot, queue=False)
@@ -391,5 +388,5 @@ with gr.Blocks(title="Gemma 4 Playground") as demo:
391
  # 7. LAUNCH
392
  # ══════════════════════════════════════════════════════════════════════════════
393
  if __name__ == "__main__":
394
- print(f"[BOOT] Gemma 4 Playground · Model: {DEFAULT_MODEL}", flush=True)
395
  demo.launch(server_name="0.0.0.0", server_port=7860, css=CSS, ssr_mode=False)
 
1
  """
2
+ Darwin-31B-Opus — Demo Space
3
+ Diagnostic-Guided Evolutionary Merge · Gemma 4 31B · Thinking Mode · Vision
4
  """
5
  import sys
6
  print(f"[BOOT] Python {sys.version}", flush=True)
 
42
 
43
 
44
  # ══════════════════════════════════════════════════════════════════════════════
45
+ # 1. MODEL CONFIG — Darwin-31B-Opus (Single Model)
46
  # ══════════════════════════════════════════════════════════════════════════════
47
+ MODEL_ID = "FINAL-Bench/Darwin-31B-Opus"
48
+ MODEL_NAME = "Darwin-31B-Opus"
49
+ MODEL_DESC = "Gemma 4 31B + Claude Opus Distill — Darwin V6 diagnostic-guided evolutionary merge"
50
+ MODEL_INFO = {
51
+ "arch": "Dense", "total": "30.7B", "active": "30.7B",
52
+ "ctx": "256K", "vision": True,
53
+ }
54
+
55
  MODELS = {
56
+ MODEL_NAME: {
57
+ "id": MODEL_ID,
58
+ **MODEL_INFO,
59
+ "desc": MODEL_DESC,
 
 
 
 
 
 
 
60
  },
61
  }
62
 
63
+ DEFAULT_MODEL = MODEL_NAME
64
 
65
  PRESETS = {
66
+ "general": "You are Darwin-31B-Opus, a reasoning-enhanced AI assistant created by VIDRAFT using diagnostic-guided evolutionary merge. Think step by step for complex questions.",
67
  "code": "You are an expert software engineer. Write clean, efficient, well-commented code. Explain your approach before writing. Use modern best practices.",
68
  "math": "You are a world-class mathematician. Break problems step-by-step. Show full working. Use LaTeX where helpful.",
69
  "creative": "You are a brilliant creative writer. Be imaginative, vivid, and engaging. Adapt tone and style to the request.",
 
318
 
319
  def _model_info_html(name):
320
  m = MODELS.get(name, MODELS[DEFAULT_MODEL])
 
321
  return (
322
  f'<div class="model-box">'
323
+ f'<b>🧬 {name}</b> '
324
+ f'<span style="font-size:9px;padding:2px 6px;border-radius:6px;background:rgba(109,40,217,.08);color:#6d28d9;font-weight:700">{m["arch"]} {m["active"]}</span><br>'
325
  f'<div class="st">{m["active"]} active / {m["total"]} total · 👁️ Vision · {m["ctx"]} context</div>'
326
  f'<div class="st">{m["desc"]}</div>'
327
  f'<div class="st" style="margin-top:6px">'
328
  f'<a href="https://huggingface.co/{m["id"]}" target="_blank" style="color:#6d28d9;font-weight:700;text-decoration:none">🤗 Model Card ↗</a> · '
329
+ f'<a href="https://huggingface.co/FINAL-Bench" target="_blank" style="color:#059669;font-weight:700;text-decoration:none">🧬 FINAL-Bench ↗</a>'
330
  f'</div></div>'
331
  )
332
 
333
+ with gr.Blocks(title="Darwin-31B-Opus") as demo:
334
 
335
  with gr.Row():
336
+ gr.Markdown("## 🧬 Darwin-31B-Opus\nVIDRAFT · Diagnostic-Guided Evolutionary Merge · Apache 2.0 · Vision · Thinking")
337
  with gr.Column(scale=0, min_width=120):
338
  gr.LoginButton(size="sm")
339
 
340
  with gr.Row():
341
  # ── Sidebar ──
342
  with gr.Column(scale=0, min_width=280):
 
 
 
 
343
  model_info = gr.HTML(value=_model_info_html(DEFAULT_MODEL))
344
  image_input = gr.Image(label="πŸ‘οΈ Image (Vision)", type="filepath", height=140)
345
  thinking_radio = gr.Radio(["⚡ Fast", "🧠 Thinking"], value="⚡ Fast", label="Mode")
 
356
  chatbot = gr.Chatbot(elem_id="chatbot", show_label=False, height=600)
357
  with gr.Row():
358
  chat_input = gr.Textbox(
359
+ placeholder="Message Darwin-31B-Opus…",
360
  show_label=False, scale=7, autofocus=True, lines=1, max_lines=4,
361
  )
362
  send_btn = gr.Button("↑", variant="primary", scale=0, min_width=48, elem_id="send-btn")
363
 
364
  # ── Events ──
 
365
  preset_dd.change(fn=lambda k: PRESETS.get(k, PRESETS["general"]), inputs=[preset_dd], outputs=[sys_prompt])
366
 
367
+ model_dd_hidden = gr.Textbox(value=DEFAULT_MODEL, visible=False)
368
+
369
  def user_msg(msg, hist):
370
  if not msg.strip(): return "", hist
371
  return "", hist + [{"role": "user", "content": msg}]
 
378
  hist[-1]["content"] = chunk
379
  yield hist
380
 
381
+ ins = [chatbot, thinking_radio, image_input, sys_prompt, max_tok, temp, topp, model_dd_hidden]
382
  send_btn.click(user_msg, [chat_input, chatbot], [chat_input, chatbot], queue=False).then(bot_reply, ins, chatbot)
383
  chat_input.submit(user_msg, [chat_input, chatbot], [chat_input, chatbot], queue=False).then(bot_reply, ins, chatbot)
384
  clear_btn.click(lambda: [], None, chatbot, queue=False)
 
388
  # 7. LAUNCH
389
  # ══════════════════════════════════════════════════════════════════════════════
390
  if __name__ == "__main__":
391
+ print(f"[BOOT] Darwin-31B-Opus Playground · VIDRAFT", flush=True)
392
  demo.launch(server_name="0.0.0.0", server_port=7860, css=CSS, ssr_mode=False)