AlekseyCalvin commited on
Commit
1310fc3
·
verified ·
1 Parent(s): ce2c35f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -32
app.py CHANGED
@@ -586,13 +586,31 @@ def wrapper_specious(token, method, base, norm, i8, t, filt_w, m1, w1, f1, m2, w
586
 
587
  yield from run_mergekit_logic(config, token, out, priv, shard, prec, tok_src, chat_t, program="mergekit-yaml")
588
 
589
- def wrapper_moer(token, base, experts, gate, dtype, out, priv, shard, prec, tok_src, chat_t):
590
- formatted = [{"source_model": e.strip(), "positive_prompts": ["chat", "assist"]} for e in experts.split('\n') if e.strip()]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
591
  config = {
592
- "base_model": base.strip() if base.strip() else formatted[0]["source_model"],
593
  "gate_mode": gate,
594
  "dtype": dtype,
595
- "experts": formatted
596
  }
597
  # Uses mergekit-moe CLI
598
  yield from run_mergekit_logic(config, token, out, priv, shard, prec, tok_src, chat_t, program="mergekit-moe")
@@ -700,21 +718,23 @@ with gr.Blocks() as demo:
700
 
701
  # --- TAB 5 ---
702
  with gr.Tab("Amphinterpolative"):
703
- gr.Markdown("### Spherical Interpolation Family")
704
  t5_token = gr.Textbox(label="HF Token", type="password")
705
- t5_method = gr.Dropdown(["slerp", "nuslerp", "multislerp", "karcher"], value="slerp", label="Method")
 
706
  with gr.Row():
707
- t5_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=1.0, maximum=20.0)
708
  t5_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision")
709
  t5_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source")
710
- t5_chat = gr.Textbox(label="Chat Template (write-in, default: auto)", placeholder="auto")
 
711
  with gr.Row():
712
  t5_base = gr.Textbox(label="Base Model")
713
  t5_t = gr.Slider(0, 1, 0.5, label="t")
714
  with gr.Row():
715
- t5_norm = gr.Checkbox(label="Normalize", value=True); t5_i8 = gr.Checkbox(label="Int8 Mask", value=False); t5_flat = gr.Checkbox(label="NuSlerp Flatten", value=False); t5_row = gr.Checkbox(label="NuSlerp Row Wise", value=False)
716
  with gr.Row():
717
- t5_eps = gr.Textbox(label="eps", value="1e-8"); t5_iter = gr.Number(label="max_iter", value=10); t5_tol = gr.Textbox(label="tol", value="1e-5")
718
  m1, w1 = gr.Textbox(label="Model 1"), gr.Textbox(label="Weight 1", value="1.0"); m2, w2 = gr.Textbox(label="Model 2"), gr.Textbox(label="Weight 2", value="1.0")
719
  with gr.Accordion("More", open=False):
720
  m3, w3 = gr.Textbox(label="Model 3"), gr.Textbox(label="Weight 3", value="1.0"); m4, w4 = gr.Textbox(label="Model 4"), gr.Textbox(label="Weight 4", value="1.0"); m5, w5 = gr.Textbox(label="Model 5"), gr.Textbox(label="Weight 5", value="1.0")
@@ -725,19 +745,19 @@ with gr.Blocks() as demo:
725
 
726
  # --- TAB 6 ---
727
  with gr.Tab("Stir/Tie Bases"):
728
- gr.Markdown("### Task Vector Family")
729
- t6_token = gr.Textbox(label="Token", type="password")
730
- t6_method = gr.Dropdown(["task_arithmetic", "ties", "dare_ties", "dare_linear", "della", "della_linear", "breadcrumbs", "breadcrumbs_ties", "sce"], value="ties", label="Method")
731
  with gr.Row():
732
- t6_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=1.0, maximum=20.0); t6_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t6_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t6_chat = gr.Textbox(label="Chat Template", placeholder="auto")
733
- t6_base = gr.Textbox(label="Base Model")
734
  with gr.Row():
735
- t6_norm = gr.Checkbox(label="Normalize", value=True); t6_i8 = gr.Checkbox(label="Int8 Mask", value=False); t6_resc = gr.Checkbox(label="Rescale", value=True); t6_lamb = gr.Number(label="Lambda", value=1.0); t6_topk = gr.Slider(0, 1, 1.0, label="Select TopK")
736
  m1_6, w1_6 = gr.Textbox(label="Model 1"), gr.Textbox(label="Weight 1", value="1.0"); d1_6, g1_6, e1_6 = gr.Textbox(label="Density", value="1.0"), gr.Number(label="Gamma", value=0.01), gr.Number(label="Epsilon", value=0.15)
737
  with gr.Accordion("More", open=False):
738
- m2_6, w2_6 = gr.Textbox(label="Model 2"), gr.Textbox(label="Weight 2", value="1.0"); d2_6, g2_6, e2_6 = gr.Textbox(label="Density", value="1.0"), gr.Number(label="Gamma", value=0.01), gr.Number(label="Epsilon", value=0.15)
739
- m3_6, w3_6 = gr.Textbox(label="Model 3"), gr.Textbox(label="Weight 3", value="1.0"); d3_6, g3_6, e3_6 = gr.Textbox(label="Density", value="1.0"), gr.Number(label="Gamma", value=0.01), gr.Number(label="Epsilon", value=0.15)
740
- m4_6, w4_6 = gr.Textbox(label="Model 4"), gr.Textbox(label="Weight 4", value="1.0"); d4_6, g4_6, e4_6 = gr.Textbox(label="Density", value="1.0"), gr.Number(label="Gamma", value=0.01), gr.Number(label="Epsilon", value=0.15)
741
  t6_out = gr.Textbox(label="Output Repo"); t6_priv = gr.Checkbox(label="Private", value=True)
742
  t6_btn = gr.Button("Execute")
743
  t6_res = gr.Textbox(label="Result", lines=10)
@@ -745,15 +765,21 @@ with gr.Blocks() as demo:
745
 
746
  # --- TAB 7 ---
747
  with gr.Tab("Specious"):
748
- gr.Markdown("### Specialized Methods")
749
  t7_token = gr.Textbox(label="Token", type="password")
750
- t7_method = gr.Dropdown(["model_stock", "nearswap", "arcee_fusion", "passthrough", "linear"], value="model_stock", label="Method")
 
751
  with gr.Row():
752
- t7_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=1.0, maximum=20.0); t7_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t7_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t7_chat = gr.Textbox(label="Chat Template", placeholder="auto")
753
- t7_base = gr.Textbox(label="Base Model")
 
 
 
 
 
754
  with gr.Row():
755
- t7_norm = gr.Checkbox(label="Normalize", value=True); t7_i8 = gr.Checkbox(label="Int8 Mask", value=False); t7_t = gr.Slider(0, 1, 0.5, label="t"); t7_filt_w = gr.Checkbox(label="Filter Wise", value=False)
756
- m1_7, w1_7, f1_7 = gr.Textbox(label="Model 1"), gr.Textbox(label="Weight 1", value="1.0"), gr.Textbox(label="Filter (Passthrough)")
757
  m2_7, w2_7 = gr.Textbox(label="Model 2"), gr.Textbox(label="Weight 2", value="1.0")
758
  with gr.Accordion("More", open=False):
759
  m3_7, w3_7 = gr.Textbox(label="Model 3"), gr.Textbox(label="Weight 3", value="1.0"); m4_7, w4_7 = gr.Textbox(label="Model 4"), gr.Textbox(label="Weight 4", value="1.0"); m5_7, w5_7 = gr.Textbox(label="Model 5"), gr.Textbox(label="Weight 5", value="1.0")
@@ -765,22 +791,48 @@ with gr.Blocks() as demo:
765
  # --- TAB 8 (MoEr) ---
766
  with gr.Tab("MoEr"):
767
  gr.Markdown("### Mixture of Experts")
 
 
768
  t8_token = gr.Textbox(label="Token", type="password")
769
  with gr.Row():
770
- t8_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=1.0, maximum=20.0); t8_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t8_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t8_chat = gr.Textbox(label="Chat Template", placeholder="auto")
771
  t8_base = gr.Textbox(label="Base Model"); t8_experts = gr.TextArea(label="Experts List"); t8_gate = gr.Dropdown(["cheap_embed", "random", "hidden"], value="cheap_embed", label="Gate Mode"); t8_dtype = gr.Dropdown(["float16", "bfloat16"], value="bfloat16", label="Internal Dtype")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
772
  t8_out = gr.Textbox(label="Output Repo"); t8_priv = gr.Checkbox(label="Private", value=True)
773
  t8_btn = gr.Button("Build MoE")
774
  t8_res = gr.Textbox(label="Result", lines=10)
775
- t8_btn.click(wrapper_moer, [t8_token, t8_base, t8_experts, t8_gate, t8_dtype, t8_out, t8_priv, t8_shard, t8_prec, t8_tok, t8_chat], t8_res)
776
 
777
  # --- TAB 9 (Rawer) ---
778
  with gr.Tab("Rawer"):
779
- gr.Markdown("### Raw PyTorch / Non-Transformer")
780
  t9_token = gr.Textbox(label="Token", type="password"); t9_models = gr.TextArea(label="Models (one per line)")
781
  with gr.Row():
782
- t9_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=1.0, maximum=20.0); t9_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t9_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t9_chat = gr.Textbox(label="Chat Template", placeholder="auto")
783
- t9_method = gr.Dropdown(["linear", "passthrough"], value="linear", label="Method"); t9_dtype = gr.Dropdown(["float32", "float16", "bfloat16"], value="float32", label="Config Dtype")
 
 
784
  t9_out = gr.Textbox(label="Output Repo"); t9_priv = gr.Checkbox(label="Private", value=True)
785
  t9_btn = gr.Button("Merge Raw")
786
  t9_res = gr.Textbox(label="Result", lines=10)
@@ -788,11 +840,26 @@ with gr.Blocks() as demo:
788
 
789
  # --- TAB 10 ---
790
  with gr.Tab("Mario,DARE!"):
 
 
791
  t10_token = gr.Textbox(label="Token", type="password")
 
 
 
 
 
 
 
 
 
 
 
 
 
792
  with gr.Row():
793
- t10_base = gr.Textbox(label="Base Model"); t10_ft = gr.Textbox(label="Fine-Tuned Model")
794
  with gr.Row():
795
- t10_ratio = gr.Slider(0, 5, 1.0, label="Ratio"); t10_mask = gr.Slider(0, 0.99, 0.5, label="Mask Rate")
796
  t10_out = gr.Textbox(label="Output Repo"); t10_priv = gr.Checkbox(label="Private", value=True)
797
  gr.Button("Run").click(task_dare_custom, [t10_token, t10_base, t10_ft, t10_ratio, t10_mask, t10_out, t10_priv], gr.Textbox(label="Result"))
798
 
 
586
 
587
  yield from run_mergekit_logic(config, token, out, priv, shard, prec, tok_src, chat_t, program="mergekit-yaml")
588
 
589
+ def wrapper_moer(token, base, expert1, prompt1, expert2, prompt2, expert3, prompt3, expert4, prompt4, expert5, prompt5, gate, dtype, out, priv, shard, prec, tok_src, chat_t):
590
+ experts = []
591
+ for exp, pmt in [
592
+ (expert1, prompt1), (expert2, prompt2), (expert3, prompt3),
593
+ (expert4, prompt4), (expert5, prompt5)
594
+ ]:
595
+ if exp.strip():
596
+ expert_entry = {"source_model": exp.strip()}
597
+ # Parse prompts (comma-separated)
598
+ if pmt.strip():
599
+ prompts = [p.strip() for p in pmt.split(',') if p.strip()]
600
+ expert_entry["positive_prompts"] = prompts
601
+ else:
602
+ expert_entry["positive_prompts"] = [""]
603
+ experts.append(expert_entry)
604
+
605
+ if len(experts) < 2:
606
+ return "Error: At least 2 experts required"
607
+
608
+ # Build config for MoE
609
  config = {
610
+ "base_model": base.strip(),
611
  "gate_mode": gate,
612
  "dtype": dtype,
613
+ "experts": experts
614
  }
615
  # Uses mergekit-moe CLI
616
  yield from run_mergekit_logic(config, token, out, priv, shard, prec, tok_src, chat_t, program="mergekit-moe")
 
718
 
719
  # --- TAB 5 ---
720
  with gr.Tab("Amphinterpolative"):
721
+ gr.Markdown("### Spherical Interpolation Methods Family: slerp, nuslerp, multislerp, karcher")
722
  t5_token = gr.Textbox(label="HF Token", type="password")
723
+ t5_method = gr.Dropdown(["slerp", "nuslerp", "multislerp", "karcher"], value="slerp", label="Merge Method")
724
+ gr.Markdown("See [MergeKit Merge Method Docs](https://github.com/arcee-ai/mergekit/blob/main/docs/merge_methods.md) for more info.")
725
  with gr.Row():
726
+ t5_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=0.5, maximum=20.0)
727
  t5_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision")
728
  t5_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source")
729
+ t5_chat = gr.Textbox(label="Chat Template (default: auto)", placeholder="auto")
730
+ gr.Markdown("Built-in Chat Templates: alpaca, chatml, llama3, mistral, exaone, auto")
731
  with gr.Row():
732
  t5_base = gr.Textbox(label="Base Model")
733
  t5_t = gr.Slider(0, 1, 0.5, label="t")
734
  with gr.Row():
735
+ t5_norm = gr.Checkbox(label="Normalize Weights", value=True); t5_i8 = gr.Checkbox(label="Int8 Mask", value=False); t5_flat = gr.Checkbox(label="Flatten Tensors (NuSlerp)", value=False); t5_row = gr.Checkbox(label="Row Wise (NuSlerp)", value=False)
736
  with gr.Row():
737
+ t5_eps = gr.Textbox(label="eps (Stabilization Constant) (MultiSlerp)", value="1e-8"); t5_iter = gr.Number(label="Max Iterations (Karcher)", value=10); t5_tol = gr.Textbox(label="tol (Convergence Tolerance) (Karcher)", value="1e-5")
738
  m1, w1 = gr.Textbox(label="Model 1"), gr.Textbox(label="Weight 1", value="1.0"); m2, w2 = gr.Textbox(label="Model 2"), gr.Textbox(label="Weight 2", value="1.0")
739
  with gr.Accordion("More", open=False):
740
  m3, w3 = gr.Textbox(label="Model 3"), gr.Textbox(label="Weight 3", value="1.0"); m4, w4 = gr.Textbox(label="Model 4"), gr.Textbox(label="Weight 4", value="1.0"); m5, w5 = gr.Textbox(label="Model 5"), gr.Textbox(label="Weight 5", value="1.0")
 
745
 
746
  # --- TAB 6 ---
747
  with gr.Tab("Stir/Tie Bases"):
748
+ gr.Markdown("### Task Vector Methods Family: task_arithmetic, ties, dare_ties, dare_linear, della, della_linear, breadcrumbs, breadcrumbs_ties, sce")
+ t6_token = gr.Textbox(label="Token", type="password")
749
+ t6_method = gr.Dropdown(["task_arithmetic", "ties", "dare_ties", "dare_linear", "della", "della_linear", "breadcrumbs", "breadcrumbs_ties", "sce"], value="ties", label="Merge Method")
750
+ gr.Markdown("See [MergeKit Merge Method Docs](https://github.com/arcee-ai/mergekit/blob/main/docs/merge_methods.md) for more info.")
751
  with gr.Row():
752
+ t6_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=0.5, maximum=20.0); t6_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t6_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t6_chat = gr.Textbox(label="Chat Template", placeholder="auto")
753
+ t6_base = gr.Textbox(label="Base Model (required)")
754
  with gr.Row():
755
+ t6_norm = gr.Checkbox(label="Normalize Weights", value=True); t6_i8 = gr.Checkbox(label="Int8 Mask", value=False); t6_resc = gr.Checkbox(label="Rescale (Dare_Linear)", value=True); t6_lamb = gr.Number(label="Lambda", value=1.0); t6_topk = gr.Slider(0, 1, 1.0, label="Select TopK (SCE)")
756
  m1_6, w1_6 = gr.Textbox(label="Model 1"), gr.Textbox(label="Weight 1", value="1.0"); d1_6, g1_6, e1_6 = gr.Textbox(label="Density", value="1.0"), gr.Number(label="Gamma", value=0.01), gr.Number(label="Epsilon", value=0.15)
757
  with gr.Accordion("More", open=False):
758
+ m2_6, w2_6 = gr.Textbox(label="Model 2"), gr.Textbox(label="Weight 2", value="1.0"); d2_6, g2_6, e2_6 = gr.Textbox(label="Density (DARE/TIES)", value="1.0"), gr.Number(label="Gamma (breadcrumbs)", value=0.01), gr.Number(label="Epsilon (DELLA)", value=0.15)
759
+ m3_6, w3_6 = gr.Textbox(label="Model 3"), gr.Textbox(label="Weight 3", value="1.0"); d3_6, g3_6, e3_6 = gr.Textbox(label="Density (DARE/TIES)", value="1.0"), gr.Number(label="Gamma (breadcrumbs)", value=0.01), gr.Number(label="Epsilon (DELLA)", value=0.15)
760
+ m4_6, w4_6 = gr.Textbox(label="Model 4"), gr.Textbox(label="Weight 4", value="1.0"); d4_6, g4_6, e4_6 = gr.Textbox(label="Density (DARE/TIES)", value="1.0"), gr.Number(label="Gamma (breadcrumbs)", value=0.01), gr.Number(label="Epsilon (DELLA)", value=0.15)
761
  t6_out = gr.Textbox(label="Output Repo"); t6_priv = gr.Checkbox(label="Private", value=True)
762
  t6_btn = gr.Button("Execute")
763
  t6_res = gr.Textbox(label="Result", lines=10)
 
765
 
766
  # --- TAB 7 ---
767
  with gr.Tab("Specious"):
768
+ gr.Markdown("### Specialized Methods: model_stock, nearswap, arcee_fusion, passthrough, linear")
769
  t7_token = gr.Textbox(label="Token", type="password")
770
+ t7_method = gr.Dropdown(["model_stock", "nearswap", "arcee_fusion", "passthrough", "linear"], value="model_stock", label="Merge Method")
771
+ gr.Markdown("See [MergeKit Merge Method Docs](https://github.com/arcee-ai/mergekit/blob/main/docs/merge_methods.md) for more info.")
772
  with gr.Row():
773
+ t7_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=0.5, maximum=20.0); t7_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t7_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t7_chat = gr.Textbox(label="Chat Template", placeholder="auto")
774
+
775
+ t7_base = gr.Textbox(label="Base Model (required for nearswap/arcee_fusion/model_stock)", placeholder="org/base-model")
776
+
777
+ gr.Markdown("#### Models")
778
+ gr.Markdown("**passthrough:** 1 model | **nearswap/arcee_fusion:** 2 models | **model_stock:** 3+ models")
779
+
780
  with gr.Row():
781
+ t7_norm = gr.Checkbox(label="Normalize", value=True); t7_i8 = gr.Checkbox(label="Int8 Mask", value=False); t7_t = gr.Slider(0, 1, 0.5, label="t (Interpolation Ratio, for Nearswap)"); t7_filt_w = gr.Checkbox(label="Filter Wise (for Model_Stock)", value=False)
782
+ m1_7, w1_7, f1_7 = gr.Textbox(label="Model 1"), gr.Textbox(label="Weight 1", value="1.0"), gr.Textbox(label="Filter Model Component")
783
  m2_7, w2_7 = gr.Textbox(label="Model 2"), gr.Textbox(label="Weight 2", value="1.0")
784
  with gr.Accordion("More", open=False):
785
  m3_7, w3_7 = gr.Textbox(label="Model 3"), gr.Textbox(label="Weight 3", value="1.0"); m4_7, w4_7 = gr.Textbox(label="Model 4"), gr.Textbox(label="Weight 4", value="1.0"); m5_7, w5_7 = gr.Textbox(label="Model 5"), gr.Textbox(label="Weight 5", value="1.0")
 
791
  # --- TAB 8 (MoEr) ---
792
  with gr.Tab("MoEr"):
793
  gr.Markdown("### Mixture of Experts")
794
+ gr.Markdown("See the [MergeKit MoE docs](https://github.com/arcee-ai/mergekit/blob/main/docs/moe.md) for more info.")
795
+
796
  t8_token = gr.Textbox(label="Token", type="password")
797
  with gr.Row():
798
+ t8_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=0.5, maximum=20.0); t8_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t8_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t8_chat = gr.Textbox(label="Chat Template", placeholder="auto")
799
  t8_base = gr.Textbox(label="Base Model"); t8_experts = gr.TextArea(label="Experts List"); t8_gate = gr.Dropdown(["cheap_embed", "random", "hidden"], value="cheap_embed", label="Gate Mode"); t8_dtype = gr.Dropdown(["float16", "bfloat16"], value="bfloat16", label="Internal Dtype")
800
+ gr.Markdown("#### Experts (at least 2 required)")
801
+ gr.Markdown("Prompts are comma-separated descriptors for each expert")
802
+
803
+ with gr.Row():
804
+ t8_expert1 = gr.Textbox(label="Expert 1", placeholder="org/expert1")
805
+ t8_prompt1 = gr.Textbox(label="Positive Prompts", placeholder="math, reasoning, logic")
806
+
807
+ with gr.Row():
808
+ t8_expert2 = gr.Textbox(label="Expert 2", placeholder="org/expert2")
809
+ t8_prompt2 = gr.Textbox(label="Positive Prompts", placeholder="creative, writing, storytelling")
810
+
811
+ with gr.Row():
812
+ t8_expert3 = gr.Textbox(label="Expert 3 (optional)", placeholder="org/expert3")
813
+ t8_prompt3 = gr.Textbox(label="Positive Prompts", placeholder="code, programming")
814
+
815
+ with gr.Row():
816
+ t8_expert4 = gr.Textbox(label="Expert 4 (optional)", placeholder="org/expert4")
817
+ t8_prompt4 = gr.Textbox(label="Positive Prompts", placeholder="")
818
+
819
+ with gr.Row():
820
+ t8_expert5 = gr.Textbox(label="Expert 5 (optional)", placeholder="org/expert5")
821
+ t8_prompt5 = gr.Textbox(label="Positive Prompts", placeholder="")
822
  t8_out = gr.Textbox(label="Output Repo"); t8_priv = gr.Checkbox(label="Private", value=True)
823
  t8_btn = gr.Button("Build MoE")
824
  t8_res = gr.Textbox(label="Result", lines=10)
825
+ t8_btn.click(wrapper_moer, [t8_token, t8_base, t8_expert1, t8_prompt1, t8_expert2, t8_prompt2, t8_expert3, t8_prompt3, t8_expert4, t8_prompt4, t8_expert5, t8_prompt5, t8_gate, t8_dtype, t8_out, t8_priv, t8_shard, t8_prec, t8_tok, t8_chat], t8_res)
826
 
827
  # --- TAB 9 (Rawer) ---
828
  with gr.Tab("Rawer"):
829
+ gr.Markdown("### Raw PyTorch MergeKit / Non-pipeline-classed Models")
830
  t9_token = gr.Textbox(label="Token", type="password"); t9_models = gr.TextArea(label="Models (one per line)")
831
  with gr.Row():
832
+ t9_shard = gr.Slider(label="Max Shard Size (GB)", value=5.0, minimum=0.5, maximum=20.0); t9_prec = gr.Dropdown(["float16", "bfloat16", "float32"], value="bfloat16", label="Output Precision"); t9_tok = gr.Dropdown(["base", "union", "model:path"], value="base", label="Tokenizer Source"); t9_chat = gr.Textbox(label="Chat Template", placeholder="auto")
833
+ gr.Markdown("Built-in Chat Templates: alpaca, chatml, llama3, mistral, exaone, auto")
834
+ gr.Markdown("See [MergeKit Merge Method Docs](https://github.com/arcee-ai/mergekit/blob/main/docs/merge_methods.md) for more info.")
835
+ t9_method = gr.Dropdown(["linear", "passthrough"], value="linear", label="Merge Method"); t9_dtype = gr.Dropdown(["float32", "float16", "bfloat16"], value="float32", label="Config dtype")
836
  t9_out = gr.Textbox(label="Output Repo"); t9_priv = gr.Checkbox(label="Private", value=True)
837
  t9_btn = gr.Button("Merge Raw")
838
  t9_res = gr.Textbox(label="Result", lines=10)
 
840
 
841
  # --- TAB 10 ---
842
  with gr.Tab("Mario,DARE!"):
843
+ gr.Markdown("### Model-Agnostic DARE Implementation (Drop And REscale)")
844
+ gr.Markdown("From [sft-merger by Martyn Garcia](https://github.com/martyn)")
845
  t10_token = gr.Textbox(label="Token", type="password")
846
+
847
+ gr.Markdown(
848
+ """
849
+ ### How DARE Works:
850
+ 1. **Compute Delta**: Difference between fine-tuned and base weights
851
+ 2. **Drop Elements**: Randomly mask out delta values based on mask rate
852
+ 3. **Rescale**: Compensate for dropped elements by rescaling remaining values
853
+ 4. **Apply**: Add scaled delta back to base model
854
+
855
+ **Mask Rate**: 0.5 = drop 50% of delta values, 0.9 = drop 90% (more aggressive sparsification)
856
+ """
857
+ )
858
+
859
  with gr.Row():
860
+ t10_base = gr.Textbox(label="Base Model", placeholder="org/base-model"); t10_ft = gr.Textbox(label="Fine-Tuned Model", placeholder="org/fine-tuned-model")
861
  with gr.Row():
862
+ t10_ratio = gr.Slider(value=1.0, minimum=0.0, maximum=2.0, step=0.1, label="Merge Ratio (delta weight)"); t10_mask = gr.Slider(value=0.5, minimum=0.0, maximum=0.99, step=0.01, label="Mask Rate (drop probability)")
863
  t10_out = gr.Textbox(label="Output Repo"); t10_priv = gr.Checkbox(label="Private", value=True)
864
  gr.Button("Run").click(task_dare_custom, [t10_token, t10_base, t10_ft, t10_ratio, t10_mask, t10_out, t10_priv], gr.Textbox(label="Result"))
865