EnricoFermi committed on
Commit
d5ac309
·
verified ·
1 Parent(s): c117684

Upload v2-7b-coder-compensated.alloy.json with huggingface_hub

Browse files
Files changed (1) hide show
  1. v2-7b-coder-compensated.alloy.json +3 -3
v2-7b-coder-compensated.alloy.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "name": "v2-7b-coder-compensated",
3
- "version": "1.2.0",
4
  "description": "Methodology validation artifact for the v2 forge pipeline + KL-distillation compensation LoRA. Demonstrates that aggressive head pruning + activation-metric importance + pad-mode defrag, when paired with output-distribution distillation against the unmodified teacher, recovers near-base HumanEval capability (61.0 vs 62.2 base, within calibration tolerance). This is the empirical anchor for PLASTICITY-COMPACTION \u00a74.1.3.3 and the loss-function ablation that closes the \u00a74.1.3.2 PPL/HumanEval disconnect. NOT a Pareto improvement over the unmodified base 7B at any single VRAM tier \u2014 published as proof that the methodology stack works end-to-end, in preparation for the Qwen3.5-35B-A3B and 397B-A17B forges where the pruning dimension actually wins.",
5
  "author": "continuum-ai",
6
  "tags": [
@@ -191,9 +191,9 @@
191
  {
192
  "target": "huggingface",
193
  "url": "https://huggingface.co/continuum-ai/v2-7b-coder-compensated",
194
- "publishedAt": "2026-04-08T05:01:40.446154+00:00"
195
  }
196
  ],
197
- "issuedAt": "2026-04-08T05:01:40.446154+00:00"
198
  }
199
  }
 
1
  {
2
  "name": "v2-7b-coder-compensated",
3
+ "version": "1.2.1",
4
  "description": "Methodology validation artifact for the v2 forge pipeline + KL-distillation compensation LoRA. Demonstrates that aggressive head pruning + activation-metric importance + pad-mode defrag, when paired with output-distribution distillation against the unmodified teacher, recovers near-base HumanEval capability (61.0 vs 62.2 base, within calibration tolerance). This is the empirical anchor for PLASTICITY-COMPACTION \u00a74.1.3.3 and the loss-function ablation that closes the \u00a74.1.3.2 PPL/HumanEval disconnect. NOT a Pareto improvement over the unmodified base 7B at any single VRAM tier \u2014 published as proof that the methodology stack works end-to-end, in preparation for the Qwen3.5-35B-A3B and 397B-A17B forges where the pruning dimension actually wins.",
5
  "author": "continuum-ai",
6
  "tags": [
 
191
  {
192
  "target": "huggingface",
193
  "url": "https://huggingface.co/continuum-ai/v2-7b-coder-compensated",
194
+ "publishedAt": "2026-04-08T05:02:57.072577+00:00"
195
  }
196
  ],
197
+ "issuedAt": "2026-04-08T05:02:57.072577+00:00"
198
  }
199
  }