Set up the dataset structure
Browse filesSigned-off-by: Ubospica <ubospica@gmail.com>
- README.md +39 -0
- definitions/dsa_paged/dsa_sparse_decode_h16_ckv512_kpe64_topk256_ps1.json +121 -0
- definitions/dsa_paged/dsa_sparse_decode_h16_ckv512_kpe64_topk256_ps64.json +121 -0
- definitions/dsa_paged/dsa_sparse_prefill_causal_h16_ckv512_kpe64_topk256_ps1.json +123 -0
- definitions/dsa_paged/dsa_sparse_prefill_causal_h16_ckv512_kpe64_topk256_ps64.json +123 -0
- definitions/dsa_paged/dsa_topk_indexer_fp8_h64_d128_topk256_ps64.json +113 -0
- definitions/gdn/gdn_decode_qk16_v32_d128_k_last.json +107 -0
- definitions/gdn/gdn_prefill_qk16_v32_d128_k_last.json +112 -0
- definitions/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048.json +147 -0
- workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048.jsonl +19 -0
README.md
CHANGED
|
@@ -1,3 +1,42 @@
|
|
| 1 |
---
|
| 2 |
license: apache-2.0
|
| 3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
license: apache-2.0
|
| 3 |
---
|
| 4 |
+
|
| 5 |
+
# MLSys 2026 FlashInfer-Bench Challenge Dataset
|
| 6 |
+
|
| 7 |
+
This repository contains the FlashInfer-Bench dataset for the MLSys 2026 Kernel Generation Challenge.
|
| 8 |
+
|
| 9 |
+
This dataset is intended to be used with the [FlashInfer-Bench](https://github.com/flashinfer-ai/flashinfer-bench) benchmark system.
|
| 10 |
+
|
| 11 |
+
It follows the [FlashInfer Trace Schema](https://bench.flashinfer.ai/docs/flashinfer-trace). To use the dataset in the competition, please refer to our [starter kit](https://github.com/flashinfer-ai/flashinfer-bench-starter-kit).
|
| 12 |
+
|
| 13 |
+
## Download
|
| 14 |
+
|
| 15 |
+
Use this command to download the dataset:
|
| 16 |
+
```bash
|
| 17 |
+
git clone https://huggingface.co/datasets/flashinfer-ai/mlsys26-contest
|
| 18 |
+
```
|
| 19 |
+
|
| 20 |
+
## Tasks
|
| 21 |
+
|
| 22 |
+
This dataset contains the definitions and workloads for these kernels:
|
| 23 |
+
* Fused Mixture of Experts (MoE)
|
| 24 |
+
* Gated Delta Network (GDN)
|
| 25 |
+
* DeepSeek Sparse Attention (DSA)
|
| 26 |
+
|
| 27 |
+
## Dataset Structure
|
| 28 |
+
|
| 29 |
+
It is organized as follows:
|
| 30 |
+
```
|
| 31 |
+
mlsys26-contest/
|
| 32 |
+
├── definitions/
|
| 33 |
+
└── workloads/
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
These components are provided in the dataset:
|
| 37 |
+
* **Definition**: describes the input, output, and computation logic of a kernel task.
|
| 38 |
+
* **Workload**: describes the inputs for a definition during real inference. This will be used to benchmark the **Solution** you provided.
|
| 39 |
+
|
| 40 |
+
During benchmarking, these components should be provided or generated:
|
| 41 |
+
* **Solution**: provided by participants, your implementation of the kernel task.
|
| 42 |
+
* **Trace**: generated by FlashInfer-Bench, the performance and correctness results of your solution on the workloads.
|
definitions/dsa_paged/dsa_sparse_decode_h16_ckv512_kpe64_topk256_ps1.json
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "dsa_sparse_decode_h16_ckv512_kpe64_topk256_ps1",
|
| 3 |
+
"description": "Batched Native Sparse Attention (DSA) decode with sparse TopK KV cache selection. Captured from DeepSeek-V3 with tensor parallel size 8. Uses sparse indexing to select only top-K KV cache entries for attention computation.",
|
| 4 |
+
"op_type": "dsa_paged",
|
| 5 |
+
"tags": [
|
| 6 |
+
"stage:decode",
|
| 7 |
+
"status:verified",
|
| 8 |
+
"model:deepseek-v3",
|
| 9 |
+
"model:deepseek-r1",
|
| 10 |
+
"sparse:topk"
|
| 11 |
+
],
|
| 12 |
+
"axes": {
|
| 13 |
+
"batch_size": {
|
| 14 |
+
"type": "var",
|
| 15 |
+
"description": "Batch size (number of sequences)."
|
| 16 |
+
},
|
| 17 |
+
"num_qo_heads": {
|
| 18 |
+
"type": "const",
|
| 19 |
+
"value": 16,
|
| 20 |
+
"description": "Number of query heads after tensor parallel split (128/8=16)."
|
| 21 |
+
},
|
| 22 |
+
"head_dim_ckv": {
|
| 23 |
+
"type": "const",
|
| 24 |
+
"value": 512,
|
| 25 |
+
"description": "Compressed KV head dimension."
|
| 26 |
+
},
|
| 27 |
+
"head_dim_kpe": {
|
| 28 |
+
"type": "const",
|
| 29 |
+
"value": 64,
|
| 30 |
+
"description": "Key positional encoding dimension."
|
| 31 |
+
},
|
| 32 |
+
"page_size": {
|
| 33 |
+
"type": "const",
|
| 34 |
+
"value": 1,
|
| 35 |
+
"description": "Page size for KV cache (token-level)."
|
| 36 |
+
},
|
| 37 |
+
"topk": {
|
| 38 |
+
"type": "const",
|
| 39 |
+
"value": 256,
|
| 40 |
+
"description": "Number of top-K KV cache entries selected for sparse attention."
|
| 41 |
+
},
|
| 42 |
+
"num_pages": {
|
| 43 |
+
"type": "var",
|
| 44 |
+
"description": "Total number of allocated pages in the KV cache."
|
| 45 |
+
}
|
| 46 |
+
},
|
| 47 |
+
"constraints": [
|
| 48 |
+
"sparse_indices.shape[-1] == topk",
|
| 49 |
+
"ckv_cache.shape[1] == page_size"
|
| 50 |
+
],
|
| 51 |
+
"inputs": {
|
| 52 |
+
"q_nope": {
|
| 53 |
+
"shape": [
|
| 54 |
+
"batch_size",
|
| 55 |
+
"num_qo_heads",
|
| 56 |
+
"head_dim_ckv"
|
| 57 |
+
],
|
| 58 |
+
"dtype": "bfloat16",
|
| 59 |
+
"description": "Query tensor without positional encoding component."
|
| 60 |
+
},
|
| 61 |
+
"q_pe": {
|
| 62 |
+
"shape": [
|
| 63 |
+
"batch_size",
|
| 64 |
+
"num_qo_heads",
|
| 65 |
+
"head_dim_kpe"
|
| 66 |
+
],
|
| 67 |
+
"dtype": "bfloat16",
|
| 68 |
+
"description": "Query positional encoding component."
|
| 69 |
+
},
|
| 70 |
+
"ckv_cache": {
|
| 71 |
+
"shape": [
|
| 72 |
+
"num_pages",
|
| 73 |
+
"page_size",
|
| 74 |
+
"head_dim_ckv"
|
| 75 |
+
],
|
| 76 |
+
"dtype": "bfloat16",
|
| 77 |
+
"description": "Compressed key-value cache with page_size=1."
|
| 78 |
+
},
|
| 79 |
+
"kpe_cache": {
|
| 80 |
+
"shape": [
|
| 81 |
+
"num_pages",
|
| 82 |
+
"page_size",
|
| 83 |
+
"head_dim_kpe"
|
| 84 |
+
],
|
| 85 |
+
"dtype": "bfloat16",
|
| 86 |
+
"description": "Key positional encoding cache."
|
| 87 |
+
},
|
| 88 |
+
"sparse_indices": {
|
| 89 |
+
"shape": [
|
| 90 |
+
"batch_size",
|
| 91 |
+
"topk"
|
| 92 |
+
],
|
| 93 |
+
"dtype": "int32",
|
| 94 |
+
"description": "Sparse indices selecting top-K KV cache entries for each batch element. Values of -1 indicate padding (invalid indices)."
|
| 95 |
+
},
|
| 96 |
+
"sm_scale": {
|
| 97 |
+
"shape": null,
|
| 98 |
+
"dtype": "float32",
|
| 99 |
+
"description": "Softmax scale. For MLA, uses pre-absorption head dimension: 1/sqrt(head_dim_qk + head_dim_kpe) = 1/sqrt(128 + 64) = 1/sqrt(192)."
|
| 100 |
+
}
|
| 101 |
+
},
|
| 102 |
+
"outputs": {
|
| 103 |
+
"output": {
|
| 104 |
+
"shape": [
|
| 105 |
+
"batch_size",
|
| 106 |
+
"num_qo_heads",
|
| 107 |
+
"head_dim_ckv"
|
| 108 |
+
],
|
| 109 |
+
"dtype": "bfloat16"
|
| 110 |
+
},
|
| 111 |
+
"lse": {
|
| 112 |
+
"shape": [
|
| 113 |
+
"batch_size",
|
| 114 |
+
"num_qo_heads"
|
| 115 |
+
],
|
| 116 |
+
"dtype": "float32",
|
| 117 |
+
"description": "The base-2 log-sum-exp of attention logits."
|
| 118 |
+
}
|
| 119 |
+
},
|
| 120 |
+
"reference": "import math\nimport torch\n\n\n@torch.no_grad()\ndef run(q_nope, q_pe, ckv_cache, kpe_cache, sparse_indices, sm_scale):\n batch_size, num_qo_heads, head_dim_ckv = q_nope.shape\n head_dim_kpe = q_pe.shape[-1]\n page_size = ckv_cache.shape[1]\n topk = sparse_indices.shape[-1]\n\n # Check constants\n assert num_qo_heads == 16\n assert head_dim_ckv == 512\n assert head_dim_kpe == 64\n assert page_size == 1\n assert topk == 256\n\n # Check constraints\n assert sparse_indices.shape[-1] == topk\n assert ckv_cache.shape[1] == page_size\n\n device = q_nope.device\n\n # Squeeze page dimension (page_size=1)\n Kc_all = ckv_cache.squeeze(1).to(torch.float32) # [num_pages, head_dim_ckv]\n Kp_all = kpe_cache.squeeze(1).to(torch.float32) # [num_pages, head_dim_kpe]\n\n output = torch.zeros(\n (batch_size, num_qo_heads, head_dim_ckv), dtype=torch.bfloat16, device=device\n )\n lse = torch.full((batch_size, num_qo_heads), -float(\"inf\"), dtype=torch.float32, device=device)\n\n for b in range(batch_size):\n indices = sparse_indices[b] # [topk]\n\n # Handle padding: -1 indicates invalid indices\n valid_mask = indices != -1\n valid_indices = indices[valid_mask]\n\n if valid_indices.numel() == 0:\n output[b].zero_()\n continue\n\n tok_idx = valid_indices.to(torch.long)\n\n Kc = Kc_all[tok_idx] # [num_valid, head_dim_ckv]\n Kp = Kp_all[tok_idx] # [num_valid, head_dim_kpe]\n qn = q_nope[b].to(torch.float32) # [num_qo_heads, head_dim_ckv]\n qp = q_pe[b].to(torch.float32) # [num_qo_heads, head_dim_kpe]\n\n # Compute attention logits\n logits = (qn @ Kc.T) + (qp @ Kp.T) # [num_qo_heads, num_valid]\n logits_scaled = logits * sm_scale\n\n # Compute 2-base LSE\n lse[b] = torch.logsumexp(logits_scaled, dim=-1) / math.log(2.0)\n\n # Compute attention output\n attn = torch.softmax(logits_scaled, dim=-1) # [num_qo_heads, num_valid]\n out = attn @ Kc # [num_qo_heads, head_dim_ckv]\n output[b] = out.to(torch.bfloat16)\n\n return output, lse"
|
| 121 |
+
}
|
definitions/dsa_paged/dsa_sparse_decode_h16_ckv512_kpe64_topk256_ps64.json
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "dsa_sparse_decode_h16_ckv512_kpe64_topk256_ps64",
|
| 3 |
+
"description": "Batched Native Sparse Attention (DSA) decode with sparse TopK KV cache selection. Captured from DeepSeek-V3 with tensor parallel size 8. Uses sparse indexing to select only top-K KV cache entries for attention computation. Page size 64 variant.",
|
| 4 |
+
"op_type": "dsa_paged",
|
| 5 |
+
"tags": [
|
| 6 |
+
"stage:decode",
|
| 7 |
+
"status:verified",
|
| 8 |
+
"model:deepseek-v3",
|
| 9 |
+
"model:deepseek-r1",
|
| 10 |
+
"sparse:topk"
|
| 11 |
+
],
|
| 12 |
+
"axes": {
|
| 13 |
+
"batch_size": {
|
| 14 |
+
"type": "var",
|
| 15 |
+
"description": "Batch size (number of sequences)."
|
| 16 |
+
},
|
| 17 |
+
"num_qo_heads": {
|
| 18 |
+
"type": "const",
|
| 19 |
+
"value": 16,
|
| 20 |
+
"description": "Number of query heads after tensor parallel split (128/8=16)."
|
| 21 |
+
},
|
| 22 |
+
"head_dim_ckv": {
|
| 23 |
+
"type": "const",
|
| 24 |
+
"value": 512,
|
| 25 |
+
"description": "Compressed KV head dimension."
|
| 26 |
+
},
|
| 27 |
+
"head_dim_kpe": {
|
| 28 |
+
"type": "const",
|
| 29 |
+
"value": 64,
|
| 30 |
+
"description": "Key positional encoding dimension."
|
| 31 |
+
},
|
| 32 |
+
"page_size": {
|
| 33 |
+
"type": "const",
|
| 34 |
+
"value": 64,
|
| 35 |
+
"description": "Page size for KV cache (64 tokens per page)."
|
| 36 |
+
},
|
| 37 |
+
"topk": {
|
| 38 |
+
"type": "const",
|
| 39 |
+
"value": 256,
|
| 40 |
+
"description": "Number of top-K KV cache entries selected for sparse attention."
|
| 41 |
+
},
|
| 42 |
+
"num_pages": {
|
| 43 |
+
"type": "var",
|
| 44 |
+
"description": "Total number of allocated pages in the KV cache."
|
| 45 |
+
}
|
| 46 |
+
},
|
| 47 |
+
"constraints": [
|
| 48 |
+
"sparse_indices.shape[-1] == topk",
|
| 49 |
+
"ckv_cache.shape[1] == page_size"
|
| 50 |
+
],
|
| 51 |
+
"inputs": {
|
| 52 |
+
"q_nope": {
|
| 53 |
+
"shape": [
|
| 54 |
+
"batch_size",
|
| 55 |
+
"num_qo_heads",
|
| 56 |
+
"head_dim_ckv"
|
| 57 |
+
],
|
| 58 |
+
"dtype": "bfloat16",
|
| 59 |
+
"description": "Query tensor without positional encoding component."
|
| 60 |
+
},
|
| 61 |
+
"q_pe": {
|
| 62 |
+
"shape": [
|
| 63 |
+
"batch_size",
|
| 64 |
+
"num_qo_heads",
|
| 65 |
+
"head_dim_kpe"
|
| 66 |
+
],
|
| 67 |
+
"dtype": "bfloat16",
|
| 68 |
+
"description": "Query positional encoding component."
|
| 69 |
+
},
|
| 70 |
+
"ckv_cache": {
|
| 71 |
+
"shape": [
|
| 72 |
+
"num_pages",
|
| 73 |
+
"page_size",
|
| 74 |
+
"head_dim_ckv"
|
| 75 |
+
],
|
| 76 |
+
"dtype": "bfloat16",
|
| 77 |
+
"description": "Compressed key-value cache with page_size=64."
|
| 78 |
+
},
|
| 79 |
+
"kpe_cache": {
|
| 80 |
+
"shape": [
|
| 81 |
+
"num_pages",
|
| 82 |
+
"page_size",
|
| 83 |
+
"head_dim_kpe"
|
| 84 |
+
],
|
| 85 |
+
"dtype": "bfloat16",
|
| 86 |
+
"description": "Key positional encoding cache."
|
| 87 |
+
},
|
| 88 |
+
"sparse_indices": {
|
| 89 |
+
"shape": [
|
| 90 |
+
"batch_size",
|
| 91 |
+
"topk"
|
| 92 |
+
],
|
| 93 |
+
"dtype": "int32",
|
| 94 |
+
"description": "Sparse indices selecting top-K KV cache entries for each batch element. Values of -1 indicate padding (invalid indices). For page_size=64, indices encode (page_idx * 64 + offset)."
|
| 95 |
+
},
|
| 96 |
+
"sm_scale": {
|
| 97 |
+
"shape": null,
|
| 98 |
+
"dtype": "float32",
|
| 99 |
+
"description": "Softmax scale. For MLA, uses pre-absorption head dimension: 1/sqrt(head_dim_qk + head_dim_kpe) = 1/sqrt(128 + 64) = 1/sqrt(192)."
|
| 100 |
+
}
|
| 101 |
+
},
|
| 102 |
+
"outputs": {
|
| 103 |
+
"output": {
|
| 104 |
+
"shape": [
|
| 105 |
+
"batch_size",
|
| 106 |
+
"num_qo_heads",
|
| 107 |
+
"head_dim_ckv"
|
| 108 |
+
],
|
| 109 |
+
"dtype": "bfloat16"
|
| 110 |
+
},
|
| 111 |
+
"lse": {
|
| 112 |
+
"shape": [
|
| 113 |
+
"batch_size",
|
| 114 |
+
"num_qo_heads"
|
| 115 |
+
],
|
| 116 |
+
"dtype": "float32",
|
| 117 |
+
"description": "The base-2 log-sum-exp of attention logits."
|
| 118 |
+
}
|
| 119 |
+
},
|
| 120 |
+
"reference": "import math\nimport torch\n\n\n@torch.no_grad()\ndef run(q_nope, q_pe, ckv_cache, kpe_cache, sparse_indices, sm_scale):\n batch_size, num_qo_heads, head_dim_ckv = q_nope.shape\n head_dim_kpe = q_pe.shape[-1]\n num_pages, page_size, _ = ckv_cache.shape\n topk = sparse_indices.shape[-1]\n\n # Check constants\n assert num_qo_heads == 16\n assert head_dim_ckv == 512\n assert head_dim_kpe == 64\n assert page_size == 64\n assert topk == 256\n\n # Check constraints\n assert sparse_indices.shape[-1] == topk\n assert ckv_cache.shape[1] == page_size\n\n device = q_nope.device\n\n # Flatten paged KV cache to token-level: [num_pages, page_size, dim] -> [num_pages * page_size, dim]\n Kc_all = ckv_cache.reshape(-1, head_dim_ckv).to(torch.float32) # [total_kv_tokens, head_dim_ckv]\n Kp_all = kpe_cache.reshape(-1, head_dim_kpe).to(torch.float32) # [total_kv_tokens, head_dim_kpe]\n\n output = torch.zeros(\n (batch_size, num_qo_heads, head_dim_ckv), dtype=torch.bfloat16, device=device\n )\n lse = torch.full((batch_size, num_qo_heads), -float(\"inf\"), dtype=torch.float32, device=device)\n\n for b in range(batch_size):\n indices = sparse_indices[b] # [topk]\n\n # Handle padding: -1 indicates invalid indices\n valid_mask = indices != -1\n valid_indices = indices[valid_mask]\n\n if valid_indices.numel() == 0:\n output[b].zero_()\n continue\n\n # For page_size=64, indices encode (page_idx * 64 + offset)\n tok_idx = valid_indices.to(torch.long)\n\n Kc = Kc_all[tok_idx] # [num_valid, head_dim_ckv]\n Kp = Kp_all[tok_idx] # [num_valid, head_dim_kpe]\n qn = q_nope[b].to(torch.float32) # [num_qo_heads, head_dim_ckv]\n qp = q_pe[b].to(torch.float32) # [num_qo_heads, head_dim_kpe]\n\n # Compute attention logits\n logits = (qn @ Kc.T) + (qp @ Kp.T) # [num_qo_heads, num_valid]\n logits_scaled = logits * sm_scale\n\n # Compute 2-base LSE\n lse[b] = torch.logsumexp(logits_scaled, dim=-1) / math.log(2.0)\n\n # Compute attention output\n attn = torch.softmax(logits_scaled, dim=-1) # 
[num_qo_heads, num_valid]\n out = attn @ Kc # [num_qo_heads, head_dim_ckv]\n output[b] = out.to(torch.bfloat16)\n\n return {\"output\": output, \"lse\": lse}"
|
| 121 |
+
}
|
definitions/dsa_paged/dsa_sparse_prefill_causal_h16_ckv512_kpe64_topk256_ps1.json
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "dsa_sparse_prefill_causal_h16_ckv512_kpe64_topk256_ps1",
|
| 3 |
+
"description": "Batched Native Sparse Attention (DSA) prefill with causal masking and sparse TopK KV cache selection. Captured from DeepSeek-V3 with tensor parallel size 8. Uses sparse indexing to select only top-K KV cache entries for attention computation during prefill.",
|
| 4 |
+
"op_type": "dsa_paged",
|
| 5 |
+
"tags": [
|
| 6 |
+
"stage:prefill",
|
| 7 |
+
"status:verified",
|
| 8 |
+
"model:deepseek-v3",
|
| 9 |
+
"model:deepseek-r1",
|
| 10 |
+
"sparse:topk",
|
| 11 |
+
"mask:causal"
|
| 12 |
+
],
|
| 13 |
+
"axes": {
|
| 14 |
+
"total_num_tokens": {
|
| 15 |
+
"type": "var",
|
| 16 |
+
"description": "Total number of tokens across all sequences in the batch."
|
| 17 |
+
},
|
| 18 |
+
"num_qo_heads": {
|
| 19 |
+
"type": "const",
|
| 20 |
+
"value": 16,
|
| 21 |
+
"description": "Number of query heads after tensor parallel split (128/8=16)."
|
| 22 |
+
},
|
| 23 |
+
"head_dim_ckv": {
|
| 24 |
+
"type": "const",
|
| 25 |
+
"value": 512,
|
| 26 |
+
"description": "Compressed KV head dimension."
|
| 27 |
+
},
|
| 28 |
+
"head_dim_kpe": {
|
| 29 |
+
"type": "const",
|
| 30 |
+
"value": 64,
|
| 31 |
+
"description": "Key positional encoding dimension."
|
| 32 |
+
},
|
| 33 |
+
"page_size": {
|
| 34 |
+
"type": "const",
|
| 35 |
+
"value": 1,
|
| 36 |
+
"description": "Page size for KV cache (token-level)."
|
| 37 |
+
},
|
| 38 |
+
"topk": {
|
| 39 |
+
"type": "const",
|
| 40 |
+
"value": 256,
|
| 41 |
+
"description": "Number of top-K KV cache entries selected for sparse attention per token."
|
| 42 |
+
},
|
| 43 |
+
"num_pages": {
|
| 44 |
+
"type": "var",
|
| 45 |
+
"description": "Total number of allocated pages in the KV cache."
|
| 46 |
+
}
|
| 47 |
+
},
|
| 48 |
+
"constraints": [
|
| 49 |
+
"sparse_indices.shape[0] == total_num_tokens",
|
| 50 |
+
"sparse_indices.shape[-1] == topk",
|
| 51 |
+
"ckv_cache.shape[1] == page_size"
|
| 52 |
+
],
|
| 53 |
+
"inputs": {
|
| 54 |
+
"q_nope": {
|
| 55 |
+
"shape": [
|
| 56 |
+
"total_num_tokens",
|
| 57 |
+
"num_qo_heads",
|
| 58 |
+
"head_dim_ckv"
|
| 59 |
+
],
|
| 60 |
+
"dtype": "bfloat16",
|
| 61 |
+
"description": "Query tensor without positional encoding component."
|
| 62 |
+
},
|
| 63 |
+
"q_pe": {
|
| 64 |
+
"shape": [
|
| 65 |
+
"total_num_tokens",
|
| 66 |
+
"num_qo_heads",
|
| 67 |
+
"head_dim_kpe"
|
| 68 |
+
],
|
| 69 |
+
"dtype": "bfloat16",
|
| 70 |
+
"description": "Query positional encoding component."
|
| 71 |
+
},
|
| 72 |
+
"ckv_cache": {
|
| 73 |
+
"shape": [
|
| 74 |
+
"num_pages",
|
| 75 |
+
"page_size",
|
| 76 |
+
"head_dim_ckv"
|
| 77 |
+
],
|
| 78 |
+
"dtype": "bfloat16",
|
| 79 |
+
"description": "Compressed key-value cache with page_size=1."
|
| 80 |
+
},
|
| 81 |
+
"kpe_cache": {
|
| 82 |
+
"shape": [
|
| 83 |
+
"num_pages",
|
| 84 |
+
"page_size",
|
| 85 |
+
"head_dim_kpe"
|
| 86 |
+
],
|
| 87 |
+
"dtype": "bfloat16",
|
| 88 |
+
"description": "Key positional encoding cache."
|
| 89 |
+
},
|
| 90 |
+
"sparse_indices": {
|
| 91 |
+
"shape": [
|
| 92 |
+
"total_num_tokens",
|
| 93 |
+
"topk"
|
| 94 |
+
],
|
| 95 |
+
"dtype": "int32",
|
| 96 |
+
"description": "Sparse indices selecting top-K KV cache entries for each token. Values of -1 indicate padding (invalid indices)."
|
| 97 |
+
},
|
| 98 |
+
"sm_scale": {
|
| 99 |
+
"shape": null,
|
| 100 |
+
"dtype": "float32",
|
| 101 |
+
"description": "Softmax scale. For MLA, uses pre-absorption head dimension: 1/sqrt(head_dim_qk + head_dim_kpe) = 1/sqrt(128 + 64) = 1/sqrt(192)."
|
| 102 |
+
}
|
| 103 |
+
},
|
| 104 |
+
"outputs": {
|
| 105 |
+
"output": {
|
| 106 |
+
"shape": [
|
| 107 |
+
"total_num_tokens",
|
| 108 |
+
"num_qo_heads",
|
| 109 |
+
"head_dim_ckv"
|
| 110 |
+
],
|
| 111 |
+
"dtype": "bfloat16"
|
| 112 |
+
},
|
| 113 |
+
"lse": {
|
| 114 |
+
"shape": [
|
| 115 |
+
"total_num_tokens",
|
| 116 |
+
"num_qo_heads"
|
| 117 |
+
],
|
| 118 |
+
"dtype": "float32",
|
| 119 |
+
"description": "The base-2 log-sum-exp of attention logits."
|
| 120 |
+
}
|
| 121 |
+
},
|
| 122 |
+
"reference": "import math\nimport torch\n\n\n@torch.no_grad()\ndef run(q_nope, q_pe, ckv_cache, kpe_cache, sparse_indices, sm_scale):\n total_num_tokens, num_qo_heads, head_dim_ckv = q_nope.shape\n head_dim_kpe = q_pe.shape[-1]\n page_size = ckv_cache.shape[1]\n topk = sparse_indices.shape[-1]\n\n # Check constants\n assert num_qo_heads == 16\n assert head_dim_ckv == 512\n assert head_dim_kpe == 64\n assert page_size == 1\n assert topk == 256\n\n # Check constraints\n assert sparse_indices.shape[0] == total_num_tokens\n assert sparse_indices.shape[-1] == topk\n assert ckv_cache.shape[1] == page_size\n\n device = q_nope.device\n\n # Squeeze page dimension (page_size=1)\n Kc_all = ckv_cache.squeeze(1).to(torch.float32) # [num_pages, head_dim_ckv]\n Kp_all = kpe_cache.squeeze(1).to(torch.float32) # [num_pages, head_dim_kpe]\n\n output = torch.zeros(\n (total_num_tokens, num_qo_heads, head_dim_ckv), dtype=torch.bfloat16, device=device\n )\n lse = torch.full((total_num_tokens, num_qo_heads), -float(\"inf\"), dtype=torch.float32, device=device)\n\n for t in range(total_num_tokens):\n indices = sparse_indices[t] # [topk]\n\n # Handle padding: -1 indicates invalid indices\n valid_mask = indices != -1\n valid_indices = indices[valid_mask]\n\n if valid_indices.numel() == 0:\n output[t].zero_()\n continue\n\n tok_idx = valid_indices.to(torch.long)\n\n Kc = Kc_all[tok_idx] # [num_valid, head_dim_ckv]\n Kp = Kp_all[tok_idx] # [num_valid, head_dim_kpe]\n qn = q_nope[t].to(torch.float32) # [num_qo_heads, head_dim_ckv]\n qp = q_pe[t].to(torch.float32) # [num_qo_heads, head_dim_kpe]\n\n # Compute attention logits\n logits = (qn @ Kc.T) + (qp @ Kp.T) # [num_qo_heads, num_valid]\n logits_scaled = logits * sm_scale\n\n # Compute 2-base LSE\n lse[t] = torch.logsumexp(logits_scaled, dim=-1) / math.log(2.0)\n\n # Compute attention output\n attn = torch.softmax(logits_scaled, dim=-1) # [num_qo_heads, num_valid]\n out = attn @ Kc # [num_qo_heads, head_dim_ckv]\n output[t] = 
out.to(torch.bfloat16)\n\n return output, lse"
|
| 123 |
+
}
|
definitions/dsa_paged/dsa_sparse_prefill_causal_h16_ckv512_kpe64_topk256_ps64.json
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "dsa_sparse_prefill_causal_h16_ckv512_kpe64_topk256_ps64",
|
| 3 |
+
"description": "Batched Native Sparse Attention (DSA) prefill with causal masking and sparse TopK KV cache selection. Captured from DeepSeek-V3 with tensor parallel size 8. Uses sparse indexing to select only top-K KV cache entries for attention computation during prefill. Page size 64 variant.",
|
| 4 |
+
"op_type": "dsa_paged",
|
| 5 |
+
"tags": [
|
| 6 |
+
"stage:prefill",
|
| 7 |
+
"status:verified",
|
| 8 |
+
"model:deepseek-v3",
|
| 9 |
+
"model:deepseek-r1",
|
| 10 |
+
"sparse:topk",
|
| 11 |
+
"mask:causal"
|
| 12 |
+
],
|
| 13 |
+
"axes": {
|
| 14 |
+
"total_num_tokens": {
|
| 15 |
+
"type": "var",
|
| 16 |
+
"description": "Total number of tokens across all sequences in the batch."
|
| 17 |
+
},
|
| 18 |
+
"num_qo_heads": {
|
| 19 |
+
"type": "const",
|
| 20 |
+
"value": 16,
|
| 21 |
+
"description": "Number of query heads after tensor parallel split (128/8=16)."
|
| 22 |
+
},
|
| 23 |
+
"head_dim_ckv": {
|
| 24 |
+
"type": "const",
|
| 25 |
+
"value": 512,
|
| 26 |
+
"description": "Compressed KV head dimension."
|
| 27 |
+
},
|
| 28 |
+
"head_dim_kpe": {
|
| 29 |
+
"type": "const",
|
| 30 |
+
"value": 64,
|
| 31 |
+
"description": "Key positional encoding dimension."
|
| 32 |
+
},
|
| 33 |
+
"page_size": {
|
| 34 |
+
"type": "const",
|
| 35 |
+
"value": 64,
|
| 36 |
+
"description": "Page size for KV cache (64 tokens per page)."
|
| 37 |
+
},
|
| 38 |
+
"topk": {
|
| 39 |
+
"type": "const",
|
| 40 |
+
"value": 256,
|
| 41 |
+
"description": "Number of top-K KV cache entries selected for sparse attention per token."
|
| 42 |
+
},
|
| 43 |
+
"num_pages": {
|
| 44 |
+
"type": "var",
|
| 45 |
+
"description": "Total number of allocated pages in the KV cache."
|
| 46 |
+
}
|
| 47 |
+
},
|
| 48 |
+
"constraints": [
|
| 49 |
+
"sparse_indices.shape[0] == total_num_tokens",
|
| 50 |
+
"sparse_indices.shape[-1] == topk",
|
| 51 |
+
"ckv_cache.shape[1] == page_size"
|
| 52 |
+
],
|
| 53 |
+
"inputs": {
|
| 54 |
+
"q_nope": {
|
| 55 |
+
"shape": [
|
| 56 |
+
"total_num_tokens",
|
| 57 |
+
"num_qo_heads",
|
| 58 |
+
"head_dim_ckv"
|
| 59 |
+
],
|
| 60 |
+
"dtype": "bfloat16",
|
| 61 |
+
"description": "Query tensor without positional encoding component."
|
| 62 |
+
},
|
| 63 |
+
"q_pe": {
|
| 64 |
+
"shape": [
|
| 65 |
+
"total_num_tokens",
|
| 66 |
+
"num_qo_heads",
|
| 67 |
+
"head_dim_kpe"
|
| 68 |
+
],
|
| 69 |
+
"dtype": "bfloat16",
|
| 70 |
+
"description": "Query positional encoding component."
|
| 71 |
+
},
|
| 72 |
+
"ckv_cache": {
|
| 73 |
+
"shape": [
|
| 74 |
+
"num_pages",
|
| 75 |
+
"page_size",
|
| 76 |
+
"head_dim_ckv"
|
| 77 |
+
],
|
| 78 |
+
"dtype": "bfloat16",
|
| 79 |
+
"description": "Compressed key-value cache with page_size=64."
|
| 80 |
+
},
|
| 81 |
+
"kpe_cache": {
|
| 82 |
+
"shape": [
|
| 83 |
+
"num_pages",
|
| 84 |
+
"page_size",
|
| 85 |
+
"head_dim_kpe"
|
| 86 |
+
],
|
| 87 |
+
"dtype": "bfloat16",
|
| 88 |
+
"description": "Key positional encoding cache."
|
| 89 |
+
},
|
| 90 |
+
"sparse_indices": {
|
| 91 |
+
"shape": [
|
| 92 |
+
"total_num_tokens",
|
| 93 |
+
"topk"
|
| 94 |
+
],
|
| 95 |
+
"dtype": "int32",
|
| 96 |
+
"description": "Sparse indices selecting top-K KV cache entries for each token. Values of -1 indicate padding (invalid indices). For page_size=64, indices encode (page_idx * 64 + offset)."
|
| 97 |
+
},
|
| 98 |
+
"sm_scale": {
|
| 99 |
+
"shape": null,
|
| 100 |
+
"dtype": "float32",
|
| 101 |
+
"description": "Softmax scale. For MLA, uses pre-absorption head dimension: 1/sqrt(head_dim_qk + head_dim_kpe) = 1/sqrt(128 + 64) = 1/sqrt(192)."
|
| 102 |
+
}
|
| 103 |
+
},
|
| 104 |
+
"outputs": {
|
| 105 |
+
"output": {
|
| 106 |
+
"shape": [
|
| 107 |
+
"total_num_tokens",
|
| 108 |
+
"num_qo_heads",
|
| 109 |
+
"head_dim_ckv"
|
| 110 |
+
],
|
| 111 |
+
"dtype": "bfloat16"
|
| 112 |
+
},
|
| 113 |
+
"lse": {
|
| 114 |
+
"shape": [
|
| 115 |
+
"total_num_tokens",
|
| 116 |
+
"num_qo_heads"
|
| 117 |
+
],
|
| 118 |
+
"dtype": "float32",
|
| 119 |
+
"description": "The base-2 log-sum-exp of attention logits."
|
| 120 |
+
}
|
| 121 |
+
},
|
| 122 |
+
"reference": "import math\nimport torch\n\n\n@torch.no_grad()\ndef run(q_nope, q_pe, ckv_cache, kpe_cache, sparse_indices, sm_scale):\n total_num_tokens, num_qo_heads, head_dim_ckv = q_nope.shape\n head_dim_kpe = q_pe.shape[-1]\n num_pages, page_size, _ = ckv_cache.shape\n topk = sparse_indices.shape[-1]\n\n # Check constants\n assert num_qo_heads == 16\n assert head_dim_ckv == 512\n assert head_dim_kpe == 64\n assert page_size == 64\n assert topk == 256\n\n # Check constraints\n assert sparse_indices.shape[0] == total_num_tokens\n assert sparse_indices.shape[-1] == topk\n assert ckv_cache.shape[1] == page_size\n\n device = q_nope.device\n\n # Flatten paged KV cache to token-level: [num_pages, page_size, dim] -> [num_pages * page_size, dim]\n Kc_all = ckv_cache.reshape(-1, head_dim_ckv).to(torch.float32) # [total_kv_tokens, head_dim_ckv]\n Kp_all = kpe_cache.reshape(-1, head_dim_kpe).to(torch.float32) # [total_kv_tokens, head_dim_kpe]\n\n output = torch.zeros(\n (total_num_tokens, num_qo_heads, head_dim_ckv), dtype=torch.bfloat16, device=device\n )\n lse = torch.full((total_num_tokens, num_qo_heads), -float(\"inf\"), dtype=torch.float32, device=device)\n\n for t in range(total_num_tokens):\n indices = sparse_indices[t] # [topk]\n\n # Handle padding: -1 indicates invalid indices\n valid_mask = indices != -1\n valid_indices = indices[valid_mask]\n\n if valid_indices.numel() == 0:\n output[t].zero_()\n continue\n\n # For page_size=64, indices encode (page_idx * 64 + offset)\n tok_idx = valid_indices.to(torch.long)\n\n Kc = Kc_all[tok_idx] # [num_valid, head_dim_ckv]\n Kp = Kp_all[tok_idx] # [num_valid, head_dim_kpe]\n qn = q_nope[t].to(torch.float32) # [num_qo_heads, head_dim_ckv]\n qp = q_pe[t].to(torch.float32) # [num_qo_heads, head_dim_kpe]\n\n # Compute attention logits\n logits = (qn @ Kc.T) + (qp @ Kp.T) # [num_qo_heads, num_valid]\n logits_scaled = logits * sm_scale\n\n # Compute 2-base LSE\n lse[t] = torch.logsumexp(logits_scaled, dim=-1) / math.log(2.0)\n\n # 
Compute attention output\n attn = torch.softmax(logits_scaled, dim=-1) # [num_qo_heads, num_valid]\n out = attn @ Kc # [num_qo_heads, head_dim_ckv]\n output[t] = out.to(torch.bfloat16)\n\n return {\"output\": output, \"lse\": lse}"
|
| 123 |
+
}
|
definitions/dsa_paged/dsa_topk_indexer_fp8_h64_d128_topk256_ps64.json
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "dsa_topk_indexer_fp8_h64_d128_topk256_ps64",
|
| 3 |
+
"description": "DeepSeek Sparse Attention (DSA) TopK indexer with FP8 quantization for DeepSeek-V3. Computes sparse attention scores using ReLU activation and learned weights, then selects top-K KV cache indices. Formula: sum(relu(q @ K.T) * weights). Matches SGLang/deep_gemm implementation. Page size 64 variant.",
|
| 4 |
+
"op_type": "dsa_paged",
|
| 5 |
+
"tags": [
|
| 6 |
+
"stage:indexer",
|
| 7 |
+
"status:verified",
|
| 8 |
+
"model:deepseek-v3",
|
| 9 |
+
"sparse:topk",
|
| 10 |
+
"quant:fp8"
|
| 11 |
+
],
|
| 12 |
+
"axes": {
|
| 13 |
+
"batch_size": {
|
| 14 |
+
"type": "var"
|
| 15 |
+
},
|
| 16 |
+
"num_index_heads": {
|
| 17 |
+
"type": "const",
|
| 18 |
+
"value": 64,
|
| 19 |
+
"description": "Number of indexer heads (64 required by deep_gemm)."
|
| 20 |
+
},
|
| 21 |
+
"index_head_dim": {
|
| 22 |
+
"type": "const",
|
| 23 |
+
"value": 128,
|
| 24 |
+
"description": "Indexer head dimension (matches deep_gemm requirement)."
|
| 25 |
+
},
|
| 26 |
+
"page_size": {
|
| 27 |
+
"type": "const",
|
| 28 |
+
"value": 64,
|
| 29 |
+
"description": "Page size for KV cache (64 tokens per page, required by deep_gemm)."
|
| 30 |
+
},
|
| 31 |
+
"topk": {
|
| 32 |
+
"type": "const",
|
| 33 |
+
"value": 256,
|
| 34 |
+
"description": "Number of top-K indices to select."
|
| 35 |
+
},
|
| 36 |
+
"max_num_pages": {
|
| 37 |
+
"type": "var",
|
| 38 |
+
"description": "Maximum number of pages per sequence."
|
| 39 |
+
},
|
| 40 |
+
"num_pages": {
|
| 41 |
+
"type": "var",
|
| 42 |
+
"description": "Total number of allocated pages in the KV cache."
|
| 43 |
+
},
|
| 44 |
+
"kv_cache_num_heads": {
|
| 45 |
+
"type": "const",
|
| 46 |
+
"value": 1,
|
| 47 |
+
"description": "Number of heads in KV cache (always 1 for deep_gemm MQA format)."
|
| 48 |
+
},
|
| 49 |
+
"head_dim_with_scale": {
|
| 50 |
+
"type": "const",
|
| 51 |
+
"value": 132,
|
| 52 |
+
"description": "Head dimension (128) + scale bytes (4) = 132 for deep_gemm FP8 format."
|
| 53 |
+
}
|
| 54 |
+
},
|
| 55 |
+
"constraints": [
|
| 56 |
+
"topk <= max_num_pages * page_size"
|
| 57 |
+
],
|
| 58 |
+
"inputs": {
|
| 59 |
+
"q_index_fp8": {
|
| 60 |
+
"shape": [
|
| 61 |
+
"batch_size",
|
| 62 |
+
"num_index_heads",
|
| 63 |
+
"index_head_dim"
|
| 64 |
+
],
|
| 65 |
+
"dtype": "float8_e4m3fn",
|
| 66 |
+
"description": "FP8 quantized query tensor for indexing."
|
| 67 |
+
},
|
| 68 |
+
"k_index_cache_fp8": {
|
| 69 |
+
"shape": [
|
| 70 |
+
"num_pages",
|
| 71 |
+
"page_size",
|
| 72 |
+
"kv_cache_num_heads",
|
| 73 |
+
"head_dim_with_scale"
|
| 74 |
+
],
|
| 75 |
+
"dtype": "int8",
|
| 76 |
+
"description": "FP8 quantized key index cache with embedded scale factors (deep_gemm format). Memory layout: all FP8 values first (page_size * 128 bytes), then all scale factors (page_size * 4 bytes). Reshaped to [num_pages, page_size, 1, 132]. Uses int8 dtype but should be interpreted as uint8."
|
| 77 |
+
},
|
| 78 |
+
"weights": {
|
| 79 |
+
"shape": [
|
| 80 |
+
"batch_size",
|
| 81 |
+
"num_index_heads"
|
| 82 |
+
],
|
| 83 |
+
"dtype": "float32",
|
| 84 |
+
"description": "Learned weights for combining heads. In SGLang: weights = weights_proj(x) * n_heads^-0.5 * q_scale * softmax_scale."
|
| 85 |
+
},
|
| 86 |
+
"seq_lens": {
|
| 87 |
+
"shape": [
|
| 88 |
+
"batch_size"
|
| 89 |
+
],
|
| 90 |
+
"dtype": "int32",
|
| 91 |
+
"description": "Sequence lengths for each batch element."
|
| 92 |
+
},
|
| 93 |
+
"block_table": {
|
| 94 |
+
"shape": [
|
| 95 |
+
"batch_size",
|
| 96 |
+
"max_num_pages"
|
| 97 |
+
],
|
| 98 |
+
"dtype": "int32",
|
| 99 |
+
"description": "Page-level block table mapping batch to page indices."
|
| 100 |
+
}
|
| 101 |
+
},
|
| 102 |
+
"outputs": {
|
| 103 |
+
"topk_indices": {
|
| 104 |
+
"shape": [
|
| 105 |
+
"batch_size",
|
| 106 |
+
"topk"
|
| 107 |
+
],
|
| 108 |
+
"dtype": "int32",
|
| 109 |
+
"description": "Top-K token indices for each batch element. Values of -1 indicate padding."
|
| 110 |
+
}
|
| 111 |
+
},
|
| 112 |
+
"reference": "import torch\n\n\ndef dequant_fp8_kv_cache(k_index_cache_fp8):\n \"\"\"Dequantize FP8 KV cache from deep_gemm format.\n \n Input: [num_pages, page_size, 1, 132] int8 (interpreted as uint8)\n Memory layout (per page): [fp8_data (page_size * 128 bytes), scales (page_size * 4 bytes)]\n After view to [num_pages, page_size, 1, 132]: NOT directly indexable as [fp8, scale] per token!\n Output: [num_pages, page_size, 128] float32\n \"\"\"\n # View as uint8 for correct byte interpretation\n k_index_cache_fp8 = k_index_cache_fp8.view(torch.uint8)\n num_pages, page_size, num_heads, head_dim_sf = k_index_cache_fp8.shape\n head_dim = head_dim_sf - 4 # 128\n \n # Go back to flat format to reverse the packing\n kv_flat = k_index_cache_fp8.view(num_pages, page_size * head_dim_sf)\n \n # FP8 part: first page_size * head_dim bytes\n fp8_bytes = kv_flat[:, :page_size * head_dim].contiguous()\n fp8_tensor = fp8_bytes.view(num_pages, page_size, head_dim).view(torch.float8_e4m3fn)\n fp8_float = fp8_tensor.to(torch.float32)\n \n # Scale part: last page_size * 4 bytes -> page_size float32 values\n scale_bytes = kv_flat[:, page_size * head_dim:].contiguous()\n scale = scale_bytes.view(num_pages, page_size, 4).view(torch.float32) # [num_pages, page_size, 1]\n \n return fp8_float * scale\n\n\n@torch.no_grad()\ndef run(q_index_fp8, k_index_cache_fp8, weights, seq_lens, block_table):\n batch_size, num_index_heads, index_head_dim = q_index_fp8.shape\n num_pages, page_size, _, _ = k_index_cache_fp8.shape\n topk = 256\n\n # Check constants\n assert num_index_heads == 64\n assert index_head_dim == 128\n assert page_size == 64\n\n device = q_index_fp8.device\n\n # Dequantize inputs\n q = q_index_fp8.to(torch.float32) # [batch, heads, head_dim]\n K_all = dequant_fp8_kv_cache(k_index_cache_fp8) # [num_pages, page_size, head_dim]\n\n topk_indices = torch.full((batch_size, topk), -1, dtype=torch.int32, device=device)\n max_num_pages = block_table.shape[1]\n\n for b in range(batch_size):\n 
seq_len = int(seq_lens[b].item())\n \n if seq_len == 0:\n continue\n\n # Get pages for this sequence\n num_pages_for_seq = (seq_len + page_size - 1) // page_size\n page_indices = block_table[b, :num_pages_for_seq].to(torch.long)\n \n # Gather K from pages\n K_paged = K_all[page_indices] # [num_pages_for_seq, page_size, head_dim]\n K = K_paged.reshape(-1, index_head_dim)[:seq_len] # [seq_len, head_dim]\n \n # Query for this batch element\n q_b = q[b] # [num_heads, head_dim]\n \n # Compute attention scores\n scores = q_b @ K.T # [num_heads, seq_len]\n \n # Apply ReLU (deep_gemm uses ReLU activation)\n scores_relu = torch.relu(scores) # [num_heads, seq_len]\n \n # Apply learned weights and sum across heads\n w = weights[b] # [num_heads]\n weighted_scores = scores_relu * w[:, None] # [num_heads, seq_len]\n final_scores = weighted_scores.sum(dim=0) # [seq_len]\n \n # Select top-K\n actual_topk = min(topk, seq_len)\n _, topk_idx = torch.topk(final_scores, actual_topk)\n \n # Convert to global token indices\n # Token index = page_idx * page_size + offset_in_page\n page_idx_per_token = topk_idx // page_size\n offset_per_token = topk_idx % page_size\n global_page_idx = page_indices[page_idx_per_token]\n topk_tokens = global_page_idx * page_size + offset_per_token\n \n topk_indices[b, :actual_topk] = topk_tokens.to(torch.int32)\n\n return {\"topk_indices\": topk_indices}"
|
| 113 |
+
}
|
definitions/gdn/gdn_decode_qk16_v32_d128_k_last.json
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "gdn_decode_qk16_v32_d128_k_last",
|
| 3 |
+
"description": "Gated Delta Net decode with GVA configuration and k-last state layout. Single-token generation with recurrent state update. Captured from Qwen3 Next linear attention layers.",
|
| 4 |
+
"op_type": "gdn",
|
| 5 |
+
"tags": [
|
| 6 |
+
"stage:decode",
|
| 7 |
+
"status:verified",
|
| 8 |
+
"model:qwen3-next",
|
| 9 |
+
"layout:k-last"
|
| 10 |
+
],
|
| 11 |
+
"axes": {
|
| 12 |
+
"batch_size": {
|
| 13 |
+
"type": "var",
|
| 14 |
+
"description": "Number of sequences being decoded concurrently."
|
| 15 |
+
},
|
| 16 |
+
"seq_len": {
|
| 17 |
+
"type": "const",
|
| 18 |
+
"value": 1,
|
| 19 |
+
"description": "Sequence length (always 1 for single-token decode)."
|
| 20 |
+
},
|
| 21 |
+
"num_q_heads": {
|
| 22 |
+
"type": "const",
|
| 23 |
+
"value": 16,
|
| 24 |
+
"description": "Number of query heads (same as key heads in GVA mode)."
|
| 25 |
+
},
|
| 26 |
+
"num_k_heads": {
|
| 27 |
+
"type": "const",
|
| 28 |
+
"value": 16,
|
| 29 |
+
"description": "Number of key heads."
|
| 30 |
+
},
|
| 31 |
+
"num_v_heads": {
|
| 32 |
+
"type": "const",
|
| 33 |
+
"value": 32,
|
| 34 |
+
"description": "Number of value heads (GVA: more value heads than query heads)."
|
| 35 |
+
},
|
| 36 |
+
"head_size": {
|
| 37 |
+
"type": "const",
|
| 38 |
+
"value": 128
|
| 39 |
+
}
|
| 40 |
+
},
|
| 41 |
+
"constraints": [
|
| 42 |
+
"num_v_heads >= num_q_heads",
|
| 43 |
+
"num_v_heads % num_q_heads == 0",
|
| 44 |
+
"num_k_heads == num_q_heads"
|
| 45 |
+
],
|
| 46 |
+
"inputs": {
|
| 47 |
+
"q": {
|
| 48 |
+
"shape": ["batch_size", "seq_len", "num_q_heads", "head_size"],
|
| 49 |
+
"dtype": "bfloat16",
|
| 50 |
+
"description": "Query tensor for single token decode."
|
| 51 |
+
},
|
| 52 |
+
"k": {
|
| 53 |
+
"shape": ["batch_size", "seq_len", "num_k_heads", "head_size"],
|
| 54 |
+
"dtype": "bfloat16",
|
| 55 |
+
"description": "Key tensor for single token decode."
|
| 56 |
+
},
|
| 57 |
+
"v": {
|
| 58 |
+
"shape": ["batch_size", "seq_len", "num_v_heads", "head_size"],
|
| 59 |
+
"dtype": "bfloat16",
|
| 60 |
+
"description": "Value tensor for single token decode."
|
| 61 |
+
},
|
| 62 |
+
"state": {
|
| 63 |
+
"shape": ["batch_size", "num_v_heads", "head_size", "head_size"],
|
| 64 |
+
"dtype": "float32",
|
| 65 |
+
"description": "Recurrent state in k-last layout [B, H, V, K].",
|
| 66 |
+
"optional": true
|
| 67 |
+
},
|
| 68 |
+
"A_log": {
|
| 69 |
+
"shape": ["num_v_heads"],
|
| 70 |
+
"dtype": "float32",
|
| 71 |
+
"description": "Log decay parameter (learnable). Used to compute g = exp(-exp(A_log) * softplus(a + dt_bias))."
|
| 72 |
+
},
|
| 73 |
+
"a": {
|
| 74 |
+
"shape": ["batch_size", "seq_len", "num_v_heads"],
|
| 75 |
+
"dtype": "bfloat16",
|
| 76 |
+
"description": "Input-dependent decay from projection."
|
| 77 |
+
},
|
| 78 |
+
"dt_bias": {
|
| 79 |
+
"shape": ["num_v_heads"],
|
| 80 |
+
"dtype": "float32",
|
| 81 |
+
"description": "Decay bias (learnable). Added to 'a' before softplus."
|
| 82 |
+
},
|
| 83 |
+
"b": {
|
| 84 |
+
"shape": ["batch_size", "seq_len", "num_v_heads"],
|
| 85 |
+
"dtype": "bfloat16",
|
| 86 |
+
"description": "Update gate input from projection. beta = sigmoid(b)."
|
| 87 |
+
},
|
| 88 |
+
"scale": {
|
| 89 |
+
"shape": null,
|
| 90 |
+
"dtype": "float32",
|
| 91 |
+
"description": "Scale factor. Default is 1/sqrt(head_size)."
|
| 92 |
+
}
|
| 93 |
+
},
|
| 94 |
+
"outputs": {
|
| 95 |
+
"output": {
|
| 96 |
+
"shape": ["batch_size", "seq_len", "num_v_heads", "head_size"],
|
| 97 |
+
"dtype": "bfloat16",
|
| 98 |
+
"description": "Attention output. Shape follows num_v_heads in GVA mode."
|
| 99 |
+
},
|
| 100 |
+
"new_state": {
|
| 101 |
+
"shape": ["batch_size", "num_v_heads", "head_size", "head_size"],
|
| 102 |
+
"dtype": "float32",
|
| 103 |
+
"description": "Updated recurrent state in k-last layout [B, H, V, K]."
|
| 104 |
+
}
|
| 105 |
+
},
|
| 106 |
+
"reference": "import math\nimport torch\nimport torch.nn.functional as F\n\n\ndef matmul(a: torch.Tensor, b: torch.Tensor):\n \"\"\"Float32 matmul for numerical stability.\"\"\"\n return a.float() @ b.float()\n\n\n@torch.no_grad()\ndef run(q, k, v, state, A_log, a, dt_bias, b, scale):\n \"\"\"\n Gated Delta Net decode reference implementation (k-last layout).\n \n State layout: [B, H, V, K] (k-last, K dimension at the end)\n \n Gate computation:\n g = exp(-exp(A_log) * softplus(a + dt_bias))\n beta = sigmoid(b)\n \n Delta rule update:\n state_new = g * state_old + k^T @ (beta * v + (1-beta) * k @ state_old) - k^T @ (k @ state_old)\n output = scale * q @ state_new\n \"\"\"\n B, T, num_q_heads, K = q.shape\n _, _, num_k_heads, _ = k.shape\n _, _, num_v_heads, V = v.shape\n num_heads = num_v_heads\n device = q.device\n \n assert num_q_heads == 16\n assert num_k_heads == 16\n assert num_v_heads == 32\n assert K == 128 and V == 128\n assert T == 1\n \n if scale is None or scale == 0.0:\n scale = 1.0 / math.sqrt(K)\n \n # Compute g and beta from raw parameters\n x = a.float() + dt_bias.float() # [B, 1, HV]\n g = torch.exp(-torch.exp(A_log.float()) * F.softplus(x)) # [B, 1, HV]\n beta = torch.sigmoid(b.float()) # [B, 1, HV]\n \n q_f32 = q.squeeze(1).float()\n k_f32 = k.squeeze(1).float()\n v_f32 = v.squeeze(1).float()\n g_f32 = g.squeeze(1).float()\n beta_f32 = beta.squeeze(1).float()\n \n if state is not None:\n state_f32 = state.float()\n else:\n state_f32 = torch.zeros(B, num_heads, V, K, dtype=torch.float32, device=device)\n \n q_exp = q_f32.repeat_interleave(num_v_heads // num_q_heads, dim=1)\n k_exp = k_f32.repeat_interleave(num_v_heads // num_k_heads, dim=1)\n \n new_state = torch.zeros_like(state_f32)\n output = torch.zeros(B, num_heads, V, dtype=torch.float32, device=device)\n \n for b_idx in range(B):\n for h_idx in range(num_heads):\n q_h = q_exp[b_idx, h_idx]\n k_h = k_exp[b_idx, h_idx]\n v_h = v_f32[b_idx, h_idx]\n h_state = state_f32[b_idx, 
h_idx].clone().transpose(-1, -2) # [V,K] -> [K,V]\n g_val = g_f32[b_idx, h_idx]\n beta_val = beta_f32[b_idx, h_idx]\n \n old_state = g_val * h_state\n old_v = k_h @ old_state\n new_v = beta_val * v_h + (1 - beta_val) * old_v\n state_remove = k_h.unsqueeze(1) @ old_v.unsqueeze(0)\n state_update = k_h.unsqueeze(1) @ new_v.unsqueeze(0)\n h_state = old_state - state_remove + state_update\n \n output[b_idx, h_idx] = scale * (q_h @ h_state)\n new_state[b_idx, h_idx] = h_state.transpose(-1, -2) # [K,V] -> [V,K]\n \n output = output.unsqueeze(1).to(torch.bfloat16)\n return {\"output\": output, \"new_state\": new_state}"
|
| 107 |
+
}
|
definitions/gdn/gdn_prefill_qk16_v32_d128_k_last.json
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "gdn_prefill_qk16_v32_d128_k_last",
|
| 3 |
+
"description": "Gated Delta Net prefill with GVA configuration and k-last state layout. The state is in k-last layout [N, H, V, K]. Captured from Qwen3 Next linear attention layers.",
|
| 4 |
+
"op_type": "gdn",
|
| 5 |
+
"tags": [
|
| 6 |
+
"stage:prefill",
|
| 7 |
+
"status:verified",
|
| 8 |
+
"model:qwen3-next",
|
| 9 |
+
"layout:k-last"
|
| 10 |
+
],
|
| 11 |
+
"axes": {
|
| 12 |
+
"total_seq_len": {
|
| 13 |
+
"type": "var"
|
| 14 |
+
},
|
| 15 |
+
"num_seqs": {
|
| 16 |
+
"type": "var"
|
| 17 |
+
},
|
| 18 |
+
"num_q_heads": {
|
| 19 |
+
"type": "const",
|
| 20 |
+
"value": 16,
|
| 21 |
+
"description": "Number of query heads (same as key heads in GVA mode)."
|
| 22 |
+
},
|
| 23 |
+
"num_k_heads": {
|
| 24 |
+
"type": "const",
|
| 25 |
+
"value": 16,
|
| 26 |
+
"description": "Number of key heads."
|
| 27 |
+
},
|
| 28 |
+
"num_v_heads": {
|
| 29 |
+
"type": "const",
|
| 30 |
+
"value": 32,
|
| 31 |
+
"description": "Number of value heads (GVA: more value heads than query heads)."
|
| 32 |
+
},
|
| 33 |
+
"head_size": {
|
| 34 |
+
"type": "const",
|
| 35 |
+
"value": 128
|
| 36 |
+
},
|
| 37 |
+
"len_cu_seqlens": {
|
| 38 |
+
"type": "var",
|
| 39 |
+
"description": "Length of cu_seqlens array (num_seqs + 1)."
|
| 40 |
+
}
|
| 41 |
+
},
|
| 42 |
+
"constraints": [
|
| 43 |
+
"len_cu_seqlens == num_seqs + 1",
|
| 44 |
+
"total_seq_len == cu_seqlens[-1].item()"
|
| 45 |
+
],
|
| 46 |
+
"inputs": {
|
| 47 |
+
"q": {
|
| 48 |
+
"shape": ["total_seq_len", "num_q_heads", "head_size"],
|
| 49 |
+
"dtype": "bfloat16",
|
| 50 |
+
"description": "Query tensor."
|
| 51 |
+
},
|
| 52 |
+
"k": {
|
| 53 |
+
"shape": ["total_seq_len", "num_k_heads", "head_size"],
|
| 54 |
+
"dtype": "bfloat16",
|
| 55 |
+
"description": "Key tensor."
|
| 56 |
+
},
|
| 57 |
+
"v": {
|
| 58 |
+
"shape": ["total_seq_len", "num_v_heads", "head_size"],
|
| 59 |
+
"dtype": "bfloat16",
|
| 60 |
+
"description": "Value tensor."
|
| 61 |
+
},
|
| 62 |
+
"state": {
|
| 63 |
+
"shape": ["num_seqs", "num_v_heads", "head_size", "head_size"],
|
| 64 |
+
"dtype": "float32",
|
| 65 |
+
"description": "Recurrent state in k-last layout [N, H, V, K].",
|
| 66 |
+
"optional": true
|
| 67 |
+
},
|
| 68 |
+
"A_log": {
|
| 69 |
+
"shape": ["num_v_heads"],
|
| 70 |
+
"dtype": "float32",
|
| 71 |
+
"description": "Log decay parameter (learnable). Used to compute g = exp(-exp(A_log) * softplus(a + dt_bias))."
|
| 72 |
+
},
|
| 73 |
+
"a": {
|
| 74 |
+
"shape": ["total_seq_len", "num_v_heads"],
|
| 75 |
+
"dtype": "bfloat16",
|
| 76 |
+
"description": "Input-dependent decay from projection."
|
| 77 |
+
},
|
| 78 |
+
"dt_bias": {
|
| 79 |
+
"shape": ["num_v_heads"],
|
| 80 |
+
"dtype": "float32",
|
| 81 |
+
"description": "Decay bias (learnable). Added to 'a' before softplus."
|
| 82 |
+
},
|
| 83 |
+
"b": {
|
| 84 |
+
"shape": ["total_seq_len", "num_v_heads"],
|
| 85 |
+
"dtype": "bfloat16",
|
| 86 |
+
"description": "Update gate input from projection. beta = sigmoid(b)."
|
| 87 |
+
},
|
| 88 |
+
"cu_seqlens": {
|
| 89 |
+
"shape": ["len_cu_seqlens"],
|
| 90 |
+
"dtype": "int64",
|
| 91 |
+
"description": "Cumulative sequence lengths for variable-length batching."
|
| 92 |
+
},
|
| 93 |
+
"scale": {
|
| 94 |
+
"shape": null,
|
| 95 |
+
"dtype": "float32",
|
| 96 |
+
"description": "Scale factor. Default is 1/sqrt(head_size)."
|
| 97 |
+
}
|
| 98 |
+
},
|
| 99 |
+
"outputs": {
|
| 100 |
+
"output": {
|
| 101 |
+
"shape": ["total_seq_len", "num_v_heads", "head_size"],
|
| 102 |
+
"dtype": "bfloat16",
|
| 103 |
+
"description": "Attention output. Shape follows num_v_heads in GVA mode."
|
| 104 |
+
},
|
| 105 |
+
"new_state": {
|
| 106 |
+
"shape": ["num_seqs", "num_v_heads", "head_size", "head_size"],
|
| 107 |
+
"dtype": "float32",
|
| 108 |
+
"description": "Updated recurrent state in k-last layout [N, H, V, K]."
|
| 109 |
+
}
|
| 110 |
+
},
|
| 111 |
+
"reference": "import math\nimport torch\nimport torch.nn.functional as F\n\n\ndef matmul(a: torch.Tensor, b: torch.Tensor):\n \"\"\"Float32 matmul for numerical stability.\"\"\"\n return a.float() @ b.float()\n\n\n@torch.no_grad()\ndef run(q, k, v, state, A_log, a, dt_bias, b, cu_seqlens, scale):\n \"\"\"\n Gated Delta Net prefill reference implementation (k-last layout).\n \n State layout: [H, V, K] (k-last, K dimension at the end)\n \n Gate computation:\n g = exp(-exp(A_log) * softplus(a + dt_bias))\n beta = sigmoid(b)\n \n Delta rule update:\n state_new = g * state_old + k^T @ (beta * v + (1-beta) * k @ state_old) - k^T @ (k @ state_old)\n output = scale * q @ state_new\n \"\"\"\n total_seq_len, num_q_heads, head_size = q.shape\n num_v_heads = v.shape[1]\n num_k_heads = k.shape[1]\n num_sab_heads = max(num_q_heads, num_v_heads)\n num_seqs = cu_seqlens.size(0) - 1\n device = q.device\n\n assert num_q_heads == 16\n assert num_k_heads == 16\n assert num_v_heads == 32\n assert head_size == 128\n\n if scale is None or scale == 0.0:\n scale = 1.0 / math.sqrt(head_size)\n\n # Compute g and beta from raw parameters\n x = a.float() + dt_bias.float() # [total_seq_len, HV]\n g = torch.exp(-torch.exp(A_log.float()) * F.softplus(x)) # [total_seq_len, HV]\n beta = torch.sigmoid(b.float()) # [total_seq_len, HV]\n\n q_exp = q.repeat_interleave(num_v_heads // num_q_heads, dim=1)\n k_exp = k.repeat_interleave(num_v_heads // num_k_heads, dim=1)\n\n output = torch.zeros(\n (total_seq_len, num_sab_heads, head_size), dtype=torch.bfloat16, device=device\n )\n new_state = torch.zeros(\n (num_seqs, num_sab_heads, head_size, head_size), dtype=torch.float32, device=device\n )\n\n for seq_idx in range(num_seqs):\n seq_start = int(cu_seqlens[seq_idx].item())\n seq_end = int(cu_seqlens[seq_idx + 1].item())\n seq_len = seq_end - seq_start\n\n if seq_len <= 0:\n continue\n\n if state is not None:\n state_HKV = state[seq_idx].clone().float().transpose(-1, -2) # [H,V,K] -> [H,K,V]\n else:\n 
state_HKV = torch.zeros(\n (num_sab_heads, head_size, head_size), dtype=torch.float32, device=device\n )\n\n for i in range(seq_len):\n t = seq_start + i\n q_H1K = q_exp[t].unsqueeze(1).float()\n k_H1K = k_exp[t].unsqueeze(1).float()\n v_H1V = v[t].unsqueeze(1).float()\n g_H11 = g[t].unsqueeze(1).unsqueeze(2)\n beta_H11 = beta[t].unsqueeze(1).unsqueeze(2)\n\n old_state_HKV = g_H11 * state_HKV\n old_v_H1V = matmul(k_H1K, old_state_HKV)\n new_v_H1V = beta_H11 * v_H1V + (1 - beta_H11) * old_v_H1V\n state_remove = torch.einsum('hkl,hlv->hkv', k_H1K.transpose(-1, -2), old_v_H1V)\n state_update = torch.einsum('hkl,hlv->hkv', k_H1K.transpose(-1, -2), new_v_H1V)\n state_HKV = old_state_HKV - state_remove + state_update\n\n o_H1V = scale * matmul(q_H1K, state_HKV)\n output[t] = o_H1V.squeeze(1).to(torch.bfloat16)\n\n new_state[seq_idx] = state_HKV.transpose(-1, -2) # [H,K,V] -> [H,V,K]\n\n return {\"output\": output, \"new_state\": new_state}"
|
| 112 |
+
}
|
definitions/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048.json
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048",
|
| 3 |
+
"description": "FP8 block scale MoE operation. Routing and two grouped-GEMM included.",
|
| 4 |
+
"op_type": "moe",
|
| 5 |
+
"tags": [
|
| 6 |
+
"status:verified",
|
| 7 |
+
"model:deepseek-v3",
|
| 8 |
+
"model:deepseek-r1",
|
| 9 |
+
"quantization:float8_e4m3fn"
|
| 10 |
+
],
|
| 11 |
+
"axes": {
|
| 12 |
+
"seq_len": {
|
| 13 |
+
"type": "var",
|
| 14 |
+
"description": "Sequence length (number of tokens)"
|
| 15 |
+
},
|
| 16 |
+
"num_experts": {
|
| 17 |
+
"type": "const",
|
| 18 |
+
"value": 256,
|
| 19 |
+
"description": "Total number of experts."
|
| 20 |
+
},
|
| 21 |
+
"num_local_experts": {
|
| 22 |
+
"type": "const",
|
| 23 |
+
"value": 32,
|
| 24 |
+
"description": "Number of local experts with EP size 8."
|
| 25 |
+
},
|
| 26 |
+
"hidden_size": {
|
| 27 |
+
"type": "const",
|
| 28 |
+
"value": 7168,
|
| 29 |
+
"description": "Hidden dimension size."
|
| 30 |
+
},
|
| 31 |
+
"intermediate_size": {
|
| 32 |
+
"type": "const",
|
| 33 |
+
"value": 2048,
|
| 34 |
+
"description": "MoE intermediate layer size."
|
| 35 |
+
},
|
| 36 |
+
"gemm1_out_size": {
|
| 37 |
+
"type": "const",
|
| 38 |
+
"value": 4096,
|
| 39 |
+
"description": "Output size of the first GEMM (W13). Should be 2 * intermediate_size"
|
| 40 |
+
},
|
| 41 |
+
"num_hidden_blocks": {
|
| 42 |
+
"type": "const",
|
| 43 |
+
"value": 56,
|
| 44 |
+
"description": "Number of quantized blocks along the hidden_size dimension (block_size=128)."
|
| 45 |
+
},
|
| 46 |
+
"num_intermediate_blocks": {
|
| 47 |
+
"type": "const",
|
| 48 |
+
"value": 16,
|
| 49 |
+
"description": "Number of quantized blocks along the intermediate_size dimension (block_size=128)."
|
| 50 |
+
},
|
| 51 |
+
"num_gemm1_out_blocks": {
|
| 52 |
+
"type": "const",
|
| 53 |
+
"value": 32,
|
| 54 |
+
"description": "Number of quantized blocks along the gemm1_out_size dimension (block_size=128)."
|
| 55 |
+
}
|
| 56 |
+
},
|
| 57 |
+
"inputs": {
|
| 58 |
+
"routing_logits": {
|
| 59 |
+
"shape": [
|
| 60 |
+
"seq_len",
|
| 61 |
+
"num_experts"
|
| 62 |
+
],
|
| 63 |
+
"dtype": "float32",
|
| 64 |
+
"description": "Tensor of routing logits for expert selection"
|
| 65 |
+
},
|
| 66 |
+
"routing_bias": {
|
| 67 |
+
"shape": [
|
| 68 |
+
"num_experts"
|
| 69 |
+
],
|
| 70 |
+
"dtype": "bfloat16",
|
| 71 |
+
"description": "Bias tensor for routing. Pass all zeros for no bias."
|
| 72 |
+
},
|
| 73 |
+
"hidden_states": {
|
| 74 |
+
"shape": [
|
| 75 |
+
"seq_len",
|
| 76 |
+
"hidden_size"
|
| 77 |
+
],
|
| 78 |
+
"dtype": "float8_e4m3fn",
|
| 79 |
+
"description": "Input hidden states tensor (FP8 quantized)"
|
| 80 |
+
},
|
| 81 |
+
"hidden_states_scale": {
|
| 82 |
+
"shape": [
|
| 83 |
+
"num_hidden_blocks",
|
| 84 |
+
"seq_len"
|
| 85 |
+
],
|
| 86 |
+
"dtype": "float32",
|
| 87 |
+
"description": "Block-wise scaling factors for hidden states."
|
| 88 |
+
},
|
| 89 |
+
"gemm1_weights": {
|
| 90 |
+
"shape": [
|
| 91 |
+
"num_local_experts",
|
| 92 |
+
"gemm1_out_size",
|
| 93 |
+
"hidden_size"
|
| 94 |
+
],
|
| 95 |
+
"dtype": "float8_e4m3fn",
|
| 96 |
+
"description": "First GEMM weights for all local experts (gate and up projections)."
|
| 97 |
+
},
|
| 98 |
+
"gemm1_weights_scale": {
|
| 99 |
+
"shape": [
|
| 100 |
+
"num_local_experts",
|
| 101 |
+
"num_gemm1_out_blocks",
|
| 102 |
+
"num_hidden_blocks"
|
| 103 |
+
],
|
| 104 |
+
"dtype": "float32",
|
| 105 |
+
"description": "Block-wise scaling factors for first GEMM weights."
|
| 106 |
+
},
|
| 107 |
+
"gemm2_weights": {
|
| 108 |
+
"shape": [
|
| 109 |
+
"num_local_experts",
|
| 110 |
+
"hidden_size",
|
| 111 |
+
"intermediate_size"
|
| 112 |
+
],
|
| 113 |
+
"dtype": "float8_e4m3fn",
|
| 114 |
+
"description": "Second GEMM weights for all local experts (down projection)."
|
| 115 |
+
},
|
| 116 |
+
"gemm2_weights_scale": {
|
| 117 |
+
"shape": [
|
| 118 |
+
"num_local_experts",
|
| 119 |
+
"num_hidden_blocks",
|
| 120 |
+
"num_intermediate_blocks"
|
| 121 |
+
],
|
| 122 |
+
"dtype": "float32",
|
| 123 |
+
"description": "Block-wise scaling factors for second GEMM weights."
|
| 124 |
+
},
|
| 125 |
+
"local_expert_offset": {
|
| 126 |
+
"shape": null,
|
| 127 |
+
"dtype": "int32",
|
| 128 |
+
"description": "Offset of local experts in global expert space."
|
| 129 |
+
},
|
| 130 |
+
"routed_scaling_factor": {
|
| 131 |
+
"shape": null,
|
| 132 |
+
"dtype": "float32",
|
| 133 |
+
"description": "Scaling factor for routing weights."
|
| 134 |
+
}
|
| 135 |
+
},
|
| 136 |
+
"outputs": {
|
| 137 |
+
"output": {
|
| 138 |
+
"shape": [
|
| 139 |
+
"seq_len",
|
| 140 |
+
"hidden_size"
|
| 141 |
+
],
|
| 142 |
+
"dtype": "bfloat16",
|
| 143 |
+
"description": "Final MoE output tensor"
|
| 144 |
+
}
|
| 145 |
+
},
|
| 146 |
+
"reference": "import torch\r\n\r\n\r\n@torch.no_grad()\r\ndef run(\r\n routing_logits: torch.Tensor,\r\n routing_bias: torch.Tensor,\r\n hidden_states: torch.Tensor,\r\n hidden_states_scale: torch.Tensor,\r\n gemm1_weights: torch.Tensor,\r\n gemm1_weights_scale: torch.Tensor,\r\n gemm2_weights: torch.Tensor,\r\n gemm2_weights_scale: torch.Tensor,\r\n local_expert_offset: int,\r\n routed_scaling_factor: float,\r\n):\r\n \"\"\"\r\n • FP8 block-scale dequantization: float ≈ fp8 * scale\r\n • DeepSeek-V3 no-aux routing:\r\n s = sigmoid(logits)\r\n s_with_bias = s + bias\r\n group by n_group=8; per group take top-2 sum → pick topk_group=4 groups\r\n on the kept groups, take global top_k=8 experts\r\n combine with weights derived from s (without bias), normalized and\r\n scaled by routed_scaling_factor\r\n • Local computation:\r\n only experts in [local_expert_offset, local_expert_offset + E_local) are\r\n computed on this rank (GEMM1 → SwiGLU → GEMM2), then per-token weighted\r\n accumulation.\r\n \"\"\"\r\n\r\n # Fixed DeepSeek-V3/R1 geometry\r\n H = 7168\r\n I = 2048\r\n E_local = gemm1_weights.shape[0]\r\n \r\n BLOCK = 128\r\n E_global = routing_logits.shape[1]\r\n T = routing_logits.shape[0]\r\n\r\n assert H == 7168, \"hidden_size must be 7168\" \r\n assert I == 2048, \"intermediate_size must be 2048\"\r\n assert E_global == 256, \"num_experts must be 256\"\r\n assert E_local == 32, \"num_local_experts must be 32\"\r\n\r\n # Routing constants\r\n TOP_K = 8\r\n N_GROUP = 8\r\n TOPK_GROUP = 4\r\n\r\n # Block counts\r\n num_hidden_blocks = H // BLOCK # 56\r\n num_intermediate_blocks = I // BLOCK # 16\r\n num_gemm1_out_blocks = (2 * I) // BLOCK # 32\r\n\r\n # Shape checks\r\n assert hidden_states.shape == (T, H)\r\n assert hidden_states_scale.shape == (num_hidden_blocks, T)\r\n assert gemm1_weights.shape == (E_local, 2 * I, H)\r\n assert gemm1_weights_scale.shape == (E_local, num_gemm1_out_blocks, num_hidden_blocks)\r\n assert gemm2_weights.shape == (E_local, H, I)\r\n 
assert gemm2_weights_scale.shape == (E_local, num_hidden_blocks, num_intermediate_blocks)\r\n assert routing_bias.shape[-1] == E_global\r\n\r\n device = hidden_states.device\r\n\r\n # 1) FP8 block-scale dequantization\r\n # hidden_states: [T, H], scale: [H/128, T] (transposed layout)\r\n A_fp32 = hidden_states.to(torch.float32)\r\n A_scale = hidden_states_scale.to(torch.float32) # [H/128, T]\r\n A_scale_TH = A_scale.permute(1, 0).contiguous() # [T, H/128]\r\n A_scale_expanded = (\r\n A_scale_TH.unsqueeze(-1)\r\n .repeat(1, 1, BLOCK) # [T, H/128, 128]\r\n .reshape(T, H) # [T, H]\r\n .contiguous()\r\n )\r\n A = A_fp32 * A_scale_expanded # [T, H] float32\r\n\r\n # W13: [E_local, 2I, H], scale: [E_local, (2I)/128, H/128]\r\n W13_fp32 = gemm1_weights.to(torch.float32)\r\n S13 = gemm1_weights_scale.to(torch.float32)\r\n S13_expanded = torch.repeat_interleave(S13, BLOCK, dim=1) # [E, 2I, H/128]\r\n S13_expanded = torch.repeat_interleave(S13_expanded, BLOCK, dim=2) # [E, 2I, H]\r\n W13 = W13_fp32 * S13_expanded # [E, 2I, H] float32\r\n\r\n # W2: [E_local, H, I], scale: [E_local, H/128, I/128]\r\n W2_fp32 = gemm2_weights.to(torch.float32)\r\n S2 = gemm2_weights_scale.to(torch.float32)\r\n S2_expanded = torch.repeat_interleave(S2, BLOCK, dim=1) # [E, H, I/128]\r\n S2_expanded = torch.repeat_interleave(S2_expanded, BLOCK, dim=2) # [E, H, I]\r\n W2 = W2_fp32 * S2_expanded # [E, H, I] float32\r\n\r\n # 2) No-aux routing\r\n logits = routing_logits.to(torch.float32) # [T, E_global]\r\n bias = routing_bias.to(torch.float32).reshape(-1) # [E_global]\r\n\r\n # Sigmoid\r\n s = 1.0 / (1.0 + torch.exp(-logits)) # [T, E]\r\n s_with_bias = s + bias # [T, E] (broadcast)\r\n\r\n # Grouping\r\n group_size = E_global // N_GROUP # 32\r\n s_wb_grouped = s_with_bias.view(T, N_GROUP, group_size) # [T, 8, 32]\r\n\r\n # Group scores = sum of top-2 values within each group\r\n top2_vals, _ = torch.topk(s_wb_grouped, k=2, dim=2, largest=True, sorted=False) # [T, 8, 2]\r\n group_scores = 
top2_vals.sum(dim=2) # [T, 8]\r\n\r\n # Select topk_group groups → group mask\r\n _, group_idx = torch.topk(group_scores, k=TOPK_GROUP, dim=1, largest=True, sorted=False) # [T, 4]\r\n group_mask = torch.zeros_like(group_scores) # [T, 8]\r\n group_mask.scatter_(1, group_idx, 1.0)\r\n score_mask = group_mask.unsqueeze(2).expand(T, N_GROUP, group_size).reshape(T, E_global) # [T, E]\r\n\r\n # Global top-k (within kept groups), based on s_with_bias\r\n neg_inf = torch.finfo(torch.float32).min\r\n scores_pruned = s_with_bias.masked_fill(score_mask == 0, neg_inf) # [T, E]\r\n _, topk_idx = torch.topk(scores_pruned, k=TOP_K, dim=1, largest=True, sorted=False) # [T, 8]\r\n\r\n # Combination weights: use s (without bias) for normalization\r\n M = torch.zeros_like(s) # [T, E]\r\n M.scatter_(1, topk_idx, 1.0) # 0/1 mask\r\n weights = s * M # [T, E]\r\n weights_sum = weights.sum(dim=1, keepdim=True) + 1e-20\r\n weights = (weights / weights_sum) * routed_scaling_factor # [T, E]\r\n\r\n # 3) Local expert compute and accumulation\r\n output = torch.zeros((T, H), dtype=torch.float32, device=device)\r\n\r\n local_start = int(local_expert_offset)\r\n\r\n # For each local expert: find selected tokens, run GEMM1→SwiGLU→GEMM2, accumulate by weights\r\n for le in range(E_local):\r\n ge = local_start + le\r\n if ge < 0 or ge >= E_global:\r\n continue\r\n\r\n # Tokens that selected this global expert ge in their top-k\r\n sel_mask_per_token = (topk_idx == ge).any(dim=1) # [T] bool\r\n if not sel_mask_per_token.any():\r\n continue\r\n\r\n token_idx = torch.nonzero(sel_mask_per_token, as_tuple=False).squeeze(1) # [Tk]\r\n Tk = token_idx.numel()\r\n\r\n # Gather inputs and weights for this expert\r\n A_e = A.index_select(0, token_idx) # [Tk, H]\r\n W13_e = W13[le] # [2I, H]\r\n W2_e = W2[le] # [H, I]\r\n\r\n # GEMM1: [Tk, H] @ [H, 2I] = [Tk, 2I]\r\n G1 = A_e.matmul(W13_e.t()) # [Tk, 2I]\r\n\r\n # SwiGLU: split and apply silu(x) = x / (1 + exp(-x))\r\n X1 = G1[:, :I] # [Tk, I]\r\n X2 = G1[:, 
I:] # [Tk, I]\r\n silu_X2 = X2 / (1.0 + torch.exp(-X2)) # [Tk, I]\r\n C = silu_X2 * X1 # [Tk, I]\r\n\r\n # GEMM2: [Tk, I] @ [I, H] = [Tk, H]\r\n O = C.matmul(W2_e.t()) # [Tk, H]\r\n\r\n # Accumulate with per-token routing weights for this expert\r\n w_tok = weights.index_select(0, token_idx)[:, ge] # [Tk]\r\n output.index_add_(0, token_idx, O * w_tok.unsqueeze(1)) # [Tk,H] * [Tk,1]\r\n\r\n return {\"output\": output.to(torch.bfloat16)}"
|
| 147 |
+
}
|
workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048.jsonl
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "b8f4f012-a32e-4356-b4e1-7665b3d598af", "axes": {"seq_len": 7}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_547d24f37f554e2fab107fb57a41e73e.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 192}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 2 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "e05c6c03-5603-4a1c-b34c-dcce0ecaeea4", "axes": {"seq_len": 1}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_25ff432053b5474d86dda63b7daf1734.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 32}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 3 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "6230e838-67ca-41dd-a9d6-6f36b7676c6b", "axes": {"seq_len": 32}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_963f2f970f2e4f1688595246c239372e.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 32}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 4 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "8f1ff9f1-6747-41d1-a1d8-2868cdacf893", "axes": {"seq_len": 80}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_96384077a7da46bcb65b7c694586960d.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 96}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 5 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "1a4c6ba1-3cd2-4d7d-b716-84f2d52b69fc", "axes": {"seq_len": 901}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_90235a13a92d4cb0b552a5d13b119b2e.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 96}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 6 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "a7c2bcfd-a2f4-479e-8d32-200115df89cf", "axes": {"seq_len": 16}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_b7a56d54b53740e1b95e08d5b31977f0.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 224}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 7 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "2e69caee-ae5c-473b-aa99-5dc6659829d4", "axes": {"seq_len": 15}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_62adbb89946c4512881ab23d3819f740.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 32}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 8 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "8cba5890-4288-448a-93b8-42c14c6b9420", "axes": {"seq_len": 14}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_886eb15cbc2f49ad8bd3d4244c38ed91.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 0}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 9 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "5e8dc11c-f2a9-42d5-8dce-9419cbf34d5d", "axes": {"seq_len": 14107}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_ad0adeae69b443b192dc6c7394302345.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 32}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 10 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "58a34f27-7995-4155-8b46-f60a7225e20e", "axes": {"seq_len": 11948}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_bcb1fd5c45f84a46b6e094fe7a988890.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 128}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 11 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "5eadab1e-a0e3-4966-b0fd-1115cd77497c", "axes": {"seq_len": 62}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_13f420b30bbe43548bf54556a0185219.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 96}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 12 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "eedc63b2-c03b-4cf2-8d16-1f46566e3af7", "axes": {"seq_len": 59}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5d8ecbc401024d0697fec81a0dab7163.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 160}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 13 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "e626d3e6-6c29-4fd8-bb7c-5b09eec61702", "axes": {"seq_len": 58}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_6c6354ff369c41f3b577e01b70a015a7.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 64}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 14 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "74d7ff04-0365-4cf8-a824-ce61b7131dea", "axes": {"seq_len": 57}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_a071bbc5a57648f6ac1346515429d92d.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 96}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 15 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "4822167c-dae5-4bb1-bb53-e4adb256245b", "axes": {"seq_len": 56}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_c0d3fb257396413c85038b179a27d3a1.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 64}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 16 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "81955b1e-086f-49c1-9f40-a18a5aaf509d", "axes": {"seq_len": 55}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_8d2c57225e994c41aca633b282f92c78.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 128}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 17 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "76010cb4-f73c-4145-8365-8642a2ce99de", "axes": {"seq_len": 54}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_d517fa44dc8245ef9053f097b6b0cb76.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 128}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 18 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "fc378037-e8fa-4305-b00f-4af47933fd53", "axes": {"seq_len": 53}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_e82e1e6107d24eb9ba26b15acd580832.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 32}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|
| 19 |
+
{"definition": "moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048", "solution": null, "workload": {"uuid": "f7d6ac7c-24ec-47e4-aefc-4863a5e3e1d9", "axes": {"seq_len": 52}, "inputs": {"routing_logits": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "routing_logits"}, "hidden_states": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "hidden_states"}, "hidden_states_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "hidden_states_scale"}, "routing_bias": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "routing_bias"}, "gemm1_weights": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "gemm1_weights"}, "gemm1_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "gemm1_weights_scale"}, "gemm2_weights": {"type": "safetensors", "path": 
"./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "gemm2_weights"}, "gemm2_weights_scale": {"type": "safetensors", "path": "./blob/workloads/moe/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048/moe_fp8_block_scale_ds_routing_topk8_ng8_kg4_e32_h7168_i2048_5b074f60cf5d449db9375e6ff0fa89b6.safetensors", "tensor_key": "gemm2_weights_scale"}, "local_expert_offset": {"type": "scalar", "value": 160}, "routed_scaling_factor": {"type": "scalar", "value": 2.5}}}, "evaluation": null}
|