{
"@context": {
"@language": "en",
"@vocab": "https://schema.org/",
"citeAs": "cr:citeAs",
"column": "cr:column",
"conformsTo": "dct:conformsTo",
"cr": "http://mlcommons.org/croissant/",
"rai": "http://mlcommons.org/croissant/RAI/",
"data": {
"@id": "cr:data",
"@type": "@json"
},
"dataType": {
"@id": "cr:dataType",
"@type": "@vocab"
},
"dct": "http://purl.org/dc/terms/",
"equivalentProperty": {
"@id": "cr:equivalentProperty",
"@type": "@vocab"
},
"samplingRate": "cr:samplingRate",
"examples": {
"@id": "cr:examples",
"@type": "@json"
},
"extract": "cr:extract",
"field": "cr:field",
"fileProperty": "cr:fileProperty",
"fileObject": "cr:fileObject",
"fileSet": "cr:fileSet",
"format": "cr:format",
"includes": "cr:includes",
"isLiveDataset": "cr:isLiveDataset",
"jsonPath": "cr:jsonPath",
"key": "cr:key",
"md5": "cr:md5",
"parentField": "cr:parentField",
"path": "cr:path",
"recordSet": "cr:recordSet",
"references": "cr:references",
"regex": "cr:regex",
"repeated": "cr:repeated",
"replace": "cr:replace",
"sc": "https://schema.org/",
"separator": "cr:separator",
"source": "cr:source",
"subField": "cr:subField",
"transform": "cr:transform"
},
"@type": "sc:Dataset",
"conformsTo": "http://mlcommons.org/croissant/1.0",
"name": "TensorBench",
"description": "Feature-addition benchmark for LLMs and coding agents, evaluated against the Scorch codebase (a sparse+dense PyTorch compiler). Each task asks a model to add a feature; success is the full pytest suite passing after the patch is applied inside a Docker container. 199 tasks total (194 feature-addition, 5 refactor) across 5 base commits on the upstream `bench` branch.",
"url": "https://huggingface.co/datasets/tensorbench/tensorbench-1.0",
"license": "https://opensource.org/licenses/MIT",
"version": "1.0.0",
"citeAs": "(Anonymized for double-blind review.)",
"datePublished": "2026-05-06",
"keywords": [
"benchmark",
"code-generation",
"llm-evaluation",
"feature-addition",
"python",
"pytorch",
"sparse-tensors"
],
"rai:dataCollection": "Each task is a natural-language prompt paired with a `base_commit` SHA from the upstream Scorch repository's `bench` branch. Prompts were drafted with LLM assistance against the target codebase and then reviewed and curated by the authors. The upstream codebase, tests, and SHAs are pre-existing artifacts of the Scorch project.",
"rai:annotationsPerItem": "Each task has a single author-curated description and a fixed `base_commit`. There are no per-item human annotations beyond the prompt itself; success is computed automatically by running the patched repo's pytest suite.",
"rai:dataLimitations": "All tasks target a single Python codebase (Scorch). Performance does not generalize across languages or unrelated repositories. Test suites authored by the agent are accepted at face value, which can permit a small number of vacuous tests; the paper's adversarial-behavior audit (see supplementary code) characterizes this rate. Container builds depend on a network clone of the upstream repository and may be affected by upstream availability.",
"rai:dataBiases": "All tasks were curated by the same set of authors. The category distribution skews toward runtime/dispatch, scheduler/loop transformations, linear-algebra ops, sparse formats, and IR/codegen surface area — i.e. the parts of a sparse-tensor compiler the authors found tractable to specify. Because prompts were drafted with LLM assistance, phrasing and structure also reflect the drafting model's stylistic patterns. The benchmark therefore measures a slice of `extend an existing PyTorch-extension codebase`-style work, not all of `software engineering with LLMs`.",
"rai:dataUseCases": "Evaluating coding agents and LLMs on extending a real, non-trivial Python+C++ codebase. Useful for: comparing agent frameworks (e.g. Claude Code, OpenAI Codex CLI, Gemini CLI, OpenHands), comparing model capabilities at fixed agent harness, and characterizing failure modes (test fail, patch apply fail, timeout, empty patch, vacuous added tests).",
"rai:personalSensitiveInformation": "None. Tasks describe code changes; no human-subject data is involved.",
"rai:dataSocialImpact": "The benchmark is intended for AI/ML research on code-generation evaluation. It does not target sensitive domains (medical, legal, financial). Like other code-evaluation benchmarks, it could in principle be used to filter or rank developers, which is not its intended use.",
"rai:hasSyntheticData": "Task prompts were drafted with LLM assistance against the target codebase and then reviewed and curated by the authors. The underlying `base_commit` SHAs, source code, and pytest infrastructure are pre-existing upstream artifacts and were not generated.",
"rai:dataReleaseMaintenancePlan": "The dataset will be made public under the authors' real names at the camera-ready deadline. Future revisions will be tagged via the `version` field. Bug reports and corrections will be tracked on the public repository.",
"distribution": [
{
"@type": "cr:FileObject",
"@id": "tensorbench.json",
"name": "tensorbench.json",
"description": "JSON array of 199 task records.",
"contentUrl": "https://huggingface.co/datasets/tensorbench/tensorbench-1.0/resolve/main/tensorbench.json",
"encodingFormat": "application/json",
"sha256": "41b9cf73e37f8458990584a21882575040c360d08935c135eedfc5f474d155f7"
},
{
"@type": "cr:FileObject",
"@id": "Dockerfile",
"name": "Dockerfile",
"description": "Eval image: python:3.11-slim plus Scorch's C++ build deps and a clone of the upstream `bench` branch.",
"contentUrl": "https://huggingface.co/datasets/tensorbench/tensorbench-1.0/resolve/main/Dockerfile",
"encodingFormat": "text/plain",
"sha256": "130e9738bd764e26f5b655d9e3b72fcfc4c7d9df9687cead2b7d53b930289873"
},
{
"@type": "cr:FileObject",
"@id": "run_tests.sh",
"name": "run_tests.sh",
"description": "Container CMD: rebuild the C++ extension and run pytest -v.",
"contentUrl": "https://huggingface.co/datasets/tensorbench/tensorbench-1.0/resolve/main/run_tests.sh",
"encodingFormat": "application/x-sh",
"sha256": "5bcab82be4065de1002995baea922492ba77063f52f4da191df2bf8d23096dc1"
}
],
"recordSet": [
{
"@type": "cr:RecordSet",
"@id": "tasks",
"name": "tasks",
"description": "One record per benchmark task.",
"field": [
{
"@type": "cr:Field",
"@id": "tasks/instance_id",
"name": "instance_id",
"description": "Unique task id, e.g. bobbyyyan__scorch-feature_kernel_fusion. The feature_/refactor_ suffix follows the original taxonomy; the semantics for both are feature-addition.",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].instance_id"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/repo_id",
"name": "repo_id",
"description": "Always bobbyyyan__scorch — used by the grading registry to route to the project-specific strategy.",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].repo_id"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/repo_url",
"name": "repo_url",
"description": "Git clone URL for the upstream Scorch repository. Identical for every task: https://github.com/bobbyyyan/scorch.git",
"dataType": "sc:URL",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].repo_url"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/base_commit",
"name": "base_commit",
"description": "Commit SHA the task is anchored at. The harness runs git reset --hard to this commit before applying patches.",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].base_commit"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/language",
"name": "language",
"description": "Source language. Always python for this dataset.",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].language"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/setup_commands",
"name": "setup_commands",
"description": "Optional list of shell commands run inside the container before the test command. Empty for all current Scorch tasks (setup is baked into the Docker image); the field is present for harness compatibility with other consumers.",
"dataType": "sc:Text",
"repeated": true,
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].setup_commands"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/test_command",
"name": "test_command",
"description": "Shell command that runs the pytest suite (typically /testbed/run_tests.sh).",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].test_command"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/test_timeout",
"name": "test_timeout",
"description": "Test-runner timeout in seconds. 3000 for every current task.",
"dataType": "sc:Integer",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].test_timeout"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/refactor_type",
"name": "refactor_type",
"description": "Mostly empty (legacy field from the original taxonomy; only a handful of tasks have extract/consolidate populated).",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].refactor_type"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/description",
"name": "description",
"description": "Natural-language task prompt the model receives.",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].description"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/files",
"name": "files",
"description": "Optional list of upstream-codebase files relevant to the task, surfaced as a hint to the model. Empty for all current Scorch tasks; the field is present for harness compatibility with other consumers.",
"dataType": "sc:Text",
"repeated": true,
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].files"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/task_type",
"name": "task_type",
"description": "feature or refactor. Both are evaluated as feature-addition (success = pytest passes).",
"dataType": "sc:Text",
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].task_type"}
}
},
{
"@type": "cr:Field",
"@id": "tasks/categories",
"name": "categories",
"description": "Hierarchical category paths (e.g. `Runtime/Caching & dispatch`, `API/Linear Algebra/Matmul variants`, `Scheduler/Loop transformations/Tiling`). A task may carry multiple categories.",
"dataType": "sc:Text",
"repeated": true,
"source": {
"fileObject": {"@id": "tensorbench.json"},
"extract": {"jsonPath": "$[*].categories"}
}
}
]
}
]
}