VBVR-MultiStep / croissant.json — commit cb9ba6b (verified), by Mark7121983123: add rai:hasSyntheticData field (NeurIPS RAI requirement)
{
"@context": {
"@language": "en",
"@vocab": "https://schema.org/",
"citeAs": "cr:citeAs",
"column": "cr:column",
"conformsTo": "dct:conformsTo",
"cr": "http://mlcommons.org/croissant/",
"rai": "http://mlcommons.org/croissant/RAI/",
"data": {"@id": "cr:data", "@type": "@json"},
"dataType": {"@id": "cr:dataType", "@type": "@vocab"},
"dct": "http://purl.org/dc/terms/",
"examples": {"@id": "cr:examples", "@type": "@json"},
"extract": "cr:extract",
"field": "cr:field",
"fileProperty": "cr:fileProperty",
"fileObject": "cr:fileObject",
"fileSet": "cr:fileSet",
"format": "cr:format",
"includes": "cr:includes",
"isLiveDataset": "cr:isLiveDataset",
"jsonPath": "cr:jsonPath",
"key": "cr:key",
"md5": "cr:md5",
"parentField": "cr:parentField",
"path": "cr:path",
"recordSet": "cr:recordSet",
"references": "cr:references",
"regex": "cr:regex",
"repeated": "cr:repeated",
"replace": "cr:replace",
"sc": "https://schema.org/",
"separator": "cr:separator",
"source": "cr:source",
"subField": "cr:subField",
"transform": "cr:transform"
},
"@type": "sc:Dataset",
"name": "VBVR-MultiStep",
"conformsTo": "http://mlcommons.org/croissant/1.0",
"description": "The ~360,000-sample programmatic training corpus for long-horizon multi-step image-to-video reasoning. 36 parameterized tasks across six reasoning families (Navigation, Planning, CSP, Execution, Geometry, Physics). Distributed as 7,200 tar.gz shards (≈50 samples per shard) plus Parquet metadata; each instance follows a five-artifact contract identical to the VBVR-MultiStep-Bench evaluation split.",
"alternateName": ["VBVR-MultiStep Training Corpus"],
"creator": {
"@type": "sc:Organization",
"name": "Video-Reason",
"url": "https://video-reason.com"
},
"datePublished": "2026-05-06",
"keywords": ["video reasoning", "multi-step reasoning", "long-horizon", "image-to-video", "training", "synthetic", "tar.gz", "parquet"],
"license": "https://creativecommons.org/licenses/by/4.0/",
"url": "https://huggingface.co/datasets/Video-Reason/VBVR-MultiStep",
"version": "1.0.0",
"isLiveDataset": false,
"rai:dataCollection": "Fully synthetic. Each of the 36 tasks ships a deterministic generator that emits the five-artifact contract (first_frame.png, prompt.txt, final_frame.png, ground_truth.mp4, question_metadata.json) from a (task, seed) pair. No scraping, no human subjects, no third-party media, no manual annotation. The training corpus is partitioned into disjoint seed bands (1–5,000 and 5,001–10,000 per task) that are themselves disjoint from the evaluation seeds released in VBVR-MultiStep-Bench.",
"rai:dataCollectionType": ["Synthetic"],
"rai:hasSyntheticData": true,
"rai:dataPreprocessingProtocol": "Per-task generator output is grouped into 50-sample batches and packed into tar.gz shards under questions/. Per-task Parquet metadata files (data/metadata_shards/) and a global metadata.parquet index every instance with task id, family, seed, and per-task fields. No sample is filtered, dropped, or transformed after generation.",
"rai:dataAnnotationProtocol": "No human annotation. ground_truth.mp4 is rendered by each task's deterministic ground-truth solver.",
"rai:dataAnnotationPlatform": "N/A (no annotation).",
"rai:dataReleaseMaintenancePlan": "Versioned releases on the Hugging Face Hub. The Croissant file in this repository is the canonical long-term record. A 5 GB representative subset is provided under sample/ for quick inspection and reviewer convenience.",
"rai:dataLimitations": [
"Synthetic and stylized: transfer to unconstrained open-world video is not validated.",
"Visual rendering is intentionally simplified to keep the symbolic state recoverable from frames; this is not a photorealism corpus.",
"Per-task generator parameter ranges are bounded (e.g., maze sizes, planning horizons, physics regimes); the corpus does not span the long tail of any single family.",
"Reference rollouts encode one valid trajectory per instance; alternative valid trajectories are not enumerated.",
"Although 36 tasks ship, only 34 are used in the training experiments described in the companion paper; this release nevertheless contains all 36 tasks."
],
"rai:dataBiases": [
"Family balance is uniform (6 tasks per family) by design and does not reflect natural prevalence of these reasoning patterns.",
"Generator parameters bias the difficulty distribution toward bounded and seed-controlled regimes that are amenable to symbolic ground truth; rare or open-ended cases are out of scope.",
"Visual style is monocular, planar, and rendered by a fixed family of renderers; appearance distribution does not approximate any real-world video corpus and does not contain demographic content.",
"No human demographic information is generated; bias along human demographic axes does not apply."
],
"rai:personalSensitiveInformation": "None. The dataset contains no personal information, no biometric data, no demographic information, and no human subjects. All visual content is procedurally generated geometric, symbolic, or physical scenes.",
"rai:dataUseCases": "Training image-to-video systems on long-horizon multi-step reasoning under explicit per-step rules. Validated use case in the companion paper: fine-tuning Wan2.2-I2V-A14B (Apache-2.0) with Dual-DiT two-phase LoRA. Out-of-scope: production VLM pretraining at scale, real-world video generation, or any safety-critical use.",
"rai:dataSocialImpact": "Intended for academic research on reasoning evaluation in video generation. Risks are minimal: the dataset is synthetic, free of personal content, and rendered in a stylized regime not representative of any real population. The most plausible concern is research-direction effects (e.g., over-investing in stylized synthetic benchmarks), which we mitigate by positioning this corpus as a complement to (not a replacement for) appearance-centric and real-world video corpora.",
"rai:dataReleaseUpdate": "If post-release errors are discovered, fixes will be published as additive shards or replacement Parquet entries in a new dataset version, with the prior version retained at its commit hash so earlier results remain reproducible."
}