Update Croissant: pass NeurIPS validator (sha256, drop unfilled FileObjects, add rai:hasSyntheticData)
{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "cr": "http://mlcommons.org/croissant/",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "data": {"@id": "cr:data", "@type": "@json"},
    "dataType": {"@id": "cr:dataType", "@type": "@vocab"},
    "dct": "http://purl.org/dc/terms/",
    "examples": {"@id": "cr:examples", "@type": "@json"},
    "extract": "cr:extract",
    "field": "cr:field",
    "fileProperty": "cr:fileProperty",
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "sc": "https://schema.org/",
    "separator": "cr:separator",
    "sha256": "cr:sha256",
    "source": "cr:source",
    "subField": "cr:subField",
    "transform": "cr:transform"
  },
  "@type": "sc:Dataset",
  "name": "cross-scenario-physics-code-transfer",
  "conformsTo": "http://mlcommons.org/croissant/1.0",
  "description": "A cross-scenario physics-transfer benchmark for stress-testing whether within-scenario compositionality metrics on frozen video features predict cross-scenario physical-property transfer. Contains four Kubric physics scenarios (collision, ramp, flat-drop, elasticity) totalling 1,800 scenes plus a 75-scene matched-visual low-gravity collision variant; pre-extracted frozen features for eight video and image backbones (V-JEPA 2, V-JEPA 2.1, DINOv2-S/L, CLIP ViT-L/14, MAE, SigLIP, VideoMAE); ground-truth per-object tracks; and N-shot adaptation protocols with explicit task/label mappings. Real-video evaluation uses the public Phys101 dataset; we redistribute only feature tensors extracted from it. The headline empirical finding is that high TopSim, PosDis, and causal-specificity bottleneck codes do not generalise across scenarios in the tested bottleneck protocol family. This Hugging Face repository hosts the load-bearing reviewer-inspectable subset (V-JEPA 2 collision features, all four Kubric scenario label files, full reproduction code); the remaining backbone features, scenario videos, GT tracks, and Phys101 features are prepared for an immediate post-acceptance public release.",
  "url": "https://huggingface.co/datasets/physics-code-transfer-bench/cross-scenario-physics-code-transfer",
  "version": "1.0.0",
  "license": "https://www.apache.org/licenses/LICENSE-2.0",
  "datePublished": "2026-05-05",
  "creator": {
    "@type": "Organization",
    "name": "Anonymous (NeurIPS 2026 E&D Track Submission)"
  },
  "citeAs": "Anonymous Authors. A Benchmark for Cross-Scenario Physics-Code Transfer: Compositionality Metrics on Frozen Video Features. NeurIPS 2026 Evaluations & Datasets Track (under review).",
  "isLiveDataset": false,
  "keywords": [
    "physics representation learning",
    "cross-scenario transfer",
    "compositionality metrics",
    "TopSim",
    "PosDis",
    "video foundation models",
    "V-JEPA 2",
    "frozen features",
    "benchmark",
    "Kubric",
    "Phys101",
    "negative result"
  ],
  "rai:dataCollection": "Kubric scenarios were rendered using the public Kubric simulator (Apache 2.0) with the PyBullet physics backend at 240Hz substeps. Scene parameters (mass, restitution, friction, drop-height, initial velocity) are sampled on explicit grids documented in the accompanying paper. The 75-scene low-gravity variant uses the same RNG seed as the standard-gravity collision dataset to match per-scene physics-random variables (sphere color, lighting, initial velocity, position jitter) for matched-visual analysis. Phys101 features are extracted from the publicly available Phys101 dataset (Wu et al., BMVC 2016) using the V-JEPA 2 frozen encoder; we redistribute the feature tensors, not the source video.",
  "rai:dataCollectionType": "Synthetic simulation (Kubric/PyBullet) plus derived features from public real-video dataset (Phys101).",
  "rai:dataCollectionRawData": "Raw rendered video frames at 256x256, 48 frames at 24 fps; ground-truth per-object position and finite-difference velocity from PyBullet.",
  "rai:dataCollectionTimeframe": "Rendered in 2025-2026.",
  "rai:dataAnnotationProtocol": "All labels are derived directly from the simulator's ground-truth physical state (mass, restitution, friction, drop-height, initial velocity) and binned into discrete classes (3-class union-binned restitution; 5-class mass-ratio for multi-property training; per-scenario tertile mass for Phys101). No human annotation involved.",
  "rai:dataAnnotationPlatform": "Programmatic (Python).",
  "rai:dataAnnotationAnalysis": "Class-balance and bin-boundary statistics are reported in Appendix A.7 of the paper.",
  "rai:dataAnnotationPerItemTime": "0",
  "rai:dataAnnotationDemographics": "Not applicable (synthetic data + derived features).",
  "rai:dataAnnotationTools": "Custom Python scripts; see code/ in this supplementary bundle.",
  "rai:dataAnnotatorDemographicsDescription": "No human annotators involved.",
  "rai:dataPreprocessingProtocol": "Frozen-feature pre-extraction: each scene is processed as 4 evenly-spaced frames at 256x256, with one forward pass per ViT-L-scale encoder; spatial features are mean-pooled within each tubelet/patch grid to produce (N, T=4, D) tensors per scenario per backbone. Stratified train/holdout splits use 80/20 per primary label class. N-shot target-side stratified subsampling is documented in Section 3.6 of the paper.",
  "rai:dataUseCases": "Evaluating whether within-scenario compositionality metrics (TopSim, PosDis, causal specificity) predict cross-scenario transfer of learned physics codes; benchmarking new sender architectures (slot-attention, EMA-codebook VQ, product-quantised continuous, transfer-aware objectives) on a frozen-feature substrate; stress-testing the assumption that high within-scenario metric values entail abstract reusable structure; isolating the contribution of visual versus dynamics versus scene/task-structure shift via the matched-visual transfer ladder.",
  "rai:dataLimitations": "Four Kubric scenarios with 1-2 spheres; broader scenario diversity (objects, occlusions, multi-agent) would strengthen generality. The metric-transfer sweep is collision-trained and primarily evaluated on collision -> ramp at N=192 with a partial replication on collision -> flat-drop. The n=24 correlational analysis is underpowered (widest bootstrap 95% CI [-0.58, +0.32]); the load-bearing claim is sufficiency, not non-predictiveness. Tested code families are Gumbel-Softmax, tanh-bounded continuous, and a 4-configuration VQ-VAE probe; slot-attention and EMA-codebook VQ are explicit forward directions, not tested. Phys101 per-scenario tertile binning means class boundaries differ across source and target, contributing a label-boundary shift on top of the feature-distribution shift on real video; this is documented and a global-binning diagnostic is also released.",
  "rai:dataSocialImpact": "The benchmark is intended to help researchers more honestly evaluate physics-representation methods and reduce overclaiming of compositionality from within-scenario metrics. We see no immediate negative societal impacts. The released features are derived from frozen public foundation models on synthetic and openly-licensed real-video sources; no personally identifiable information is present.",
  "rai:dataBiases": "All scenes are simulator-generated and contain no human subjects, places, or demographic information. Phys101 is a publicly released physics dataset with no personally identifiable content; we redistribute only V-JEPA 2 features extracted from it.",
  "rai:personalSensitiveInformation": "None. The dataset contains no personally identifiable information.",
  "rai:dataReleaseMaintenancePlan": "The benchmark is hosted on Hugging Face. Updates (additional backbones, additional scenarios, additional pre-extracted features) will follow semantic versioning; current release is v1.0.0. Issues and pull requests are tracked at the Hugging Face dataset repository.",
  "rai:hasSyntheticData": true,
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "vjepa2-collision-features",
      "name": "vjepa2_collision_pooled.pt",
      "description": "V-JEPA 2 frozen features for the 600 collision scenes, mean-pooled to shape (N=600, T=4, D=1024), float32. Loaded with torch.load.",
      "encodingFormat": "application/octet-stream",
      "contentSize": "29604716 B",
      "sha256": "2bee296d965bf6a7d8e47c196575aff422e78788136957d7bc272ea647fb5fc2",
      "contentUrl": "https://huggingface.co/datasets/physics-code-transfer-bench/cross-scenario-physics-code-transfer/resolve/main/features/vjepa2_collision_pooled.pt"
    },
    {
      "@type": "cr:FileObject",
      "@id": "labels-collision",
      "name": "labels_collision.npz",
      "description": "Ground-truth physics labels for the 600 collision scenes: per-scene mass scalars, restitution scalars, mass quantile bins (3-class and 5-class), and restitution quantile bins (3-class and 5-class). Loaded with numpy.load.",
      "encodingFormat": "application/zip",
      "contentSize": "75889 B",
      "sha256": "aaa5f2940f4d7bb822e84873c2674bd2a853feefad7e4988044b2e9294eac1dc",
      "contentUrl": "https://huggingface.co/datasets/physics-code-transfer-bench/cross-scenario-physics-code-transfer/resolve/main/labels/labels_collision.npz"
    },
    {
      "@type": "cr:FileObject",
      "@id": "labels-ramp",
      "name": "labels_ramp.npz",
      "description": "Ground-truth physics labels for the 300 ramp scenes: restitution and friction scalars and bins.",
      "encodingFormat": "application/zip",
      "contentSize": "39858 B",
      "sha256": "142cf1154948e3698f954326c0f7ef33a0b71ea997fd46a4181297e53341b4c6",
      "contentUrl": "https://huggingface.co/datasets/physics-code-transfer-bench/cross-scenario-physics-code-transfer/resolve/main/labels/labels_ramp.npz"
    },
    {
      "@type": "cr:FileObject",
      "@id": "labels-flat-drop",
      "name": "labels_flat_drop.npz",
      "description": "Ground-truth physics labels for the 300 flat-drop scenes: restitution and friction scalars and bins.",
      "encodingFormat": "application/zip",
      "contentSize": "23854 B",
      "sha256": "f1e1b6ad78d53cd4a9ecfc648e3f30af61727f2f6669aa429aec9ab2987a230b",
      "contentUrl": "https://huggingface.co/datasets/physics-code-transfer-bench/cross-scenario-physics-code-transfer/resolve/main/labels/labels_flat_drop.npz"
    },
    {
      "@type": "cr:FileObject",
      "@id": "labels-elasticity",
      "name": "labels_elasticity.npz",
      "description": "Ground-truth physics labels for the 600 elasticity scenes: restitution and drop-height scalars and bins.",
      "encodingFormat": "application/zip",
      "contentSize": "55580 B",
      "sha256": "3e328c84a275f739ce81cbb35cf3dd4ab0550085a6674e6d65fd0321f8d43086",
      "contentUrl": "https://huggingface.co/datasets/physics-code-transfer-bench/cross-scenario-physics-code-transfer/resolve/main/labels/labels_elasticity.npz"
    },
    {
      "@type": "cr:FileObject",
      "@id": "labels-ramp-3prop",
      "name": "labels_ramp_3prop.npz",
      "description": "3-property labels for ramp (multi-property training): joint mass + restitution + friction labels for the 300 ramp scenes.",
      "encodingFormat": "application/zip",
      "contentSize": "38324 B",
      "sha256": "954eea264e382b56dcca1fa2c8c4501fb3fb8c46d485ecf596b985b662ff3a2f",
      "contentUrl": "https://huggingface.co/datasets/physics-code-transfer-bench/cross-scenario-physics-code-transfer/resolve/main/labels/labels_ramp_3prop.npz"
    }
  ]
}