Datasets:
Add rai:dataSafetyMeasures and rai:outOfScopeUses to metadata.json (NeurIPS RAI checker free-form properties; mlcroissant 1.1.0 validates)
bc97402 verified | { | |
| "@context": { | |
| "@language": "en", | |
| "@vocab": "https://schema.org/", | |
| "citeAs": "cr:citeAs", | |
| "column": "cr:column", | |
| "conformsTo": "dct:conformsTo", | |
| "cr": "http://mlcommons.org/croissant/", | |
| "rai": "http://mlcommons.org/croissant/RAI/", | |
| "data": { | |
| "@id": "cr:data", | |
| "@type": "@json" | |
| }, | |
| "dataType": { | |
| "@id": "cr:dataType", | |
| "@type": "@vocab" | |
| }, | |
| "dct": "http://purl.org/dc/terms/", | |
| "equivalentProperty": "cr:equivalentProperty", | |
| "examples": { | |
| "@id": "cr:examples", | |
| "@type": "@json" | |
| }, | |
| "extract": "cr:extract", | |
| "field": "cr:field", | |
| "fileProperty": "cr:fileProperty", | |
| "fileObject": "cr:fileObject", | |
| "fileSet": "cr:fileSet", | |
| "format": "cr:format", | |
| "includes": "cr:includes", | |
| "isLiveDataset": "cr:isLiveDataset", | |
| "jsonPath": "cr:jsonPath", | |
| "key": "cr:key", | |
| "md5": "cr:md5", | |
| "parentField": "cr:parentField", | |
| "path": "cr:path", | |
| "recordSet": "cr:recordSet", | |
| "references": "cr:references", | |
| "regex": "cr:regex", | |
| "repeated": "cr:repeated", | |
| "replace": "cr:replace", | |
| "samplingRate": "cr:samplingRate", | |
| "sc": "https://schema.org/", | |
| "separator": "cr:separator", | |
| "source": "cr:source", | |
| "subField": "cr:subField", | |
| "transform": "cr:transform" | |
| }, | |
| "@type": "sc:Dataset", | |
| "name": "Substream Recollection", | |
| "description": "Substream Recollection is a controlled benchmark for testing whether long-context VLMs / LLMs can recall whether a short probe substream occurred inside a longer parent stream. It pairs synthetic streams (low / medium / uniform entropy, lengths L=8..4096) with EasyHuman rendered 3-belt videos and a natural-video sub-benchmark drawn from EPIC-Kitchens-100 and SoccerNet. The dataset is organized into four top-level configs: text (synthetic substream text-modality questions), synthetic_video (rendered synthetic substream videos), easyhuman (rendered 3-belt EasyHuman videos and their text-modality counterparts), and natural_video (real-world videos).", | |
| "conformsTo": "http://mlcommons.org/croissant/1.0", | |
| "citeAs": "Anonymous, \"Substream Recollection,\" 2026 (anonymized for review).", | |
| "keywords": [ | |
| "VLM", | |
| "benchmark", | |
| "calibration", | |
| "long-context", | |
| "memory", | |
| "substream-recall", | |
| "video-question-answering", | |
| "video-understanding", | |
| "vision-language-models" | |
| ], | |
| "license": "https://creativecommons.org/licenses/by/4.0/", | |
| "url": "https://huggingface.co/datasets/anonstreammem/substream-recollection", | |
| "version": "2.0.0", | |
| "creator": { | |
| "@type": "sc:Organization", | |
| "name": "Anonymous Authors" | |
| }, | |
| "author": { | |
| "@type": "sc:Person", | |
| "name": "Anonymous Authors" | |
| }, | |
| "datePublished": "2026-05-03", | |
| "rai:dataCollection": "Substream Recollection has two components. (1) Synthetic streams over a fixed alphabet of 16 symbols, generated by a variable-order n-gram process (rule lengths 1-8) and resampled until the realized stream falls within a target empirical Lempel-Ziv entropy band (low ~0.7 bits/symbol, medium ~1.9 bits/symbol, max-entropy = 4.0 bits/symbol = uniform i.i.d.). The 'EasyHuman' diagnostic split is generated separately by hand-specified pattern templates (repeating motifs and counting sequences) with controlled deviations. (2) A natural-video subset of 1,028 clips drawn from EPIC-Kitchens-100 (egocentric cooking videos) and SoccerNet (sports broadcasts) at L in {8, 16, 64, 128, 512, 1024}; EPIC-Kitchens-100 derived clips ship in this repo and SoccerNet rows ship as provenance metadata only.", | |
| "rai:dataCollectionType": "Mixed: programmatically generated synthetic streams (text and rendered video) plus curated derivatives of two existing public video datasets (EPIC-Kitchens-100, SoccerNet). No new human data collection was performed.", | |
| "rai:dataCollectionRawData": "Synthetic split: no raw data — streams are sampled from an n-gram generator over a 16-symbol alphabet with the seed-and-resample procedure described in the paper (Appendix 'Benchmark Details'). Natural-video split: raw data are EPIC-Kitchens-100 trimmed action clips (CC BY-NC 4.0, redistributed here) and SoccerNet broadcast events (NDA-gated; only provenance pointers are redistributed, not pixels).", | |
| "rai:dataCollectionTimeframe": [ | |
| {"@type": "sc:Date", "@value": "2025-01-01"}, | |
| {"@type": "sc:Date", "@value": "2026-05-03"} | |
| ], | |
| "rai:dataCollectionMissingData": "None within the synthetic split (the generator is deterministic given a seed). Natural-video coverage is uneven by design: SoccerNet rows are provided as provenance pointers only (NDA-gated source), so users without SoccerNet access cannot run those rows. The natural-video subset is small (1,028 clips), so per-class statistics may be noisy.", | |
| "rai:dataAnnotationProtocol": "No human annotators. Synthetic-stream membership labels (yes/no whether a candidate subsequence appears as a contiguous subsequence of the host stream) are derived programmatically from the known generative process: positives are exact substrings sampled from the host stream and negatives are sampled from the same generator (low/medium bands) or i.i.d. uniform (max-entropy band) and rejection-checked against the host stream. EasyHuman labels are derived from the candidate_tag suffix ('_present' / '_absent'). Natural-video labels are derived from EPIC-Kitchens-100 narration metadata and SoccerNet event annotations as released by the upstream projects; no manual relabeling was performed by this work.", | |
| "rai:dataAnnotationPlatform": "Programmatic. No annotation platform, crowdsourcing service, or human-in-the-loop tool was used.", | |
| "rai:dataAnnotationAnalysis": "Annotation correctness is a property of the generator and the upstream metadata, not of human labels: synthetic positive/negative labels are exact (verified via the generator's own substring check at construction time); EasyHuman labels are exact by construction. For the natural-video subset, label quality inherits from the upstream EPIC-Kitchens-100 and SoccerNet annotation pipelines — no inter-annotator agreement statistics are computed for this work because no new annotation was performed.", | |
| "rai:machineAnnotationTools": "The synthetic substream-membership oracle is implemented as a deterministic Python substring scanner over the realized stream. Empirical Lempel-Ziv entropy is computed via a standard LZ-78-style estimator on the realized symbol sequence and reported per stream (h_hat_overall) and per question prefix (h_hat_prefix).", | |
| "rai:dataPreprocessingProtocol": "Synthetic streams are rendered into 448x448 video at 1 frame per second by mapping each symbol to a letter glyph on one of three labeled vertical conveyor belts; on each frame, exactly one new symbol enters the top of its assigned belt while previously placed glyphs advance one step downward. EasyHuman uses the same renderer. Natural-video clips are trimmed to L-frame, L-second sub-clips at 1 fps to match the synthetic protocol; no further per-pixel transformation is applied. All splits also ship a flat per-question parquet, an NDJSON copy, and a nested manifest.json for the project's main.py loader.", | |
| "rai:dataManipulationProtocol": "Beyond the rendering and trimming described in dataPreprocessingProtocol, no further manipulation is applied: no resampling, no class balancing rebalance after construction, no augmentation, no anonymization beyond what the upstream sources already applied. Per-row license tags are attached at packaging time so downstream consumers can filter by license.", | |
| "rai:dataImputationProtocol": "No imputation. Fields that do not apply to a given row (e.g., h_hat_overall and candidate_sequence_lanes on natural_video rows; input_sequence on easyhuman video rows) are stored as explicit nulls and documented as null in the schema.", | |
| "rai:personalSensitiveInformation": "Synthetic streams contain no personal or sensitive information by construction (abstract symbols over a 16-element alphabet). EasyHuman is similarly abstract. Natural-video clips inherit any privacy properties of the upstream EPIC-Kitchens-100 and SoccerNet sources: EPIC-Kitchens-100 is egocentric cooking footage that may incidentally show participants' hands and home interiors (released by the original authors under CC BY-NC 4.0 with participant consent); SoccerNet is broadcast sports footage of public events. No new identifiable persons are introduced by this dataset, and no faces, names, or other PII are added. SoccerNet rows are distributed as provenance pointers only — users obtain the underlying video from the SoccerNet project after signing the SoccerNet NDA.", | |
| "rai:dataSafetyMeasures": "(1) Synthetic streams (text and rendered video) and EasyHuman pose no foreseeable safety risk: content is an abstract 16-symbol alphabet and contains no natural language, no imagery of people, and no operational instructions. (2) Natural-video clips inherit the safety posture of their upstream sources — EPIC-Kitchens-100 (egocentric cooking, CC BY-NC 4.0, released by the original authors with participant consent) and SoccerNet (broadcast sports footage of public events, distributed under the SoccerNet NDA). No additional automated content moderation, NSFW filtering, or toxicity scanning was applied beyond inheriting these upstream release policies. (3) Membership-query labels are deterministic substring checks over symbol streams and cannot encode harmful content. (4) Users redistributing this dataset, fine-tuning on it, or deploying models evaluated on it should re-run any safety review appropriate to their deployment context; the upstream sources' own safety considerations carry over and are not superseded by this packaging. (5) The dataset ships no model weights and no generated text outputs from evaluated models, so downstream safety risks from model misuse are out of scope of this artifact.", | |
| "rai:dataUseCases": "Intended uses: (1) evaluating long-context recall in vision-language and language models on a controlled substream-membership task; (2) measuring how recall accuracy varies with input length L and with realized input entropy; (3) studying calibration and abstention behavior under known ground truth; (4) analyzing FLOPs-vs-accuracy trade-offs across model families using the closed-form per-model FLOPs predictor that ships alongside the paper. The benchmark is held out as an evaluation set; it is not intended as training data.", | |
| "rai:outOfScopeUses": "(1) Not for training data: the benchmark is held out for evaluation, and using it as a training set invalidates its use as a recall benchmark for the trained model. (2) Not for production retrieval, search, or memory systems: synthetic streams are abstract symbols rather than natural language or natural video, and the yes/no substring-membership query format is contrived to isolate recall — strong scores do not transfer to information-retrieval deployment. (3) Not for general video question-answering evaluation: the natural-video subset is narrow (cooking activities and soccer broadcasts) and the question templates are restricted to membership probes. (4) Not for human-subjects research, biometric identification, face recognition, or surveillance applications: the upstream natural-video sources are not consented for individual identification, and no per-person identifiers are provided. (5) Not for fairness audits or demographic bias measurement: the natural-video subset reflects upstream EPIC-Kitchens-100 and SoccerNet skews (Western kitchens, English-speaking participants, men's professional European football) rather than a controlled demographic sample. (6) Not for safety-critical decision-making about deployed systems: the benchmark targets recall-vs-compute trade-offs, not safety, hallucination calibration outside abstention, or robustness to adversarial inputs. (7) Not for claims about general 'long-term memory' or general video understanding: the controlled, abstract substream task is intentionally narrower than those constructs.", | |
| "rai:dataLimitations": "(1) The synthetic alphabet is small (16 symbols) and carries no semantic meaning, so strong performance on this benchmark does not directly imply strong performance on natural-language or natural-video memory tasks. (2) The natural-video subset is small (1,028 clips) and skewed toward EPIC-Kitchens-100 cooking activities. (3) SoccerNet rows ship as provenance metadata only; users without an executed SoccerNet NDA cannot run those rows. (4) Compute is reported via a closed-form FLOPs predictor that may diverge from torch.profiler-measured FLOPs at the per-call level (predictor validation is reported in the paper's appendix). (5) Each (length, entropy band) cell at L >= 1024 contains a small number of streams (3-8), so per-cell standard errors at the longest contexts are wide. (6) No human-baseline numbers are reported for the synthetic stream task. (7) The dataset is not designed for safety evaluation, fairness auditing, or production retrieval.", | |
| "rai:dataReleaseMaintenancePlan": "Versioned via Hugging Face git commits at https://huggingface.co/datasets/anonstreammem/substream-recollection. The current release is version 2.0.0. Schema or content changes will be recorded in the dataset README and as Hugging Face commit messages on the dataset repo. There is no formal long-term maintenance commitment beyond the paper review and post-acceptance period; dependent upstream sources (EPIC-Kitchens-100, SoccerNet) maintain their own versioning policies.", | |
| "rai:dataSocialImpact": "This benchmark is intended to support more rigorous evaluation of long-context multimodal models, in particular by separating raw recall from background knowledge and by exposing the FLOPs cost of that recall. The principal foreseeable risk is interpretive: strong scores on this controlled, abstract substream task should not be taken as evidence of general 'long-term memory' or general video understanding. The paper documents concrete failure modes and entropy-dependent accuracy drops to mitigate over-claiming. The dataset is not suitable for fairness audits or for any safety-critical decision-making about deployed systems.", | |
| "rai:dataBiases": "(1) Synthetic-stream content has no demographic bias by construction (abstract 16-symbol alphabet, uniform sampling within each entropy band), but the choice of three entropy bands and a fixed lane geometry constrains the kinds of memory failure that can be observed. (2) Natural-video biases are inherited from the upstream sources: EPIC-Kitchens-100 over-represents Western kitchens and English-speaking participants and is drawn from a non-representative sample of households; SoccerNet over-represents major European professional leagues and male players, with limited coverage of women's football and non-elite competitions. (3) Question-template coverage is biased toward yes/no membership probes; we do not test free-form recall, multi-step reasoning over the stream, or counterfactual queries.", | |
| "distribution": [ | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "text-parquet", | |
| "name": "text-parquet", | |
| "description": "Parquet file for the text config (synthetic substream text-modality questions only; EasyHuman now lives in its own config).", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/text/questions.parquet", | |
| "encodingFormat": "application/x-parquet", | |
| "contentSize": "586342", | |
| "sha256": "8b3d15848c16ef71635301ad887fd8e9d56df50646ad8fbbe0f28f3f1eec5192" | |
| }, | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "synthetic_video-parquet", | |
| "name": "synthetic_video-parquet", | |
| "description": "Parquet file for the synthetic_video config (rendered synthetic substream videos only; EasyHuman now lives in its own config).", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/synthetic_video/questions.parquet", | |
| "encodingFormat": "application/x-parquet", | |
| "contentSize": "185956", | |
| "sha256": "ed0a711d12673de189d187d032796762ccab14f16deaa665e0f02a609a771205" | |
| }, | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "natural_video-parquet", | |
| "name": "natural_video-parquet", | |
| "description": "Parquet file for the natural_video config (EPIC-Kitchens-100 derived clips and SoccerNet provenance metadata).", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/natural_video/questions.parquet", | |
| "encodingFormat": "application/x-parquet", | |
| "contentSize": "25860", | |
| "sha256": "e4465281299e1f03699faba1970bb99bb5d425c5af257f1280f7aad8b1b62558" | |
| }, | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "easyhuman-parquet", | |
| "name": "easyhuman-parquet", | |
| "description": "Parquet file for the easyhuman config (rendered 3-belt EasyHuman videos at L=256 plus their L=256 and L=1024 text-modality questions; modality column distinguishes 'text' vs 'video').", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/easyhuman/questions.parquet", | |
| "encodingFormat": "application/x-parquet", | |
| "contentSize": "37225", | |
| "sha256": "5c4815297264c05f37957e6450077cae9f7ac8a48a0247a841c34abd827af6df" | |
| }, | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "text-manifest-json", | |
| "name": "text-manifest-json", | |
| "description": "Nested-shape manifest.json for the text config — directly ingestible by the project's main.py via load_patternvideos_manifest. Each video entry contains a 'questions' list and 'sequences_used' = {S_tokens, S_lanes}. Co-exists with questions.parquet (for datasets.load_dataset) and questions.json (NDJSON).", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/text/manifest.json", | |
| "encodingFormat": "application/json", | |
| "contentSize": "9340055", | |
| "sha256": "37d0a234526aa358734cc6d6b6de5e72728ec344eaab38c2f95cafcb01762487" | |
| }, | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "synthetic_video-manifest-json", | |
| "name": "synthetic_video-manifest-json", | |
| "description": "Nested-shape manifest.json for the synthetic_video config — directly ingestible by the project's main.py. Each video entry carries the parent stream's S_tokens / S_lanes and per-question candidate metadata.", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/synthetic_video/manifest.json", | |
| "encodingFormat": "application/json", | |
| "contentSize": "5729546", | |
| "sha256": "e35dcf7ee53bc65a31e2725662da53aa0143262e43a03d0b375e179afb01528c" | |
| }, | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "natural_video-manifest-json", | |
| "name": "natural_video-manifest-json", | |
| "description": "Nested-shape manifest.json for the natural_video config — directly ingestible by the project's main.py. One video per question (1028 entries). No token/lane structure.", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/natural_video/manifest.json", | |
| "encodingFormat": "application/json", | |
| "contentSize": "605090", | |
| "sha256": "f3f719f2812d8f433861a741440d13e836c6b008d516804259944e6c98b8d2ef" | |
| }, | |
| { | |
| "@type": "cr:FileObject", | |
| "@id": "easyhuman-manifest-json", | |
| "name": "easyhuman-manifest-json", | |
| "description": "Nested-shape manifest.json for the easyhuman config — directly ingestible by the project's main.py. Per-video 'modality' field distinguishes text vs video rows.", | |
| "contentUrl": "https://huggingface.co/datasets/anonstreammem/substream-recollection/resolve/main/easyhuman/manifest.json", | |
| "encodingFormat": "application/json", | |
| "contentSize": "1123982", | |
| "sha256": "1fad69c8c36c476345e4ff478637cc55fde5a302e5fef9ce4e5a8da490b1eb67" | |
| } | |
| ], | |
| "recordSet": [ | |
| { | |
| "@type": "cr:RecordSet", | |
| "@id": "text", | |
| "name": "text", | |
| "description": "Text-modality questions over the synthetic substream benchmark only. EasyHuman text-modality questions live in the separate 'easyhuman' config. License: CC BY 4.0.", | |
| "field": [ | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/length_L", | |
| "name": "length_L", | |
| "description": "Number of items / frames in the parent stream (8..4096).", | |
| "dataType": "sc:Integer", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "length_L" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/entropy_band", | |
| "name": "entropy_band", | |
| "description": "Entropy regime of the synthetic stream: 'low', 'medium', or 'max-entropy' in this config ('easyhuman' and 'natural' values occur only in the easyhuman and natural_video configs).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "entropy_band" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/question_variant", | |
| "name": "question_variant", | |
| "description": "Question template: 'sequential' or 'spatial' in this config ('easyhuman_binary' and 'binary_natural' occur only in the easyhuman and natural_video configs).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_variant" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/question_text", | |
| "name": "question_text", | |
| "description": "Natural-language probe shown to the model.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_text" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/answer", | |
| "name": "answer", | |
| "description": "Ground-truth answer: 'yes' or 'no'.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "answer" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/input_sequence", | |
| "name": "input_sequence", | |
| "description": "Full text-modality stream rendered as a single string (text split only).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "input_sequence" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/question_id", | |
| "name": "question_id", | |
| "description": "Stable per-row unique identifier.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_id" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/stream_id", | |
| "name": "stream_id", | |
| "description": "ID of the parent stream/video this question targets.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "stream_id" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/split", | |
| "name": "split", | |
| "description": "Subset name: always 'substream' (synthetic streams) in this config; 'easyhuman' (rendered 3-belt videos) and 'natural' (real videos) rows live in the easyhuman and natural_video configs.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "split" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/candidate_sequence", | |
| "name": "candidate_sequence", | |
| "description": "Probe substream as a list of token strings (synthetic only; null for natural).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_sequence" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/candidate_clip_start", | |
| "name": "candidate_clip_start", | |
| "description": "Start time (seconds) of the probe clip within the parent video, when applicable.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_clip_start" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/candidate_clip_end", | |
| "name": "candidate_clip_end", | |
| "description": "End time (seconds) of the probe clip within the parent video, when applicable.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_clip_end" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/candidate_tag", | |
| "name": "candidate_tag", | |
| "description": "Optional human-readable tag for the candidate (currently always null; reserved for future use).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_tag" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/candidate_present", | |
| "name": "candidate_present", | |
| "description": "Boolean ground-truth: True iff the candidate appears in the parent stream.", | |
| "dataType": "sc:Boolean", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_present" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/candidate_sequence_lanes", | |
| "name": "candidate_sequence_lanes", | |
| "description": "Per-question probe lane track (S_lanes), aligned 1:1 with candidate_sequence (S_tokens). Synthetic only; null on natural rows.", | |
| "dataType": "sc:Text", | |
| "repeated": true, | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_sequence_lanes" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/input_sequence_lanes", | |
| "name": "input_sequence_lanes", | |
| "description": "Parent-stream lane track (S_lanes) as a comma-joined string, aligned 1:1 with input_sequence (S_tokens).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "input_sequence_lanes" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/h_hat_overall", | |
| "name": "h_hat_overall", | |
| "description": "Per-stream empirical Lempel-Ziv entropy in bits/token (entropy_overall.empirical_bits.S_tokens). Paper-canonical entropy estimate. Synthetic only; null on natural.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "h_hat_overall" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/h_hat_prefix", | |
| "name": "h_hat_prefix", | |
| "description": "Per-question prefix empirical Lempel-Ziv entropy in bits/token (entropy_prefix.S_tokens), measured up to the question's prefix slice. Synthetic only; null on natural.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "h_hat_prefix" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "text/license", | |
| "name": "license", | |
| "description": "Per-row license tag: 'CC-BY-4.0' (synthetic/easyhuman), 'CC-BY-NC-4.0' (epic-kitchens-100), 'SoccerNet-NDA' (provenance-only).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "text-parquet" | |
| }, | |
| "extract": { | |
| "column": "license" | |
| } | |
| } | |
| } | |
| ] | |
| }, | |
| { | |
| "@type": "cr:RecordSet", | |
| "@id": "synthetic_video", | |
| "name": "synthetic_video", | |
| "description": "Video-modality questions over rendered synthetic substream streams. EasyHuman rendered 3-belt videos live in the separate 'easyhuman' config. video_path / clip_path are relative to this directory. License: CC BY 4.0.", | |
| "field": [ | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/length_L", | |
| "name": "length_L", | |
| "description": "Number of items / frames in the parent stream (8..4096).", | |
| "dataType": "sc:Integer", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "length_L" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/entropy_band", | |
| "name": "entropy_band", | |
| "description": "Entropy regime of the synthetic stream: 'low', 'medium', or 'max-entropy' in this config ('easyhuman' and 'natural' values occur only in the easyhuman and natural_video configs).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "entropy_band" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/question_variant", | |
| "name": "question_variant", | |
| "description": "Question template: 'sequential' or 'spatial' in this config ('easyhuman_binary' and 'binary_natural' occur only in the easyhuman and natural_video configs).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_variant" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/question_text", | |
| "name": "question_text", | |
| "description": "Natural-language probe shown to the model.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_text" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/answer", | |
| "name": "answer", | |
| "description": "Ground-truth answer: 'yes' or 'no'.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "answer" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/clip_path", | |
| "name": "clip_path", | |
| "description": "Repo-relative path to the probe clip video file (synthetic_video only).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "clip_path" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/video_path", | |
| "name": "video_path", | |
| "description": "Repo-relative path to the parent video file.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "video_path" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/question_id", | |
| "name": "question_id", | |
| "description": "Stable per-row unique identifier.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_id" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/stream_id", | |
| "name": "stream_id", | |
| "description": "ID of the parent stream/video this question targets.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "stream_id" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/split", | |
| "name": "split", | |
| "description": "Subset name: 'substream' (synthetic streams), 'easyhuman' (rendered 3-belt videos), or 'natural' (real videos).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "split" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/candidate_sequence", | |
| "name": "candidate_sequence", | |
| "description": "Probe substream as a list of token strings (synthetic only; null for natural).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_sequence" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/candidate_clip_start", | |
| "name": "candidate_clip_start", | |
| "description": "Start time (seconds) of the probe clip within the parent video, when applicable.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_clip_start" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/candidate_clip_end", | |
| "name": "candidate_clip_end", | |
| "description": "End time (seconds) of the probe clip within the parent video, when applicable.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_clip_end" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/candidate_tag", | |
| "name": "candidate_tag", | |
| "description": "Optional human-readable tag for the candidate (currently always null; reserved for future use).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_tag" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/candidate_present", | |
| "name": "candidate_present", | |
| "description": "Boolean ground-truth: True iff the candidate appears in the parent stream.", | |
| "dataType": "sc:Boolean", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_present" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/candidate_sequence_lanes", | |
| "name": "candidate_sequence_lanes", | |
| "description": "Per-question probe lane track (S_lanes), aligned 1:1 with candidate_sequence (S_tokens). Synthetic only; null on natural rows.", | |
| "dataType": "sc:Text", | |
| "repeated": true, | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_sequence_lanes" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/input_sequence_lanes", | |
| "name": "input_sequence_lanes", | |
| "description": "Parent-stream lane track (S_lanes) as a comma-joined string, aligned 1:1 with the parent stream's S_tokens.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "input_sequence_lanes" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/h_hat_overall", | |
| "name": "h_hat_overall", | |
| "description": "Per-stream empirical Lempel-Ziv entropy in bits/token (entropy_overall.empirical_bits.S_tokens). Paper-canonical entropy estimate. Synthetic only; null on natural.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "h_hat_overall" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/h_hat_prefix", | |
| "name": "h_hat_prefix", | |
| "description": "Per-question prefix empirical Lempel-Ziv entropy in bits/token (entropy_prefix.S_tokens), measured up to the question's prefix slice. Synthetic only; null on natural.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "h_hat_prefix" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "synthetic_video/license", | |
| "name": "license", | |
| "description": "Per-row license tag: 'CC-BY-4.0' (synthetic/easyhuman), 'CC-BY-NC-4.0' (epic-kitchens-100), 'SoccerNet-NDA' (provenance-only).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "synthetic_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "license" | |
| } | |
| } | |
| } | |
| ] | |
| }, | |
| { | |
| "@type": "cr:RecordSet", | |
| "@id": "natural_video", | |
| "name": "natural_video", | |
| "description": "Natural-video benchmark drawn from EPIC-Kitchens-100 and SoccerNet at L in {8,16,64,128,512,1024}. EPIC-Kitchens-100 derived clips ship in this repo (license: CC BY-NC 4.0). SoccerNet rows do NOT ship the underlying mp4s (NDA-gated source); the source_provenance column carries provenance metadata so the originals can be obtained from soccer-net.org after signing the SoccerNet NDA. Mixed license: CC BY-NC 4.0 (EPIC-Kitchens-100 derivatives) and SoccerNet NDA terms (provenance metadata only).", | |
| "field": [ | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/length_L", | |
| "name": "length_L", | |
| "description": "Number of items / frames in the parent stream (8..4096).", | |
| "dataType": "sc:Integer", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "length_L" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/entropy_band", | |
| "name": "entropy_band", | |
| "description": "Entropy regime of the synthetic stream: 'low', 'medium', 'max-entropy', 'easyhuman', or 'natural'.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "entropy_band" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/question_variant", | |
| "name": "question_variant", | |
| "description": "Question template: 'sequential', 'spatial', 'easyhuman_binary', or 'binary_natural'.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_variant" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/question_text", | |
| "name": "question_text", | |
| "description": "Natural-language probe shown to the model.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_text" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/answer", | |
| "name": "answer", | |
| "description": "Ground-truth answer: 'yes' or 'no'.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "answer" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/video_path", | |
| "name": "video_path", | |
| "description": "Repo-relative path to the parent video file.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "video_path" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/source_class", | |
| "name": "source_class", | |
| "description": "Activity class label (natural rows; e.g. 'wash_plate', 'red_card').", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "source_class" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/source_dataset", | |
| "name": "source_dataset", | |
| "description": "Originating dataset for natural rows: 'epic-kitchens-100' or 'soccernet'.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "source_dataset" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/question_id", | |
| "name": "question_id", | |
| "description": "Stable per-row unique identifier.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "question_id" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/source_provenance", | |
| "name": "source_provenance", | |
| "description": "JSON-string provenance for SoccerNet rows; null otherwise.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "source_provenance" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/stream_id", | |
| "name": "stream_id", | |
| "description": "ID of the parent stream/video this question targets.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "stream_id" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/split", | |
| "name": "split", | |
| "description": "Subset name: 'substream' (synthetic streams), 'easyhuman' (rendered 3-belt videos), or 'natural' (real videos).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "split" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/candidate_sequence", | |
| "name": "candidate_sequence", | |
| "description": "Probe substream as a list of token strings (synthetic only; null for natural).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_sequence" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/candidate_clip_start", | |
| "name": "candidate_clip_start", | |
| "description": "Start time (seconds) of the probe clip within the parent video, when applicable.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_clip_start" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/candidate_clip_end", | |
| "name": "candidate_clip_end", | |
| "description": "End time (seconds) of the probe clip within the parent video, when applicable.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_clip_end" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/candidate_tag", | |
| "name": "candidate_tag", | |
| "description": "Optional human-readable tag for the candidate (currently always null; reserved for future use).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_tag" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/candidate_present", | |
| "name": "candidate_present", | |
| "description": "Boolean ground-truth: True iff the candidate appears in the parent stream.", | |
| "dataType": "sc:Boolean", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_present" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/candidate_sequence_lanes", | |
| "name": "candidate_sequence_lanes", | |
| "description": "Per-question probe lane track (S_lanes); always null on natural_video (no token-level lane structure on real videos).", | |
| "dataType": "sc:Text", | |
| "repeated": true, | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "candidate_sequence_lanes" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/input_sequence_lanes", | |
| "name": "input_sequence_lanes", | |
| "description": "Parent-stream lane track (S_lanes); always null on natural_video.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "input_sequence_lanes" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/h_hat_overall", | |
| "name": "h_hat_overall", | |
| "description": "Per-stream empirical LZ entropy bits/token; always null on natural_video (no entropy ground truth for real videos).", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "h_hat_overall" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/h_hat_prefix", | |
| "name": "h_hat_prefix", | |
| "description": "Per-question prefix LZ entropy bits/token; always null on natural_video.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "h_hat_prefix" | |
| } | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "natural_video/license", | |
| "name": "license", | |
| "description": "Per-row license tag: 'CC-BY-4.0' (synthetic/easyhuman), 'CC-BY-NC-4.0' (epic-kitchens-100), 'SoccerNet-NDA' (provenance-only).", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": { | |
| "@id": "natural_video-parquet" | |
| }, | |
| "extract": { | |
| "column": "license" | |
| } | |
| } | |
| } | |
| ] | |
| }, | |
| { | |
| "@type": "cr:RecordSet", | |
| "@id": "easyhuman", | |
| "name": "easyhuman", | |
| "description": "EasyHuman rendered 3-belt videos and their text-modality counterparts. Pattern-based (alternating block templates), so no entropy ground truth: h_hat_overall and h_hat_prefix are NOT carried on this config. Per-question candidate lanes are not stored in the source manifest, so candidate_sequence_lanes is also absent. The 'modality' column is 'text' (L=256 + L=1024 text questions) or 'video' (L=256 rendered video questions). License: CC BY 4.0.", | |
| "field": [ | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/question_id", | |
| "name": "question_id", | |
| "description": "Stable per-row unique identifier (e.g. 'video1_q0').", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "question_id"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/stream_id", | |
| "name": "stream_id", | |
| "description": "ID of the parent EasyHuman stream/video (e.g. 'len_256_video_001_v0').", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "stream_id"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/split", | |
| "name": "split", | |
| "description": "Always 'easyhuman' on this config.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "split"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/modality", | |
| "name": "modality", | |
| "description": "'text' or 'video'. Text rows ship at L=256 and L=1024; video rows ship at L=256 only.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "modality"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/length_L", | |
| "name": "length_L", | |
| "description": "Number of items / frames in the parent stream. EasyHuman ships at L in {256, 1024}.", | |
| "dataType": "sc:Integer", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "length_L"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/entropy_band", | |
| "name": "entropy_band", | |
| "description": "Always 'easyhuman' on this config.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "entropy_band"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/question_variant", | |
| "name": "question_variant", | |
| "description": "Always 'easyhuman_binary' on this config.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "question_variant"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/question_text", | |
| "name": "question_text", | |
| "description": "Natural-language probe shown to the model.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "question_text"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/answer", | |
| "name": "answer", | |
| "description": "Ground-truth answer: 'yes' or 'no'.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "answer"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/candidate_sequence", | |
| "name": "candidate_sequence", | |
| "description": "Probe substream as a list of token strings (S_tokens).", | |
| "dataType": "sc:Text", | |
| "repeated": true, | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "candidate_sequence"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/candidate_clip_start", | |
| "name": "candidate_clip_start", | |
| "description": "Start time (seconds) of the probe clip within the parent video.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "candidate_clip_start"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/candidate_clip_end", | |
| "name": "candidate_clip_end", | |
| "description": "End time (seconds) of the probe clip within the parent video.", | |
| "dataType": "sc:Float", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "candidate_clip_end"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/candidate_tag", | |
| "name": "candidate_tag", | |
| "description": "EasyHuman category tag (e.g. 'x_present', 'mistake_absent').", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "candidate_tag"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/candidate_present", | |
| "name": "candidate_present", | |
| "description": "Boolean ground-truth: True iff the candidate appears in the parent stream (derived from candidate_tag suffix).", | |
| "dataType": "sc:Boolean", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "candidate_present"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/input_sequence", | |
| "name": "input_sequence", | |
| "description": "Full text-modality stream rendered as a comma-joined string. Populated on text rows; null on video rows.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "input_sequence"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/input_sequence_lanes", | |
| "name": "input_sequence_lanes", | |
| "description": "Parent-stream lane track (S_lanes) as a comma-joined string, aligned 1:1 with input_sequence.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "input_sequence_lanes"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/video_path", | |
| "name": "video_path", | |
| "description": "Repo-relative path to the parent EasyHuman video. Populated on video rows; null on text rows.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "video_path"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/clip_path", | |
| "name": "clip_path", | |
| "description": "Repo-relative path to the EasyHuman probe clip. Populated on video rows; null on text rows.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "clip_path"} | |
| } | |
| }, | |
| { | |
| "@type": "cr:Field", | |
| "@id": "easyhuman/license", | |
| "name": "license", | |
| "description": "Always 'CC-BY-4.0' on EasyHuman.", | |
| "dataType": "sc:Text", | |
| "source": { | |
| "fileObject": {"@id": "easyhuman-parquet"}, | |
| "extract": {"column": "license"} | |
| } | |
| } | |
| ] | |
| } | |
| ] | |
| } |