{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "cr": "http://mlcommons.org/croissant/",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "data": {
      "@id": "cr:data",
      "@type": "@json"
    },
    "dataType": {
      "@id": "cr:dataType",
      "@type": "@vocab"
    },
    "dct": "http://purl.org/dc/terms/",
    "examples": {
      "@id": "cr:examples",
      "@type": "@json"
    },
    "extract": "cr:extract",
    "field": "cr:field",
    "fileProperty": "cr:fileProperty",
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "sc": "https://schema.org/",
    "separator": "cr:separator",
    "source": "cr:source",
    "subField": "cr:subField",
    "transform": "cr:transform",
    "prov": "http://www.w3.org/ns/prov#"
  },
  "@type": "sc:Dataset",
  "name": "SignVerse-2M",
  "conformsTo": "http://mlcommons.org/croissant/1.0",
  "description": "SignVerse-2M is a large-scale multilingual pose-native dataset for sign language research. It converts approximately two million publicly available sign language video clips covering 55+ sign languages into unified DWPose keypoint sequences (18 body + 21x2 hand + 68 face keypoints per frame at 24 FPS), providing a standardized data interface directly compatible with modern pose-driven generation and recognition pipelines.",
  "citeAs": "@inproceedings{fang2026signverse2m, title={{SignVerse-2M}: A Two-Million-Clip Pose-Native Universe of 55+ Sign Languages}, author={Fang, Sen and Zhong, Hongbin and Zhang, Yanxin and Metaxas, Dimitris N.}, booktitle={Advances in Neural Information Processing Systems (NeurIPS)}, year={2026}, note={Evaluations \\& Datasets Track}}",
  "citation": "@inproceedings{fang2026signverse2m, title={{SignVerse-2M}: A Two-Million-Clip Pose-Native Universe of 55+ Sign Languages}, author={Fang, Sen and Zhong, Hongbin and Zhang, Yanxin and Metaxas, Dimitris N.}, booktitle={Advances in Neural Information Processing Systems (NeurIPS)}, year={2026}, note={Evaluations \\& Datasets Track}}",
  "license": "https://creativecommons.org/licenses/by-nc/4.0/",
  "url": "https://huggingface.co/datasets/SignerX/SignVerse-2M",
  "version": "1.0.0",
  "keywords": [
    "sign language",
    "pose estimation",
    "DWPose",
    "multilingual",
    "keypoint",
    "sign language generation",
    "sign language recognition",
    "pose-native",
    "video understanding"
  ],
  "creator": [
    {
      "@type": "Person",
      "name": "Sen Fang",
      "affiliation": {
        "@type": "Organization",
        "name": "Rutgers University"
      }
    },
    {
      "@type": "Person",
      "name": "Hongbin Zhong",
      "affiliation": {
        "@type": "Organization",
        "name": "Georgia Institute of Technology"
      }
    },
    {
      "@type": "Person",
      "name": "Yanxin Zhang",
      "affiliation": {
        "@type": "Organization",
        "name": "NVIDIA"
      }
    },
    {
      "@type": "Person",
      "name": "Dimitris N. Metaxas",
      "affiliation": {
        "@type": "Organization",
        "name": "Rutgers University"
      }
    }
  ],
  "isLiveDataset": false,
  "datePublished": "2026",
  "inLanguage": [
    "ase",
    "bfi",
    "gsg",
    "sgd",
    "lsf",
    "lse",
    "lis",
    "lgp",
    "ngt",
    "asf",
    "jsl",
    "kvk",
    "csl",
    "bzs",
    "lsm",
    "pjm",
    "rsl",
    "swl",
    "dsl",
    "fse",
    "nsl",
    "lsc",
    "aed",
    "tsm",
    "fsl"
  ],
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "hf-dataset-repo",
      "name": "HuggingFace dataset repository",
      "description": "Root of the SignVerse-2M HuggingFace dataset repository",
      "contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M",
      "encodingFormat": "git+https",
      "sha256": "main"
    },
    {
      "@type": "cr:FileSet",
      "@id": "pose-shards",
      "name": "DWPose keypoint shards",
      "description": "Numbered .tar archives each containing per-video directories with poses.npz (DWPose keypoints) and caption.json (structured subtitles).",
      "containedIn": {
        "@id": "hf-dataset-repo"
      },
      "encodingFormat": "application/x-tar",
      "includes": "dataset/Sign_DWPose_NPZ_*.tar"
    },
    {
      "@type": "cr:FileObject",
      "@id": "shard-manifest-csv",
      "name": "croissant_shard_manifest.csv",
| "description": "Online tar-shard manifest used for validator-friendly record generation.", |
| "contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M/resolve/main/debug/croissant_shard_manifest.csv", |
| "encodingFormat": "text/csv", |
| "sha256": "9fa5a2d82f875d0d0cce3f90424cebd48fb37e723d03128900197a28e0b63619" |
| }, |
| { |
| "@type": "cr:FileObject", |
| "@id": "metadata-csv", |
| "name": "Sign-DWPose-2M-metadata_ori.csv", |
| "description": "Master manifest of video IDs and sign language labels driving the processing pipeline.", |
| "contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M/resolve/main/Sign-DWPose-2M-metadata_ori.csv", |
| "encodingFormat": "text/csv", |
| "sha256": "79e8d2c35b0d3ed31b7e3c32348a1ce64513ca0ef217d7c1cbbd6b14410e7f08" |
| } |
| ], |
| "recordSet": [ |
| { |
| "@type": "cr:RecordSet", |
| "@id": "pose-shard-record", |
| "name": "Pose shard artifact record", |
| "description": "Each record corresponds to one released tar shard in the public dataset distribution. Detailed per-video and per-frame structure is documented in the dataset description and repository README, but this RecordSet is intentionally kept at the shard level so Croissant validators can enumerate records without parsing multi-gigabyte tar archives.", |
| "field": [ |
| { |
| "@type": "cr:Field", |
| "@id": "pose-shard-record/archive_path", |
| "name": "archive_path", |
| "description": "Relative path of a released tar shard within the dataset repository", |
| "dataType": "sc:Text", |
| "source": { |
| "fileObject": { |
| "@id": "shard-manifest-csv" |
| }, |
| "extract": { |
| "column": "archive_path" |
| } |
| } |
| }, |
| { |
| "@type": "cr:Field", |
| "@id": "pose-shard-record/archive_name", |
| "name": "archive_name", |
| "description": "Tar shard filename", |
| "dataType": "sc:Text", |
| "source": { |
| "fileObject": { |
| "@id": "shard-manifest-csv" |
| }, |
| "extract": { |
| "column": "archive_name" |
| } |
| } |
| }, |
| { |
| "@type": "cr:Field", |
| "@id": "pose-shard-record/size_bytes", |
| "name": "size_bytes", |
| "description": "Remote tar shard size in bytes from the live Hugging Face repository listing", |
| "dataType": "sc:Integer", |
| "source": { |
| "fileObject": { |
| "@id": "shard-manifest-csv" |
| }, |
| "extract": { |
| "column": "size_bytes" |
| } |
| } |
| } |
| ] |
| } |
| ], |
| "rai:dataCollection": "DWPose keypoint sequences were automatically extracted from publicly available multilingual sign language videos sourced from YouTube and inherited large public collections such as YouTube-SL-25. The pipeline (1) retrieves video metadata and subtitles, (2) decodes frames at 24 FPS via ffmpeg, (3) runs DWPose (RTMPose-based) to extract per-frame body, hand, and face keypoints, and (4) packages outputs into per-video poses.npz files. No human annotators labeled keypoints. Subtitle text is automatically downloaded and normalized from platform-exported WEBVTT captions.", |
| "rai:dataCollectionType": "Web Scraping", |
| "rai:dataCollectionMissingData": "Videos that failed to download, produced corrupted frames, or had no available subtitle track are excluded from the published corpus. Processing status is recorded per video in runtime_state/. Estimated processing success rate exceeds 90% of manifest entries.", |
| "rai:dataCollectionRawData": "The raw sources are publicly posted sign language videos and their associated subtitle tracks/metadata hosted on YouTube and related open web sources. This repository does not redistribute the raw RGB videos; it releases only derived pose keypoints, structured subtitle text, and dataset metadata. Source videos remain governed by the original platform terms and creator rights.", |
| "rai:dataPreprocessingProtocol": "Raw subtitles are cleaned by removing WEBVTT control fields, timestamp lines, HTML tags, and zero-width characters, and by normalizing repeated lines, broken punctuation, and hyphen continuations. DWPose is run per-frame; the primary signer (person_0) is selected by largest bounding-box area when multiple people are detected. Frames are decoded at 24 FPS using ffmpeg stdout streaming to avoid intermediate JPG materialization.", |
| "rai:dataAnnotationProtocol": "No manual annotation campaign was conducted for this release. The published supervision consists of machine-generated pose keypoints extracted by DWPose and subtitle text harvested from platform-provided caption tracks, then normalized into segment-level and document-level fields. In this sense, annotation is automatic rather than crowd- or expert-labeled.", |
| "rai:dataAnnotationPlatform": "yt-dlp/YouTube metadata access, platform-provided WEBVTT subtitle tracks, ffmpeg decoding, and DWPose (RTMPose-based) inference.", |
| "rai:dataAnnotationAnalysis": "Because the release does not rely on human annotators, there are no inter-annotator agreement statistics or annotator quality-control reports. Quality control is procedural: videos with failed download/decode, corrupted frames, or missing subtitle tracks are filtered out, and person_0 is selected heuristically as the primary signer. Remaining noise sources include pose-estimation failure, subtitle timing drift, automatic translation artifacts, and signer-selection errors in multi-person scenes.", |
| "rai:machineAnnotationTools": "DWPose for body/hand/face keypoint extraction; ffmpeg for frame decoding; yt-dlp plus subtitle normalization scripts for metadata and caption acquisition.", |
| "rai:dataBiases": "1. Language imbalance: the corpus inherits the long-tail distribution of YouTube-SL-25; ASL and BSL account for a disproportionate share of total hours. Models trained without language reweighting will underperform on low-resource languages. 2. Content-type bias: videos skew toward online educational and interpreter content; spontaneous or conversational signing is underrepresented. 3. Camera and production bias: professional interpreter videos with stable framing may be overrepresented relative to casual uploads. 4. Signer demographics: signer age, gender, and regional dialect distributions reflect what is available online and are not controlled.", |
| "rai:dataLimitations": "1. Automatic pose extraction introduces errors on challenging frames (fast motion, partial occlusion, unusual viewpoints, multi-signer scenes). 2. DWPose's 21-keypoint hand model does not resolve all handshape distinctions required for lexical discrimination in sign languages. 3. The 68-point facial model captures only a partial representation of non-manual features (facial expressions, mouthing) that carry phonological and grammatical information. 4. Subtitle alignment is automatic and may contain temporal offsets or missing segments. 5. No signer identity, demographic metadata, or manual quality labels are provided. 6. The single-signer assumption (person_0) may be incorrect for multi-signer or relay-interpreted videos.", |
| "rai:dataUseCases": "Intended for research use in: (1) multilingual sign language generation (text to pose to video); (2) pose-space sign language recognition and translation; (3) cross-lingual transfer and low-resource adaptation; (4) benchmarking pose-conditioned video generation models on sign language content. Not intended for safety-critical deployment (e.g., medical or legal interpretation) without independent validation, for re-identification of individuals, or for making definitive linguistic completeness claims about any specific sign language.", |
| "rai:dataSocialImpact": "Positive impact may include broader support for multilingual sign language research, lower barriers to pose-driven sign generation/recognition work, and improved benchmarking across sign languages under a unified interface. Potential negative impact includes misuse for surveillance, signer re-identification, non-consensual avatar synthesis, or overclaiming linguistic coverage/performance for low-resource signing communities. The dataset should therefore be treated as a research resource rather than evidence of deployable interpreting capability.", |
| "rai:hasSyntheticData": false, |
| "rai:dataReleaseMaintenancePlan": "The release is versioned. The repository keeps the processing pipeline, runtime status logs, and upload scripts needed to audit or refresh the corpus on selected subsets. Future maintenance may add corrected shards or metadata revisions, but no guarantee is made that every source video will remain available on upstream platforms over time.", |
| "rai:personalSensitiveInformation": "The dataset contains pose keypoint sequences derived from publicly posted YouTube videos of human signers. Raw video frames are NOT redistributed. Pose sequences may nonetheless allow re-identification of signers in combination with external metadata. No sensitive personal information (names, biometric identifiers beyond pose, health or financial data) is intentionally included. Users should be aware that sign language data inherently involves human subjects and should handle the data accordingly.", |
| "prov:wasDerivedFrom": [ |
| { |
| "@id": "https://github.com/google-research/google-research/tree/master/youtube_sl_25", |
| "prov:label": "YouTube-SL-25", |
| "sc:license": "CC BY 4.0 International license" |
    }
  ],
  "prov:wasGeneratedBy": [
    {
      "@type": "prov:Activity",
      "prov:type": {
        "@id": "https://www.wikidata.org/wiki/Q4929239"
      },
      "prov:label": "Download",
| "sc:description": "Download and process the video from YouTube" |
    }
  ]
}
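A minimal consumption sketch for the metadata above, assuming the JSON is saved locally as croissant.json and the mlcroissant package is installed. The record set ID "pose-shard-record" and its fields come from the RecordSet defined in the file; everything else is ordinary mlcroissant usage.

import mlcroissant as mlc

# Load the Croissant JSON-LD (local path assumed; a URL also works).
ds = mlc.Dataset(jsonld="croissant.json")

# Iterate the shard-level RecordSet: one record per released tar shard,
# carrying archive_path, archive_name, and size_bytes from the manifest CSV.
for record in ds.records(record_set="pose-shard-record"):
    print(record)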
|
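A hedged sketch of inspecting one downloaded pose shard. The per-video layout (directories containing poses.npz and caption.json) follows the FileSet description above; the concrete shard filename and the array names inside poses.npz are assumptions, so the loop simply enumerates whatever arrays are stored.

import io
import tarfile

import numpy as np

# Any shard matching the includes pattern dataset/Sign_DWPose_NPZ_*.tar
# (the exact numbering scheme is an assumption).
with tarfile.open("Sign_DWPose_NPZ_00000.tar") as tar:
    for member in tar:
        if member.name.endswith("poses.npz"):
            arrays = np.load(io.BytesIO(tar.extractfile(member).read()))
            # Per the dataset description, frames carry 18 body + 21x2 hand
            # + 68 face DWPose keypoints sampled at 24 FPS.
            for key in arrays.files:
                print(member.name, key, arrays[key].shape)
            break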
|
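Finally, a hedged re-creation of the frame-decoding step named in rai:dataPreprocessingProtocol: ffmpeg streams 24 FPS raw RGB frames over stdout, so no intermediate JPG files are materialized. The input filename, working resolution, and the dwpose_model placeholder are illustrative assumptions; DWPose's Python API is not specified in this document.

import subprocess

import numpy as np

WIDTH, HEIGHT = 1280, 720  # assumed working resolution

# Decode at 24 FPS straight to stdout as raw RGB24, mirroring the
# "ffmpeg stdout streaming" approach described above.
proc = subprocess.Popen(
    [
        "ffmpeg", "-i", "clip.mp4",
        "-vf", f"fps=24,scale={WIDTH}:{HEIGHT}",
        "-f", "rawvideo", "-pix_fmt", "rgb24",
        "pipe:1",
    ],
    stdout=subprocess.PIPE,
)

frame_bytes = WIDTH * HEIGHT * 3
while chunk := proc.stdout.read(frame_bytes):
    if len(chunk) < frame_bytes:
        break  # truncated final frame at EOF
    frame = np.frombuffer(chunk, np.uint8).reshape(HEIGHT, WIDTH, 3)
    # keypoints = dwpose_model(frame)  # placeholder: per-frame DWPose inference
proc.wait()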