{ "@context": { "@language": "en", "@vocab": "https://schema.org/", "citeAs": "cr:citeAs", "column": "cr:column", "conformsTo": "dct:conformsTo", "cr": "http://mlcommons.org/croissant/", "rai": "http://mlcommons.org/croissant/RAI/", "data": {"@id": "cr:data", "@type": "@json"}, "dataType": {"@id": "cr:dataType", "@type": "@vocab"}, "dct": "http://purl.org/dc/terms/", "examples": {"@id": "cr:examples", "@type": "@json"}, "extract": "cr:extract", "field": "cr:field", "fileProperty": "cr:fileProperty", "fileObject": "cr:fileObject", "fileSet": "cr:fileSet", "format": "cr:format", "includes": "cr:includes", "isLiveDataset": "cr:isLiveDataset", "jsonPath": "cr:jsonPath", "key": "cr:key", "md5": "cr:md5", "parentField": "cr:parentField", "path": "cr:path", "recordSet": "cr:recordSet", "references": "cr:references", "regex": "cr:regex", "repeated": "cr:repeated", "replace": "cr:replace", "sc": "https://schema.org/", "separator": "cr:separator", "source": "cr:source", "subField": "cr:subField", "transform": "cr:transform" }, "@type": "sc:Dataset", "name": "SignVerse-2M", "conformsTo": "http://mlcommons.org/croissant/1.0", "description": "SignVerse-2M is a large-scale multilingual pose-native dataset for sign language research. It converts approximately two million publicly available sign language video clips covering 25+ sign languages into unified DWPose keypoint sequences (18 body + 21x2 hand + 68 face keypoints per frame at 24 FPS), providing a standardized data interface directly compatible with modern pose-driven generation and recognition pipelines.", "citeAs": "@inproceedings{fang2026signverse2m, title={{SignVerse-2M}: A Two-Million-Clip Pose-Native Universe of 25+ Sign Languages}, author={Fang, Sen and Zhong, Hongbin and Zhang, Yanxin and Metaxas, Dimitris N.}, booktitle={Advances in Neural Information Processing Systems (NeurIPS)}, year={2026}, note={Evaluations \\& Datasets Track}}", "license": "https://creativecommons.org/licenses/by-nc/4.0/", "url": "https://huggingface.co/datasets/SignerX/SignVerse-2M", "version": "1.0.0", "keywords": [ "sign language", "pose estimation", "DWPose", "multilingual", "keypoint", "sign language generation", "sign language recognition", "pose-native", "video understanding" ], "creator": [ { "@type": "Person", "name": "Sen Fang", "affiliation": {"@type": "Organization", "name": "Rutgers University"} }, { "@type": "Person", "name": "Hongbin Zhong", "affiliation": {"@type": "Organization", "name": "Georgia Institute of Technology"} }, { "@type": "Person", "name": "Yanxin Zhang", "affiliation": {"@type": "Organization", "name": "NVIDIA"} }, { "@type": "Person", "name": "Dimitris N. 
Metaxas", "affiliation": {"@type": "Organization", "name": "Rutgers University"} } ], "isLiveDataset": false, "datePublished": "2026", "inLanguage": [ "ase", "bfi", "gsg", "sgd", "lsf", "lse", "lis", "lgp", "ngt", "asf", "jsl", "kvk", "csl", "bzs", "lsm", "pjm", "rsl", "swl", "dsl", "fse", "nsl", "lsc", "aed", "tsm", "fsl" ], "distribution": [ { "@type": "cr:FileObject", "@id": "hf-dataset-repo", "name": "HuggingFace dataset repository", "description": "Root of the SignVerse-2M HuggingFace dataset repository", "contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M", "encodingFormat": "git+https", "sha256": "main" }, { "@type": "cr:FileSet", "@id": "pose-shards", "name": "DWPose keypoint shards", "description": "Numbered .tar archives each containing per-video directories with poses.npz (DWPose keypoints) and caption.json (structured subtitles).", "containedIn": {"@id": "hf-dataset-repo"}, "encodingFormat": "application/x-tar", "includes": "dataset/Sign_DWPose_NPZ_*.tar" }, { "@type": "cr:FileObject", "@id": "metadata-csv", "name": "Sign-DWPose-2M-metadata_ori.csv", "description": "Master manifest of video IDs and sign language labels driving the processing pipeline.", "contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M/resolve/main/Sign-DWPose-2M-metadata_ori.csv", "encodingFormat": "text/csv", "sha256": "79e8d2c35b0d3ed31b7e3c32348a1ce64513ca0ef217d7c1cbbd6b14410e7f08" } ], "recordSet": [ { "@type": "cr:RecordSet", "@id": "pose-frame-record", "name": "Per-frame DWPose keypoint record", "description": "Each record corresponds to one video frame. Keypoint coordinates are in pixel space; confidence scores are in [0, 1].", "field": [ { "@type": "cr:Field", "@id": "pose-frame-record/video_id", "name": "video_id", "description": "YouTube video identifier", "dataType": "sc:Text", "source": { "fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.video_id"} } }, { "@type": "cr:Field", "@id": "pose-frame-record/fps", "name": "fps", "description": "Sampling frame rate (24.0)", "dataType": "sc:Float", "source": { "fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.fps"} } }, { "@type": "cr:Field", "@id": "pose-frame-record/frame_id", "name": "frame_id", "description": "0-indexed frame index within the video", "dataType": "sc:Integer", "source": { "fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.frames[*].frame_id"} } }, { "@type": "cr:Field", "@id": "pose-frame-record/body_keypoints", "name": "body_keypoints", "description": "18 body keypoints per primary signer as float[18][3] arrays of (x, y, score) in pixel space", "dataType": "sc:Text", "source": { "fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.frames[*].person_0.body"} } }, { "@type": "cr:Field", "@id": "pose-frame-record/left_hand_keypoints", "name": "left_hand_keypoints", "description": "21 left-hand keypoints per primary signer as float[21][3] arrays of (x, y, score)", "dataType": "sc:Text", "source": { "fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.frames[*].person_0.left_hand"} } }, { "@type": "cr:Field", "@id": "pose-frame-record/right_hand_keypoints", "name": "right_hand_keypoints", "description": "21 right-hand keypoints per primary signer as float[21][3] arrays of (x, y, score)", "dataType": "sc:Text", "source": { "fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.frames[*].person_0.right_hand"} } }, { "@type": "cr:Field", "@id": "pose-frame-record/face_keypoints", "name": "face_keypoints", "description": "68 facial keypoints per primary 
    {
      "@type": "cr:RecordSet",
      "@id": "caption-record",
      "name": "Video caption record",
      "description": "Per-video structured caption with segment-level timestamps and English supervision.",
      "field": [
        {
          "@type": "cr:Field",
          "@id": "caption-record/video_id",
          "name": "video_id",
          "description": "YouTube video identifier",
          "dataType": "sc:Text",
          "source": {"fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.video_id"}}
        },
        {
          "@type": "cr:Field",
          "@id": "caption-record/sign_language",
          "name": "sign_language",
          "description": "ISO 639-3 sign language code (e.g. ase, bfi, gsg)",
          "dataType": "sc:Text",
          "source": {"fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.sign_language"}}
        },
        {
          "@type": "cr:Field",
          "@id": "caption-record/segments",
          "name": "segments",
          "description": "List of {start, end, text} subtitle segments with second-level timestamps",
          "dataType": "sc:Text",
          "repeated": true,
          "source": {"fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.segments[*]"}}
        },
        {
          "@type": "cr:Field",
          "@id": "caption-record/document_text",
          "name": "document_text",
          "description": "Concatenated document-level transcript for training-time consumption",
          "dataType": "sc:Text",
          "source": {"fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.document_text"}}
        },
        {
          "@type": "cr:Field",
          "@id": "caption-record/english_source",
          "name": "english_source",
          "description": "Provenance of English supervision: either 'native' or a value prefixed with 'translated_from:'",
          "dataType": "sc:Text",
          "source": {"fileSet": {"@id": "pose-shards"}, "extract": {"jsonPath": "$.english_source"}}
        }
      ]
    }
  ],
  "rai:dataCollection": "DWPose keypoint sequences were automatically extracted from publicly available multilingual sign language videos sourced from YouTube. The pipeline (1) retrieves video metadata and subtitles, (2) decodes frames at 24 FPS via ffmpeg, (3) runs DWPose (RTMPose-based) to extract per-frame body, hand, and face keypoints, and (4) packages outputs into per-video poses.npz files. No human annotators labeled keypoints. Subtitle text is automatically downloaded and normalized from platform-exported WEBVTT captions.",
  "rai:dataCollectionType": "Web Scraping",
  "rai:dataCollectionMissingData": "Videos that failed to download, produced corrupted frames, or had no available subtitle track are excluded from the published corpus. Processing status is recorded per video in runtime_state/. Estimated processing success rate exceeds 90% of manifest entries.",
  "rai:dataPreprocessingProtocol": "Raw subtitles are cleaned by removing WEBVTT control fields, timestamp lines, HTML tags, and zero-width characters, and by normalizing repeated lines, broken punctuation, and hyphen continuations. DWPose is run per-frame; the primary signer (person_0) is selected by largest bounding-box area when multiple people are detected. Frames are decoded at 24 FPS using ffmpeg stdout streaming to avoid intermediate JPG materialization.",
  "rai:dataBiases": "1. Language imbalance: the corpus inherits the long-tail distribution of YouTube-SL-25; ASL and BSL account for a disproportionate share of total hours. Models trained without language reweighting will underperform on low-resource languages. 2. Content-type bias: videos skew toward online educational and interpreter content; spontaneous or conversational signing is underrepresented. 3. Camera and production bias: professional interpreter videos with stable framing may be overrepresented relative to casual uploads. 4. Signer demographics: signer age, gender, and regional dialect distributions reflect what is available online and are not controlled.",
  "rai:dataLimitations": "1. Automatic pose extraction introduces errors on challenging frames (fast motion, partial occlusion, unusual viewpoints, multi-signer scenes). 2. DWPose's 21-keypoint hand model does not resolve all handshape distinctions required for lexical discrimination in sign languages. 3. The 68-point facial model captures only a partial representation of non-manual features (facial expressions, mouthing) that carry phonological and grammatical information. 4. Subtitle alignment is automatic and may contain temporal offsets or missing segments. 5. No signer identity, demographic metadata, or manual quality labels are provided. 6. The single-signer assumption (person_0) may be incorrect for multi-signer or relay-interpreted videos.",
  "rai:dataUseCases": "Intended for research use in: (1) multilingual sign language generation (text to pose to video); (2) pose-space sign language recognition and translation; (3) cross-lingual transfer and low-resource adaptation; (4) benchmarking pose-conditioned video generation models on sign language content. Not intended for safety-critical deployment (e.g., medical or legal interpretation) without independent validation, for re-identification of individuals, or for making definitive linguistic completeness claims about any specific sign language.",
  "rai:personalSensitiveInformation": "The dataset contains pose keypoint sequences derived from publicly posted YouTube videos of human signers. Raw video frames are NOT redistributed. Pose sequences may nonetheless allow re-identification of signers in combination with external metadata. No sensitive personal information (names, biometric identifiers beyond pose, health or financial data) is intentionally included. Users should be aware that sign language data inherently involves human subjects and should handle the data accordingly."
}