{
"@context": {
"@language": "en",
"@vocab": "https://schema.org/",
"citeAs": "cr:citeAs",
"column": "cr:column",
"conformsTo": "dct:conformsTo",
"cr": "http://mlcommons.org/croissant/",
"rai": "http://mlcommons.org/croissant/RAI/",
"data": {"@id": "cr:data", "@type": "@json"},
"dataType": {"@id": "cr:dataType", "@type": "@vocab"},
"dct": "http://purl.org/dc/terms/",
"examples": {"@id": "cr:examples", "@type": "@json"},
"extract": "cr:extract",
"field": "cr:field",
"fileProperty": "cr:fileProperty",
"fileObject": "cr:fileObject",
"fileSet": "cr:fileSet",
"format": "cr:format",
"includes": "cr:includes",
"isLiveDataset": "cr:isLiveDataset",
"jsonPath": "cr:jsonPath",
"key": "cr:key",
"md5": "cr:md5",
"parentField": "cr:parentField",
"path": "cr:path",
"recordSet": "cr:recordSet",
"references": "cr:references",
"regex": "cr:regex",
"repeated": "cr:repeated",
"replace": "cr:replace",
"sc": "https://schema.org/",
"separator": "cr:separator",
"source": "cr:source",
"subField": "cr:subField",
"transform": "cr:transform"
},
"@type": "sc:Dataset",
"name": "SignVerse-2M",
"conformsTo": "http://mlcommons.org/croissant/1.0",
"description": "SignVerse-2M is a large-scale multilingual pose-native dataset for sign language research. It converts approximately two million publicly available sign language video clips covering 25+ sign languages into unified DWPose keypoint sequences (18 body + 21x2 hand + 68 face keypoints per frame at 24 FPS), providing a standardized data interface directly compatible with modern pose-driven generation and recognition pipelines.",
"citeAs": "@inproceedings{fang2026signverse2m, title={{SignVerse-2M}: A Two-Million-Clip Pose-Native Universe of 25+ Sign Languages}, author={Fang, Sen and Zhong, Hongbin and Zhang, Yanxin and Metaxas, Dimitris N.}, booktitle={Advances in Neural Information Processing Systems (NeurIPS)}, year={2026}, note={Evaluations \\& Datasets Track}}",
"license": "https://creativecommons.org/licenses/by-nc/4.0/",
"url": "https://huggingface.co/datasets/SignerX/SignVerse-2M",
"version": "1.0.0",
"keywords": [
"sign language", "pose estimation", "DWPose", "multilingual",
"keypoint", "sign language generation", "sign language recognition",
"pose-native", "video understanding"
],
"creator": [
{
"@type": "Person",
"name": "Sen Fang",
"affiliation": {"@type": "Organization", "name": "Rutgers University"}
},
{
"@type": "Person",
"name": "Hongbin Zhong",
"affiliation": {"@type": "Organization", "name": "Georgia Institute of Technology"}
},
{
"@type": "Person",
"name": "Yanxin Zhang",
"affiliation": {"@type": "Organization", "name": "NVIDIA"}
},
{
"@type": "Person",
"name": "Dimitris N. Metaxas",
"affiliation": {"@type": "Organization", "name": "Rutgers University"}
}
],
"isLiveDataset": false,
"datePublished": "2026",
"inLanguage": [
"ase", "bfi", "gsg", "sgd", "lsf", "lse", "lis", "lgp",
"ngt", "asf", "jsl", "kvk", "csl", "bzs", "lsm", "pjm",
"rsl", "swl", "dsl", "fse", "nsl", "lsc", "aed", "tsm", "fsl"
],
"distribution": [
{
"@type": "cr:FileObject",
"@id": "hf-dataset-repo",
"name": "HuggingFace dataset repository",
"description": "Root of the SignVerse-2M HuggingFace dataset repository",
"contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M",
"encodingFormat": "git+https",
"sha256": "main"
},
{
"@type": "cr:FileSet",
"@id": "pose-shards",
"name": "DWPose keypoint shards",
"description": "Numbered .tar archives each containing per-video directories with poses.npz (DWPose keypoints) and caption.json (structured subtitles).",
"containedIn": {"@id": "hf-dataset-repo"},
"encodingFormat": "application/x-tar",
"includes": "dataset/Sign_DWPose_NPZ_*.tar"
},
{
"@type": "cr:FileObject",
"@id": "metadata-csv",
"name": "Sign-DWPose-2M-metadata_ori.csv",
"description": "Master manifest of video IDs and sign language labels driving the processing pipeline.",
"contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M/resolve/main/Sign-DWPose-2M-metadata_ori.csv",
"encodingFormat": "text/csv",
"sha256": "79e8d2c35b0d3ed31b7e3c32348a1ce64513ca0ef217d7c1cbbd6b14410e7f08"
}
],
"recordSet": [
{
"@type": "cr:RecordSet",
"@id": "pose-frame-record",
"name": "Per-frame DWPose keypoint record",
"description": "Each record corresponds to one video frame. Keypoint coordinates are in pixel space; confidence scores are in [0, 1].",
"field": [
{
"@type": "cr:Field",
"@id": "pose-frame-record/video_id",
"name": "video_id",
"description": "YouTube video identifier",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.video_id"}
}
},
{
"@type": "cr:Field",
"@id": "pose-frame-record/fps",
"name": "fps",
"description": "Sampling frame rate (24.0)",
"dataType": "sc:Float",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.fps"}
}
},
{
"@type": "cr:Field",
"@id": "pose-frame-record/frame_id",
"name": "frame_id",
"description": "0-indexed frame index within the video",
"dataType": "sc:Integer",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.frames[*].frame_id"}
}
},
{
"@type": "cr:Field",
"@id": "pose-frame-record/body_keypoints",
"name": "body_keypoints",
"description": "18 body keypoints per primary signer as float[18][3] arrays of (x, y, score) in pixel space",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.frames[*].person_0.body"}
}
},
{
"@type": "cr:Field",
"@id": "pose-frame-record/left_hand_keypoints",
"name": "left_hand_keypoints",
"description": "21 left-hand keypoints per primary signer as float[21][3] arrays of (x, y, score)",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.frames[*].person_0.left_hand"}
}
},
{
"@type": "cr:Field",
"@id": "pose-frame-record/right_hand_keypoints",
"name": "right_hand_keypoints",
"description": "21 right-hand keypoints per primary signer as float[21][3] arrays of (x, y, score)",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.frames[*].person_0.right_hand"}
}
},
{
"@type": "cr:Field",
"@id": "pose-frame-record/face_keypoints",
"name": "face_keypoints",
"description": "68 facial keypoints per primary signer as float[68][3] arrays of (x, y, score)",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.frames[*].person_0.face"}
}
}
]
},
{
"@type": "cr:RecordSet",
"@id": "caption-record",
"name": "Video caption record",
"description": "Per-video structured caption with segment-level timestamps and English supervision.",
"field": [
{
"@type": "cr:Field",
"@id": "caption-record/video_id",
"name": "video_id",
"description": "YouTube video identifier",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.video_id"}
}
},
{
"@type": "cr:Field",
"@id": "caption-record/sign_language",
"name": "sign_language",
"description": "ISO 639-3 sign language code (e.g. ase, bfi, gsg)",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.sign_language"}
}
},
{
"@type": "cr:Field",
"@id": "caption-record/segments",
"name": "segments",
"description": "List of {start, end, text} subtitle segments with second-level timestamps",
"dataType": "sc:Text",
"repeated": true,
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.segments[*]"}
}
},
{
"@type": "cr:Field",
"@id": "caption-record/document_text",
"name": "document_text",
"description": "Concatenated document-level transcript for training-time consumption",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.document_text"}
}
},
{
"@type": "cr:Field",
"@id": "caption-record/english_source",
"name": "english_source",
"description": "Provenance of English supervision: 'native' or 'translated_from:<lang>'",
"dataType": "sc:Text",
"source": {
"fileSet": {"@id": "pose-shards"},
"extract": {"jsonPath": "$.english_source"}
}
}
]
}
],
"rai:dataCollection": "DWPose keypoint sequences were automatically extracted from publicly available multilingual sign language videos sourced from YouTube. The pipeline (1) retrieves video metadata and subtitles, (2) decodes frames at 24 FPS via ffmpeg, (3) runs DWPose (RTMPose-based) to extract per-frame body, hand, and face keypoints, and (4) packages outputs into per-video poses.npz files. No human annotators labeled keypoints. Subtitle text is automatically downloaded and normalized from platform-exported WEBVTT captions.",
"rai:dataCollectionType": "Web Scraping",
"rai:dataCollectionMissingData": "Videos that failed to download, produced corrupted frames, or had no available subtitle track are excluded from the published corpus. Processing status is recorded per video in runtime_state/. Estimated processing success rate exceeds 90% of manifest entries.",
"rai:dataPreprocessingProtocol": "Raw subtitles are cleaned by removing WEBVTT control fields, timestamp lines, HTML tags, and zero-width characters, and by normalizing repeated lines, broken punctuation, and hyphen continuations. DWPose is run per-frame; the primary signer (person_0) is selected by largest bounding-box area when multiple people are detected. Frames are decoded at 24 FPS using ffmpeg stdout streaming to avoid intermediate JPG materialization.",
"rai:dataBiases": "1. Language imbalance: the corpus inherits the long-tail distribution of YouTube-SL-25; ASL and BSL account for a disproportionate share of total hours. Models trained without language reweighting will underperform on low-resource languages. 2. Content-type bias: videos skew toward online educational and interpreter content; spontaneous or conversational signing is underrepresented. 3. Camera and production bias: professional interpreter videos with stable framing may be overrepresented relative to casual uploads. 4. Signer demographics: signer age, gender, and regional dialect distributions reflect what is available online and are not controlled.",
"rai:dataLimitations": "1. Automatic pose extraction introduces errors on challenging frames (fast motion, partial occlusion, unusual viewpoints, multi-signer scenes). 2. DWPose's 21-keypoint hand model does not resolve all handshape distinctions required for lexical discrimination in sign languages. 3. The 68-point facial model captures only a partial representation of non-manual features (facial expressions, mouthing) that carry phonological and grammatical information. 4. Subtitle alignment is automatic and may contain temporal offsets or missing segments. 5. No signer identity, demographic metadata, or manual quality labels are provided. 6. The single-signer assumption (person_0) may be incorrect for multi-signer or relay-interpreted videos.",
"rai:dataUseCases": "Intended for research use in: (1) multilingual sign language generation (text to pose to video); (2) pose-space sign language recognition and translation; (3) cross-lingual transfer and low-resource adaptation; (4) benchmarking pose-conditioned video generation models on sign language content. Not intended for safety-critical deployment (e.g., medical or legal interpretation) without independent validation, for re-identification of individuals, or for making definitive linguistic completeness claims about any specific sign language.",
"rai:personalSensitiveInformation": "The dataset contains pose keypoint sequences derived from publicly posted YouTube videos of human signers. Raw video frames are NOT redistributed. Pose sequences may nonetheless allow re-identification of signers in combination with external metadata. No sensitive personal information (names, biometric identifiers beyond pose, health or financial data) is intentionally included. Users should be aware that sign language data inherently involves human subjects and should handle the data accordingly."
}