{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "sc": "https://schema.org/",
    "cr": "http://mlcommons.org/croissant/",
    "dct": "http://purl.org/dc/terms/",
    "prov": "http://www.w3.org/ns/prov#",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "data": {
      "@id": "cr:data",
      "@type": "@json"
    },
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "field": "cr:field",
    "dataType": {
      "@id": "cr:dataType",
      "@type": "@vocab"
    },
    "examples": {
      "@id": "cr:examples",
      "@type": "@json"
    },
    "source": "cr:source",
    "extract": "cr:extract",
    "fileProperty": "cr:fileProperty",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "separator": "cr:separator",
    "subField": "cr:subField",
    "transform": "cr:transform",
    "equivalentProperty": "cr:equivalentProperty",
    "samplingRate": "cr:samplingRate"
  },
  "@type": "sc:Dataset",
  "name": "FaVOS",
  "conformsTo": "http://mlcommons.org/croissant/1.1",
  "citeAs": "FaVOSsubmission/FaVOS. FaVOS dataset. Hugging Face Datasets. https://huggingface.co/datasets/FaVOSsubmission/FaVOS",
  "description": "FaVOS is a video object segmentation evaluation dataset with JPEG video frames, indexed PNG annotations, and two public evaluation split files.",
  "url": "https://huggingface.co/datasets/FaVOSsubmission/FaVOS",
  "sameAs": "https://huggingface.co/datasets/FaVOSsubmission/FaVOS/tree/main",
  "license": "https://creativecommons.org/licenses/by-nc/4.0/",
  "version": "1.0.0",
  "datePublished": "2026-05-07",
  "keywords": [
    "video object segmentation",
    "video segmentation",
    "object masks",
    "evaluation"
  ],
  "measurementTechnique": "The first annotated frame is used as the prompt frame. Evaluation metrics are computed on the remaining frames.",
  "rai:dataLimitations": "FaVOS is intended for evaluation of prompt-based video object segmentation on selected natural videos with temporally visible objects. It is not intended to represent all video domains, camera types, geographies, object categories, demographic groups, or deployment settings. The dataset contains 200 videos and 281 object instances, so conclusions should be interpreted as benchmark evidence rather than broad real-world performance guarantees. It is not recommended for training general-purpose recognition systems, identifying people, surveillance, biometric analysis, demographic inference, or safety-critical decision making.",
  "rai:dataBiases": "The videos were selected through source availability, quality checks, object visibility criteria, and human acceptance decisions, which can introduce selection bias toward videos with annotatable objects and sufficient visual quality. Crowded and uncrowded subsets were intentionally curated, so the distribution of object density, motion, occlusion, background, geography, culture, and human presence may differ from open-world video data. Labels are object masks produced through an iterative annotation workflow and may contain boundary errors, missed objects, or temporal inconsistencies that can affect measured model behavior.",
  "rai:personalSensitiveInformation": "The dataset may contain the following categories of personal or sensitive information in visual form: Geography and location-related contextual cues, Culture-related contextual cues, and Others, including faces, bodies, license plates, and public-scene identifiers that may appear incidentally in source videos. The dataset does not provide identity, demographic, biometric, health, political, religious, or socio-economic labels. The released task labels are object segmentation masks, not demographic or identity annotations. Face and license-plate defacing tooling was used during preprocessing where applicable, but automated anonymization may be imperfect. The dataset should not be used to identify, track, profile, or infer sensitive attributes of people.",
  "rai:dataUseCases": "FaVOS is designed to measure video object segmentation performance when the first annotated frame is provided as the prompt and all subsequent annotated frames are evaluated. Validated use cases include benchmarking segmentation mask quality, temporal robustness, and split-level comparisons using the provided favos-20 and favos-40 evaluation files. Validity has not been established for model training, demographic or fairness auditing, human identification, action recognition, object detection outside the annotated masks, medical or safety-critical applications, or deployment performance prediction in unrelated video domains.",
  "rai:dataSocialImpact": "Potential positive impacts include more reproducible evaluation of video object segmentation systems and clearer measurement of segmentation quality over time. Potential negative impacts include overgeneralizing results beyond the curated benchmark, using masks to support surveillance or tracking applications, or exposing residual personal or location information in source frames. Mitigations include non-commercial licensing, hashed/remapped video identifiers, release of evaluation-only splits, documented provenance, and automated defacing of detected sensitive visual regions where applicable.",
  "rai:hasSyntheticData": false,
  "prov:wasDerivedFrom": [
    {
      "@id": "http://projects.dfki.uni-kl.de/yfcc100m/",
      "prov:label": "YFCC100M",
      "description": "A subset of videos was selected from YFCC100M-backed media after filtering, curation, and annotation. Source media retain their original licensing constraints."
    },
    {
      "@id": "self_collected_videos",
      "prov:label": "Self-collected video collection",
      "description": "Additional locally collected videos were included to complement YFCC100M with scenarios relevant to low-temporal-visibility video object segmentation."
    }
  ],
  "prov:wasGeneratedBy": [
    {
      "@type": "prov:Activity",
      "name": "Data collection and source selection",
      "description": "Accepted records were collected from crowded, uncrowded, and myvideo annotation stores. Crowded and uncrowded records use YFCC100M-backed video paths; myvideo records use a local video collection. Source videos were included only when the corresponding source video, JPEG frame directory, and annotation directory were present."
    },
    {
      "@type": "prov:Activity",
      "name": "Preprocessing",
      "description": "The collection script remapped original video IDs to hashed identifiers, copied JPEG frames and indexed PNG annotations, optionally started each sequence at the stored prompt frame, renumbered frames from 00000, copied source videos for provenance, wrote name mappings and summary metadata, and packaged JPEGImages.zip, Annotations.zip, favos-20.txt, and favos-40.txt."
    },
    {
      "@type": "prov:Activity",
      "name": "Annotation",
      "description": "Object masks were produced through an iterative video object segmentation annotation workflow using prompt frames, propagated masks, refinement prompts, and human accept/reject review. The released indexed PNG masks encode object instances for evaluation."
    },
    {
      "@type": "prov:Activity",
      "name": "Sensitive-region mitigation",
      "description": "Preprocessing included automated defacing scripts for detected human faces and license plates where applicable, including InsightFace SCRFD-based face detection and optional YOLO-based restoration/defacing workflows."
    }
  ],
  "additionalProperty": [
    {
      "@type": "PropertyValue",
      "name": "num_videos",
      "value": 200
    },
    {
      "@type": "PropertyValue",
      "name": "num_frames",
      "value": 39422
    },
    {
      "@type": "PropertyValue",
      "name": "num_object_instances",
      "value": 281
    },
    {
      "@type": "PropertyValue",
      "name": "frame_count_min",
      "value": 55
    },
    {
      "@type": "PropertyValue",
      "name": "frame_count_max",
      "value": 690
    },
    {
      "@type": "PropertyValue",
      "name": "frame_count_mean",
      "value": 197.11
    },
    {
      "@type": "PropertyValue",
      "name": "favos_20_videos",
      "value": 100
    },
    {
      "@type": "PropertyValue",
      "name": "favos_40_videos",
      "value": 100
    },
    {
      "@type": "PropertyValue",
      "name": "split_overlap_videos",
      "value": 0
    }
  ],
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "JPEGImagesZip",
      "name": "JPEGImages.zip",
      "description": "ZIP archive containing video frames under JPEGImages/<video_id>/<frame>.jpg.",
      "contentUrl": "https://huggingface.co/datasets/FaVOSsubmission/FaVOS/resolve/main/JPEGImages.zip",
      "encodingFormat": "application/zip",
      "contentSize": "5018261297",
      "sha256": "56077e69e1b0c27c3d5439732b69ce5e9fe8cd801c04fe40cc8cdc1ed7384171"
    },
    {
      "@type": "cr:FileObject",
      "@id": "AnnotationsZip",
      "name": "Annotations.zip",
      "description": "ZIP archive containing indexed PNG annotations under Annotations/<video_id>/<frame>.png.",
      "contentUrl": "https://huggingface.co/datasets/FaVOSsubmission/FaVOS/resolve/main/Annotations.zip",
      "encodingFormat": "application/zip",
      "contentSize": "81620534",
      "sha256": "4e409046ae90cf324b4d9d801ed2d2b9d377b1fb76472099cb1d5628af166076"
    },
    {
      "@type": "cr:FileObject",
      "@id": "Favos20Split",
      "name": "favos-20.txt",
      "description": "Evaluation split containing 100 video IDs.",
      "contentUrl": "https://huggingface.co/datasets/FaVOSsubmission/FaVOS/resolve/main/favos-20.txt",
      "encodingFormat": "text/plain",
      "contentSize": "3300",
      "sha256": "06a0f87cd07e3ae51eb67b000a911a82eeadf032afe7758f925c7cbf1e184787"
    },
    {
      "@type": "cr:FileObject",
      "@id": "Favos40Split",
      "name": "favos-40.txt",
      "description": "Evaluation split containing 100 video IDs.",
      "contentUrl": "https://huggingface.co/datasets/FaVOSsubmission/FaVOS/resolve/main/favos-40.txt",
      "encodingFormat": "text/plain",
      "contentSize": "3300",
      "sha256": "89dd1e3fa74860e1a0a3e8f5bb38bc12a9320debc037995610cb2565f07f8444"
    }
  ]
}
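
The distribution above lists two ZIP archives (JPEG frames and indexed PNG masks under JPEGImages/<video_id>/ and Annotations/<video_id>/) plus two plain-text split files, and measurementTechnique states that the first annotated frame of each video serves as the prompt while the remaining frames are scored. The following is a minimal Python sketch of how a consumer might walk that layout; it assumes the archives have already been downloaded and unzipped alongside favos-20.txt and favos-40.txt, and the helper names (load_split, frames_for_video) are illustrative, not part of the dataset release.

from pathlib import Path

def load_split(split_file: str) -> list[str]:
    # One video ID per line in favos-20.txt / favos-40.txt (100 IDs each, no overlap).
    return [line.strip() for line in Path(split_file).read_text().splitlines() if line.strip()]

def frames_for_video(root: Path, video_id: str):
    # Yield (jpeg_frame, annotation_png) pairs, ordered by frame number; the layout
    # JPEGImages/<video_id>/<frame>.jpg and Annotations/<video_id>/<frame>.png follows
    # the distribution descriptions above.
    ann_dir = root / "Annotations" / video_id
    img_dir = root / "JPEGImages" / video_id
    for ann in sorted(ann_dir.glob("*.png")):
        yield img_dir / (ann.stem + ".jpg"), ann

if __name__ == "__main__":
    root = Path(".")  # hypothetical local root containing the unzipped archives
    videos = load_split("favos-20.txt")
    print(f"favos-20 contains {len(videos)} videos")

    pairs = list(frames_for_video(root, videos[0]))
    prompt_frame, prompt_mask = pairs[0]  # first annotated frame is the prompt (measurementTechnique)
    eval_pairs = pairs[1:]                # metrics are computed on the remaining frames
    print(videos[0], len(eval_pairs), "evaluation frames")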