|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""PP4AV dataset.""" |
|
|
|
|
|
import os |
|
|
from glob import glob |
|
|
from tqdm import tqdm |
|
|
from pathlib import Path |
|
|
from typing import List |
|
|
import re |
|
|
from collections import defaultdict |
|
|
import datasets |
|
|
|
|
|
# Surface INFO-level logs (download / extraction progress) from the `datasets` library.
datasets.logging.set_verbosity_info()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Metadata fix: the original homepage, citation, and description here were
# copied verbatim from the WIDER FACE loading script and did not describe this
# dataset.  They now reference PP4AV, the dataset this script actually loads.
_HOMEPAGE = "https://github.com/khaclinh/pp4av"

_LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"

_CITATION = """\
@article{PP4AV2022,
  title = {PP4AV: A benchmarking Dataset for Privacy-preserving Autonomous Driving},
  author = {Trinh, Linh and Pham, Phuong and Trinh, Hoang and Bach, Nguyen and Nguyen, Dung and Nguyen, Giang and Nguyen, Huy},
  booktitle = {IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
  year = {2023}
}
"""

_DESCRIPTION = """\
PP4AV is a benchmarking dataset for privacy-preserving autonomous driving.
It contains annotated driving images -- including fisheye-camera frames --
labeled with face and license-plate bounding boxes, intended for evaluating
anonymization (data de-identification) models.  This loading script exposes
the fisheye test images together with their face and plate annotations.
"""

# Hugging Face dataset repo that hosts the zipped images and annotations.
_REPO = "https://huggingface.co/datasets/khaclinh/testdata/resolve/main/data"

_URLS = {
    "test": f"{_REPO}/fisheye.zip",       # fisheye-camera images
    "annot": f"{_REPO}/annotations.zip",  # ground-truth .txt files
}

# Accepted image extensions.  NOTE(review): currently unused -- the example
# generator globs only "*.png"; confirm against callers before removing.
IMG_EXT = ['png', 'jpeg', 'jpg']
|
|
|
|
|
|
|
|
class TestData(datasets.GeneratorBasedBuilder):
    """PP4AV fisheye test split: driving images with face and license-plate boxes."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Describe the dataset: one image per example plus two variable-length
        lists of 4-float bounding boxes ("faces" and "plates")."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    # Each entry is one bounding box encoded as 4 float32 values.
                    "faces": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                    "plates": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract both archives; this dataset has only a TEST split."""
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "data_dir": data_dir["test"],    # extracted fisheye.zip
                    "annot_dir": data_dir["annot"],  # extracted annotations.zip
                },
            ),
        ]

    def _generate_examples(self, split, data_dir, annot_dir):
        """Yield (index, example) pairs for every PNG under <data_dir>/fisheye.

        Images are visited in sorted order so example indices are deterministic
        across runs.  NOTE(review): annotation files are located but not yet
        parsed -- the boxes below are placeholders, matching the original
        stub's output exactly.
        """
        image_dir = os.path.join(data_dir, "fisheye")
        annotation_dir = os.path.join(annot_dir, "annotations", "fisheye")

        for idx, img_file in enumerate(sorted(glob(os.path.join(image_dir, "*.png")))):
            # Fix: the original built gt_path by substituting annot_dir for
            # image_dir, which skipped the "annotations/fisheye" subdirectory,
            # so the path could never resolve to an existing file.  The
            # ground-truth .txt shares the image's base name and lives under
            # annotation_dir.
            stem = os.path.splitext(os.path.basename(img_file))[0]
            gt_path = os.path.join(annotation_dir, stem + ".txt")  # noqa: F841 -- TODO: parse boxes from this file

            # Placeholder boxes (identical to the original stub's yield).
            faces = [[1, 2, 3, 4]]
            plates = [[1, 2, 3, 4]]
            yield idx, {"image": str(img_file), "faces": faces, "plates": plates}
|
|
|