# Copyright 2023 Thinh T. Duong
import os
import datasets
import pandas as pd
from glob import glob
logger = datasets.logging.get_logger(__name__)
_CITATION = """
"""
_DESCRIPTION = """
"""
_HOMEPAGE = "https://github.com/dxli94/WLASL"
_REPO_URL = "https://huggingface.co/datasets/VieSignLang/wlasl/resolve/main"
_URLS = {
"meta": f"{_REPO_URL}/WLASL_v0.3.json",
"videos": f"{_REPO_URL}/start_kit/videos/*.zip",
}
class WLASLConfig(datasets.BuilderConfig):
    """Builder configuration for the WLASL dataset."""
    def __init__(self, name, **kwargs):
        """
        Parameters
        ----------
        name : str
            Name of the subset this configuration selects.
        kwargs : dict
            Extra keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        # Every subset shares the same version and description; only the
        # subset name varies between configurations.
        super().__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
            **kwargs,
        )
class WLASL(datasets.GeneratorBasedBuilder):
    """WLASL (Word-Level American Sign Language) dataset builder.

    Downloads the WLASL v0.3 metadata JSON and the zipped RGB video
    archives from the Hugging Face hub mirror, then emits one example per
    video instance found on disk.
    """
    BUILDER_CONFIGS = [
        WLASLConfig(name="rgb_videos"),
    ]
    DEFAULT_CONFIG_NAME = "rgb_videos"

    def _info(self) -> datasets.DatasetInfo:
        """Return the static dataset metadata.

        Returns
        -------
        datasets.DatasetInfo
            Feature schema plus description/homepage/citation.
        """
        features = datasets.Features({
            "gloss": datasets.Value("string"),
            "bbox": datasets.Sequence(datasets.Value("int16")),
            "fps": datasets.Value("int8"),
            "frame_end": datasets.Value("int32"),
            "frame_start": datasets.Value("int32"),
            "instance_id": datasets.Value("int32"),
            "signer_id": datasets.Value("int32"),
            "source": datasets.Value("string"),
            "url": datasets.Value("string"),
            "variation_id": datasets.Value("int8"),
            "video_id": datasets.Value("int32"),
            # Local filesystem path of the downloaded .mp4 clip.
            "video": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        """
        Get splits.

        Parameters
        ----------
        dl_manager : datasets.DownloadManager
            Download manager.

        Returns
        -------
        list[datasets.SplitGenerator]
            One generator per train/validation/test split.
        """
        metadata_path = dl_manager.download(_URLS["meta"])
        raw_df = pd.read_json(metadata_path)
        # The metadata is one row per gloss with a list of instances;
        # explode + json_normalize flattens it to one row per instance.
        exploded_df = raw_df.explode("instances")
        df = pd.concat(
            [
                exploded_df[["gloss"]].reset_index(drop=True),
                pd.json_normalize(exploded_df.instances)
            ],
            axis=1,
        )
        split_dict = {
            datasets.Split.TRAIN: df[df.split == "train"].drop(columns=["split"]),
            datasets.Split.VALIDATION: df[df.split == "val"].drop(columns=["split"]),
            datasets.Split.TEST: df[df.split == "test"].drop(columns=["split"]),
        }
        # NOTE(review): glob() matches against the LOCAL filesystem, so
        # globbing a remote URL pattern returns [] unless the archives are
        # already present locally at this exact path — verify this works
        # when streaming from the hub.
        video_dirs = dl_manager.download_and_extract(glob(_URLS["videos"]))
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "split_df": split_df,
                    "video_dirs": video_dirs,
                },
            )
            for name, split_df in split_dict.items()
        ]

    def _generate_examples(
        self, split_df: pd.DataFrame,
        video_dirs: list[str],
    ) -> tuple[int, dict]:
        """
        Generate examples from metadata.

        Parameters
        ----------
        split_df : pd.DataFrame
            Per-instance metadata rows for one split.
        video_dirs : list[str]
            Extracted archive directories to search for each video file.

        Yields
        ------
        tuple[int, dict]
            Example key and feature dict. Rows whose video file is not
            found in any directory are silently skipped.
        """
        split = datasets.Dataset.from_pandas(split_df)
        for i, sample in enumerate(split):
            for video_dir in video_dirs:
                video_path = os.path.join(video_dir, sample["video_id"] + ".mp4")
                if os.path.exists(video_path):
                    yield i, {
                        "gloss": sample["gloss"],
                        "bbox": sample["bbox"],
                        "fps": sample["fps"],
                        "frame_end": sample["frame_end"],
                        "frame_start": sample["frame_start"],
                        "instance_id": sample["instance_id"],
                        "signer_id": sample["signer_id"],
                        "source": sample["source"],
                        "url": sample["url"],
                        "variation_id": sample["variation_id"],
                        "video_id": sample["video_id"],
                        "video": video_path,
                    }
                    # Stop after the first hit: yielding the same key `i`
                    # again (video present in several archives) would be a
                    # duplicate-key error in `datasets`.
                    break