# WLASL / wlasl.py — Hugging Face dataset loading script (VieSignLang/wlasl)
# update(builder): adjust urls and config names (commit 1c6793b)
# Copyright 2023 Thinh T. Duong
import os
import datasets
import pandas as pd
from glob import glob
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)
# BibTeX citation for the dataset card (left empty here).
_CITATION = """
"""
# Human-readable dataset description for the dataset card (left empty here).
_DESCRIPTION = """
"""
_HOMEPAGE = "https://github.com/dxli94/WLASL"
# Base URL of the Hugging Face repo that hosts the data files.
_REPO_URL = "https://huggingface.co/datasets/VieSignLang/wlasl/resolve/main"
_URLS = {
    # Per-instance metadata (gloss, bbox, split assignment, ...) for the whole dataset.
    "meta": f"{_REPO_URL}/WLASL_v0.3.json",
    # NOTE(review): this is a wildcard over REMOTE zip archives; `glob()` only
    # matches local filesystem paths, so globbing this HTTPS pattern returns []
    # — confirm how the video archives are meant to be enumerated.
    "videos": f"{_REPO_URL}/start_kit/videos/*.zip",
}
class WLASLConfig(datasets.BuilderConfig):
    """Builder configuration for the WLASL dataset.

    Pins the dataset version to 1.0.0 and reuses the module-level
    description; everything else is forwarded to ``BuilderConfig``.
    """

    def __init__(self, name, **kwargs):
        """
        Parameters
        ----------
        name : str
            Name of subset.
        kwargs : dict
            Keyword arguments.
        """
        # Zero-argument super() — behaviorally identical to the explicit
        # two-argument form in Python 3.
        super().__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
            **kwargs,
        )
class WLASL(datasets.GeneratorBasedBuilder):
    """WLASL (Word-Level American Sign Language) dataset builder.

    Downloads the WLASL v0.3 metadata JSON plus the video archives and
    yields one example per sign instance whose video file is present.
    """

    BUILDER_CONFIGS = [
        WLASLConfig(name="rgb_videos"),
    ]
    DEFAULT_CONFIG_NAME = "rgb_videos"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata: the feature schema, homepage and citation."""
        features = datasets.Features({
            "gloss": datasets.Value("string"),
            "bbox": datasets.Sequence(datasets.Value("int16")),
            "fps": datasets.Value("int8"),
            "frame_end": datasets.Value("int32"),
            "frame_start": datasets.Value("int32"),
            "instance_id": datasets.Value("int32"),
            "signer_id": datasets.Value("int32"),
            "source": datasets.Value("string"),
            "url": datasets.Value("string"),
            "variation_id": datasets.Value("int8"),
            "video_id": datasets.Value("int32"),
            # Local path to the downloaded .mp4 file for this instance.
            "video": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        """
        Get splits.

        Parameters
        ----------
        dl_manager : datasets.DownloadManager
            Download manager.

        Returns
        -------
        list[datasets.SplitGenerator]
            Split generators for train / validation / test.
        """
        metadata_path = dl_manager.download(_URLS["meta"])
        # WLASL_v0.3.json is a list of {gloss, instances: [...]}: explode the
        # per-gloss instance lists into one row per sign instance, then flatten
        # the nested instance dicts into columns alongside the gloss.
        raw_df = pd.read_json(metadata_path)
        exploded_df = raw_df.explode("instances")
        df = pd.concat(
            [
                exploded_df[["gloss"]].reset_index(drop=True),
                pd.json_normalize(exploded_df.instances)
            ],
            axis=1,
        )
        # The metadata uses "train"/"val"/"test" split tags; the `split`
        # column is dropped since the datasets.Split name carries it.
        split_dict = {
            datasets.Split.TRAIN: df[df.split == "train"].drop(columns=["split"]),
            datasets.Split.VALIDATION: df[df.split == "val"].drop(columns=["split"]),
            datasets.Split.TEST: df[df.split == "test"].drop(columns=["split"]),
        }
        # NOTE(review): `glob()` matches local filesystem paths only, so
        # globbing the HTTPS wildcard in _URLS["videos"] returns [] — confirm
        # how the zip archives are meant to be enumerated (e.g. an explicit
        # list of archive URLs).
        video_dirs = dl_manager.download_and_extract(glob(_URLS["videos"]))
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "split_df": split_df,
                    "video_dirs": video_dirs,
                },
            )
            for name, split_df in split_dict.items()
        ]

    def _generate_examples(
        self, split_df: pd.DataFrame,
        video_dirs: list[str],
    ) -> tuple[int, dict]:
        """
        Generate examples from metadata.

        Parameters
        ----------
        split_df : pd.DataFrame
            Metadata rows for one split (one row per sign instance).
        video_dirs : list[str]
            Directories of extracted video archives to search for each video.

        Yields
        ------
        tuple[int, dict]
            Example key and feature dict; instances whose video file is not
            found in any directory are silently skipped.
        """
        split = datasets.Dataset.from_pandas(split_df)
        for i, sample in enumerate(split):
            for video_dir in video_dirs:
                # Assumes video_id is a string in the metadata JSON (the
                # features schema declares int32) — TODO confirm; string
                # concatenation here would fail on an int.
                video_path = os.path.join(video_dir, sample["video_id"] + ".mp4")
                if not os.path.exists(video_path):
                    continue
                yield i, {
                    "gloss": sample["gloss"],
                    "bbox": sample["bbox"],
                    "fps": sample["fps"],
                    "frame_end": sample["frame_end"],
                    "frame_start": sample["frame_start"],
                    "instance_id": sample["instance_id"],
                    "signer_id": sample["signer_id"],
                    "source": sample["source"],
                    "url": sample["url"],
                    "variation_id": sample["variation_id"],
                    "video_id": sample["video_id"],
                    "video": video_path,
                }
                # Stop after the first match: yielding the same key `i` twice
                # (video present in several dirs) is a duplicate-key error.
                break