# How2Sign / how2sign-clips.py
# Author: tanthinhdt — "update: builder" (commit 125e868, 5.47 kB)
# Copyright 2023 Thinh T. Duong
import os
import datasets
from glob import glob

# Module-level logger following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

# Citation and description are left blank — TODO(review): fill these in
# before publishing; they are surfaced in the dataset card / DatasetInfo.
_CITATION = """
"""
_DESCRIPTION = """
"""
_HOMEPAGE = "https://how2sign.github.io/index.html"

# Base URL of the dataset repository on the Hugging Face Hub.
_REPO_URL = "https://huggingface.co/datasets/VieSignLang/how2sign-clips/resolve/main"

# URL templates:
#   "meta"  -> per-split parquet metadata file ({split} filled in later);
#   "video" -> wildcard pattern over sharded zip archives of video clips
#              ({type} is the config name, e.g. "rgb").
_URLS = {
    "meta": f"{_REPO_URL}/metadata/" + "sharded_how2sign_realigned_{split}.parquet",
    "video": f"{_REPO_URL}/videos/" + "{type}/{split}/*/*.zip",
}
class How2SignClipsConfig(datasets.BuilderConfig):
    """Builder configuration for one How2Sign clips subset."""

    def __init__(self, name, **kwargs):
        """Create a configuration for the given subset.

        :param name: Name of the subset (e.g. ``"rgb"``).
        :param kwargs: Additional keyword arguments forwarded to
            :class:`datasets.BuilderConfig`.
        """
        # Pin the version and reuse the module-level description so every
        # subset reports a consistent DatasetInfo.
        super().__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
            **kwargs,
        )
class How2SignClips(datasets.GeneratorBasedBuilder):
    """Dataset builder for How2Sign sign-language video clips.

    Per-split metadata lives in parquet files under ``metadata/``; the
    clip videos live in sharded zip archives under
    ``videos/{config}/{split}/`` on the Hub repository.
    """

    # Only the RGB-video subset is exposed today; a "keypoints" subset is
    # sketched out below (and in the commented-out schema) but disabled.
    BUILDER_CONFIGS = [
        How2SignClipsConfig(name="rgb"),
        # How2SignClipsConfig(name="keypoints"),
    ]
    DEFAULT_CONFIG_NAME = "rgb"

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo describing one example's schema.

        Column names mirror the upstream How2Sign realigned CSV headers;
        ``VIDEO`` holds the raw mp4 bytes of the clip.
        """
        features = datasets.Features({
            "VIDEO_ID": datasets.Value("string"),
            "VIDEO_NAME": datasets.Value("string"),
            "SENTENCE_ID": datasets.Value("string"),
            "SENTENCE_NAME": datasets.Value("string"),
            "START_REALIGNED": datasets.Value("float64"),
            "END_REALIGNED": datasets.Value("float64"),
            "SENTENCE": datasets.Value("string"),
            "VIDEO": datasets.Value("large_binary"),
        })
        # Alternate (generic) schema kept for the planned keypoints subset.
        # features = datasets.Features({
        #     "id": datasets.Value("string"),
        #     "type": datasets.Value("string"),
        #     "view": datasets.Value("string"),
        #     "text": datasets.Value("string"),
        #     "video": datasets.Value("large_binary"),
        # })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        """
        Get splits.
        :param dl_manager: Download manager.
        :return: Splits.
        """
        # num_shards must match how the zip archives were produced; it is
        # baked into the shard directory names used in _generate_examples.
        split_dict = {
            "train": {
                "name": datasets.Split.TRAIN,
                "num_shards": 32,
            },
            "test": {
                "name": datasets.Split.TEST,
                "num_shards": 3,
            },
            "val": {
                "name": datasets.Split.VALIDATION,
                "num_shards": 2,
            },
        }
        return [
            datasets.SplitGenerator(
                name=info["name"],
                gen_kwargs={
                    "metadata_path": dl_manager.download(
                        _URLS["meta"].format(split=split)
                    ),
                    # NOTE(review): glob() matches paths on the LOCAL
                    # filesystem only; applying it to an "https://..."
                    # wildcard pattern will normally return [] unless this
                    # script runs from a location where the pattern happens
                    # to resolve as a relative path (e.g. inside a full
                    # clone of the repo). Verify against how the script is
                    # actually invoked.
                    "video_dirs": dl_manager.download_and_extract(
                        glob(
                            _URLS["video"].format(
                                type=self.config.name,
                                split=split
                            )
                        )
                    ),
                    "num_shards": info["num_shards"],
                },
            )
            for split, info in split_dict.items()
        ]

    def _generate_examples(
        self, metadata_path: str,
        video_dirs: list[str],
        num_shards: int,
    ) -> tuple[int, dict]:
        """
        Generate examples from metadata.
        :param metadata_path: Path to the split's parquet metadata file.
        :param video_dirs: Directories of extracted video archives.
        :param num_shards: Number of shards the split's videos are split into.
        :yield: Example as ``(index, feature_dict)``.
        """
        # split="train" here is just the parquet loader's default split
        # name for a single data file, not this dataset's split.
        dataset = datasets.load_dataset(
            "parquet",
            data_files=metadata_path,
            split="train",
        )
        for i, sample in enumerate(dataset):
            # "shard" column tells us which archive the clip was packed in;
            # shard directories are named shard_{idx:03d}_{total:03d}.
            shard_idx = sample["shard"]
            for video_dir in video_dirs:
                video_path = os.path.join(
                    video_dir,
                    f"shard_{shard_idx:03d}_{num_shards:03d}",
                    sample["SENTENCE_NAME"] + ".mp4",
                    # sample["id"] + ".mp4",
                )
                # NOTE(review): samples whose video file is not found in
                # any dir are silently skipped (no yield, no log), and a
                # clip present in several dirs would be yielded more than
                # once under the same key — confirm both are intended.
                if os.path.exists(video_path):
                    yield i, {
                        "VIDEO_ID": sample["VIDEO_ID"],
                        "VIDEO_NAME": sample["VIDEO_NAME"],
                        "SENTENCE_ID": sample["SENTENCE_ID"],
                        "SENTENCE_NAME": sample["SENTENCE_NAME"],
                        "START_REALIGNED": sample["START_REALIGNED"],
                        "END_REALIGNED": sample["END_REALIGNED"],
                        "SENTENCE": sample["SENTENCE"],
                        "VIDEO": self.__get_binary_data(video_path),
                    }
                    # Alternate yield kept for the planned keypoints subset.
                    # yield i, {
                    #     "id": sample["id"],
                    #     "type": sample["type"],
                    #     "view": sample["view"],
                    #     "text": sample["text"],
                    #     "video": self.__get_binary_data(video_path),
                    # }

    def __get_binary_data(self, path):
        """Read the file at *path* and return its raw bytes."""
        with open(path, "rb") as f:
            return f.read()