# UniTalk / UniTalk.py
# Uploaded by plnguyen2908 (commit c336ea1, verified)
# unitalk_dataset.py
import os
import shutil
import tempfile
import zipfile

import numpy as np
import pandas as pd
from PIL import Image
from scipy.io import wavfile

import datasets
from datasets import Features, Sequence, Value
# BibTeX entry surfaced to users through DatasetInfo.citation in UniTalk._info().
# NOTE(review): the author field is a placeholder ("Your Name") — confirm with
# the dataset owners before release; do not edit here without updating the hub page.
_CITATION = """\
@misc{unitalk2025,
title={UniTalk Dataset},
author={Your Name},
year={2025}
}
"""
class UniTalk(datasets.GeneratorBasedBuilder):
    """
    UniTalk: frames and audio grouped per entity_id.

    Downloads the split CSVs and per-video ZIPs from the HF repo and pairs each
    video_id with its downloaded zips directly.  Each example yields:
      - entity_id: string
      - images: list of flattened uint8 frame arrays ([H*W*3])
      - audio: list of int16 PCM samples
      - frame_timestamp: list of floats (one per frame)
      - label_id: list of int64 labels (0/1, one per frame)
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="Default config for UniTalk dataset",
        ),
    ]
    DEFAULT_CONFIG_NAME = "default"

    # Fixed precision used to match CSV timestamps against frame-filename
    # timestamps.  Raw float keys are fragile: the CSV parser and
    # float(os.path.splitext(...)) can produce values that differ in the last
    # bits, which would raise KeyError on an exact-float dict lookup.
    _TS_DECIMALS = 6

    def _info(self):
        """Declare the feature schema advertised to `datasets`."""
        return datasets.DatasetInfo(
            description="UniTalk: frames and audio grouped by entity_id.",
            features=Features({
                "entity_id": Value("string"),
                "images": Sequence(Sequence(Value("uint8"))),   # [num_frames][H*W*3]
                "audio": Sequence(Value("int16")),              # [num_samples]
                "frame_timestamp": Sequence(Value("float32")),  # timestamps per frame
                "label_id": Sequence(Value("int64")),           # labels per frame
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/plnguyen2908/UniTalk",
            citation=_CITATION,
        )

    @staticmethod
    def _download_split_zips(dl_manager, base_url, split, video_ids):
        """Download the frame and audio zips for one split.

        Returns (video_zip_paths, audio_zip_paths), ordered like `video_ids`.
        """
        vid_urls = [f"{base_url}/clips_videos/{split}/{v}.zip" for v in video_ids]
        aud_urls = [f"{base_url}/clips_audios/{split}/{v}.zip" for v in video_ids]
        return dl_manager.download(vid_urls), dl_manager.download(aud_urls)

    def _split_generators(self, dl_manager):
        """Download CSVs, derive each split's video ids, and fetch all zips."""
        repo_id = "plnguyen2908/UniTalk"
        revision = "main"
        base_url = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision}"

        # The CSVs are the source of truth for which videos each split uses.
        train_csv = dl_manager.download(f"{base_url}/csv/train_orig.csv")
        val_csv = dl_manager.download(f"{base_url}/csv/val_orig.csv")
        train_vids = sorted(pd.read_csv(train_csv)["video_id"].unique())
        val_vids = sorted(pd.read_csv(val_csv)["video_id"].unique())

        train_vid_zips, train_aud_zips = self._download_split_zips(
            dl_manager, base_url, "train", train_vids)
        val_vid_zips, val_aud_zips = self._download_split_zips(
            dl_manager, base_url, "val", val_vids)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "csv_path": train_csv,
                    "video_ids": train_vids,
                    "video_zips": train_vid_zips,
                    "audio_zips": train_aud_zips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "csv_path": val_csv,
                    "video_ids": val_vids,
                    "video_zips": val_vid_zips,
                    "audio_zips": val_aud_zips,
                },
            ),
        ]

    def _generate_examples(self, csv_path, video_ids, video_zips, audio_zips):
        """Yield one (key, example) pair per entity_id in `csv_path`.

        Args:
            csv_path: split CSV with columns video_id, entity_id,
                frame_timestamp, label_id.
            video_ids: video ids for this split, aligned with the zip lists.
            video_zips: local paths of the downloaded frame zips.
            audio_zips: local paths of the downloaded audio zips.

        Raises:
            ValueError: a CSV video_id has no downloaded zip.
            FileNotFoundError: an entity has no WAV in its audio zip.
            KeyError: a frame timestamp has no matching CSV label row.
        """
        temp_root = tempfile.mkdtemp(prefix="unitalk_")
        df = pd.read_csv(csv_path, dtype={"frame_timestamp": float, "label_id": int})
        # Pair video_ids to their corresponding zips (same order by construction).
        vid_map = dict(zip(video_ids, video_zips))
        aud_map = dict(zip(video_ids, audio_zips))
        try:
            for entity_id, group in df.groupby("entity_id"):
                raw_vid = group["video_id"].iloc[0]
                if raw_vid not in vid_map:
                    raise ValueError(f"Video ID {raw_vid} not found among downloaded zips")
                video_zip = vid_map[raw_vid]
                audio_zip = aud_map[raw_vid]

                # 1) Extract frames.  Frame basenames are numeric timestamps, so
                # sort numerically — a plain lexicographic sort misorders frames
                # whenever timestamps differ in digit count (e.g. "10.0" < "9.0").
                with zipfile.ZipFile(video_zip, "r") as z:
                    prefix = f"{raw_vid}/{entity_id}/"
                    members = sorted(
                        (f for f in z.namelist()
                         if f.startswith(prefix) and f.lower().endswith('.jpg')),
                        key=lambda m: float(os.path.splitext(os.path.basename(m))[0]),
                    )
                    images, timestamps = [], []
                    for member in members:
                        path = z.extract(member, path=temp_root)
                        arr = np.array(Image.open(path).convert('RGB'), dtype=np.uint8)
                        images.append(arr.flatten().tolist())
                        timestamps.append(float(os.path.splitext(os.path.basename(path))[0]))

                # 2) Extract audio.
                # NOTE(review): prefix has no trailing slash, so it also matches
                # other entities whose id merely starts with this one — assumed
                # each zip holds exactly "{video_id}/{entity_id}.wav"; confirm.
                with zipfile.ZipFile(audio_zip, "r") as z:
                    wav_members = [m for m in z.namelist()
                                   if m.startswith(f"{raw_vid}/{entity_id}") and m.lower().endswith('.wav')]
                    if not wav_members:
                        raise FileNotFoundError(f"No WAV for {entity_id} in {audio_zip}")
                    wav_path = z.extract(wav_members[0], path=temp_root)
                    _, audio_arr = wavfile.read(wav_path)
                    audio = audio_arr.tolist()

                # 3) Align labels to frames, matching timestamps at fixed
                # precision instead of exact float equality.
                label_map = {
                    round(t, self._TS_DECIMALS): lbl
                    for t, lbl in zip(group["frame_timestamp"].tolist(),
                                      group["label_id"].tolist())
                }
                label_id = [label_map[round(t, self._TS_DECIMALS)] for t in timestamps]

                yield entity_id, {
                    "entity_id": entity_id,
                    "images": images,
                    "audio": audio,
                    "frame_timestamp": timestamps,
                    "label_id": label_id,
                }
        finally:
            # Extracted frames/WAVs are scratch data; don't leak the temp dir.
            shutil.rmtree(temp_root, ignore_errors=True)
if __name__ == "__main__":
    # Smoke-test: load this builder script directly through `datasets`.
    dataset = datasets.load_dataset(path=__file__, trust_remote_code=True)
    print(dataset)