import json
import os
import tarfile
import zipfile
import gzip
import subprocess
from os.path import join as p_join
from math import ceil, floor
from tqdm import tqdm
from multiprocessing import Pool
from typing import Optional, Dict
from glob import glob

import pandas as pd
import soundfile as sf
from datasets import Dataset, Audio, DatasetDict

audio_loader = Audio()
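
# Configuration (all overridable via environment variables): the metadata URL
# per language direction, the local cache layout, the [LINE_NO_START,
# LINE_NO_END) slice of metadata rows this job handles, and the Hugging Face
# repository to push to.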
url_metadata_dict = {
    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
    "enA-zhA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-zhA.tsv.gz",
    "enA-viA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-viA.tsv.gz",
}
direction = os.getenv("DIRECTION", "enA-jaA")
if direction not in url_metadata_dict:
    a, b = direction.split("-")
    url_metadata_dict[direction] = f"https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.{a}-{b}.tsv.gz"
sides = set(direction.split("-"))
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_feature, exist_ok=True)
for s in sides:
    os.makedirs(p_join(cache_dir_audio, s), exist_ok=True)

n_pool = int(os.getenv("N_POOL", 1))
wget_max_retry = os.getenv("MAX_RETRY", "2")
wget_timeout = os.getenv("TIMEOUT", "20")
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
dataset_id = os.getenv("DATASET_ID", "0")
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
skip_download = bool(int(os.getenv("SKIP_DOWNLOAD", 0)))
sampling_rate = 16000
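

# Fetch a URL with wget; a failed download can leave an empty or partial file
# behind (wget -O creates the output file up front), so it is removed, and any
# archive (.tar/.tgz/.tar.gz, .gz, .zip) is unpacked next to the download
# before the archive itself is deleted.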
def wget(url: str, output_file: Optional[str] = None):
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    result = subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if result.returncode != 0:
        if os.path.exists(output_file):
            os.remove(output_file)
        return False
    if not os.path.exists(output_file):
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        if output_file.endswith('.tar'):
            tar = tarfile.open(output_file)
        else:
            tar = tarfile.open(output_file, "r:gz")
        tar.extractall(os.path.dirname(output_file))
        tar.close()
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        with gzip.open(output_file, 'rb') as f:
            with open(output_file.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall(os.path.dirname(output_file))
        os.remove(output_file)
    return True
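

# Download the public Seamless-Align metadata for the current direction and
# keep id, url, duration offsets, LASER score, side, and line number.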
def get_metadata():
    url_metadata = url_metadata_dict[direction]
    meta_data_filename = os.path.basename(url_metadata)
    meta_data_path = p_join("download", "meta", meta_data_filename)
    if not os.path.exists(meta_data_path.replace(".gz", "")):
        assert wget(url_metadata, output_file=meta_data_path)
    df = pd.read_csv(meta_data_path.replace(".gz", ""), sep=r'[\t\s]', header=None)
    df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
    df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
    if direction == "enA-jpn":
        df = df[df["side"] == "enA"]
    assert len(df["direction"].unique()) == 1
    df.pop("direction")
    return df.sort_values(by=["line_no", "side"])
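

# Cast numpy scalar values to plain Python types for json.dump; matching on
# the type name avoids importing numpy directly.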
def to_json_serializable(val):
    if "float" in str(type(val)):
        return float(val)
    if "int" in str(type(val)):
        return int(val)
    return str(val)
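

# Drop the feature file and any cached audio for a failed line, then write a
# {"dummy": ...} placeholder so the line is skipped on subsequent runs.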
def cleanup(features, feature_file):
    if os.path.exists(feature_file):
        os.remove(feature_file)
    for _side in sides:
        for _audio_file in glob(p_join(cache_dir_audio, _side, f"{features['line_no']}.*")):
            os.remove(_audio_file)
    with open(feature_file, "w") as f:
        json.dump({"dummy": "dummy"}, f)
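

# Download both sides of one aligned pair and trim each file to its
# [duration_start, duration_end] span; the offsets are treated as sample
# counts at 16 kHz and rescaled to each file's native sampling rate.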
def get_audio(dataframe: pd.DataFrame):
    features = {"line_no": int(dataframe.pop('line_no').values[0])}
    feature_file = p_join(cache_dir_feature, f'{features["line_no"]}.json')
    for side, df in dataframe.groupby("side"):
        df.pop("side")
        features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
        extension = os.path.basename(features[f"{side}.url"]).split(".")[-1]
        features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{extension}"))
        start, end = features[f"{side}.duration_start"], features[f"{side}.duration_end"]
        if not os.path.exists(features[f"{side}.path"]):
            print(f"WGET {features[f'{side}.url']}")
            if not wget(features[f"{side}.url"], output_file=features[f"{side}.path"]):
                print("\n#### ERROR: wget failure ####\n")
                cleanup(features, feature_file)
                return None
        try:
            print(f"LOAD AUDIO FROM {features[f'{side}.path']}")
            wav, sr = sf.read(features[f"{side}.path"])
            print(f"wav shape: {wav.shape}")
            if wav.ndim > 1:
                wav = wav[:, 0]
            wav = wav[floor(start / sampling_rate * sr):ceil(end / sampling_rate * sr)]
            print(f"wav shape (after trimming): {wav.shape}")
            print(f"SAVING: {features[f'{side}.path']}")
            sf.write(features[f"{side}.path"], wav, sr)
        except Exception as e:
            print(f"\n#### ERROR ####\n {e}")
            cleanup(features, feature_file)
            return None
    print(f"\n### SUCCESS: {features['line_no']} ###\n")
    with open(feature_file, "w") as f:
        json.dump(features, f)
    return features["line_no"]
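

# Read one cached feature JSON back into a dict.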
def loader(feature: str) -> Dict:
    with open(feature) as f_reader:
        return json.load(f_reader)
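

# Entry point: (1) download and trim the audio for the assigned slice of line
# numbers, (2) assemble the cached features into a Dataset and push it to the
# Hub as one subset config, (3) clear the local cache for the slice.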
if __name__ == '__main__':
    if not skip_download:
        df_metadata = get_metadata()
        print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
        inputs = [
            g for line_no, g in df_metadata.groupby("line_no")
            if line_no_start <= line_no < line_no_end and not os.path.exists(
                p_join(cache_dir_feature, f'{int(line_no)}.json')
            )
        ]
        print(f"filtered unique lines: {len(inputs)}")
        inputs = [g for g in inputs if set(g["side"].unique()) == sides]
        print(f"after removing incomplete pairs: {len(inputs)}")

        if n_pool == 1:
            for g in tqdm(inputs, total=len(inputs)):
                get_audio(g)
        else:
            with Pool(n_pool) as pool:
                for line_no in pool.imap_unordered(get_audio, inputs):
                    if line_no:
                        print(line_no)
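
    # Collect every feature JSON in the slice, drop failure placeholders, and
    # attach the trimmed audio files as Audio columns before pushing to the Hub.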
    print("UPLOADING TO HF!!!")
    features = [p_join(cache_dir_feature, f'{i}.json') for i in range(line_no_start, line_no_end)]
    print(f"- raw feature: {len(features)}")
    features = [i for i in features if os.path.exists(i)]
    print(f"- path exists: {len(features)}")
    features = [loader(i) for i in features]
    features = [i for i in features if "dummy" not in i]
    print(f"- dummy removed: {len(features)}")
    print(f"push {len(features)} records to hub")
    data_dict = {}
    for side in sides:
        data_dict.update({f"{side}.audio": [i.pop(f"{side}.path") for i in features]})
    data_dict.update({k: [i[k] for i in features] for k in features[0].keys()})
    audio_dataset = Dataset.from_dict(data_dict)
    for side in sides:
        audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
    DatasetDict({"train": audio_dataset}).push_to_hub(
        f"{hf_org}/{hf_dataset}",
        config_name=f"subset_{dataset_id}"
    )
    print("clear the workspace")
    for i in tqdm(range(line_no_start, line_no_end), total=line_no_end - line_no_start):
        for audio_file in glob(p_join(cache_dir_audio, "*", f"{i}.*")):
            os.remove(audio_file)
        if os.path.exists(p_join(cache_dir_feature, f"{i}.json")):
            os.remove(p_join(cache_dir_feature, f"{i}.json"))