import json
import os

import datasets
| |
|
| | _DEFAULT_DATA_DIR = os.path.dirname(__file__) |
| |
|
| | |
# Schema shared by every split. All fields are plain strings; the audio
# fields hold file paths rather than decoded waveforms.
FEATURES = datasets.Features(
    {
        field_name: datasets.Value("string")
        for field_name in (
            "audio_output_path",
            "prompt_text",
            "prompt_audio",
            "text_input",
            "audio_ground_truth",
        )
    }
)
# Per-config split table: maps each language config ("en"/"zh") to the
# JSONL metadata file backing each split. Paths are relative to the
# dataset repository root.
DATASET_CONFIGS = {
    "en": {
        "splits": {
            "test_wer": "data/en_meta.jsonl",
            "test_sim": "data/en_non_para_reconstruct_meta.jsonl",
        },
    },
    "zh": {
        "splits": {
            "test_wer": "data/zh_meta.jsonl",
            "test_sim": "data/zh_non_para_reconstruct_meta.jsonl",
            "test_wer_hardcase": "data/zh_hardcase.jsonl",
        },
    },
}
class SeedTTSDataset(datasets.GeneratorBasedBuilder):
    """Loader for the Seed-TTS evaluation set (configs "en" and "zh").

    Each split is backed by a JSONL metadata file (see ``DATASET_CONFIGS``);
    relative audio paths in the metadata are resolved against the extracted
    per-config wav archive.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=config_name, version=datasets.Version("1.0.0"))
        for config_name in DATASET_CONFIGS
    ]

    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Return dataset metadata: description and the string-only feature schema."""
        return datasets.DatasetInfo(
            description="Seed-TTS dataset",
            features=FEATURES,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the config's wav archive and build one generator per split.

        Raises:
            ValueError: if the selected config name is not in ``DATASET_CONFIGS``.
            FileNotFoundError: if a split's JSONL metadata file is missing.
        """
        config_name = self.config.name

        # Validate the config name BEFORE downloading, so an unknown config
        # fails fast with a clear error instead of an obscure download failure.
        # (The original performed this check only after download_and_extract.)
        if config_name not in DATASET_CONFIGS:
            raise ValueError(f"Unknown config name: {config_name}")

        print('Extracting wav files ...')
        archive_path = dl_manager.download_and_extract(f"data/{config_name}.tgz")

        # NOTE(review): `_base_path` is a private attribute of the datasets
        # DownloadManager — it appears to point at the repo/script root here,
        # but may break across library versions; confirm before upgrading.
        expected_repo_dir = dl_manager._base_path
        index_data_dir = getattr(self.config, "data_dir", None) or expected_repo_dir

        # Relative audio paths in the metadata resolve against the extracted
        # archive directory. (The original assigned this twice; once is enough.)
        wav_data_dir = archive_path

        if dl_manager.manual_dir is not None:
            print(f"[INFO] Using manual_dir: {dl_manager.manual_dir}")
        else:
            print(f"[INFO] Extracted split {config_name} archive to: {archive_path}")
            print("[TIP] To control this cache path, use `load_dataset(..., cache_dir=...)`")

        splits = []
        for split_name, relative_path in DATASET_CONFIGS[config_name]["splits"].items():
            file_path = os.path.join(index_data_dir, relative_path)
            if not os.path.isfile(file_path):
                raise FileNotFoundError(
                    f"[ERROR] Missing split file for '{split_name}': expected at '{file_path}'.\n"
                    f"Please make sure all required files exist in the archive."
                )
            splits.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={"filepath": file_path, "wav_data_dir": wav_data_dir},
                )
            )

        return splits

    def _generate_examples(self, filepath, wav_data_dir):
        """Yield ``(index, example)`` pairs from a JSONL metadata file.

        Relative paths in the audio fields are rewritten to absolute paths
        under ``wav_data_dir``; absent or empty values are left untouched.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                item = json.loads(line.strip())
                for key in ("prompt_audio", "audio_ground_truth"):
                    # .get() avoids the KeyError the original raised via
                    # `item[key]` when a metadata row lacks one of these keys.
                    value = item.get(key)
                    if value and not os.path.isabs(value):
                        item[key] = os.path.abspath(os.path.join(wav_data_dir, value))
                yield idx, item