# seedtts_testset.py — Hugging Face `datasets` loading script for the Seed-TTS test set.
import os
import json
import datasets
# Directory this script lives in (default location of the index files).
_DEFAULT_DATA_DIR = os.path.dirname(__file__)

# `datasets.Features` schema mirroring the features declared in the repo YAML.
FEATURES = datasets.Features({
    "audio_output_path": datasets.Value("string"),
    "prompt_text": datasets.Value("string"),
    "prompt_audio": datasets.Value("string"),  # kept as string; can be cast to Audio() downstream
    "text_input": datasets.Value("string"),
    "audio_ground_truth": datasets.Value("string"),
})

# Config -> split -> JSONL index file mapping (mirrors the configs/splits in the repo YAML).
DATASET_CONFIGS = {
    "en": {
        "splits": {
            "test_wer": "data/en_meta.jsonl",
            "test_sim": "data/en_non_para_reconstruct_meta.jsonl",
        }
    },
    "zh": {
        "splits": {
            "test_wer": "data/zh_meta.jsonl",
            "test_sim": "data/zh_non_para_reconstruct_meta.jsonl",
            "test_wer_hardcase": "data/zh_hardcase.jsonl",
        }
    },
}
class SeedTTSDataset(datasets.GeneratorBasedBuilder):
    """Loading script for the Seed-TTS evaluation set ("en" and "zh" configs).

    Each config downloads and extracts a ``data/<config>.tgz`` archive of wav
    files, then reads the JSONL index files listed in ``DATASET_CONFIGS`` and
    yields one example per line, rewriting relative audio paths to absolute
    paths inside the extracted archive.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=config_name, version=datasets.Version("1.0.0"))
        for config_name in DATASET_CONFIGS
    ]
    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Return dataset metadata with the feature schema from ``FEATURES``."""
        return datasets.DatasetInfo(
            description="Seed-TTS dataset",
            features=FEATURES,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the per-config wav archive and build split generators.

        Args:
            dl_manager: the ``datasets`` download manager.

        Returns:
            A list of ``datasets.SplitGenerator``, one per split in
            ``DATASET_CONFIGS[config]["splits"]``.

        Raises:
            ValueError: if the builder config name is unknown.
            FileNotFoundError: if a split's JSONL index file is missing.
        """
        config_name = self.config.name
        # Validate the config up front so a bad name fails before any download.
        if config_name not in DATASET_CONFIGS:
            raise ValueError(f"Unknown config name: {config_name}")

        # Download and auto-extract the repo's data/<config>.tgz wav archive.
        print('Extracting wav files ...')
        archive_path = dl_manager.download_and_extract(f"data/{config_name}.tgz")
        wav_data_dir = archive_path

        # Base directory for the JSONL index files: an explicit `data_dir` on
        # the config wins, otherwise fall back to the repo base path.
        # NOTE(review): `_base_path` is a private attribute of the download
        # manager — confirm no public equivalent exists in the pinned
        # `datasets` version.
        expected_repo_dir = dl_manager._base_path
        index_data_dir = getattr(self.config, "data_dir", None) or expected_repo_dir

        # Report where the archive ended up so users can find/control the cache.
        if dl_manager.manual_dir is not None:
            print(f"[INFO] Using manual_dir: {dl_manager.manual_dir}")
        else:
            print(f"[INFO] Extracted split {config_name} archive to: {archive_path}")
            print("[TIP] To control this cache path, use `load_dataset(..., cache_dir=...)`")

        splits = []
        for split_name, relative_path in DATASET_CONFIGS[config_name]["splits"].items():
            file_path = os.path.join(index_data_dir, relative_path)
            if not os.path.isfile(file_path):
                raise FileNotFoundError(
                    f"[ERROR] Missing split file for '{split_name}': expected at '{file_path}'.\n"
                    f"Please make sure all required files exist in the archive."
                )
            splits.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={"filepath": file_path, "wav_data_dir": wav_data_dir},
                )
            )
        return splits

    def _generate_examples(self, filepath, wav_data_dir):
        """Yield ``(index, example)`` pairs from a JSONL index file.

        Relative paths in the audio fields are resolved against
        ``wav_data_dir`` so they can later be cast to ``datasets.Audio``.

        Fix vs. original: the old code evaluated ``item[key]`` before its
        ``key in item`` guard, raising ``KeyError`` on lines missing an audio
        field; ``item.get(key)`` handles absent/empty fields gracefully.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                item = json.loads(line.strip())
                for key in ("prompt_audio", "audio_ground_truth"):
                    value = item.get(key)
                    if value and not os.path.isabs(value):
                        item[key] = os.path.abspath(os.path.join(wav_data_dir, value))
                yield idx, item