hhqx commited on
Commit
f3dbe61
·
verified ·
1 Parent(s): da76289

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -1,59 +1,2 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
- *.model filter=lfs diff=lfs merge=lfs -text
15
- *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
- *.onnx filter=lfs diff=lfs merge=lfs -text
19
- *.ot filter=lfs diff=lfs merge=lfs -text
20
- *.parquet filter=lfs diff=lfs merge=lfs -text
21
- *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
- *.pt filter=lfs diff=lfs merge=lfs -text
25
- *.pth filter=lfs diff=lfs merge=lfs -text
26
- *.rar filter=lfs diff=lfs merge=lfs -text
27
- *.safetensors filter=lfs diff=lfs merge=lfs -text
28
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
- *.tar.* filter=lfs diff=lfs merge=lfs -text
30
- *.tar filter=lfs diff=lfs merge=lfs -text
31
- *.tflite filter=lfs diff=lfs merge=lfs -text
32
- *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
- *.xz filter=lfs diff=lfs merge=lfs -text
35
- *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
- *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
1
+ data/en.tgz filter=lfs diff=lfs merge=lfs -text
2
+ data/zh.tgz filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ *.wav
README.md ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # SeedTTS Evaluation Dataset
3
+
4
+ This dataset contains evaluation data for SeedTTS text-to-speech model testing in multiple languages.
5
+
6
+ Original repo from: [https://github.com/BytedanceSpeech/seed-tts-eval](https://github.com/BytedanceSpeech/seed-tts-eval)
7
+
8
+ ---
9
+
10
+ ## Languages
11
+
12
+ * **English (en)**: Contains `test_wer` and `test_sim` splits
13
+ * **Chinese (zh)**: Contains `test_wer`, `test_sim`, and `test_wer_hardcase` splits
14
+
15
+ ---
16
+
17
+ ## Usage
18
+
19
+ ```python
20
+ from datasets import load_dataset
21
+
22
+ # Load English dataset
23
+ ds_en = load_dataset("path/to/seedtts-dataset-repo", "en", trust_remote_code=True)
24
+
25
+ # Load Chinese dataset
26
+ ds_zh = load_dataset("path/to/seedtts-dataset-repo", "zh", trust_remote_code=True)
27
+
28
+ # Access specific splits
29
+ en_wer = ds_en['test_wer']
30
+ en_sim = ds_en['test_sim']
31
+
32
+ zh_wer = ds_zh['test_wer']
33
+ zh_sim = ds_zh['test_sim']
34
+ zh_hardcase = ds_zh['test_wer_hardcase']
35
+ ```
36
+
37
+ ---
38
+
39
+ ## Data Structure
40
+
41
+ ### Dataset Info (example)
42
+
43
+ ```yaml
44
+ dataset_info:
45
+ - config_name: en
46
+ features:
47
+ - audio_output_path: string
48
+ - prompt_text: string
49
+ - prompt_audio: string
50
+ - text_input: string
51
+ - audio_ground_truth: string
52
+ splits:
53
+ - name: test_wer
54
+ num_examples: 1088 # Update with actual numbers
55
+ - name: test_sim
56
+ num_examples: 1086 # Update with actual numbers
57
+
58
+ - config_name: zh
59
+ features:
60
+ - audio_output_path: string
61
+ - prompt_text: string
62
+ - prompt_audio: string
63
+ - text_input: string
64
+ - audio_ground_truth: string
65
+ splits:
66
+ - name: test_wer
67
+ num_examples: 2020 # Update with actual numbers
68
+ - name: test_sim
69
+ num_examples: 2018 # Update with actual numbers
70
+ - name: test_wer_hardcase
71
+ num_examples: 400 # Update with actual numbers
72
+ ```
73
+
74
+ ---
75
+
76
+ ## Configs & Data Files Mapping
77
+
78
+ ```yaml
79
+ configs:
80
+ - config_name: en
81
+ data_files:
82
+ - split: test_wer
83
+ path: data/en_meta.jsonl
84
+ - split: test_sim
85
+ path: data/en_non_para_reconstruct_meta.jsonl
86
+
87
+ - config_name: zh
88
+ data_files:
89
+ - split: test_wer
90
+ path: data/zh_meta.jsonl
91
+ - split: test_sim
92
+ path: data/zh_non_para_reconstruct_meta.jsonl
93
+ - split: test_wer_hardcase
94
+ path: data/zh_hardcase.jsonl
95
+ ```
96
+
97
+ ---
98
+
99
+ ## File Structure
100
+
101
+ ```
102
+ .
103
+ ├── seedtts_testset.py # Dataset loading script
104
+ ├── README.md # This file
105
+ ├── data/
106
+ │ ├── en_meta.jsonl
107
+ │ ├── en_non_para_reconstruct_meta.jsonl
108
+ │ ├── en.tgz # Compressed wav/audio files for English
109
+ │ ├── zh_meta.jsonl
110
+ │ ├── zh_non_para_reconstruct_meta.jsonl
111
+ │ ├── zh_hardcase.jsonl
112
+ │ └── zh.tgz # Compressed wav/audio files for Chinese
113
+ ├── convert_seedtts_to_dataset.py
114
+ ├── test_demo.py
115
+ ```
116
+
117
+ ---
118
+
119
+ ## Notes
120
+
121
+ * The `.tgz` files contain the audio `.wav` files and will be automatically extracted to the local Hugging Face cache directory during dataset loading.
122
+ * To control where the data archive is extracted and cached, use the `cache_dir` argument in `load_dataset`, e.g.:
123
+
124
+ ```python
125
+ ds = load_dataset("path/to/seedtts-dataset-repo", "en", cache_dir="/your/fast/storage/path")
126
+ ```
convert_seedtts_to_dataset.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import datasets
4
+
5
# Schema shared by every split. All fields are plain strings; the two
# *_audio fields hold file paths that callers may later cast to
# datasets.Audio().
_FIELD_NAMES = (
    "audio_output_path",
    "prompt_text",
    "prompt_audio",
    "text_input",
    "audio_ground_truth",
)
_FEATURES = datasets.Features(
    {field: datasets.Value("string") for field in _FIELD_NAMES}
)

# Per-language builder config: maps each split name to its repo-relative
# JSONL metadata file.
_DATASET_CONFIGS = {
    "en": {
        "splits": {
            "test_wer": "data/en_meta.jsonl",
            "test_sim": "data/en_non_para_reconstruct_meta.jsonl",
        }
    },
    "zh": {
        "splits": {
            "test_wer": "data/zh_meta.jsonl",
            "test_sim": "data/zh_non_para_reconstruct_meta.jsonl",
            "test_wer_hardcase": "data/zh_hardcase.jsonl",
        }
    },
}
28
+
29
+
30
class SeedTTSDataset(datasets.GeneratorBasedBuilder):
    """Loader for the Seed-TTS evaluation set (one builder config per language)."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=config, version=datasets.Version("1.0.0"))
        for config in _DATASET_CONFIGS.keys()
    ]
    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Return dataset metadata; every feature is a string (see _FEATURES)."""
        return datasets.DatasetInfo(
            description="Seed-TTS Dataset",
            features=_FEATURES,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the per-language audio archive and build one
        SplitGenerator per split listed in _DATASET_CONFIGS.
        """
        config_name = self.config.name
        # Validate before any (slow) download work.
        if config_name not in _DATASET_CONFIGS:
            raise ValueError(f"Unknown config name: {config_name}")

        # The repository ships one archive per language (data/en.tgz,
        # data/zh.tgz). The previous hard-coded "seedtts_data.tgz" does not
        # exist in the repo; this matches seedtts_testset.py.
        # download_and_extract caches the extraction in the datasets cache.
        archive_path = dl_manager.download_and_extract(f"data/{config_name}.tgz")

        # Report where the audio was unpacked.
        if dl_manager.manual_dir is not None:
            print(f"[INFO] Using manual_dir: {dl_manager.manual_dir}")
        else:
            print(f"[INFO] Extracted archive to: {archive_path}")
            print("[TIP] To control this path, use `load_dataset(..., cache_dir=...)`")

        # Each archive unpacks into a top-level directory named after the
        # language config (en/ or zh/), as seedtts_testset.py assumes; the
        # old extra "extracted" path segment did not match that layout.
        audio_base = os.path.join(archive_path, config_name)
        splits = []

        for split_name, meta_path in _DATASET_CONFIGS[config_name]["splits"].items():
            # NOTE(review): dl_manager._base_path is a private attribute that
            # resolves repo-relative paths — TODO: switch to a public API if
            # the installed `datasets` version offers one.
            abs_meta_path = os.path.join(dl_manager._base_path, meta_path)
            if not os.path.isfile(abs_meta_path):
                raise FileNotFoundError(f"Missing metadata file: {abs_meta_path}")

            splits.append(datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": abs_meta_path,
                    "audio_base": audio_base,
                }
            ))

        return splits

    def _generate_examples(self, filepath, audio_base):
        """Yield (index, example) pairs from a JSONL metadata file,
        resolving relative audio paths against audio_base.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                item = json.loads(line.strip())
                for key in ("prompt_audio", "audio_ground_truth"):
                    # .get() tolerates records that omit one of these fields.
                    value = item.get(key)
                    if value and not os.path.isabs(value):
                        item[key] = os.path.abspath(os.path.join(audio_base, value))
                yield idx, item
data/en.tgz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a47c030bb4882adaa03580dd20a449f789dc4c9eef9d5c2ba8fb1cfebf96aaf2
3
+ size 376940860
data/en_meta.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/en_meta.lst ADDED
The diff for this file is too large to render. See raw diff
 
data/en_non_para_reconstruct_meta.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/en_non_para_reconstruct_meta.lst ADDED
The diff for this file is too large to render. See raw diff
 
data/zh.tgz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b26c241e007711fed3c78c7de41c0f038172e875a70afbd203d3d687bf2d48b
3
+ size 649968708
data/zh_hardcase.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/zh_hardcase.lst ADDED
The diff for this file is too large to render. See raw diff
 
data/zh_meta.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/zh_meta.lst ADDED
The diff for this file is too large to render. See raw diff
 
data/zh_non_para_reconstruct_meta.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/zh_non_para_reconstruct_meta.lst ADDED
The diff for this file is too large to render. See raw diff
 
seedtts_testset.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import datasets
4
+
5
# Directory containing this script; fallback base for repo-relative paths.
_DEFAULT_DATA_DIR = os.path.dirname(__file__)

# Features matching the README YAML. Every field is a plain string; the
# two *_audio fields hold file paths and can be cast to datasets.Audio()
# downstream.
FEATURES = datasets.Features({
    field: datasets.Value("string")
    for field in (
        "audio_output_path",
        "prompt_text",
        "prompt_audio",
        "text_input",
        "audio_ground_truth",
    )
})

# Mirrors the configs/splits mapping from the README YAML: each language
# config maps split names to their JSONL metadata files.
DATASET_CONFIGS = {
    "en": {
        "splits": {
            "test_wer": "data/en_meta.jsonl",
            "test_sim": "data/en_non_para_reconstruct_meta.jsonl",
        }
    },
    "zh": {
        "splits": {
            "test_wer": "data/zh_meta.jsonl",
            "test_sim": "data/zh_non_para_reconstruct_meta.jsonl",
            "test_wer_hardcase": "data/zh_hardcase.jsonl",
        }
    },
}
32
+
33
+
34
class SeedTTSDataset(datasets.GeneratorBasedBuilder):
    """Seed-TTS evaluation set loader: one builder config per language,
    with splits defined in DATASET_CONFIGS.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=config_name, version=datasets.Version("1.0.0"))
        for config_name in DATASET_CONFIGS.keys()
    ]

    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Return dataset metadata; every feature is a string (see FEATURES)."""
        return datasets.DatasetInfo(
            description="Seed-TTS dataset",
            features=FEATURES,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the per-language audio archive and build one
        SplitGenerator per split listed in DATASET_CONFIGS.
        """
        config_name = self.config.name
        # Validate up front — the old code only checked AFTER the archive
        # for the (possibly invalid) config had already been requested.
        if config_name not in DATASET_CONFIGS:
            raise ValueError(f"Unknown config name: {config_name}")

        # Download and auto-extract data/<config>.tgz from the repo; the
        # datasets cache makes repeat loads cheap.
        print('Extracting wav files ...')
        archive_path = dl_manager.download_and_extract(f"data/{config_name}.tgz")

        # Base directory for resolving the repo-relative metadata files.
        # NOTE(review): dl_manager._base_path is a private attribute —
        # TODO: prefer a public equivalent if the installed version has one.
        index_data_dir = getattr(self.config, "data_dir", None) or dl_manager._base_path

        # The archive unpacks into a top-level <config_name>/ directory
        # containing the wav files.
        wav_data_dir = os.path.join(archive_path, config_name)

        # Report where the audio was extracted.
        if dl_manager.manual_dir is not None:
            print(f"[INFO] Using manual_dir: {dl_manager.manual_dir}")
        else:
            print(f"[INFO] Extracted split {config_name} archive to: {archive_path}")
            print("[TIP] To control this cache path, use `load_dataset(..., cache_dir=...)`")

        splits = []
        for split_name, relative_path in DATASET_CONFIGS[config_name]["splits"].items():
            file_path = os.path.join(index_data_dir, relative_path)
            if not os.path.isfile(file_path):
                raise FileNotFoundError(
                    f"[ERROR] Missing split file for '{split_name}': expected at '{file_path}'.\n"
                    f"Please make sure all required files exist in the archive."
                )

            splits.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={"filepath": file_path, "wav_data_dir": wav_data_dir}
                )
            )

        return splits

    def _generate_examples(self, filepath, wav_data_dir):
        """Yield (index, example) pairs from a JSONL metadata file.

        Relative audio paths are made absolute against wav_data_dir so they
        can later be cast to datasets.Audio().
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                item = json.loads(line.strip())
                for key in ("prompt_audio", "audio_ground_truth"):
                    # .get() fixes the old `if not item[key]` check, which
                    # raised KeyError when a record omitted one of these
                    # fields before the `key in item` guard ever ran.
                    value = item.get(key)
                    if value and not os.path.isabs(value):
                        item[key] = os.path.abspath(os.path.join(wav_data_dir, value))
                yield idx, item
test_demo.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from datasets import load_dataset

# Smoke-test the local loading script for both language configs.
# trust_remote_code is required because the dataset ships its own builder.
ds_en = load_dataset(
    "/hy-tmp/datasets/eval/seed-tts-eval/datasets/seedtts_testset",
    'en',
    trust_remote_code=True,
)
print(ds_en['test_wer'][0])

ds_zh = load_dataset(
    "/hy-tmp/datasets/eval/seed-tts-eval/datasets/seedtts_testset/",
    'zh',
    trust_remote_code=True,
)
print(ds_zh['test_sim'][0])


# Pull out the individual evaluation splits.
en_wer = ds_en['test_wer']
en_sim = ds_en['test_sim']

zh_wer = ds_zh['test_wer']
zh_sim = ds_zh['test_sim']
zh_hardcase = ds_zh['test_wer_hardcase']