Approximetal committed on
Commit
84bf161
·
verified ·
1 Parent(s): b6d415a

Create LEMAS-Dataset.py

Browse files
Files changed (1) hide show
  1. LEMAS-Dataset.py +88 -0
LEMAS-Dataset.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import tarfile
4
+ import datasets
5
+
6
class LEMASDataset(datasets.GeneratorBasedBuilder):
    """Builder for the LEMAS multilingual speech dataset.

    Expected on-disk layout (after extraction):
        LEMAS-Dataset/train/<lang>/<prefix>.jsonl   # metadata, one JSON object per line
        LEMAS-Dataset/train/<lang>/<prefix>.tar.gz  # audio files referenced by that jsonl

    Each jsonl record must provide: "key", "audio" (path inside the paired
    archive), "dur", "txt", and "align".
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the dataset schema (features, supervised keys, metadata)."""
        return datasets.DatasetInfo(
            description="LEMAS multilingual training dataset with split audio archives",
            features=datasets.Features(
                {
                    "key": datasets.Value("string"),
                    "audio": datasets.Audio(),
                    "dur": datasets.Value("float32"),
                    "txt": datasets.Value("string"),
                    "align": datasets.features.Sequence({
                        "txt": datasets.Value("string"),
                        "words": datasets.features.Sequence({
                            "word": datasets.Value("string"),
                            "start": datasets.Value("float32"),
                            "end": datasets.Value("float32"),
                            "score": datasets.Value("float32"),
                        }),
                    }),
                }
            ),
            supervised_keys=("audio", "txt"),
            homepage="https://your-project-homepage",
            license="Apache-2.0",
        )

    def _split_generators(self, dl_manager):
        """Discover (jsonl, tar.gz) shard pairs for every language directory.

        Bug fix: the previous implementation returned one SplitGenerator per
        shard, ALL named ``datasets.Split.TRAIN`` — the ``datasets`` library
        requires split names to be unique, so any dataset with more than one
        shard failed to load. All shard pairs are now passed to a single
        TRAIN generator and iterated inside ``_generate_examples``.
        """
        # Assumes the uploaded files already follow this extracted layout:
        # LEMAS-Dataset/train/de/de000.jsonl, de000.tar.gz, ...
        data_dir = dl_manager.extract("LEMAS-Dataset/train")  # could also be a dir path or dl_manager.manual_dir

        # Walk all language subdirectories; sorted for deterministic shard order
        # (os.listdir order is filesystem-dependent).
        languages = sorted(
            d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))
        )

        shards = []
        for lang in languages:
            lang_dir = os.path.join(data_dir, lang)
            entries = os.listdir(lang_dir)
            jsonl_files = sorted(f for f in entries if f.endswith(".jsonl"))
            # Set for O(1) membership tests when pairing jsonl <-> archive.
            tar_files = {f for f in entries if f.endswith(".tar.gz")}

            # Pair each .jsonl with its same-prefix .tar.gz; skip orphans silently
            # (matches the original best-effort behavior).
            for jsonl_file in jsonl_files:
                prefix = jsonl_file[: -len(".jsonl")]
                tar_file = prefix + ".tar.gz"
                if tar_file not in tar_files:
                    continue
                shards.append(
                    (os.path.join(lang_dir, jsonl_file), os.path.join(lang_dir, tar_file))
                )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"shards": shards},
            )
        ]

    def _generate_examples(self, shards):
        """Yield ``(key, example)`` pairs for every shard.

        Args:
            shards: list of ``(jsonl_path, audio_archive_path)`` tuples, as
                produced by ``_split_generators``.

        Raises:
            FileNotFoundError: if a jsonl record references an audio file that
                is missing from (or not extractable from) its paired archive.
        """
        for jsonl_path, audio_archive_path in shards:
            # Context manager guarantees the archive handle is closed even if an
            # exception is raised mid-iteration (the original leaked it).
            with tarfile.open(audio_archive_path, "r:gz") as archive:
                members = {m.name: m for m in archive.getmembers()}

                def get_audio_bytes(path_in_archive):
                    # Fail loudly on a broken jsonl/archive pairing rather than
                    # silently emitting an empty example.
                    if path_in_archive not in members:
                        raise FileNotFoundError(f"Audio {path_in_archive} not found in archive {audio_archive_path}")
                    f = archive.extractfile(members[path_in_archive])
                    if f is None:
                        raise FileNotFoundError(f"Cannot extract {path_in_archive} from {audio_archive_path}")
                    return f.read()

                with open(jsonl_path, "r", encoding="utf-8") as f:
                    for line in f:
                        if not line.strip():
                            continue  # tolerate blank lines / trailing newline
                        example = json.loads(line)
                        audio_bytes = get_audio_bytes(example["audio"])
                        yield example["key"], {
                            "key": example["key"],
                            "audio": {"bytes": audio_bytes, "path": example["audio"]},
                            "dur": example["dur"],
                            "txt": example["txt"],
                            "align": example["align"],
                        }