Approximetal committed on
Commit
cf58f29
·
verified ·
1 Parent(s): 7d9f9c5

Rename dataset.py to LEMAS_Dataset.py

Browse files
Files changed (2) hide show
  1. LEMAS_Dataset.py +74 -0
  2. dataset.py +0 -54
LEMAS_Dataset.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json
import os

import datasets

4
# Human-readable summary shown on the dataset's hub page.
_DESCRIPTION = "\nLEMAS Dataset - multilingual speech dataset\n"

# Placeholder citation; replace with the real reference before publishing.
_CITATION = "\nYour citation here.\n"
12
class LEMASDataset(datasets.GeneratorBasedBuilder):
    """Builder for the LEMAS multilingual speech dataset.

    Reads an ``eval.jsonl`` manifest in which every line is a JSON object
    carrying an audio reference, its duration, a transcript, and a
    word-level alignment, and exposes it as a single "eval" split.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the schema (features) of one example."""
        features = datasets.Features({
            "key": datasets.Value("string"),
            # sampling_rate=None keeps each audio file's native rate.
            "audio": datasets.Audio(sampling_rate=None),
            "dur": datasets.Value("float32"),
            "txt": datasets.Value("string"),
            # Word-level alignment: one (word, start, end, score) per word.
            "align": datasets.features.Sequence({
                "word": datasets.Value("string"),
                "start": datasets.Value("float32"),
                "end": datasets.Value("float32"),
                "score": datasets.Value("float32"),
            }),
            "align_txt": datasets.Value("string"),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://your-dataset-homepage",
            citation=_CITATION,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Download the data and declare the single evaluation split."""
        data_dir = dl_manager.download_and_extract("https://huggingface.co/datasets/your-username/LEMAS-Dataset/resolve/main/")
        return [
            datasets.SplitGenerator(
                # Fix: `datasets.Split` has no `EVAL` member (only TRAIN /
                # VALIDATION / TEST / ALL), so `datasets.Split.EVAL` raised
                # AttributeError. A custom NamedSplit keeps the intended
                # "eval" split name.
                name=datasets.Split("eval"),
                gen_kwargs={"filepath": os.path.join(data_dir, "eval.jsonl")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example)`` pairs from the JSONL manifest.

        The raw ``align`` object is flattened into the list-of-dicts layout
        declared in ``_info``; its plain-text form goes to ``align_txt``.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                # Tolerate blank / trailing lines in the manifest instead of
                # crashing on json.loads("").
                if not line.strip():
                    continue
                data = json.loads(line)
                align = data.get("align", {})
                # Reshape the per-word alignment so it matches the schema,
                # coercing numeric fields and defaulting anything missing.
                align_words = [
                    {
                        "word": w.get("word", ""),
                        "start": float(w.get("start", 0)),
                        "end": float(w.get("end", 0)),
                        "score": float(w.get("score", 0)),
                    }
                    for w in align.get("words", [])
                ]

                yield idx, {
                    "key": data.get("key", ""),
                    "audio": data.get("audio", ""),
                    "dur": float(data.get("dur", 0)),
                    "txt": data.get("txt", ""),
                    "align": align_words,
                    "align_txt": align.get("txt", ""),
                }
dataset.py DELETED
@@ -1,54 +0,0 @@
1
- import json
2
- import datasets
3
-
4
# Human-readable summary shown on the dataset's hub page.
_DESCRIPTION = (
    "\n"
    "LEMAS Eval Dataset (Multilingual)\n"
    "\n"
    "Each sample contains:\n"
    "- audio: speech audio\n"
    "- txt: normalized transcript\n"
    "- align: word-level alignment (json string)\n"
)

13
class LEMASEval(datasets.GeneratorBasedBuilder):
    """Evaluation-only builder for the LEMAS multilingual dataset.

    Examples are read from a local ``eval.jsonl`` manifest; the word-level
    alignment structure is serialized back to a JSON string so it fits the
    declared string feature.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Describe the dataset schema."""
        feature_spec = {
            "key": datasets.Value("string"),
            # None = keep each audio file's native sampling rate.
            "audio": datasets.Audio(sampling_rate=None),
            "dur": datasets.Value("float32"),
            "txt": datasets.Value("string"),
            "align": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_spec),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Expose a single TEST split backed by a local JSONL manifest."""
        # NOTE(review): relative path — presumably resolved against the
        # loading-script directory; confirm against how this is invoked.
        test_split = datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "jsonl_path": "eval.jsonl",
            },
        )
        return [test_split]

    def _generate_examples(self, jsonl_path):
        """Yield ``(index, example)`` pairs from the JSONL file."""
        with open(jsonl_path, "r", encoding="utf-8") as handle:
            for row_index, raw_line in enumerate(handle):
                record = json.loads(raw_line)
                example = {
                    "key": record["key"],
                    "audio": record["audio"],
                    "dur": float(record["dur"]),
                    "txt": record["txt"],
                    # struct -> string, matching the string feature above
                    "align": json.dumps(record["align"], ensure_ascii=False),
                }
                yield row_index, example