File size: 2,659 Bytes
cf58f29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import json
import os

import datasets

# Human-readable description shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """
LEMAS Dataset - multilingual speech dataset
"""

# Citation text surfaced in DatasetInfo; placeholder — TODO(review): fill in
# the real citation before publishing.
_CITATION = """
Your citation here.
"""

class LEMASDataset(datasets.GeneratorBasedBuilder):
    """Loading script for the LEMAS multilingual speech dataset.

    Exposes a single custom ``eval`` split read from a JSONL manifest.
    Each example carries an audio file, its duration, a transcript, and a
    word-level alignment (word/start/end/score).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Schema for one utterance. ``align`` is a Sequence of per-word
        # alignment records; ``sampling_rate=None`` keeps each file's
        # native sampling rate instead of resampling on decode.
        features = datasets.Features({
            "key": datasets.Value("string"),
            "audio": datasets.Audio(sampling_rate=None),
            "dur": datasets.Value("float32"),
            "txt": datasets.Value("string"),
            "align": datasets.features.Sequence({
                "word": datasets.Value("string"),
                "start": datasets.Value("float32"),
                "end": datasets.Value("float32"),
                "score": datasets.Value("float32"),
            }),
            "align_txt": datasets.Value("string"),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://your-dataset-homepage",
            citation=_CITATION,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        # Only an "eval" split is published for this dataset.
        data_dir = dl_manager.download_and_extract("https://huggingface.co/datasets/your-username/LEMAS-Dataset/resolve/main/")
        return [
            datasets.SplitGenerator(
                # Fix: ``datasets.Split`` has no ``EVAL`` attribute (only
                # TRAIN/TEST/VALIDATION/ALL), so ``datasets.Split.EVAL``
                # raised AttributeError. A custom named split must be
                # constructed explicitly.
                name=datasets.Split("eval"),
                gen_kwargs={"filepath": os.path.join(data_dir, "eval.jsonl")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example)`` pairs from the JSONL manifest.

        Each manifest line is a JSON object; its ``align`` field is
        expected to be a dict with ``words`` (list of per-word records)
        and ``txt`` (aligned transcript) — TODO confirm against the
        published manifest.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                # Skip blank lines so a trailing newline in the manifest
                # does not crash json.loads.
                if not line.strip():
                    continue
                data = json.loads(line)
                # Flatten the nested align structure into the shape
                # declared in _info()'s ``align`` Sequence feature,
                # coercing timestamps/scores to float with 0 defaults.
                words = data.get("align", {}).get("words", [])
                align_words = []
                for w in words:
                    align_words.append({
                        "word": w.get("word", ""),
                        "start": float(w.get("start", 0)),
                        "end": float(w.get("end", 0)),
                        "score": float(w.get("score", 0)),
                    })

                yield idx, {
                    "key": data.get("key", ""),
                    "audio": data.get("audio", ""),
                    "dur": float(data.get("dur", 0)),
                    "txt": data.get("txt", ""),
                    "align": align_words,
                    "align_txt": data.get("align", {}).get("txt", ""),
                }