# LEMAS-Dataset-train / LEMAS_Dataset.py
# NOTE(review): the lines below were Hugging Face Hub page residue
# ("raw / history blame / 2.66 kB") accidentally pasted into the script;
# kept here as comments so the file remains valid Python.
# Upload note: "Rename dataset.py to LEMAS_Dataset.py" (commit cf58f29, verified).
import json
import os

import datasets
# Human-readable summary surfaced via DatasetInfo.description on the dataset card.
_DESCRIPTION = """
LEMAS Dataset - multilingual speech dataset
"""
# Citation text surfaced via DatasetInfo.citation (placeholder — fill in before release).
_CITATION = """
Your citation here.
"""
class LEMASDataset(datasets.GeneratorBasedBuilder):
    """Loading script for the LEMAS multilingual speech dataset.

    Each example pairs an audio file with its duration, transcript, and a
    word-level forced alignment (word, start, end, score) read from a JSONL
    manifest (``eval.jsonl``).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the schema (features) of one example."""
        features = datasets.Features({
            "key": datasets.Value("string"),
            # sampling_rate=None keeps each file's native sampling rate.
            "audio": datasets.Audio(sampling_rate=None),
            "dur": datasets.Value("float32"),
            "txt": datasets.Value("string"),
            # Word-level alignment stored as a sequence of per-word records.
            "align": datasets.features.Sequence({
                "word": datasets.Value("string"),
                "start": datasets.Value("float32"),
                "end": datasets.Value("float32"),
                "score": datasets.Value("float32"),
            }),
            "align_txt": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://your-dataset-homepage",
            citation=_CITATION,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Return the single custom "eval" split."""
        data_dir = dl_manager.download_and_extract("https://huggingface.co/datasets/your-username/LEMAS-Dataset/resolve/main/")
        return [
            datasets.SplitGenerator(
                # BUG FIX: `datasets.Split` has no EVAL attribute (only
                # TRAIN/TEST/VALIDATION/ALL), so `datasets.Split.EVAL`
                # raised AttributeError. A custom named split must be
                # constructed explicitly.
                name=datasets.Split("eval"),
                gen_kwargs={"filepath": os.path.join(data_dir, "eval.jsonl")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Read the JSONL manifest and yield ``(index, example)`` pairs.

        Args:
            filepath: Path to a UTF-8 JSONL file; one JSON object per line.

        Yields:
            Tuples of ``(line_index, example_dict)`` matching ``_info``'s
            features.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                # Tolerate blank / trailing lines in the manifest instead of
                # crashing in json.loads.
                if not line:
                    continue
                data = json.loads(line)
                # Flatten the nested "align" object ({"words": [...], "txt": ...})
                # into the shape declared in `features`.
                align = data.get("align", {})
                align_words = [
                    {
                        "word": w.get("word", ""),
                        "start": float(w.get("start", 0)),
                        "end": float(w.get("end", 0)),
                        "score": float(w.get("score", 0)),
                    }
                    for w in align.get("words", [])
                ]
                yield idx, {
                    "key": data.get("key", ""),
                    # The Audio feature decodes this path/URL lazily.
                    "audio": data.get("audio", ""),
                    "dur": float(data.get("dur", 0)),
                    "txt": data.get("txt", ""),
                    "align": align_words,
                    "align_txt": align.get("txt", ""),
                }