from typing import Tuple
from typing import Generator
import datasets
from datasets import Value
# Human-readable dataset description surfaced via DatasetInfo (placeholder text).
_DESCRIPTION = """\
...
"""
# Dataset homepage URL recorded in DatasetInfo (placeholder text).
_URL = "..."
class LaboASRConfig(datasets.BuilderConfig):
    """BuilderConfig for the LaboASR dataset builder.

    Pins the dataset version to 0.0.1 and carries a single extra flag.
    """

    def __init__(
        self,
        do_stuff: bool = False,
        **kwargs,
    ):
        """Create a LaboASR config.

        Args:
            do_stuff: Extra behavior flag stored on the config.
                Its consumer is not visible in this file — TODO confirm usage.
            **kwargs: Forwarded to ``datasets.BuilderConfig`` (e.g. ``name``,
                ``description``).
        """
        # Zero-argument super() is the Python 3 idiom; behavior is identical
        # to the old explicit form. Version is fixed at 0.0.1.
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.do_stuff = do_stuff
class LaboASR(datasets.GeneratorBasedBuilder):
    """GeneratorBasedBuilder emitting placeholder ASR transcript examples.

    Each example has a ``transcript`` string and an ``id`` string; one example
    is generated per entry in the split's ``paths`` list.
    """

    CORTI_DATASET_NAME = "labo"
    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "default"
    BUILDER_CONFIG_CLASS = LaboASRConfig
    BUILDER_CONFIGS = [
        LaboASRConfig(
            name="default",
            description="Default config.",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing this dataset's features."""
        features = {
            "transcript": Value("string"),
            "id": Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_URL,
        )

    def _split_generators(self, dl_manager):
        """Define the splits.

        ``dl_manager`` is unused — nothing is downloaded; the paths are
        hard-coded placeholders. (Removed a stale commented-out
        TRAIN/VALIDATION split layout that duplicated this structure.)
        """
        return [
            datasets.SplitGenerator(
                name="haps",
                gen_kwargs={
                    "paths": ["1", "2", "3"],
                },
            )
        ]

    def _generate_examples(
        self,
        paths: list[str],
    ) -> Generator[Tuple[int, dict], None, None]:
        """Yield ``(key, example)`` pairs, one placeholder example per path.

        Args:
            paths: Identifiers for the examples of this split.

        Yields:
            The enumeration index as the key, and a dict with a fixed
            ``transcript`` and an ``id`` of ``"0000" + path``.
        """
        for i, p in enumerate(paths):
            example = {
                "transcript": "Hello, world!",
                "id": "0000" + p,
            }
            yield i, example