| | """Cleaned Indonesian split of the KoPI corpus.""" |
| | import json |
| | import glob |
| | import gzip |
| | import textwrap |
| | import datasets |
| | import zstandard as zstd |
| | logger = datasets.logging.get_logger(__name__) |
| |
|

_CITATION = """
"""

_DESCRIPTION = """\
"""

_HOMEPAGE = "https://huggingface.co/datasets/duckaiml/Polylingual_Id"

_LICENSE = "CC0"

# Note: _BASE_URL is never referenced below; the builder uses the per-subset
# URL templates in _CONFIGS instead.
_BASE_URL = {
    "train": "https://huggingface.co/datasets/duckaiml/Polylingual_Id/resolve/main/{folder}/part-{index:012d}.json.zst",
}

# Each subset is stored on the Hub as `part` zstd-compressed JSON-lines
# shards, numbered from 1 (see _split_generators).
_CONFIGS = {
    "hplt": {"part": 10, "url": "https://huggingface.co/datasets/duckaiml/Polylingual_Id/resolve/main/hplt/part_{index:d}.jsonl.zst"},
    "mc4_und": {"part": 24, "url": "https://huggingface.co/datasets/duckaiml/Polylingual_Id/resolve/main/mc4_und/part_{index:d}.jsonl.zst"},
    "indonesia_crawl": {"part": 10, "url": "https://huggingface.co/datasets/duckaiml/Polylingual_Id/resolve/main/indonesia_crawl/part_{index:d}.jsonl.zst"},
}
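
# For example, _CONFIGS["hplt"]["url"].format(index=1) expands to:
#   https://huggingface.co/datasets/duckaiml/Polylingual_Id/resolve/main/hplt/part_1.jsonl.zst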


class Polylingual_IdConfig(datasets.BuilderConfig):
    """BuilderConfig for the clean Polylingual_Id corpus."""

    def __init__(self, **kwargs):
        """BuilderConfig for the clean Polylingual_Id corpus.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class Polylingual_Id(datasets.GeneratorBasedBuilder):
    """Polylingual_Id corpus."""

    BUILDER_CONFIGS = [
        Polylingual_IdConfig(
            name="hplt",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                hplt
                """
            ),
        ),
        Polylingual_IdConfig(
            name="mc4_und",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                mc4_und
                """
            ),
        ),
        Polylingual_IdConfig(
            name="indonesia_crawl",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                indonesia_crawl
                """
            ),
        ),
        Polylingual_IdConfig(
            name="full",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                all subsets combined
                """
            ),
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "full":
            # Combine the shard lists of every subset.
            train = []
            for d in ["hplt", "mc4_und", "indonesia_crawl"]:
                urls = [_CONFIGS[d]["url"].format(index=k + 1) for k in range(_CONFIGS[d]["part"])]
                train.extend(urls)
        else:
            train = [_CONFIGS[self.config.name]["url"].format(index=k + 1) for k in range(_CONFIGS[self.config.name]["part"])]
        train_downloaded_files = dl_manager.download(train)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
        ]
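
    # Assumed on-disk record shapes, inferred from the branching in
    # _generate_examples below:
    #   {"text": "...", "url": "https://..."}
    #   {"text": "...", "metadata": {"url": "https://..."}}
    #   {"text": "..."}   (no URL; yielded with url=None)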
    def _generate_examples(self, filepaths):
        """Yields examples in raw (text) form, iterating over all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info(f"Generating examples from {filepath}")
            # Stream-decompress the zstd shard and parse it line by line.
            with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if not line:
                        continue
                    example = json.loads(line)
                    if example.get("url") is not None:
                        # URL stored at the top level of the record.
                        yield id_, {"text": example["text"], "url": example["url"]}
                    elif example.get("metadata") is not None:
                        # URL nested under a metadata object.
                        yield id_, {"text": example["text"], "url": example["metadata"]["url"]}
                    else:
                        # No URL recorded for this document.
                        yield id_, {"text": example["text"], "url": None}
                    id_ += 1
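

# Minimal usage sketch (assumes this script ships with the data in the
# duckaiml/Polylingual_Id repo, so `load_dataset` resolves it by repo id;
# the config names are the ones declared in BUILDER_CONFIGS above):
#
#   from datasets import load_dataset
#   ds = load_dataset("duckaiml/Polylingual_Id", "hplt", split="train")
#   print(next(iter(ds)))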