UTS_Dictionary

Languages: Vietnamese
File size: 1,584 Bytes
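The dataset is distributed with the loading script below, which downloads data.txt from the repository and yields one text example per non-empty line:
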
import datasets

_DESCRIPTION = """\
UTS_Dictionary: a Vietnamese wordlist from the undertheseanlp project.
"""

_CITATION = """\
"""

_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_Dictionary/resolve/main/data/"
DATA_FILE = "data.txt"

class UTSDictionaryConfig(datasets.BuilderConfig):
    """BuilderConfig for UTS_Dictionary."""

    def __init__(self, **kwargs):
        super(UTSDictionaryConfig, self).__init__(**kwargs)

class UTSText(datasets.GeneratorBasedBuilder):
    """UTS_Dictionary: a plain-text Vietnamese wordlist dataset."""

    BUILDER_CONFIG_CLASS = UTSDictionaryConfig
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/undertheseanlp/UTS_Dictionary",
            citation=_CITATION
        )
    
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the raw wordlist; plain string concatenation is used because
        # os.path.join is not meant for URLs.
        data_file = dl_manager.download(_BASE_URL + DATA_FILE)

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_file}
            )
        ]
        return splits

    def _generate_examples(self, filepath):
        """Yields one example per non-empty line of the wordlist."""
        guid = 0
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                text = line.strip()
                if text != "":
                    yield guid, {"text": text}
                    guid += 1
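
For reference, a minimal loading sketch (not part of the original script); the repository id is taken from _BASE_URL above:

from datasets import load_dataset

# Repository id inferred from _BASE_URL; adjust if the script lives elsewhere.
# Newer versions of the datasets library may also require
# trust_remote_code=True to run a loading script from the Hub.
dataset = load_dataset("undertheseanlp/UTS_Dictionary", split="train")
print(dataset[0]["text"])  # first dictionary entry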