update
- README.md +2 -0
- UTS_WTK.py +25 -4
- data/base/test.txt +0 -0
- data/base/train.txt +0 -0
- data/base/validation.txt +0 -0
- data/{test.txt → small/test.txt} +0 -0
- data/small/train.txt +0 -0
- data/{dev.txt → small/validation.txt} +0 -0
- data/train.txt +0 -0
- generate_dataset.py +33 -36
README.md
CHANGED
@@ -2,3 +2,5 @@
 license: apache-2.0
 ---
 
+
+

UTS_WTK.py
CHANGED
@@ -10,11 +10,31 @@ _CITATION = """\
 
 _BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_WTK/raw/main/data/"
 TRAIN_FILE = "train.txt"
-DEV_FILE = "dev.txt"
+DEV_FILE = "validation.txt"
 TEST_FILE = "test.txt"
 
 
+class UTSWTKConfig(datasets.BuilderConfig):
+    """BuilderConfig"""
+
+    def __init__(self, **kwargs):
+        super(UTSWTKConfig, self).__init__(**kwargs)
+
+
 class UTSWTK(datasets.GeneratorBasedBuilder):
+    """UTS Word Tokenize datasets"""
+    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [
+        # UTSWTKConfig(
+        #     name="small", version=VERSION, description="UTS_WTK Small"),
+        UTSWTKConfig(
+            name="base", version=VERSION, description="UTS_WTK Base"),
+        # UTSWTKConfig(
+        #     name="large", version=VERSION, description="UTS_WTK Large"),
+    ]
+
+    BUILDER_CONFIG_CLASS = UTSWTKConfig
+
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,

@@ -34,9 +54,10 @@ class UTSWTK(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-
-
-
+        subset_folder = self.config.name
+        train_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, TRAIN_FILE))
+        dev_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, DEV_FILE))
+        test_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, TEST_FILE))
 
         splits = [
             datasets.SplitGenerator(
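
With the new UTSWTKConfig, each configuration name maps to its own subset folder, so _split_generators now downloads data/<config_name>/{train,validation,test}.txt instead of flat files under data/. A minimal usage sketch (only the "base" config is enabled in BUILDER_CONFIGS in this commit; "small" and "large" remain commented out):

from datasets import load_dataset

# Resolves to data/base/train.txt, data/base/validation.txt and data/base/test.txt
dataset = load_dataset("undertheseanlp/UTS_WTK", "base")
print(dataset["train"][0])
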
data/base/test.txt
ADDED
The diff for this file is too large to render. See raw diff.

data/base/train.txt
ADDED
The diff for this file is too large to render. See raw diff.

data/base/validation.txt
ADDED
The diff for this file is too large to render. See raw diff.

data/{test.txt → small/test.txt}
RENAMED
The diff for this file is too large to render. See raw diff.

data/small/train.txt
ADDED
The diff for this file is too large to render. See raw diff.

data/{dev.txt → small/validation.txt}
RENAMED
The diff for this file is too large to render. See raw diff.

data/train.txt
DELETED
The diff for this file is too large to render. See raw diff.
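
Taken together, the additions, renames, and the deletion above move the corpus from flat files under data/ to one folder per subset:

data/
├── base/
│   ├── train.txt
│   ├── validation.txt
│   └── test.txt
└── small/
    ├── train.txt
    ├── validation.txt
    └── test.txt
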
generate_dataset.py
CHANGED
@@ -1,43 +1,40 @@
+import os
+import shutil
+from os.path import dirname, join
+
 from datasets import load_dataset
 from underthesea import word_tokenize
 from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
-from os.path import dirname, join
 from underthesea.utils import logger
 
-dataset = load_dataset("undertheseanlp/UTS_Text_v1")
 
-
+def create_wtk_dataset(text_dataset, output_folder):
+    if os.path.exists(output_folder):
+        shutil.rmtree(output_folder)
+    os.makedirs(output_folder)
+    for split in ["train", "validation", "test"]:
+        sentences = text_dataset[split]["text"]
+        with open(join(output_folder, f"{split}.txt"), "w") as f:
+            for sentence in sentences:
+                items = word_tokenize(sentence)
+                for item in items:
+                    tokens = item.split()
+                    for i, token in enumerate(tokens):
+                        if i > 0:
+                            f.write(f"{token}\tI-W\n")
+                        else:
+                            f.write(f"{token}\tB-W\n")
+                f.write("\n")
+
+
 pwd = dirname(__file__)
-
-
-
-
-
-
-
-
-
-
-for j, s in enumerate(sentences):
-    if j == 8000:
-        f.close()
-        f = open(dev_file, "a")
-    if j == 9000:
-        f.close()
-        f = open(test_file, "a")
-    words = word_tokenize(s)
-    for word in words:
-        tokens = tokenize(word)
-        for i, token in enumerate(tokens):
-            if i == 0:
-                tag = "B-W"
-            else:
-                tag = "I-W"
-            content += token + "\t" + tag + "\n"
-    content += "\n"
-    if j % 1000 == 999:
-        f.write(content)
-        content = ""
-        logger.info(j)
-
-f.close()
+data_folder = join(pwd, "data")
+
+text_dataset = load_dataset("undertheseanlp/UTS_Text", "small")
+create_wtk_dataset(text_dataset, join(data_folder, "small"))
+
+text_dataset = load_dataset("undertheseanlp/UTS_Text", "base")
+create_wtk_dataset(text_dataset, join(data_folder, "base"))
+
+text_dataset = load_dataset("undertheseanlp/UTS_Text", "large")
+create_wtk_dataset(text_dataset, join(data_folder, "large"))
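
Each generated file is a two-column, tab-separated token/tag listing: B-W marks the first syllable of a segmented word, I-W marks the following syllables, and a blank line separates sentences. As an illustration, if word_tokenize were to segment "Chàng trai 9X" into ["Chàng trai", "9X"] (the actual segmentation depends on underthesea's tokenizer), the output file would contain:

Chàng	B-W
trai	I-W
9X	B-W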