Update dataset scripts.
- UTS_Text.py +2 -4 (drop unused `re` and `itertools.chain` imports)
- generate_dataset.py +56 -1 (new corpus-generation script)
- statistics.py +5 -2 (blank-line formatting)
- test_dataset.py +3 -3 (add a __main__ guard around dataset loading)
UTS_Text.py
CHANGED
|
@@ -1,6 +1,4 @@
|
|
| 1 |
import os
|
| 2 |
-
import re
|
| 3 |
-
from itertools import chain
|
| 4 |
|
| 5 |
import datasets
|
| 6 |
|
|
@@ -23,6 +21,7 @@ class UTSTextConfig(datasets.BuilderConfig):
|
|
| 23 |
def __init__(self, **kwargs):
    """BuilderConfig for the UTS_Text dataset.

    Args:
        **kwargs: forwarded unchanged to datasets.BuilderConfig.
    """
    # Python 3 zero-argument super() — equivalent to the original
    # super(UTSTextConfig, self) but not tied to the class name.
    super().__init__(**kwargs)
|
| 25 |
|
|
|
|
| 26 |
class UTSText(datasets.GeneratorBasedBuilder):
|
| 27 |
"""UTS Word Tokenize datasets"""
|
| 28 |
VERSION = datasets.Version("1.0.0")
|
|
@@ -47,7 +46,7 @@ class UTSText(datasets.GeneratorBasedBuilder):
|
|
| 47 |
homepage=None,
|
| 48 |
citation=_CITATION
|
| 49 |
)
|
| 50 |
-
|
| 51 |
def _split_generators(self, dl_manager):
|
| 52 |
"""Returns SplitGenerators."""
|
| 53 |
subset_folder = self.config.name
|
|
@@ -83,4 +82,3 @@ class UTSText(datasets.GeneratorBasedBuilder):
|
|
| 83 |
}
|
| 84 |
yield guid, item
|
| 85 |
guid += 1
|
| 86 |
-
|
|
|
|
| 1 |
import os
|
|
|
|
|
|
|
| 2 |
|
| 3 |
import datasets
|
| 4 |
|
|
|
|
| 21 |
def __init__(self, **kwargs):
    """BuilderConfig for the UTS_Text dataset.

    Args:
        **kwargs: forwarded unchanged to datasets.BuilderConfig.
    """
    # Python 3 zero-argument super() — equivalent to the original
    # super(UTSTextConfig, self) but not tied to the class name.
    super().__init__(**kwargs)
|
| 23 |
|
| 24 |
+
|
| 25 |
class UTSText(datasets.GeneratorBasedBuilder):
|
| 26 |
"""UTS Word Tokenize datasets"""
|
| 27 |
VERSION = datasets.Version("1.0.0")
|
|
|
|
| 46 |
homepage=None,
|
| 47 |
citation=_CITATION
|
| 48 |
)
|
| 49 |
+
|
| 50 |
def _split_generators(self, dl_manager):
|
| 51 |
"""Returns SplitGenerators."""
|
| 52 |
subset_folder = self.config.name
|
|
|
|
| 82 |
}
|
| 83 |
yield guid, item
|
| 84 |
guid += 1
|
|
|
generate_dataset.py
CHANGED
|
@@ -1 +1,56 @@
|
|
| 1 |
-
from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from os.path import join, dirname
from underthesea.file_utils import DATASETS_FOLDER
import random

# Fixed seed for reproducibility.
# NOTE(review): `random` is seeded but never used below — presumably a
# shuffle of the candidate lines was intended before truncating; confirm.
random.seed(10)

# Source corpus, read line by line (presumably one sentence per line —
# confirm against the VNESES corpus format).
text_file = join(DATASETS_FOLDER, "VNESES", "VNESEScorpus.txt")
# BUG FIX: read with an explicit UTF-8 encoding instead of the platform
# default (statistics.py already opens these files with encoding="utf-8").
with open(text_file, encoding="utf-8") as f:
    lines = f.read().splitlines()

# Length thresholds — measured in characters of the raw line,
# despite the "TOKENS" naming.
NUM_LONG_TOKENS = 50
NUM_SHORT_TOKENS = 20
NUM_MAX_TOKENS = 200
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def longline_conditions(line):
    """Return True when *line* qualifies as a "long" sentence.

    A long sentence has between NUM_LONG_TOKENS and NUM_MAX_TOKENS
    characters (inclusive), starts with an uppercase letter, and ends
    with a full stop.
    """
    length_ok = NUM_LONG_TOKENS <= len(line) <= NUM_MAX_TOKENS
    # Length is checked first, so the index accesses below are safe.
    return length_ok and line[0].isupper() and line[-1] == "."
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Candidate "long" sentences from the corpus.
long_lines = [line for line in lines if longline_conditions(line)]
# BUG FIX: this count was previously printed with the label "Short lines".
print("Long lines", len(long_lines))
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def shortline_conditions(line):
    """Return True when *line* qualifies as a "short" sentence.

    A short sentence has between NUM_SHORT_TOKENS and NUM_LONG_TOKENS
    characters (inclusive) and starts with an uppercase letter.
    """
    if not (NUM_SHORT_TOKENS <= len(line) <= NUM_LONG_TOKENS):
        return False
    # Length was checked first, so indexing the first character is safe.
    return line[0].isupper()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Candidate "short" sentences from the corpus.
short_lines = [line for line in lines if shortline_conditions(line)]

# Report each subset size once.
# BUG FIX: the short-line count was previously printed twice (once right
# after the comprehension and again in the summary below).
print("Long lines", len(long_lines))
print("Short lines", len(short_lines))
|
| 41 |
+
|
| 42 |
+
pwd = dirname(__file__)
tmp = join(pwd, "data")
corpus_file = join(tmp, "UTS_Text_v1.txt")


def _write_subset(path, source_lines, limit):
    """Write the first *limit* entries of *source_lines* to *path*, one per line."""
    # BUG FIX: write with an explicit UTF-8 encoding instead of the
    # platform default (statistics.py reads these files back as UTF-8).
    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(source_lines[:limit]))


# v1 corpus file, capped at 1000 lines.  Using a helper also avoids the
# original's rebinding of the module-level `lines` variable.
_write_subset(corpus_file, long_lines + short_lines, 1000)

# Training split of the "large" subset, capped at 100000 lines.
# NOTE(review): the data/large directory must already exist — confirm,
# or create it with os.makedirs(..., exist_ok=True) before writing.
_write_subset(join(tmp, "large", "train.txt"), long_lines + short_lines, 100000)
|
statistics.py
CHANGED
|
@@ -1,17 +1,20 @@
|
|
| 1 |
from os.path import dirname, join
|
| 2 |
|
|
|
|
| 3 |
def count_lines(filepath):
    """Return the number of lines in the UTF-8 text file at *filepath*."""
    with open(filepath, encoding="utf-8") as f:
        return len(f.read().splitlines())
|
| 7 |
|
|
|
|
| 8 |
def statistic(subset_folder):
    """Print line counts of the train/validation/test splits in *subset_folder*."""
    for label, filename in (
        ("Train\t\t", "train.txt"),
        ("Validation\t", "validation.txt"),
        ("Test\t\t", "test.txt"),
    ):
        print(label, count_lines(join(subset_folder, filename)))
|
| 12 |
|
|
|
|
| 13 |
pwd = dirname(__file__)
dataset_folder = join(pwd, "data")

# Report split sizes for each subset folder under data/.
print("Subset Small")
statistic(join(dataset_folder, "small"))

print("Subset Base")
statistic(join(dataset_folder, "base"))

print("Subset Large")
statistic(join(dataset_folder, "large"))
|
|
|
|
| 1 |
from os.path import dirname, join
|
| 2 |
|
| 3 |
+
|
| 4 |
def count_lines(filepath):
    """Return the number of lines in the UTF-8 text file at *filepath*."""
    with open(filepath, encoding="utf-8") as f:
        return len(f.read().splitlines())
|
| 8 |
|
| 9 |
+
|
| 10 |
def statistic(subset_folder):
    """Print line counts of the train/validation/test splits in *subset_folder*."""
    for label, filename in (
        ("Train\t\t", "train.txt"),
        ("Validation\t", "validation.txt"),
        ("Test\t\t", "test.txt"),
    ):
        print(label, count_lines(join(subset_folder, filename)))
|
| 14 |
|
| 15 |
+
|
| 16 |
pwd = dirname(__file__)
dataset_folder = join(pwd, "data")

# Report split sizes for each subset folder under data/.
print("Subset Small")
statistic(join(dataset_folder, "small"))

print("Subset Base")
statistic(join(dataset_folder, "base"))

print("Subset Large")
statistic(join(dataset_folder, "large"))
|
test_dataset.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
from datasets import load_dataset
|
| 2 |
|
| 3 |
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
print(dataset["train"][0])
|
|
|
|
| 1 |
from datasets import load_dataset
|
| 2 |
|
| 3 |
|
| 4 |
+
if __name__ == '__main__':
    # Smoke-test the published dataset: fetch the "small" subset and
    # show the first training example.
    dataset = load_dataset("undertheseanlp/UTS_Text", "small")
    first_example = dataset["train"][0]
    print(first_example)
|