update

Browse files:
- eval.py (+3, −6)
- generate_dataset.py (+0, −2)
eval.py (CHANGED)

@@ -1,8 +1,5 @@
 from datasets import load_dataset
-from underthesea import word_tokenize
-from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
-from os.path import dirname, join
 
-
-
-sentences = dataset["train"]["text"]
+if __name__ == '__main__':
+    dataset = load_dataset("undertheseanlp/UTS_Text_v1")
+    sentences = dataset["train"]["text"]
generate_dataset.py (CHANGED)

@@ -4,8 +4,6 @@ from os.path import dirname, join
 
 from datasets import load_dataset
 from underthesea import word_tokenize
-from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
-from underthesea.utils import logger
 
 
 def create_wtk_dataset(text_dataset, output_folder):