# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
"""
Tokenizers at different levels of linguistic analysis.
"""
# Public API of this module, kept in alphabetical order.
__all__ = [
    "THAI2FIT_TOKENIZER",
    "Tokenizer",
    "Trie",
    "paragraph_tokenize",
    "sent_tokenize",
    "subword_tokenize",
    "syllable_tokenize",
    "word_detokenize",
    "word_tokenize",
]
from pythainlp.corpus import thai_syllables, thai_words
from pythainlp.util.trie import Trie

# Default engine names used when callers do not specify one explicitly.
DEFAULT_WORD_TOKENIZE_ENGINE: str = "newmm"
DEFAULT_SENT_TOKENIZE_ENGINE: str = "crfcut"
DEFAULT_SUBWORD_TOKENIZE_ENGINE: str = "tcc"
DEFAULT_SYLLABLE_TOKENIZE_ENGINE: str = "han_solo"

# Prebuilt tries over the bundled Thai word/syllable lists; built once at
# import time so dictionary-based engines can share them.
DEFAULT_WORD_DICT_TRIE: Trie = Trie(thai_words())
DEFAULT_SYLLABLE_DICT_TRIE: Trie = Trie(thai_syllables())
# Backward-compatible alias for the word trie.
DEFAULT_DICT_TRIE: Trie = DEFAULT_WORD_DICT_TRIE
# NOTE(review): these imports are intentionally placed AFTER the DEFAULT_*
# constants above — pythainlp.tokenize.core presumably imports those names
# from this module, so reordering imports to the top of the file would
# likely trigger a circular-import failure. Confirm before "cleaning up".
from pythainlp.tokenize.core import (
    Tokenizer,
    paragraph_tokenize,
    sent_tokenize,
    subword_tokenize,
    syllable_tokenize,
    word_detokenize,
    word_tokenize,
)

from pythainlp.corpus import get_corpus as _get_corpus

# Ready-made maximal-matching tokenizer using the thai2fit word list
# (built at import time from the bundled corpus file).
THAI2FIT_TOKENIZER: Tokenizer = Tokenizer(
    custom_dict=_get_corpus("words_th_thai2fit_201810.txt"), engine="mm"
)