# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
"""
Command line for PyThaiNLP's tokenizers.
"""
import argparse
from pythainlp import cli
from pythainlp.tokenize import (
DEFAULT_SENT_TOKENIZE_ENGINE,
DEFAULT_SUBWORD_TOKENIZE_ENGINE,
DEFAULT_WORD_TOKENIZE_ENGINE,
sent_tokenize,
subword_tokenize,
word_tokenize,
)
from pythainlp.tools import safe_print
DEFAULT_SENT_TOKEN_SEPARATOR = "@@"
DEFAULT_SUBWORD_TOKEN_SEPARATOR = "/"
DEFAULT_SYLLABLE_TOKEN_SEPARATOR = "~"
DEFAULT_WORD_TOKEN_SEPARATOR = "|"
class SubAppBase:
    """Shared argument parsing and execution for tokenization sub-commands.

    Subclasses must set ``self.separator``, ``self.algorithm``,
    ``self.keep_whitespace``, and ``self.run`` (the tokenizer callable)
    *before* calling ``super().__init__()``.

    :param name: sub-command name, used only in the usage string
    :param argv: command-line arguments for this sub-command
    """

    def __init__(self, name, argv):
        parser = argparse.ArgumentParser(**cli.make_usage("tokenize " + name))
        parser.add_argument(
            "text",
            type=str,
            nargs="?",
            help="input text",
        )
        parser.add_argument(
            "-s",
            "--sep",
            dest="separator",
            type=str,
            help=f"default: {self.separator}",
            default=self.separator,
        )
        parser.add_argument(
            "-a",
            "--algo",
            dest="algorithm",
            type=str,
            help=f"default: {self.algorithm}",
            default=self.algorithm,
        )
        parser.add_argument(
            "-w",
            "--keep-whitespace",
            dest="keep_whitespace",
            action="store_true",
        )
        parser.add_argument(
            "-nw",
            "--no-whitespace",
            dest="keep_whitespace",
            action="store_false",
        )
        # Fix: honor the subclass-provided default instead of a hard-coded
        # True, so a subclass that sets keep_whitespace = False is respected.
        # (All current subclasses set True, so behavior is unchanged.)
        parser.set_defaults(keep_whitespace=self.keep_whitespace)

        args = parser.parse_args(argv)
        self.args = args

        # Exit with the usage message when no input text was given.
        cli.exit_if_empty(args.text, parser)

        result = self.run(
            args.text,
            engine=args.algorithm,
            keep_whitespace=args.keep_whitespace,
        )
        # A trailing separator marks the end of the last token.
        safe_print(args.separator.join(result) + args.separator)
class WordTokenizationApp(SubAppBase):
    """Sub-command for word tokenization (``thainlp tokenize word``)."""

    def __init__(self, *args, **kwargs):
        # Configure the defaults SubAppBase expects, then hand over to it
        # to parse the arguments and run the tokenizer.
        self.run = word_tokenize
        self.separator = DEFAULT_WORD_TOKEN_SEPARATOR
        self.algorithm = DEFAULT_WORD_TOKENIZE_ENGINE
        self.keep_whitespace = True
        super().__init__(*args, **kwargs)
class SentenceTokenizationApp(SubAppBase):
    """Sub-command for sentence tokenization (``thainlp tokenize sent``)."""

    def __init__(self, *args, **kwargs):
        # Configure the defaults SubAppBase expects, then hand over to it
        # to parse the arguments and run the tokenizer.
        self.run = sent_tokenize
        self.separator = DEFAULT_SENT_TOKEN_SEPARATOR
        self.algorithm = DEFAULT_SENT_TOKENIZE_ENGINE
        self.keep_whitespace = True
        super().__init__(*args, **kwargs)
class SubwordTokenizationApp(SubAppBase):
    """Sub-command for subword tokenization (``thainlp tokenize subword``)."""

    def __init__(self, *args, **kwargs):
        # Configure the defaults SubAppBase expects, then hand over to it
        # to parse the arguments and run the tokenizer.
        self.run = subword_tokenize
        self.separator = DEFAULT_SUBWORD_TOKEN_SEPARATOR
        self.algorithm = DEFAULT_SUBWORD_TOKENIZE_ENGINE
        self.keep_whitespace = True
        super().__init__(*args, **kwargs)
class App:
    """Entry point for the ``thainlp tokenize`` command.

    Parses the token type from the command line and dispatches the
    remaining arguments to the matching sub-command app.

    :param argv: full process argv; ``argv[2]`` is the token type and
        everything from ``argv[3]`` on belongs to the sub-command
    """

    def __init__(self, argv):
        parser = argparse.ArgumentParser(
            prog="tokenize",
            description="Break a text into small units (tokens).",
            usage=(
                # Fix: removed the "syllable" entry (not handled by the
                # dispatch below) and corrected the separator default note
                # (defaults are per token type, never a space).
                'thainlp tokenize <token_type> [options] "<text>"\n\n'
                "token_type:\n\n"
                "subword subword (may not be a linguistic unit)\n"
                "word word\n"
                "sent sentence\n\n"
                "options:\n\n"
                "--sep or -s <separator> specify custom separator\n"
                " (default depends on token type)\n"
                "--algo or -a <algorithm> tokenization algorithm\n"
                " (see API doc for more info)\n"
                "--keep-whitespace or -w keep whitespaces in output\n"
                " (default)\n\n"
                "<separator> and <text> should be inside double quotes.\n\n"
                "Example:\n\n"
                'thainlp tokenize word -s "|" "ใต้แสงนีออนเปลี่ยวเหงา"\n\n'
                "--"
            ),
        )
        parser.add_argument(
            "token_type",
            type=str,
            help="[subword|word|sent]",
        )

        # argv[0:2] are the program name and the "tokenize" sub-command;
        # argv[2] is the token type, the rest is passed to the sub-app.
        args = parser.parse_args(argv[2:3])
        cli.exit_if_empty(args.token_type, parser)

        token_type = args.token_type.lower()
        argv = argv[3:]

        # Prefix matching lets users abbreviate the token type
        # (e.g. "w" for word, "se" for sent).
        if token_type.startswith("w"):
            WordTokenizationApp("word", argv)
        elif token_type.startswith("su"):
            SubwordTokenizationApp("subword", argv)
        elif token_type.startswith("se"):
            SentenceTokenizationApp("sent", argv)
        else:
            safe_print(f"Token type not available: {token_type}")
|