File size: 1,407 Bytes
9ec538b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
"""Vietnamese sentence tokenization using trained NLTK Punkt model.

Usage:
    from sent_tokenize import sent_tokenize
    sentences = sent_tokenize("Xin chào. Tôi là Claude.")
"""

import json
from os.path import dirname, join

from nltk.tokenize.punkt import PunktParameters, PunktSentenceTokenizer

_tokenizer = None


def _load_tokenizer():
    """Initialize the module-level Punkt tokenizer on first use.

    Reads the trained parameters (abbreviation types, sentence starters,
    collocations) from ``punkt_params_trained.json`` located next to this
    module, builds a ``PunktSentenceTokenizer`` from them, and caches it in
    the module-level ``_tokenizer``. Subsequent calls are no-ops.
    """
    global _tokenizer
    if _tokenizer is not None:
        return

    params_path = join(dirname(__file__), "punkt_params_trained.json")
    with open(params_path, encoding="utf-8") as fh:
        trained = json.load(fh)

    punkt_params = PunktParameters()
    punkt_params.abbrev_types = set(trained.get("abbrev_types", []))
    punkt_params.sent_starters = set(trained.get("sent_starters", []))
    # Each collocation is stored as "word1 word2"; skip malformed
    # single-token entries so tuple pairs stay well-formed.
    split_pairs = (entry.split(" ", 1) for entry in trained.get("collocations", []))
    punkt_params.collocations = {tuple(pair) for pair in split_pairs if len(pair) == 2}

    _tokenizer = PunktSentenceTokenizer(punkt_params)


def sent_tokenize(text: str) -> list[str]:
    """Tokenize Vietnamese text into sentences using trained Punkt model.

    Args:
        text: Input Vietnamese text string.

    Returns:
        List of sentence strings.
    """
    # Guard clause: None, empty, or whitespace-only input yields no
    # sentences without ever touching the (lazily-built) tokenizer.
    if not (text and text.strip()):
        return []

    _load_tokenizer()
    return _tokenizer.tokenize(text)