# sentence-segmentation-1 / sent_tokenize.py
# Commit: "Train Punkt model on Vietnamese data for sentence segmentation" (9ec538b, rain1024)
"""Vietnamese sentence tokenization using trained NLTK Punkt model.
Usage:
from sent_tokenize import sent_tokenize
sentences = sent_tokenize("Xin chào. Tôi là Claude.")
"""
import json
from os.path import dirname, join
from nltk.tokenize.punkt import PunktParameters, PunktSentenceTokenizer
# Module-level cache for the lazily-constructed PunktSentenceTokenizer;
# populated on first call to _load_tokenizer().
_tokenizer = None
def _load_tokenizer():
    """Build the module-level Punkt tokenizer from saved parameters (idempotent).

    Reads ``punkt_params_trained.json`` next to this module and reconstructs
    the trained PunktParameters (abbreviations, sentence starters,
    collocations). Subsequent calls are no-ops once the cache is populated.
    """
    global _tokenizer
    if _tokenizer is None:
        path = join(dirname(__file__), "punkt_params_trained.json")
        with open(path, encoding="utf-8") as fh:
            saved = json.load(fh)

        punkt_params = PunktParameters()
        punkt_params.abbrev_types = set(saved.get("abbrev_types", []))
        punkt_params.sent_starters = set(saved.get("sent_starters", []))
        # Collocations are serialized as "word1 word2"; only well-formed
        # two-part entries are kept, mirroring the trained model's pairs.
        punkt_params.collocations = {
            tuple(pair)
            for pair in (entry.split(" ", 1) for entry in saved.get("collocations", []))
            if len(pair) == 2
        }

        _tokenizer = PunktSentenceTokenizer(punkt_params)
def sent_tokenize(text: str) -> list[str]:
    """Split Vietnamese text into sentences with the trained Punkt model.

    The underlying tokenizer is loaded lazily on first use.

    Args:
        text: Raw Vietnamese text.

    Returns:
        The sentences found in ``text``; an empty list for empty or
        whitespace-only input.
    """
    if text and text.strip():
        _load_tokenizer()
        return _tokenizer.tokenize(text)
    return []