# sentence-segmentation-1 / train_punkt.py
# Author: rain1024
# Train Punkt model on Vietnamese data for sentence segmentation
# Commit: 9ec538b
"""Train NLTK PunktTrainer on Vietnamese text data.
Fetches full documents from 4 HuggingFace datasets and trains an unsupervised
Punkt model to learn abbreviations, sentence starters, and collocations.
Usage:
python train_punkt.py
python train_punkt.py --max-chars 50000000 # limit training data size
"""
import argparse
import json
import re
import sys
import time
from datasets import load_dataset
from nltk.tokenize.punkt import PunktTrainer
def clean_text(text: str) -> str:
    """Strip markdown/wiki noise from *text* and normalize whitespace.

    Substitutions are applied in a fixed order; newlines are kept so that
    paragraph boundaries survive for Punkt training.
    """
    if not text:
        return ""
    # (pattern, replacement) pairs, applied top to bottom.
    substitutions = (
        (re.compile(r"^#{1,6}\s+", re.MULTILINE), ""),  # markdown headers
        (re.compile(r"\*{1,3}(.+?)\*{1,3}"), r"\1"),    # bold / italic markers
        (re.compile(r"\[([^\]]+)\]\([^)]+\)"), r"\1"),  # markdown links -> anchor text
        (re.compile(r"__[A-Z]+__"), ""),                # wiki markup artifacts
        (re.compile(r"[ \t]+"), " "),                   # collapse runs of spaces/tabs
        (re.compile(r"\n{3,}"), "\n\n"),                # cap consecutive blank lines
    )
    for pattern, replacement in substitutions:
        text = pattern.sub(replacement, text)
    return text.strip()
def fetch_wikipedia(max_chars: int) -> str:
    """Stream Vietnamese Wikipedia articles (UVW-2026) up to *max_chars*."""
    print("Fetching Wikipedia articles (UVW-2026)...")
    stream = load_dataset("undertheseanlp/UVW-2026", split="train", streaming=True)
    collected = []
    chars_seen = 0
    n_kept = 0
    for record in stream:
        # Keep only articles with an acceptable quality score.
        if record.get("quality_score", 0) < 3:
            continue
        cleaned = clean_text(record.get("content", ""))
        # Drop tiny fragments; they add noise rather than signal.
        if len(cleaned) < 100:
            continue
        collected.append(cleaned)
        chars_seen += len(cleaned)
        n_kept += 1
        if n_kept % 10000 == 0:
            print(f" ... {n_kept} articles, {chars_seen:,} chars")
        if chars_seen >= max_chars:
            break
    print(f" Wikipedia: {n_kept} articles, {chars_seen:,} chars")
    return "\n\n".join(collected)
def fetch_news(max_chars: int) -> str:
    """Stream Vietnamese news articles (UVN-1) up to *max_chars*."""
    print("Fetching news articles (UVN-1)...")
    stream = load_dataset("undertheseanlp/UVN-1", split="train", streaming=True)
    kept = []
    size = 0
    seen = 0
    for item in stream:
        body = clean_text(item.get("content", ""))
        # Skip very short articles.
        if len(body) < 50:
            continue
        kept.append(body)
        size += len(body)
        seen += 1
        if seen % 1000 == 0:
            print(f" ... {seen} articles, {size:,} chars")
        if size >= max_chars:
            break
    print(f" News: {seen} articles, {size:,} chars")
    return "\n\n".join(kept)
def fetch_books(max_chars: int) -> str:
    """Stream Vietnamese books (UVB-v0.1) up to *max_chars*."""
    print("Fetching books (UVB-v0.1)...")
    stream = load_dataset("undertheseanlp/UVB-v0.1", split="train", streaming=True)
    chunks = []
    running_total = 0
    n_books = 0
    for entry in stream:
        body = clean_text(entry.get("content", ""))
        # Ignore entries too short to be useful training material.
        if len(body) < 100:
            continue
        chunks.append(body)
        running_total += len(body)
        n_books += 1
        if n_books % 100 == 0:
            print(f" ... {n_books} books, {running_total:,} chars")
        if running_total >= max_chars:
            break
    print(f" Books: {n_books} books, {running_total:,} chars")
    return "\n\n".join(chunks)
def fetch_legal(max_chars: int) -> str:
    """Stream Vietnamese legal documents (UTS_VLC) up to *max_chars*."""
    print("Fetching legal documents (UTS_VLC)...")
    # NOTE: this dataset exposes its snapshot as the "2026" split.
    stream = load_dataset("undertheseanlp/UTS_VLC", split="2026", streaming=True)
    docs = []
    char_budget_used = 0
    n_docs = 0
    for entry in stream:
        body = clean_text(entry.get("content", ""))
        # Skip fragments too short to be useful.
        if len(body) < 100:
            continue
        docs.append(body)
        char_budget_used += len(body)
        n_docs += 1
        if n_docs % 100 == 0:
            print(f" ... {n_docs} documents, {char_budget_used:,} chars")
        if char_budget_used >= max_chars:
            break
    print(f" Legal: {n_docs} documents, {char_budget_used:,} chars")
    return "\n\n".join(docs)
def train_punkt(text: str) -> dict:
    """Train an unsupervised PunktTrainer on *text*.

    Args:
        text: The full concatenated training corpus.

    Returns:
        A JSON-serializable dict with three sorted lists:
        ``abbrev_types``, ``sent_starters``, and ``collocations``
        (each collocation rendered as ``"first second"``).
    """
    print(f"\nTraining Punkt on {len(text):,} characters...")
    start = time.time()
    trainer = PunktTrainer()
    # Learn collocations aggressively: consider all token pairs, including
    # pairs involving abbreviations, not just those at sentence boundaries.
    trainer.INCLUDE_ALL_COLLOCS = True
    trainer.INCLUDE_ABBREV_COLLOCS = True
    trainer.train(text, verbose=True, finalize=True)
    params = trainer.get_params()
    elapsed = time.time() - start
    print(f"Training completed in {elapsed:.1f}s")
    # Extract learned parameters into plain sorted lists.
    abbrev_types = sorted(params.abbrev_types)
    sent_starters = sorted(params.sent_starters)
    collocations = sorted(
        [f"{a} {b}" for a, b in params.collocations], key=str.lower
    )
    # Fixed: was an f-string with no placeholders (ruff F541).
    print("\nLearned parameters:")
    print(f" Abbreviations: {len(abbrev_types)}")
    print(f" Sentence starters: {len(sent_starters)}")
    print(f" Collocations: {len(collocations)}")
    if abbrev_types:
        print(f" Sample abbrevs: {abbrev_types[:20]}")
    if sent_starters:
        print(f" Sample starters: {sent_starters[:20]}")
    return {
        "abbrev_types": abbrev_types,
        "sent_starters": sent_starters,
        "collocations": collocations,
    }
def main():
    """CLI entry point: fetch corpora, train Punkt, and dump parameters as JSON."""
    parser = argparse.ArgumentParser(description="Train Punkt model on Vietnamese data")
    parser.add_argument(
        "--max-chars",
        type=int,
        default=50_000_000,
        help="Max characters per data source (default: 50M)",
    )
    parser.add_argument(
        "--output",
        default="punkt_params_trained.json",
        help="Output file for trained parameters",
    )
    args = parser.parse_args()

    # Collect text from every source; a failing source is skipped (best-effort).
    corpora = []
    for fetch in (fetch_legal, fetch_news, fetch_books, fetch_wikipedia):
        try:
            chunk = fetch(args.max_chars)
        except Exception as exc:
            print(f" Warning: {exc}", file=sys.stderr)
            continue
        if chunk:
            corpora.append(chunk)

    if not corpora:
        print("ERROR: No training data fetched!", file=sys.stderr)
        sys.exit(1)

    combined = "\n\n".join(corpora)
    print(f"\nTotal training data: {len(combined):,} characters")

    # Train the model on the combined corpus.
    params = train_punkt(combined)

    # Persist the learned parameters as UTF-8 JSON.
    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(params, f, ensure_ascii=False, indent=2)
    print(f"\nSaved trained parameters to {args.output}")
if __name__ == "__main__":
main()