# UDD-1 / src/fetch_ws_sentences.py
# HuggingFace commit bbc68b5 (rain1024): "Restructure technical report to
# ACL format, add Phase 0 gold eval methodology"
# /// script
# requires-python = ">=3.9"
# dependencies = [
# "datasets>=2.0.0",
# "underthesea>=6.8.0",
# ]
# ///
"""
Fetch sentences for word segmentation dataset (100K total).
Fetches 20,000 sentences per domain from 4 HuggingFace datasets:
- Legal: undertheseanlp/UTS_VLC → ws_sentences_vlc.txt
- News: undertheseanlp/UVN-1 → ws_sentences_uvn.txt
- Wikipedia: undertheseanlp/UVW-2026 → ws_sentences_uvw.txt
- Fiction: undertheseanlp/UVB-v0.1 → ws_sentences_uvb_f.txt
- Non-fiction: undertheseanlp/UVB-v0.1 → ws_sentences_uvb_n.txt
Output format: idx\tsentence (one sentence per line)
"""
import re
from os.path import dirname, join
from datasets import load_dataset
from underthesea import lang_detect, sent_tokenize, text_normalize
TARGET_PER_DOMAIN = 20000
# Vietnamese toned vowels — each syllable has at most one tone mark.
# A whitespace-delimited token with 2+ toned vowels is glued text.
TONED_VOWELS = set(
'áàảãạắằẳẵặấầẩẫậéèẻẽẹếềểễệíìỉĩịóòỏõọốồổỗộớờởỡợúùủũụứừửữựýỳỷỹỵ'
'ÁÀẢÃẠẮẰẲẴẶẤẦẨẪẬÉÈẺẼẸẾỀỂỄỆÍÌỈĨỊÓÒỎÕỌỐỒỔỖỘỚỜỞỠỢÚÙỦŨỤỨỪỬỮỰÝỲỶỸỴ'
)
# All Vietnamese diacritical characters (toned vowels + base vowels ă, â, ê, ô, ơ, ư, đ)
VIET_DIACRITICS = TONED_VOWELS | set('ăâêôơưđĂÂÊÔƠƯĐ')
# Characters unique to Vietnamese (not in French/Portuguese/other Latin scripts)
# ă/ơ/ư and their toned variants distinguish Vietnamese from French (which shares â, ê, ô, é, è, à)
VIET_ONLY_CHARS = set('ăắằẳẵặơớờởỡợưứừửữựđ'
'ĂẮẰẲẴẶƠỚỜỞỠỢƯỨỪỬỮỰĐ')
# ============================================================================
# Shared text cleaning
# ============================================================================
def clean_text(text):
"""Remove markdown formatting and clean text."""
text = text_normalize(text)
text = re.sub(r'^#+\s+', '', text, flags=re.MULTILINE)
text = re.sub(r'\*+', '', text)
text = re.sub(r'^-+$', '', text, flags=re.MULTILINE)
text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
text = re.sub(r'\n{2,}', '\n', text)
lines = [line.strip() for line in text.split('\n')]
text = '\n'.join(lines)
return text
def safe_sent_tokenize(text):
"""Sentence tokenize with fix for numbers split across sentence boundaries.
underthesea's sent_tokenize splits "2.000" into ["...hơn 2.", "000 học sinh..."].
This merges consecutive sentences where the split occurred inside a number.
"""
raw_sents = sent_tokenize(text)
if not raw_sents:
return raw_sents
merged = [raw_sents[0]]
for sent in raw_sents[1:]:
prev = merged[-1]
# Previous ends with digit(s) + period, current starts with digit(s)
if re.search(r'\d\s*\.\s*$', prev) and re.match(r'\d', sent):
merged[-1] = prev + sent
else:
merged.append(sent)
return merged
def sentence_score(sent):
"""Score a sentence's quality on a (0, 1) scale.
Combines 6 sub-scores:
1. Length score — Gaussian around ideal range [60, 200] chars
2. Word count score — Gaussian around ideal range [8, 35] words
3. Vietnamese density — ratio of Vietnamese diacritical chars to total letters
4. Structure score — proper start (uppercase/digit) + proper end (punctuation)
5. Cleanliness score — absence of markup, unbalanced brackets, glued text
6. Completeness score — balanced quotes, no trailing fragments
Returns a float in (0, 1). Higher is better.
"""
import math
sent = sent.strip()
if not sent:
return 0.0
# --- 1. Length score (0-1): Gaussian penalty outside [60, 200] ---
char_len = len(sent)
if 60 <= char_len <= 200:
len_score = 1.0
elif char_len < 60:
len_score = math.exp(-0.5 * ((char_len - 60) / 30) ** 2)
else:
len_score = math.exp(-0.5 * ((char_len - 200) / 60) ** 2)
# --- 2. Word count score (0-1): Gaussian penalty outside [8, 35] ---
words = sent.split()
wc = len(words)
if 8 <= wc <= 35:
wc_score = 1.0
elif wc < 8:
wc_score = math.exp(-0.5 * ((wc - 8) / 3) ** 2)
else:
wc_score = math.exp(-0.5 * ((wc - 35) / 10) ** 2)
# --- 3. Vietnamese density (0-1): diacritical chars / total letters ---
total_letters = sum(1 for c in sent if c.isalpha())
viet_chars = sum(1 for c in sent if c in VIET_DIACRITICS)
viet_score = min(viet_chars / max(total_letters, 1) * 5, 1.0) # 20%+ diacritics → 1.0
# --- 4. Structure score (0-1): start + end quality ---
start_ok = 0.5 if (sent[0].isupper() or sent[0].isdigit()) else 0.0
end_ok = 0.5 if sent[-1] in '.!?…"»"\'):' else 0.0
struct_score = start_ok + end_ok
# --- 5. Cleanliness score (0-1): penalty for noise signals ---
clean_score = 1.0
# Markup characters
if re.search(r'[{}<>|]', sent):
clean_score -= 0.3
if re.search(r'\w+=\w+', sent):
clean_score -= 0.2
# Unbalanced brackets
for o, c in [('(', ')'), ('[', ']')]:
if sent.count(o) != sent.count(c):
clean_score -= 0.2
break
# Glued text
for token in words:
tone_count = sum(1 for ch in token if ch in TONED_VOWELS)
if tone_count >= 2:
clean_score -= 0.3
break
if tone_count >= 1 and re.search(r'\d[a-zA-ZĐđÀ-ỹ]', token):
clean_score -= 0.3
break
# File extensions
if re.search(r'\.(jpg|jpeg|png|gif|svg|webp)\b', sent, re.IGNORECASE):
clean_score -= 0.2
# Excessive uppercase
if sum(1 for c in sent if c.isupper()) > len(sent) * 0.5:
clean_score -= 0.2
clean_score = max(clean_score, 0.0)
# --- 6. Completeness score (0-1): balanced quotes, no fragments ---
comp_score = 1.0
# Unbalanced quotes
double_quotes = sent.count('"') + sent.count('\u201c') + sent.count('\u201d')
if double_quotes % 2 != 0:
comp_score -= 0.2
# Excessive digit ratio
digit_ratio = sum(1 for c in sent if c.isdigit()) / max(len(sent), 1)
if digit_ratio > 0.3:
comp_score -= 0.3
elif digit_ratio > 0.2:
comp_score -= 0.1
comp_score = max(comp_score, 0.0)
# --- Weighted combination ---
# Vietnamese density is a multiplier (gate): non-Vietnamese → near 0
base_score = (
0.20 * len_score
+ 0.15 * wc_score
+ 0.20 * struct_score
+ 0.30 * clean_score
+ 0.15 * comp_score
)
score = base_score * viet_score
return round(max(0.01, min(score, 0.99)), 4)
def base_valid(sent):
"""Shared base validation: length, word count, structure, language, punctuation, markup."""
sent = sent.strip()
if not sent:
return False, sent
if len(sent) < 20 or len(sent) > 300:
return False, sent
# Minimum word count — ensures enough syntactic structure
if len(sent.split()) < 4:
return False, sent
# Must start with uppercase letter or digit
if not sent[0].isupper() and not sent[0].isdigit():
return False, sent
# Must end with proper punctuation (including : and ) for legal/academic)
if sent[-1] not in '.!?…"»"\'):':
return False, sent
if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE):
return False, sent
if sum(1 for c in sent if c.isupper()) > len(sent) * 0.5:
return False, sent
# Markup characters — reject template/HTML remnants
if re.search(r'[{}<>]', sent):
return False, sent
if '|' in sent:
return False, sent
# Template-like = (no spaces): reject "key=value" but allow "x = y"
if re.search(r'\w+=\w+', sent):
return False, sent
# Unbalanced brackets — opening and closing counts must match
for open_ch, close_ch in [('(', ')'), ('[', ']')]:
if sent.count(open_ch) != sent.count(close_ch):
return False, sent
# File extension remnants (wiki image markup)
if re.search(r'\.(jpg|jpeg|png|gif|svg|webp)\b', sent, re.IGNORECASE):
return False, sent
# Glued text — two syllables merged without space
for token in sent.split():
tone_count = sum(1 for c in token if c in TONED_VOWELS)
# Vietnamese syllable has at most 1 tone mark; 2+ means glued (e.g., "lõmlà")
if tone_count >= 2:
return False, sent
# Digit glued to Vietnamese text (e.g., "59Sẹo")
if tone_count >= 1 and re.search(r'\d[a-zA-ZĐđÀ-ỹ]', token):
return False, sent
# Language detection with Vietnamese-only character fallback
if lang_detect(sent) != "vi":
# Fallback: accept only if sentence has Vietnamese-only chars (ă, ơ, ư, đ and variants)
# French shares â, ê, ô, é, è, à with Vietnamese, so those are not sufficient
vn_only_count = sum(1 for c in sent if c in VIET_ONLY_CHARS)
if vn_only_count < 1:
return False, sent
return True, sent
# ============================================================================
# Domain-specific validators
# ============================================================================
def is_valid_legal(sent):
"""Validate sentence from legal domain (UTS_VLC)."""
ok, sent = base_valid(sent)
if not ok:
return False, sent
# Remove trailing list markers
sent = re.sub(r'\n\d+\.$', '', sent)
sent = re.sub(r'\n[a-z]\)$', '', sent)
sent = sent.strip()
if not sent:
return False, sent
# Skip legal structure headers
if re.match(r'^(QUỐC HỘI|CỘNG HÒA|Độc lập|Phần thứ|Chương [IVX]+|MỤC \d+)', sent):
return False, sent
if re.match(r'^(Điều \d+|Khoản \d+|Mục \d+)', sent):
return False, sent
if sent.startswith(('English:', 'Số hiệu:', 'Ngày hiệu lực:', '---', '|')):
return False, sent
if re.search(r'\n\d+$', sent):
return False, sent
return True, sent
def is_valid_news(sent):
"""Validate sentence from news domain (UVN-1)."""
ok, sent = base_valid(sent)
if not ok:
return False, sent
# Skip bylines
if re.match(r'^(Theo |PV |Nguồn:|Ảnh:|Video:|Bài:|Tin ảnh:)', sent):
return False, sent
# Skip photo captions
if re.search(r'\(Ảnh:.*\)$', sent):
return False, sent
if re.search(r'\(Nguồn:.*\)$', sent):
return False, sent
# Skip date/time at start
if re.match(r'^\d{1,2}/\d{1,2}/\d{4}', sent):
return False, sent
if re.match(r'^\d{1,2}:\d{2}', sent):
return False, sent
# Skip URLs
if re.search(r'(http|www\.|\.com|\.vn)', sent, re.IGNORECASE):
return False, sent
# Skip tags/categories
if re.match(r'^(Tags?:|Chuyên mục:|Từ khóa:)', sent, re.IGNORECASE):
return False, sent
# Skip data tables (>30% digits)
if sum(1 for c in sent if c.isdigit()) > len(sent) * 0.3:
return False, sent
return True, sent
def is_valid_wiki(sent):
"""Validate sentence from Wikipedia domain (UVW-2026)."""
ok, sent = base_valid(sent)
if not ok:
return False, sent
# Skip stub markers
if re.search(r'(bài sơ khai|sơ khai về|cần được mở rộng|Thể loại:)', sent):
return False, sent
# Skip category/list pages
if re.match(r'^(Thể loại|Danh sách|Xem thêm|Tham khảo|Liên kết ngoài|Chú thích)', sent):
return False, sent
# Skip infobox remnants
if sent.count('|') >= 2:
return False, sent
# Skip reference fragments
if re.search(r'\[\d+\]', sent):
return False, sent
if re.search(r'\[cần', sent):
return False, sent
# Skip URLs
if re.search(r'(http|www\.|\.com|\.org)', sent, re.IGNORECASE):
return False, sent
# Skip data tables
if sum(1 for c in sent if c.isdigit()) > len(sent) * 0.3:
return False, sent
# Skip list items
if re.match(r'^[\*\-•]\s', sent):
return False, sent
return True, sent
def is_valid_book(sent):
"""Validate sentence from book domain (UVB-v0.1) — stricter quality on top of base_valid."""
ok, sent = base_valid(sent)
if not ok:
return False, sent
# Stricter length
if len(sent) < 30 or len(sent) > 250:
return False, sent
# Word count
words = sent.split()
if len(words) < 5 or len(words) > 40:
return False, sent
# Stricter uppercase threshold
if sum(1 for c in sent if c.isupper()) > len(sent) * 0.3:
return False, sent
# Skip too many numbers
if sum(1 for c in sent if c.isdigit()) > len(sent) * 0.15:
return False, sent
# Skip structure markers
if re.match(r'^(Chương|Phần|Mục|Điều|\d+\.|\([a-z]\))', sent):
return False, sent
# Skip URLs/emails
if re.search(r'(http|www\.|@|\.com|\.vn)', sent, re.IGNORECASE):
return False, sent
# Skip excessive punctuation
punct_count = sum(1 for c in sent if c in '.,;:!?-\u2013\u2014()[]""\'\'\xab\xbb')
if punct_count > len(words) * 1.5:
return False, sent
# Skip incomplete sentences (ellipsis in middle)
if '...' in sent[:-5]:
return False, sent
# Skip dialogue-heavy
quote_count = sent.count('"') + sent.count('\u201c') + sent.count('\u201d')
if quote_count > 4:
return False, sent
return True, sent
# ============================================================================
# Book genre classification (from fetch_uvb_data.py)
# ============================================================================
FICTION_GENRES = {
"Fiction", "Novels", "Romance", "Fantasy", "Science Fiction",
"Mystery", "Thriller", "Horror", "Historical Fiction", "Literary Fiction",
"Adventure", "Crime", "Suspense", "Drama", "Short Stories"
}
NON_FICTION_GENRES = {
"Non Fiction", "Nonfiction", "History", "Biography", "Autobiography",
"Self Help", "Psychology", "Philosophy", "Science", "Politics",
"Economics", "Business", "Education", "Travel", "Memoir",
"Essays", "Reference", "Health", "Religion", "Spirituality"
}
def classify_book(genres):
"""Classify book as fiction or non-fiction based on genres."""
if not genres:
return None
genres_set = set(genres)
is_fiction = bool(genres_set & FICTION_GENRES)
is_non_fiction = bool(genres_set & NON_FICTION_GENRES)
if is_fiction and not is_non_fiction:
return "fiction"
elif is_non_fiction and not is_fiction:
return "non-fiction"
elif is_fiction and is_non_fiction:
fiction_count = len(genres_set & FICTION_GENRES)
non_fiction_count = len(genres_set & NON_FICTION_GENRES)
return "fiction" if fiction_count > non_fiction_count else "non-fiction"
return None
# ============================================================================
# Sentence extraction helpers
# ============================================================================
def normalize_for_dedup(sent):
"""Normalize sentence for near-duplicate detection.
Replaces all digit sequences with '#' so that sentences differing only
in numbers (e.g., article numbers, amounts) are treated as duplicates.
Also lowercases for case-insensitive matching.
"""
return re.sub(r'\d+', '#', sent.lower())
MAX_PER_DOC = 500 # Cap sentences per document to ensure source diversity
def extract_sentences(docs, validator, target, label=""):
"""Extract validated, deduplicated sentences from documents via round-robin.
Phase 1: Scan all documents, collect up to MAX_PER_DOC valid sentences each.
Phase 2: Round-robin across documents to ensure even source representation.
"""
# Phase 1: collect candidate sentences per document
doc_pools = [] # list of [sentences]
for idx, doc in enumerate(docs):
content = doc["content"]
content = clean_text(content)
sents = []
for sent in safe_sent_tokenize(content):
sent = sent.strip()
ok, cleaned = validator(sent)
if ok:
sents.append(cleaned)
if len(sents) >= MAX_PER_DOC:
break
if sents:
doc_pools.append(sents)
if (idx + 1) % 200 == 0:
print(f" {label}: [{idx+1}] docs scanned, {len(doc_pools)} with valid sentences")
# Stop scanning if we have enough candidates (3x target for safety)
total_candidates = sum(len(s) for s in doc_pools)
if total_candidates >= target * 3:
print(f" {label}: [{idx+1}] docs scanned, {len(doc_pools)} with candidates, {total_candidates:,} total")
break
if not doc_pools:
return []
total_candidates = sum(len(s) for s in doc_pools)
print(f" {label}: {len(doc_pools)} docs with candidates, {total_candidates:,} total candidate sentences")
# Phase 2: round-robin selection with dedup
sentences = []
seen = set()
seen_normalized = set()
round_idx = 0
while len(sentences) < target and doc_pools:
made_progress = False
for i in range(len(doc_pools)):
sents = doc_pools[i]
if round_idx >= len(sents):
continue
cleaned = sents[round_idx]
if cleaned in seen:
continue
norm = normalize_for_dedup(cleaned)
if norm in seen_normalized:
continue
seen.add(cleaned)
seen_normalized.add(norm)
sentences.append(cleaned)
made_progress = True
if len(sentences) >= target:
break
if not made_progress:
break
# Remove exhausted documents
doc_pools = [s for s in doc_pools if round_idx + 1 < len(s)]
round_idx += 1
print(f" {label}: {len(sentences):,} sentences from {round_idx} rounds")
return sentences[:target]
MAX_PER_BOOK = 500 # Cap sentences per book to ensure source diversity
def extract_book_sentences(books, target, label=""):
"""Extract validated, deduplicated sentences from books via round-robin.
Phase 1: Extract up to MAX_PER_BOOK valid sentences from each book.
Phase 2: Round-robin across all books to ensure even representation.
"""
# Phase 1: collect candidate sentences per book
book_pools = [] # list of (title, [sentences])
for i, book in enumerate(books):
content = clean_text(book["content"])
sents = []
for sent in safe_sent_tokenize(content):
ok, cleaned = is_valid_book(sent)
if ok:
sents.append(cleaned)
if len(sents) >= MAX_PER_BOOK:
break
if sents:
book_pools.append((book["title"], sents))
if (i + 1) % 50 == 0:
print(f" {label}: [{i+1}/{len(books)}] books scanned, {len(book_pools)} with valid sentences")
total_candidates = sum(len(s) for _, s in book_pools)
print(f" {label}: {len(book_pools)} books with candidates, {total_candidates:,} total candidate sentences")
# Phase 2: round-robin selection with dedup
sentences = []
seen = set()
seen_normalized = set()
round_idx = 0
books_contributed = set()
while len(sentences) < target and book_pools:
made_progress = False
for i in range(len(book_pools)):
title, sents = book_pools[i]
if round_idx >= len(sents):
continue
cleaned = sents[round_idx]
if cleaned in seen:
continue
norm = normalize_for_dedup(cleaned)
if norm in seen_normalized:
continue
seen.add(cleaned)
seen_normalized.add(norm)
sentences.append(cleaned)
books_contributed.add(title)
made_progress = True
if len(sentences) >= target:
break
if not made_progress:
break
# Remove exhausted books
book_pools = [(t, s) for t, s in book_pools if round_idx + 1 < len(s)]
round_idx += 1
print(f" {label}: {len(sentences):,} sentences from {len(books_contributed)} books ({round_idx} rounds)")
return sentences[:target]
def save_sentences(sentences, filepath):
"""Save sentences to file in idx\\tsentence format."""
with open(filepath, "w", encoding="utf-8") as f:
for i, sent in enumerate(sentences, 1):
f.write(f"{i}\t{sent}\n")
print(f" Saved {len(sentences)} sentences to {filepath}")
# ============================================================================
# Main
# ============================================================================
def main():
base_dir = dirname(dirname(__file__))
# --- Legal (UTS_VLC) ---
print(f"\n[1/5] Fetching legal sentences (target: {TARGET_PER_DOMAIN})...")
ds_vlc = load_dataset("undertheseanlp/UTS_VLC", split="2026")
vlc_sentences = extract_sentences(ds_vlc, is_valid_legal, TARGET_PER_DOMAIN, "Legal")
save_sentences(vlc_sentences, join(base_dir, "ws_sentences_vlc.txt"))
# --- News (UVN-1) ---
print(f"\n[2/5] Fetching news sentences (target: {TARGET_PER_DOMAIN})...")
ds_uvn = load_dataset("undertheseanlp/UVN-1", split="train")
uvn_sentences = extract_sentences(ds_uvn, is_valid_news, TARGET_PER_DOMAIN, "News")
save_sentences(uvn_sentences, join(base_dir, "ws_sentences_uvn.txt"))
# --- Wikipedia (UVW-2026) ---
print(f"\n[3/5] Fetching Wikipedia sentences (target: {TARGET_PER_DOMAIN})...")
ds_uvw = load_dataset("undertheseanlp/UVW-2026", split="train")
high_quality = [doc for doc in ds_uvw if (doc.get("quality_score") or 0) >= 5]
print(f" High-quality articles: {len(high_quality)}")
uvw_sentences = extract_sentences(high_quality, is_valid_wiki, TARGET_PER_DOMAIN, "Wikipedia")
save_sentences(uvw_sentences, join(base_dir, "ws_sentences_uvw.txt"))
# --- Books (UVB-v0.1) ---
print(f"\n[4/5] Fetching fiction sentences (target: {TARGET_PER_DOMAIN})...")
print(f"[5/5] Fetching non-fiction sentences (target: {TARGET_PER_DOMAIN})...")
ds_uvb = load_dataset("undertheseanlp/UVB-v0.1", split="train")
fiction_books = []
non_fiction_books = []
for book in ds_uvb:
genres = book.get("genres", [])
rating = book.get("goodreads_rating", 0) or 0
num_ratings = book.get("goodreads_num_ratings", 0) or 0
quality_score = rating * min(num_ratings / 100, 10)
book_type = classify_book(genres)
book_info = {
"title": book["title"],
"content": book["content"],
"quality_score": quality_score,
}
if book_type == "fiction":
fiction_books.append(book_info)
elif book_type == "non-fiction":
non_fiction_books.append(book_info)
fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
non_fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
print(f" Fiction books: {len(fiction_books)}, Non-fiction books: {len(non_fiction_books)}")
fiction_sentences = extract_book_sentences(fiction_books, TARGET_PER_DOMAIN, "Fiction")
save_sentences(fiction_sentences, join(base_dir, "ws_sentences_uvb_f.txt"))
nonfiction_sentences = extract_book_sentences(non_fiction_books, TARGET_PER_DOMAIN, "Non-fiction")
save_sentences(nonfiction_sentences, join(base_dir, "ws_sentences_uvb_n.txt"))
# --- Summary ---
print("\n" + "=" * 60)
print("Summary:")
print(f" Legal: {len(vlc_sentences):,}")
print(f" News: {len(uvn_sentences):,}")
print(f" Wikipedia: {len(uvw_sentences):,}")
print(f" Fiction: {len(fiction_sentences):,}")
print(f" Non-fiction: {len(nonfiction_sentences):,}")
total = len(vlc_sentences) + len(uvn_sentences) + len(uvw_sentences) + len(fiction_sentences) + len(nonfiction_sentences)
print(f" Total: {total:,}")
if __name__ == "__main__":
main()