| |
| |
| |
| |
| |
| |
| |
| """ |
| Fetch sentences for word segmentation dataset (100K total). |
| |
| Fetches 20,000 sentences per domain from 4 HuggingFace datasets: |
| - Legal: undertheseanlp/UTS_VLC → ws_sentences_vlc.txt |
| - News: undertheseanlp/UVN-1 → ws_sentences_uvn.txt |
| - Wikipedia: undertheseanlp/UVW-2026 → ws_sentences_uvw.txt |
| - Fiction: undertheseanlp/UVB-v0.1 → ws_sentences_uvb_f.txt |
| - Non-fiction: undertheseanlp/UVB-v0.1 → ws_sentences_uvb_n.txt |
| |
| Output format: idx\tsentence (one sentence per line) |
| """ |
|
|
import math
import re
from os.path import dirname, join

from datasets import load_dataset
from underthesea import lang_detect, sent_tokenize, text_normalize
|
|
|
|
TARGET_PER_DOMAIN = 20000  # sentences collected per domain (5 domains x 20K = 100K total)
|
|
| |
| |
# Vietnamese vowels carrying an explicit tone mark (sắc, huyền, hỏi, ngã, nặng),
# lower- and upper-case. Two of these inside a single whitespace token is used
# as a "glued words" signal by the validators and by sentence_score.
TONED_VOWELS = set(
    'áàảãạắằẳẵặấầẩẫậéèẻẽẹếềểễệíìỉĩịóòỏõọốồổỗộớờởỡợúùủũụứừửữựýỳỷỹỵ'
    'ÁÀẢÃẠẮẰẲẴẶẤẦẨẪẬÉÈẺẼẸẾỀỂỄỆÍÌỈĨỊÓÒỎÕỌỐỒỔỖỘỚỜỞỠỢÚÙỦŨỤỨỪỬỮỰÝỲỶỸỴ'
)

# All Vietnamese diacritical letters: toned vowels plus the bare modified
# letters ă â ê ô ơ ư đ. Used to measure the "Vietnamese density" of a sentence.
VIET_DIACRITICS = TONED_VOWELS | set('ăâêôơưđĂÂÊÔƠƯĐ')

# Letters rarely found outside Vietnamese text; used to rescue sentences
# that lang_detect misclassifies as another language (see base_valid).
VIET_ONLY_CHARS = set('ăắằẳẵặơớờởỡợưứừửữựđ'
                      'ĂẮẰẲẴẶƠỚỜỞỠỢƯỨỪỬỮỰĐ')
|
|
|
|
| |
| |
| |
|
|
def clean_text(text):
    """Strip markdown artifacts (headers, emphasis, rules, links) from text.

    Runs underthesea's text_normalize first, then removes markup, collapses
    blank lines, and strips surrounding whitespace from every line.
    """
    normalized = text_normalize(text)
    for pattern, repl, flags in (
        (r'^#+\s+', '', re.MULTILINE),         # markdown headers
        (r'\*+', '', 0),                       # bold/italic markers
        (r'^-+$', '', re.MULTILINE),           # horizontal rules
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),  # links -> link text
        (r'\n{2,}', '\n', 0),                  # collapse blank lines
    ):
        normalized = re.sub(pattern, repl, normalized, flags=flags)
    return '\n'.join(part.strip() for part in normalized.split('\n'))
|
|
|
|
def safe_sent_tokenize(text):
    """Sentence tokenize with fix for numbers split across sentence boundaries.

    underthesea's sent_tokenize splits "2.000" into ["...hơn 2.", "000 học sinh..."].
    This merges consecutive sentences where the split occurred inside a number.
    """
    pieces = sent_tokenize(text)
    if not pieces:
        return pieces
    out = [pieces[0]]
    for piece in pieces[1:]:
        # A previous sentence ending "<digit>." followed by one starting with a
        # digit means the tokenizer split inside a thousands-separated number.
        if re.search(r'\d\s*\.\s*$', out[-1]) and re.match(r'\d', piece):
            out[-1] += piece
        else:
            out.append(piece)
    return out
|
|
|
|
def sentence_score(sent):
    """Score a sentence's quality on a (0, 1) scale.

    Combines 6 sub-scores:
    1. Length score — Gaussian around ideal range [60, 200] chars
    2. Word count score — Gaussian around ideal range [8, 35] words
    3. Vietnamese density — ratio of Vietnamese diacritical chars to total letters
    4. Structure score — proper start (uppercase/digit) + proper end (punctuation)
    5. Cleanliness score — absence of markup, unbalanced brackets, glued text
    6. Completeness score — balanced quotes, no trailing fragments

    Args:
        sent: candidate sentence; surrounding whitespace is ignored.

    Returns a float in (0, 1). Higher is better.
    """
    # NOTE: `import math` was previously inside this function and ran on
    # every call; it is now a module-level import.
    sent = sent.strip()
    if not sent:
        return 0.0

    # 1. Length: flat top on [60, 200] chars, Gaussian falloff outside
    # (sigma 30 on the short side, 60 on the long side).
    char_len = len(sent)
    if 60 <= char_len <= 200:
        len_score = 1.0
    elif char_len < 60:
        len_score = math.exp(-0.5 * ((char_len - 60) / 30) ** 2)
    else:
        len_score = math.exp(-0.5 * ((char_len - 200) / 60) ** 2)

    # 2. Word count: flat top on [8, 35] words, Gaussian falloff outside.
    words = sent.split()
    wc = len(words)
    if 8 <= wc <= 35:
        wc_score = 1.0
    elif wc < 8:
        wc_score = math.exp(-0.5 * ((wc - 8) / 3) ** 2)
    else:
        wc_score = math.exp(-0.5 * ((wc - 35) / 10) ** 2)

    # 3. Vietnamese density: diacritic letters / all letters, scaled x5 and
    # capped at 1.0 (about 20% diacritics already counts as fully Vietnamese).
    total_letters = sum(1 for c in sent if c.isalpha())
    viet_chars = sum(1 for c in sent if c in VIET_DIACRITICS)
    viet_score = min(viet_chars / max(total_letters, 1) * 5, 1.0)

    # 4. Structure: 0.5 for a proper start + 0.5 for a proper end.
    start_ok = 0.5 if (sent[0].isupper() or sent[0].isdigit()) else 0.0
    end_ok = 0.5 if sent[-1] in '.!?…"»"\'):' else 0.0
    struct_score = start_ok + end_ok

    # 5. Cleanliness: subtract penalties for markup remnants.
    clean_score = 1.0
    if re.search(r'[{}<>|]', sent):
        clean_score -= 0.3
    if re.search(r'\w+=\w+', sent):
        clean_score -= 0.2
    # Unbalanced brackets: single penalty even if both pairs mismatch.
    for o, c in [('(', ')'), ('[', ']')]:
        if sent.count(o) != sent.count(c):
            clean_score -= 0.2
            break
    # Glued-word heuristics: single penalty for the first offending token.
    for token in words:
        tone_count = sum(1 for ch in token if ch in TONED_VOWELS)
        if tone_count >= 2:
            clean_score -= 0.3
            break
        if tone_count >= 1 and re.search(r'\d[a-zA-ZĐđÀ-ỹ]', token):
            clean_score -= 0.3
            break
    # Image file references left over from markup.
    if re.search(r'\.(jpg|jpeg|png|gif|svg|webp)\b', sent, re.IGNORECASE):
        clean_score -= 0.2
    # Shouty text: more than half the characters upper-case.
    if sum(1 for c in sent if c.isupper()) > len(sent) * 0.5:
        clean_score -= 0.2
    clean_score = max(clean_score, 0.0)

    # 6. Completeness: unbalanced double quotes and digit-heavy text.
    comp_score = 1.0
    double_quotes = sent.count('"') + sent.count('\u201c') + sent.count('\u201d')
    if double_quotes % 2 != 0:
        comp_score -= 0.2
    digit_ratio = sum(1 for c in sent if c.isdigit()) / max(len(sent), 1)
    if digit_ratio > 0.3:
        comp_score -= 0.3
    elif digit_ratio > 0.2:
        comp_score -= 0.1
    comp_score = max(comp_score, 0.0)

    # Weighted sum gated by Vietnamese density, clamped into (0, 1).
    base_score = (
        0.20 * len_score
        + 0.15 * wc_score
        + 0.20 * struct_score
        + 0.30 * clean_score
        + 0.15 * comp_score
    )
    score = base_score * viet_score
    return round(max(0.01, min(score, 0.99)), 4)
|
|
|
|
def base_valid(sent):
    """Shared base validation: length, word count, structure, language, punctuation, markup.

    Returns (ok, stripped_sent); ok is False at the first failing check.
    """
    sent = sent.strip()
    if not sent:
        return False, sent
    if len(sent) < 20 or len(sent) > 300:
        return False, sent
    # Need at least 4 whitespace-separated tokens.
    if len(sent.split()) < 4:
        return False, sent
    # Must begin like a sentence: capital letter or digit.
    first = sent[0]
    if not (first.isupper() or first.isdigit()):
        return False, sent
    # Must end with sentence-final punctuation.
    if sent[-1] not in '.!?…"»"\'):':
        return False, sent
    # Must contain at least one Vietnamese diacritical letter.
    if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE):
        return False, sent
    # Shouty text: more than half the characters upper-case.
    if sum(1 for ch in sent if ch.isupper()) > len(sent) * 0.5:
        return False, sent
    # Markup, table, and key=value remnants.
    if re.search(r'[{}<>]', sent) or '|' in sent or re.search(r'\w+=\w+', sent):
        return False, sent
    # Unbalanced brackets.
    if sent.count('(') != sent.count(')') or sent.count('[') != sent.count(']'):
        return False, sent
    # Image file references.
    if re.search(r'\.(jpg|jpeg|png|gif|svg|webp)\b', sent, re.IGNORECASE):
        return False, sent
    # Glued-word heuristics, per token.
    for token in sent.split():
        toned = sum(1 for ch in token if ch in TONED_VOWELS)
        # Two toned vowels in one token: two syllables glued together.
        if toned >= 2:
            return False, sent
        # A digit immediately followed by a letter inside a toned token.
        if toned >= 1 and re.search(r'\d[a-zA-ZĐđÀ-ỹ]', token):
            return False, sent
    # Language check last; rescue sentences containing letters that are
    # characteristic of Vietnamese even when lang_detect disagrees.
    if lang_detect(sent) != "vi":
        if sum(1 for ch in sent if ch in VIET_ONLY_CHARS) < 1:
            return False, sent
    return True, sent
|
|
|
|
| |
| |
| |
|
|
def is_valid_legal(sent):
    """Validate sentence from legal domain (UTS_VLC)."""
    ok, sent = base_valid(sent)
    if not ok:
        return False, sent
    # Drop trailing clause markers like "\n1." or "\na)" left by list items.
    # NOTE(review): this cleanup runs after base_valid's end-punctuation
    # check, so the returned sentence may no longer end with punctuation —
    # confirm that is intended.
    sent = re.sub(r'\n\d+\.$', '', sent)
    sent = re.sub(r'\n[a-z]\)$', '', sent)
    sent = sent.strip()
    if not sent:
        return False, sent
    # Boilerplate headers of Vietnamese legal documents.
    for header in (
        r'^(QUỐC HỘI|CỘNG HÒA|Độc lập|Phần thứ|Chương [IVX]+|MỤC \d+)',
        r'^(Điều \d+|Khoản \d+|Mục \d+)',
    ):
        if re.match(header, sent):
            return False, sent
    # Metadata lines and table remnants.
    if sent.startswith(('English:', 'Số hiệu:', 'Ngày hiệu lực:', '---', '|')):
        return False, sent
    # Trailing bare number (page/clause artifact).
    if re.search(r'\n\d+$', sent):
        return False, sent
    return True, sent
|
|
|
|
def is_valid_news(sent):
    """Validate sentence from news domain (UVN-1)."""
    ok, sent = base_valid(sent)
    if not ok:
        return False, sent
    # Bylines, media credits, timestamps, URLs, tag lines.
    reject_rules = (
        (re.match, r'^(Theo |PV |Nguồn:|Ảnh:|Video:|Bài:|Tin ảnh:)', 0),
        (re.search, r'\(Ảnh:.*\)$', 0),
        (re.search, r'\(Nguồn:.*\)$', 0),
        (re.match, r'^\d{1,2}/\d{1,2}/\d{4}', 0),
        (re.match, r'^\d{1,2}:\d{2}', 0),
        (re.search, r'(http|www\.|\.com|\.vn)', re.IGNORECASE),
        (re.match, r'^(Tags?:|Chuyên mục:|Từ khóa:)', re.IGNORECASE),
    )
    for matcher, pattern, flags in reject_rules:
        if matcher(pattern, sent, flags):
            return False, sent
    # Number-heavy sentences (scores, tables) make poor segmentation data.
    if sum(1 for ch in sent if ch.isdigit()) > len(sent) * 0.3:
        return False, sent
    return True, sent
|
|
|
|
def is_valid_wiki(sent):
    """Validate sentence from Wikipedia domain (UVW-2026).

    Applies base validation, then rejects wiki-specific boilerplate: stub
    notices, navigation/section headers, citation markers, URLs,
    number-heavy lines and list bullets.
    """
    ok, sent = base_valid(sent)
    if not ok:
        return False, sent
    # Stub/maintenance boilerplate.
    if re.search(r'(bài sơ khai|sơ khai về|cần được mở rộng|Thể loại:)', sent):
        return False, sent
    # Section headers / navigation lines.
    if re.match(r'^(Thể loại|Danh sách|Xem thêm|Tham khảo|Liên kết ngoài|Chú thích)', sent):
        return False, sent
    # BUG FIX: the original also rejected sentences containing two or more
    # '|' characters, but base_valid already rejects any sentence with '|',
    # so that branch was unreachable and has been removed.
    # Citation markers like [1] or [cần dẫn nguồn].
    if re.search(r'\[\d+\]', sent):
        return False, sent
    if re.search(r'\[cần', sent):
        return False, sent
    # URLs.
    if re.search(r'(http|www\.|\.com|\.org)', sent, re.IGNORECASE):
        return False, sent
    # Number-heavy lines.
    if sum(1 for c in sent if c.isdigit()) > len(sent) * 0.3:
        return False, sent
    # List bullets.
    if re.match(r'^[\*\-•]\s', sent):
        return False, sent
    return True, sent
|
|
|
|
def is_valid_book(sent):
    """Validate sentence from book domain (UVB-v0.1) — stricter quality on top of base_valid."""
    ok, sent = base_valid(sent)
    if not ok:
        return False, sent
    # Tighter length/word-count bounds than base_valid.
    if not 30 <= len(sent) <= 250:
        return False, sent
    words = sent.split()
    if not 5 <= len(words) <= 40:
        return False, sent
    # Stricter caps on upper-case and digit density.
    if sum(1 for ch in sent if ch.isupper()) > len(sent) * 0.3:
        return False, sent
    if sum(1 for ch in sent if ch.isdigit()) > len(sent) * 0.15:
        return False, sent
    # Structural headings and list markers.
    if re.match(r'^(Chương|Phần|Mục|Điều|\d+\.|\([a-z]\))', sent):
        return False, sent
    # Web/contact remnants.
    if re.search(r'(http|www\.|@|\.com|\.vn)', sent, re.IGNORECASE):
        return False, sent
    # Punctuation-heavy lines (dialogue fragments, tables of contents).
    punct_total = sum(1 for ch in sent if ch in '.,;:!?-\u2013\u2014()[]""\'\'\xab\xbb')
    if punct_total > len(words) * 1.5:
        return False, sent
    # Ellipsis anywhere except the last five characters suggests truncation.
    if '...' in sent[:-5]:
        return False, sent
    # Heavily quoted sentences (nested dialogue).
    if sent.count('"') + sent.count('\u201c') + sent.count('\u201d') > 4:
        return False, sent
    return True, sent
|
|
|
|
| |
| |
| |
|
|
# Goodreads genre labels treated as fiction when classifying UVB books.
FICTION_GENRES = {
    "Fiction", "Novels", "Romance", "Fantasy", "Science Fiction",
    "Mystery", "Thriller", "Horror", "Historical Fiction", "Literary Fiction",
    "Adventure", "Crime", "Suspense", "Drama", "Short Stories"
}

# Goodreads genre labels treated as non-fiction.
NON_FICTION_GENRES = {
    "Non Fiction", "Nonfiction", "History", "Biography", "Autobiography",
    "Self Help", "Psychology", "Philosophy", "Science", "Politics",
    "Economics", "Business", "Education", "Travel", "Memoir",
    "Essays", "Reference", "Health", "Religion", "Spirituality"
}
|
|
|
|
def classify_book(genres):
    """Classify book as fiction or non-fiction based on genres.

    Returns "fiction", "non-fiction", or None when genres is empty or
    matches neither genre set.
    """
    if not genres:
        return None
    fiction_hits = FICTION_GENRES.intersection(genres)
    non_fiction_hits = NON_FICTION_GENRES.intersection(genres)
    if fiction_hits and not non_fiction_hits:
        return "fiction"
    if non_fiction_hits and not fiction_hits:
        return "non-fiction"
    if fiction_hits and non_fiction_hits:
        # Ambiguous: majority of matched genres wins; ties go to non-fiction.
        return "fiction" if len(fiction_hits) > len(non_fiction_hits) else "non-fiction"
    return None
|
|
|
|
| |
| |
| |
|
|
def normalize_for_dedup(sent):
    """Normalize a sentence for near-duplicate detection.

    Lowercases and collapses every run of digits to '#', so sentences that
    differ only in numbers (e.g., article numbers, amounts) compare equal.
    """
    lowered = sent.lower()
    return re.sub(r'\d+', '#', lowered)
|
|
|
|
MAX_PER_DOC = 500  # cap on valid sentences collected from any single document
|
|
|
|
def extract_sentences(docs, validator, target, label=""):
    """Extract validated, deduplicated sentences from documents via round-robin.

    Phase 1: Scan all documents, collect up to MAX_PER_DOC valid sentences each.
    Phase 2: Round-robin across documents to ensure even source representation.

    Args:
        docs: iterable of dicts with a "content" key.
        validator: callable sent -> (ok, cleaned_sent).
        target: number of sentences to collect.
        label: prefix used in progress logging.

    Returns:
        List of up to `target` unique sentences.
    """
    # Phase 1: per-document candidate pools. total_candidates is maintained
    # incrementally (the original recomputed the sum on every document,
    # which was accidentally quadratic in the number of pools).
    doc_pools = []
    total_candidates = 0
    for idx, doc in enumerate(docs):
        content = doc["content"]
        content = clean_text(content)
        sents = []
        for sent in safe_sent_tokenize(content):
            sent = sent.strip()
            ok, cleaned = validator(sent)
            if ok:
                sents.append(cleaned)
                if len(sents) >= MAX_PER_DOC:
                    break
        if sents:
            doc_pools.append(sents)
            total_candidates += len(sents)
        if (idx + 1) % 200 == 0:
            print(f" {label}: [{idx+1}] docs scanned, {len(doc_pools)} with valid sentences")
        # Stop scanning once we hold 3x the target: enough headroom for dedup.
        if total_candidates >= target * 3:
            print(f" {label}: [{idx+1}] docs scanned, {len(doc_pools)} with candidates, {total_candidates:,} total")
            break

    if not doc_pools:
        return []

    print(f" {label}: {len(doc_pools)} docs with candidates, {total_candidates:,} total candidate sentences")

    # Phase 2: round-robin — take the round_idx-th sentence from every pool,
    # skipping exact and number-normalized duplicates.
    sentences = []
    seen = set()
    seen_normalized = set()
    round_idx = 0

    while len(sentences) < target and doc_pools:
        for sents in doc_pools:
            if round_idx >= len(sents):
                continue
            cleaned = sents[round_idx]
            if cleaned in seen:
                continue
            norm = normalize_for_dedup(cleaned)
            if norm in seen_normalized:
                continue
            seen.add(cleaned)
            seen_normalized.add(norm)
            sentences.append(cleaned)
            if len(sentences) >= target:
                break
        # BUG FIX: the original stopped as soon as one full round added no new
        # sentence (all duplicates), even though later rounds could still hold
        # fresh sentences. Termination remains guaranteed: the filter below
        # shrinks doc_pools every round until it is empty.
        doc_pools = [s for s in doc_pools if round_idx + 1 < len(s)]
        round_idx += 1

    print(f" {label}: {len(sentences):,} sentences from {round_idx} rounds")
    return sentences[:target]
|
|
|
|
MAX_PER_BOOK = 500  # cap on valid sentences collected from any single book
|
|
|
|
def extract_book_sentences(books, target, label=""):
    """Extract validated, deduplicated sentences from books via round-robin.

    Phase 1: Extract up to MAX_PER_BOOK valid sentences from each book.
    Phase 2: Round-robin across all books to ensure even representation.

    Args:
        books: list of dicts with "title" and "content" keys.
        target: number of sentences to collect.
        label: prefix used in progress logging.

    Returns:
        List of up to `target` unique sentences.
    """
    # Phase 1: per-book candidate pools, keyed by title for the summary log.
    book_pools = []
    for i, book in enumerate(books):
        content = clean_text(book["content"])
        sents = []
        for sent in safe_sent_tokenize(content):
            ok, cleaned = is_valid_book(sent)
            if ok:
                sents.append(cleaned)
                if len(sents) >= MAX_PER_BOOK:
                    break
        if sents:
            book_pools.append((book["title"], sents))
        if (i + 1) % 50 == 0:
            print(f" {label}: [{i+1}/{len(books)}] books scanned, {len(book_pools)} with valid sentences")

    total_candidates = sum(len(s) for _, s in book_pools)
    print(f" {label}: {len(book_pools)} books with candidates, {total_candidates:,} total candidate sentences")

    # Phase 2: round-robin across books, skipping exact and number-normalized
    # duplicates, tracking which books contributed at least one sentence.
    sentences = []
    seen = set()
    seen_normalized = set()
    round_idx = 0
    books_contributed = set()

    while len(sentences) < target and book_pools:
        for title, sents in book_pools:
            if round_idx >= len(sents):
                continue
            cleaned = sents[round_idx]
            if cleaned in seen:
                continue
            norm = normalize_for_dedup(cleaned)
            if norm in seen_normalized:
                continue
            seen.add(cleaned)
            seen_normalized.add(norm)
            sentences.append(cleaned)
            books_contributed.add(title)
            if len(sentences) >= target:
                break
        # BUG FIX: the original broke out as soon as one full round added no
        # new sentence (all duplicates), discarding fresh sentences in later
        # rounds. Termination remains guaranteed: the filter below shrinks
        # book_pools every round until it is empty.
        book_pools = [(t, s) for t, s in book_pools if round_idx + 1 < len(s)]
        round_idx += 1

    print(f" {label}: {len(sentences):,} sentences from {len(books_contributed)} books ({round_idx} rounds)")
    return sentences[:target]
|
|
|
|
def save_sentences(sentences, filepath):
    """Save sentences to file in idx\\tsentence format."""
    with open(filepath, "w", encoding="utf-8") as out:
        out.writelines(
            f"{idx}\t{sentence}\n" for idx, sentence in enumerate(sentences, 1)
        )
    print(f" Saved {len(sentences)} sentences to {filepath}")
|
|
|
|
| |
| |
| |
|
|
def main():
    """Fetch, filter and save 20K sentences for each of the 5 domains."""
    # Output files are written one directory above this script's directory.
    base_dir = dirname(dirname(__file__))

    # [1/5] Legal: UTS_VLC ("2026" split).
    print(f"\n[1/5] Fetching legal sentences (target: {TARGET_PER_DOMAIN})...")
    ds_vlc = load_dataset("undertheseanlp/UTS_VLC", split="2026")
    vlc_sentences = extract_sentences(ds_vlc, is_valid_legal, TARGET_PER_DOMAIN, "Legal")
    save_sentences(vlc_sentences, join(base_dir, "ws_sentences_vlc.txt"))

    # [2/5] News: UVN-1.
    print(f"\n[2/5] Fetching news sentences (target: {TARGET_PER_DOMAIN})...")
    ds_uvn = load_dataset("undertheseanlp/UVN-1", split="train")
    uvn_sentences = extract_sentences(ds_uvn, is_valid_news, TARGET_PER_DOMAIN, "News")
    save_sentences(uvn_sentences, join(base_dir, "ws_sentences_uvn.txt"))

    # [3/5] Wikipedia: UVW-2026, restricted to articles scoring >= 5;
    # (doc.get(...) or 0) also maps a None quality_score to 0.
    print(f"\n[3/5] Fetching Wikipedia sentences (target: {TARGET_PER_DOMAIN})...")
    ds_uvw = load_dataset("undertheseanlp/UVW-2026", split="train")
    high_quality = [doc for doc in ds_uvw if (doc.get("quality_score") or 0) >= 5]
    print(f" High-quality articles: {len(high_quality)}")
    uvw_sentences = extract_sentences(high_quality, is_valid_wiki, TARGET_PER_DOMAIN, "Wikipedia")
    save_sentences(uvw_sentences, join(base_dir, "ws_sentences_uvw.txt"))

    # [4/5] + [5/5] Books: UVB-v0.1, split into fiction / non-fiction by genre.
    print(f"\n[4/5] Fetching fiction sentences (target: {TARGET_PER_DOMAIN})...")
    print(f"[5/5] Fetching non-fiction sentences (target: {TARGET_PER_DOMAIN})...")
    ds_uvb = load_dataset("undertheseanlp/UVB-v0.1", split="train")

    fiction_books = []
    non_fiction_books = []
    for book in ds_uvb:
        genres = book.get("genres", [])
        rating = book.get("goodreads_rating", 0) or 0
        num_ratings = book.get("goodreads_num_ratings", 0) or 0
        # Popularity-weighted rating: the rating-count factor saturates at
        # 1000 ratings (min(n/100, 10)), so score maxes out at rating * 10.
        quality_score = rating * min(num_ratings / 100, 10)
        book_type = classify_book(genres)
        book_info = {
            "title": book["title"],
            "content": book["content"],
            "quality_score": quality_score,
        }
        if book_type == "fiction":
            fiction_books.append(book_info)
        elif book_type == "non-fiction":
            non_fiction_books.append(book_info)

    # Best books first, so round-robin extraction draws from high-quality
    # sources before lower-quality ones.
    fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
    non_fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
    print(f" Fiction books: {len(fiction_books)}, Non-fiction books: {len(non_fiction_books)}")

    fiction_sentences = extract_book_sentences(fiction_books, TARGET_PER_DOMAIN, "Fiction")
    save_sentences(fiction_sentences, join(base_dir, "ws_sentences_uvb_f.txt"))

    nonfiction_sentences = extract_book_sentences(non_fiction_books, TARGET_PER_DOMAIN, "Non-fiction")
    save_sentences(nonfiction_sentences, join(base_dir, "ws_sentences_uvb_n.txt"))

    # Per-domain and total counts summary.
    print("\n" + "=" * 60)
    print("Summary:")
    print(f" Legal: {len(vlc_sentences):,}")
    print(f" News: {len(uvn_sentences):,}")
    print(f" Wikipedia: {len(uvw_sentences):,}")
    print(f" Fiction: {len(fiction_sentences):,}")
    print(f" Non-fiction: {len(nonfiction_sentences):,}")
    total = len(vlc_sentences) + len(uvn_sentences) + len(uvw_sentences) + len(fiction_sentences) + len(nonfiction_sentences)
    print(f" Total: {total:,}")


if __name__ == "__main__":
    main()
|
|