# UDD-1 / src/build_ws_dataset.py
# rain1024 — "Add word segmentation dataset pipeline and technical report v1.1" (commit 1b2f095)
# /// script
# requires-python = ">=3.9"
# dependencies = [
# "underthesea>=6.8.0",
# ]
# ///
"""
Build word segmentation dataset in BIO format (VLSP 2013 compatible).
Reads 5 intermediate sentence files (ws_sentences_*.txt), applies
underthesea.word_tokenize to get compound tokens, then splits into
syllables with B-W/I-W tags. Creates stratified 80/10/10 train/dev/test
splits with equal domain proportions.
Output files:
- udd-ws-v1.1-train.txt (~80K sentences)
- udd-ws-v1.1-dev.txt (~10K sentences)
- udd-ws-v1.1-test.txt (~10K sentences)
BIO format (tab-separated, blank line between sentences):
# sent_id = vlc-1
# text = Một doanh nghiệp lớn.
syllable1\tB-W
syllable2\tI-W
syllable3\tB-W
<blank line>
"""
import random
from os.path import dirname, isfile, join
from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize as regex_tokenize
# Split ratios for the stratified train/dev/test partition (must sum to 1.0).
TRAIN_RATIO = 0.80
DEV_RATIO = 0.10
TEST_RATIO = 0.10
# Domain name -> (intermediate sentence file, sent_id prefix).
# Each file holds one "idx\tsentence" pair per line; the prefix is prepended
# to idx to form globally unique sentence ids (e.g. "vlc-123").
DOMAIN_CONFIG = {
    "legal": ("ws_sentences_vlc.txt", "vlc-"),
    "news": ("ws_sentences_uvn.txt", "uvn-"),
    "wikipedia": ("ws_sentences_uvw.txt", "uvw-"),
    "fiction": ("ws_sentences_uvb_f.txt", "uvb-f-"),
    "non-fiction": ("ws_sentences_uvb_n.txt", "uvb-n-"),
}
def load_sentences(filepath, prefix):
    """Read an ``idx\\tsentence`` file and return (sent_id, text) pairs.

    Each non-empty line is split on its first tab; the numeric index is
    combined with *prefix* to form the sentence id. Blank lines and lines
    without a tab separator are silently skipped.
    """
    loaded = []
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            pieces = stripped.split("\t", 1)
            if len(pieces) != 2:
                continue
            loaded.append((f"{prefix}{pieces[0]}", pieces[1]))
    return loaded
def sentence_to_bio(sent_id, text):
    """Turn a raw sentence into a syllable-level BIO sequence.

    The sentence is segmented into compound words by
    underthesea.word_tokenize (underscore-joined), then each word is broken
    back into syllables; the first syllable of a word is tagged B-W, the
    remainder I-W.

    Returns:
        (sent_id, text, bio_pairs) or None when tokenization yields nothing.
    """
    segmented = word_tokenize(text, format="text")
    if not segmented or not segmented.strip():
        return None
    words = segmented.strip().split()
    if not words:
        return None
    pairs = []
    for word in words:
        # Undo the underscore joining before splitting into syllables.
        for pos, syllable in enumerate(regex_tokenize(word.replace("_", " "))):
            pairs.append((syllable, "B-W" if pos == 0 else "I-W"))
    return (sent_id, text, pairs) if pairs else None
def stratified_split(domain_data, seed=42, dev_ratio=0.10, test_ratio=0.10):
    """Create a stratified train/dev/test split preserving domain proportions.

    Each domain is shuffled and sliced independently so every split keeps
    roughly the same domain mix; the resulting splits are then shuffled so
    domains are interleaved.

    Args:
        domain_data: dict of domain_name -> list of BIO sequences.
        seed: RNG seed for reproducible shuffling.
        dev_ratio: fraction of each domain assigned to dev (default 0.10,
            matching the module-level DEV_RATIO).
        test_ratio: fraction of each domain assigned to test (default 0.10,
            matching the module-level TEST_RATIO).

    Returns:
        (train, dev, test) lists of BIO sequences.
    """
    random.seed(seed)
    train, dev, test = [], [], []
    for domain_name, sequences in domain_data.items():
        shuffled = list(sequences)
        random.shuffle(shuffled)
        n = len(shuffled)
        if n == 0:
            # BUG FIX: previously max(1, ...) reported 1 dev / 1 test and a
            # negative train count for an empty domain; skip it instead.
            print(f" {domain_name}: 0 train / 0 dev / 0 test (total: 0)")
            continue
        n_dev = max(1, round(n * dev_ratio))
        n_test = max(1, round(n * test_ratio))
        # Clamp so a tiny domain can't produce a negative slice bound.
        n_train = max(0, n - n_dev - n_test)
        train.extend(shuffled[:n_train])
        dev.extend(shuffled[n_train:n_train + n_dev])
        test.extend(shuffled[n_train + n_dev:])
        print(f" {domain_name}: {n_train} train / {n_dev} dev / {n_test} test (total: {n})")
    # Shuffle each split so domains are interleaved
    random.shuffle(train)
    random.shuffle(dev)
    random.shuffle(test)
    return train, dev, test
def write_bio_file(sequences, filepath):
    """Write BIO sequences to *filepath* in VLSP format with comment headers.

    Each sentence is emitted as two '#' header lines (sent_id, text), one
    tab-separated syllable/tag line per token, and a trailing blank line.
    """
    with open(filepath, "w", encoding="utf-8") as out:
        buffered = []
        for sent_id, text, bio_pairs in sequences:
            buffered.append(f"# sent_id = {sent_id}\n")
            buffered.append(f"# text = {text}\n")
            buffered.extend(f"{syl}\t{tag}\n" for syl, tag in bio_pairs)
            buffered.append("\n")
        out.writelines(buffered)
    print(f" Saved {len(sequences)} sentences to {filepath}")
def validate_bio_file(filepath):
    """Validate BIO format constraints; return (errors, sentence_count).

    Checks that every token line has exactly two tab-separated fields, that
    tags are B-W or I-W, and that no sentence starts with I-W. Comment
    lines ('#') are skipped; blank lines delimit sentences.
    """
    valid_tags = ("B-W", "I-W")
    errors = 0
    sentence_count = 0
    in_sentence = False
    with open(filepath, "r", encoding="utf-8") as handle:
        for line_num, raw in enumerate(handle, 1):
            content = raw.rstrip("\n")
            if content == "":
                # A blank line closes the sentence in progress, if any.
                if in_sentence:
                    sentence_count += 1
                in_sentence = False
                continue
            if content[0] == "#":
                continue  # header/comment line
            fields = content.split("\t")
            if len(fields) != 2:
                print(f" ERROR line {line_num}: expected 2 tab-separated fields, got {len(fields)}")
                errors += 1
                continue
            _syllable, tag = fields
            if tag not in valid_tags:
                print(f" ERROR line {line_num}: invalid tag '{tag}'")
                errors += 1
            if tag == "I-W" and not in_sentence:
                # I-W cannot start a sentence
                print(f" ERROR line {line_num}: sentence starts with I-W")
                errors += 1
            in_sentence = True
    # Handle last sentence without trailing newline
    if in_sentence:
        sentence_count += 1
    if errors:
        print(f" FAIL: {sentence_count} sentences, {errors} errors")
    else:
        print(f" PASS: {sentence_count} sentences, no errors")
    return errors, sentence_count
def main():
    """Build, split, write, and validate the word segmentation dataset.

    Pipeline: load raw sentences per domain -> convert each to a BIO
    syllable sequence -> stratified 80/10/10 split -> write three output
    files -> validate the written files and print a sample.
    """
    # Intermediate files and outputs live one directory above this script.
    base_dir = dirname(dirname(__file__))

    # Load sentences from all domains
    print("Loading sentences from domain files...")
    domain_data = {}
    total_loaded = 0
    for domain, (filename, prefix) in DOMAIN_CONFIG.items():
        filepath = join(base_dir, filename)
        if not isfile(filepath):
            # BUG FIX: the message previously printed the literal "(unknown)"
            # instead of naming the missing file.
            print(f" WARNING: {filename} not found, skipping {domain}")
            continue
        sentences = load_sentences(filepath, prefix)
        # BUG FIX: same garbled placeholder fixed here — report the filename.
        print(f" {domain}: loaded {len(sentences)} sentences from {filename}")
        total_loaded += len(sentences)
        domain_data[domain] = sentences
    print(f" Total loaded: {total_loaded}")

    # Convert sentences to BIO format
    print("\nConverting sentences to BIO format...")
    domain_bio = {}
    total_converted = 0
    total_failed = 0
    for domain, sentences in domain_data.items():
        bio_sequences = []
        failed = 0
        for i, (sent_id, text) in enumerate(sentences):
            result = sentence_to_bio(sent_id, text)
            if result:
                bio_sequences.append(result)
            else:
                failed += 1
            # Progress heartbeat every 5000 sentences.
            if (i + 1) % 5000 == 0:
                print(f" {domain}: {i+1}/{len(sentences)} processed ({len(bio_sequences)} ok, {failed} failed)")
        domain_bio[domain] = bio_sequences
        total_converted += len(bio_sequences)
        total_failed += failed
        print(f" {domain}: {len(bio_sequences)} converted, {failed} failed")
    print(f" Total converted: {total_converted}, failed: {total_failed}")

    # Create stratified split
    print("\nCreating stratified 80/10/10 split...")
    train, dev, test = stratified_split(domain_bio)
    total = len(train) + len(dev) + len(test)
    print(f"\nSplit sizes:")
    print(f" Train: {len(train)} ({100*len(train)/total:.1f}%)")
    print(f" Dev: {len(dev)} ({100*len(dev)/total:.1f}%)")
    print(f" Test: {len(test)} ({100*len(test)/total:.1f}%)")
    print(f" Total: {total}")

    # Write output files
    print("\nWriting BIO files...")
    train_path = join(base_dir, "udd-ws-v1.1-train.txt")
    dev_path = join(base_dir, "udd-ws-v1.1-dev.txt")
    test_path = join(base_dir, "udd-ws-v1.1-test.txt")
    write_bio_file(train, train_path)
    write_bio_file(dev, dev_path)
    write_bio_file(test, test_path)

    # Validate output
    print("\nValidating output files...")
    for name, path in [("Train", train_path), ("Dev", dev_path), ("Test", test_path)]:
        print(f" {name} ({path}):")
        validate_bio_file(path)

    # Print sample
    print("\nSample output (first sentence from train):")
    if train:
        sent_id, text, bio_pairs = train[0]
        print(f" # sent_id = {sent_id}")
        print(f" # text = {text}")
        for syl, tag in bio_pairs:
            print(f" {syl}\t{tag}")
if __name__ == "__main__":
main()