# UDD-1 / src/build_dataset.py
# Commit f5d0a0d: Expand UDD-1 to 40K sentences across 5 domains
"""
Build the combined UDD-1 multi-domain dataset (40,000 sentences).
Reads sentence files from all domains, assigns domain-specific sent_id prefixes,
and creates stratified train/dev/test splits.
Domain mapping:
- sentences_vlc.txt -> prefix: vlc- (Legal)
- sentences_uvn.txt -> prefix: uvn- (News)
- sentences_uvw.txt -> prefix: uvw- (Wikipedia)
- sentences_uvb.txt -> prefix: uvb-f- (Fiction), uvb-n- (Non-fiction)
Output:
- sentences_train.txt (91.4%)
- sentences_dev.txt (4.3%)
- sentences_test.txt (4.3%)
Each line format: sent_id\tsentence
"""
import random
from os.path import dirname, isfile, join
# Split ratios (sum to 1.0). TRAIN_RATIO is informational only: the train
# split receives whatever remains after the dev and test carve-outs.
TRAIN_RATIO = 0.914
DEV_RATIO = 0.043
TEST_RATIO = 0.043
def load_sentences_with_prefix(filepath, prefix):
    """Read a tab-separated sentence file, prepending *prefix* to each id.

    Two line layouts are recognized:
      - ``idx\\tsentence``          -> ``(sent_id, sentence)``
      - ``idx\\tsource\\tsentence`` -> ``(sent_id, sentence, source)``
    Blank lines and lines with fewer than two fields are skipped.

    Returns:
        List of tuples in file order, with ``sent_id = prefix + idx``.
    """
    records = []
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            fields = raw.strip().split("\t")
            if len(fields) == 2:
                # Simple layout: idx<TAB>sentence
                idx, text = fields
                records.append((f"{prefix}{idx}", text))
            elif len(fields) >= 3:
                # Extended layout (sentences_uvb.txt): idx<TAB>source<TAB>sentence
                idx, origin, text = fields[0], fields[1], fields[2]
                records.append((f"{prefix}{idx}", text, origin))
    return records
def load_uvb_sentences(filepath):
    """Split UVB (books) sentences into fiction and non-fiction lists.

    Input lines look like ``idx\\tsource\\tsentence``. Original indices are
    discarded: each bucket is renumbered from 1 with its own prefix
    (``uvb-f-`` when source == "fiction", ``uvb-n-`` otherwise).

    Returns:
        (fiction, non_fiction) lists of (sent_id, sentence) tuples.
    """
    # Buckets keyed by the id tag used in the prefix.
    buckets = {"f": [], "n": []}
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            fields = raw.strip().split("\t")
            if len(fields) < 3:
                # Blank or malformed line — ignore.
                continue
            tag = "f" if fields[1] == "fiction" else "n"
            bucket = buckets[tag]
            bucket.append((f"uvb-{tag}-{len(bucket) + 1}", fields[2]))
    return buckets["f"], buckets["n"]
def stratified_split(domain_sentences, seed=42, dev_ratio=None, test_ratio=None):
    """Create stratified train/dev/test splits preserving domain proportions.

    Each domain is shuffled independently (with a seeded local RNG, so the
    global ``random`` state is untouched) and split so that roughly
    ``dev_ratio`` / ``test_ratio`` of its sentences land in dev / test;
    whatever remains goes to train.

    Args:
        domain_sentences: dict of domain_name -> list of (sent_id, sentence).
        seed: random seed for reproducibility.
        dev_ratio: fraction for the dev split (defaults to DEV_RATIO).
        test_ratio: fraction for the test split (defaults to TEST_RATIO).

    Returns:
        (train, dev, test) lists of (sent_id, sentence).
    """
    if dev_ratio is None:
        dev_ratio = DEV_RATIO
    if test_ratio is None:
        test_ratio = TEST_RATIO
    # Local RNG: same shuffle sequence as random.seed/random.shuffle,
    # without mutating the module-global random state.
    rng = random.Random(seed)
    train = []
    dev = []
    test = []
    for domain_name, sentences in domain_sentences.items():
        # Shuffle within each domain
        shuffled = list(sentences)
        rng.shuffle(shuffled)
        n = len(shuffled)
        # Guarantee at least one dev/test item per non-empty domain, but
        # never let the carve-outs exceed the domain size (tiny domains
        # previously produced a negative train count).
        n_dev = min(max(1, round(n * dev_ratio)), n)
        n_test = min(max(1, round(n * test_ratio)), n - n_dev)
        n_train = n - n_dev - n_test
        train.extend(shuffled[:n_train])
        dev.extend(shuffled[n_train:n_train + n_dev])
        test.extend(shuffled[n_train + n_dev:])
        print(f" {domain_name}: {n_train} train / {n_dev} dev / {n_test} test (total: {n})")
    return train, dev, test
def save_split(sentences, filepath):
    """Write (sent_id, sentence) pairs to *filepath*, tab-separated, one per line."""
    lines = [f"{sid}\t{text}\n" for sid, text in sentences]
    with open(filepath, "w", encoding="utf-8") as out:
        out.writelines(lines)
def main():
    """Build the combined multi-domain dataset and write train/dev/test files.

    Loads each domain's sentence file (skipping any that are missing),
    assigns domain-prefixed sent_ids, creates a stratified split, saves the
    three output files next to the sources, and reports per-domain stats.
    """
    base_dir = dirname(dirname(__file__))
    # Single-prefix source files and the sent_id prefix applied to each.
    sources = {
        "vlc": ("sentences_vlc.txt", "vlc-"),
        "uvn": ("sentences_uvn.txt", "uvn-"),
        "uvw": ("sentences_uvw.txt", "uvw-"),
    }
    # Load sentences from each domain
    domain_sentences = {}
    for domain, (filename, prefix) in sources.items():
        filepath = join(base_dir, filename)
        if not isfile(filepath):
            print(f"Warning: {filepath} not found, skipping {domain}")
            continue
        sents = load_sentences_with_prefix(filepath, prefix)
        # Keep only (sent_id, sentence); drop the optional source field.
        domain_sentences[domain] = [(s[0], s[1]) for s in sents]
        # BUG FIX: previously printed the literal placeholder "(unknown)".
        print(f"Loaded {len(domain_sentences[domain])} sentences from {filename}")
    # Load UVB (books) with fiction/non-fiction split
    uvb_filepath = join(base_dir, "sentences_uvb.txt")
    if isfile(uvb_filepath):
        fiction, non_fiction = load_uvb_sentences(uvb_filepath)
        domain_sentences["uvb-fiction"] = fiction
        domain_sentences["uvb-nonfiction"] = non_fiction
        print(f"Loaded {len(fiction)} fiction + {len(non_fiction)} non-fiction sentences from sentences_uvb.txt")
    else:
        print(f"Warning: {uvb_filepath} not found, skipping books domain")
    # Report totals
    total = sum(len(v) for v in domain_sentences.values())
    print(f"\nTotal sentences across all domains: {total}")
    if total == 0:
        # Nothing loaded: bail out early instead of dividing by zero below.
        print("No sentences found; nothing to split.")
        return
    # Create stratified split
    print("\nCreating stratified train/dev/test split...")
    train, dev, test = stratified_split(domain_sentences)
    print("\nSplit sizes:")
    print(f" Train: {len(train)} ({100*len(train)/total:.1f}%)")
    print(f" Dev: {len(dev)} ({100*len(dev)/total:.1f}%)")
    print(f" Test: {len(test)} ({100*len(test)/total:.1f}%)")
    print(f" Total: {len(train) + len(dev) + len(test)}")
    # Save splits
    save_split(train, join(base_dir, "sentences_train.txt"))
    save_split(dev, join(base_dir, "sentences_dev.txt"))
    save_split(test, join(base_dir, "sentences_test.txt"))
    print("\nSaved to:")
    print(f" {join(base_dir, 'sentences_train.txt')}")
    print(f" {join(base_dir, 'sentences_dev.txt')}")
    print(f" {join(base_dir, 'sentences_test.txt')}")
    # Report per-split composition, recovering each sentence's domain from
    # its sent_id prefix. Longer prefixes (uvb-f-/uvb-n-) are checked first.
    prefix_to_domain = (
        ("uvb-f-", "fiction"),
        ("uvb-n-", "non-fiction"),
        ("vlc-", "legal"),
        ("uvn-", "news"),
        ("uvw-", "wikipedia"),
    )
    print("\nDomain distribution per split:")
    for split_name, split_data in [("Train", train), ("Dev", dev), ("Test", test)]:
        domain_counts = {}
        for sent_id, _ in split_data:
            domain = next(
                (name for pfx, name in prefix_to_domain if sent_id.startswith(pfx)),
                "unknown",
            )
            domain_counts[domain] = domain_counts.get(domain, 0) + 1
        counts_str = ", ".join(f"{d}: {c}" for d, c in sorted(domain_counts.items()))
        print(f" {split_name}: {counts_str}")
# Script entry point: run the build only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()