"""
Build the combined UDD-1 multi-domain dataset (40,000 sentences).
Reads sentence files from all domains, assigns domain-specific sent_id prefixes,
and creates stratified train/dev/test splits.
Domain mapping:
- sentences_vlc.txt -> prefix: vlc- (Legal)
- sentences_uvn.txt -> prefix: uvn- (News)
- sentences_uvw.txt -> prefix: uvw- (Wikipedia)
- sentences_uvb.txt -> prefix: uvb-f- (Fiction), uvb-n- (Non-fiction)
Output:
- sentences_train.txt (91.4%)
- sentences_dev.txt (4.3%)
- sentences_test.txt (4.3%)
Each line format: sent_id\tsentence
"""
import random
from os.path import dirname, isfile, join
# Stratified split ratios, applied per domain; they sum to 1.0.
# TRAIN_RATIO is informational only — the split code derives the train
# count as n - n_dev - n_test rather than reading it directly.
TRAIN_RATIO = 0.914
DEV_RATIO = 0.043  # ~4.3% held out for development
TEST_RATIO = 0.043  # ~4.3% held out for final evaluation
def load_sentences_with_prefix(filepath, prefix):
    """Read a tab-separated sentence file, attaching *prefix* to each index.

    Two line layouts are recognized:
      * ``idx\\tsentence``          -> appended as ``(sent_id, sentence)``
      * ``idx\\tsource\\tsentence``   -> appended as ``(sent_id, sentence, source)``
        (the 3-field layout is used by sentences_uvb.txt)

    Blank lines and lines with fewer than two fields are skipped.
    Returns the list of tuples in file order.
    """
    loaded = []
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue
            fields = raw.split("\t")
            if len(fields) == 2:
                # idx \t sentence
                loaded.append((f"{prefix}{fields[0]}", fields[1]))
            elif len(fields) >= 3:
                # idx \t source \t sentence — keep source as trailing element
                loaded.append((f"{prefix}{fields[0]}", fields[2], fields[1]))
    return loaded
def load_uvb_sentences(filepath):
    """Read UVB book sentences, separating fiction from non-fiction.

    Input lines are ``idx\\tsource\\tsentence``; lines with fewer than three
    fields (and blank lines) are skipped. A line whose source field equals
    ``"fiction"`` gets a ``uvb-f-<k>`` id, anything else gets ``uvb-n-<k>``,
    where ``<k>`` is a 1-based running counter per category.

    Returns:
        (fiction, non_fiction) lists of (sent_id, sentence) tuples.
    """
    fiction, non_fiction = [], []
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            fields = raw.strip().split("\t")
            if len(fields) < 3:
                # Covers blank lines too: "".split("\t") -> [""]
                continue
            is_fiction = fields[1] == "fiction"
            bucket = fiction if is_fiction else non_fiction
            tag = "uvb-f" if is_fiction else "uvb-n"
            bucket.append((f"{tag}-{len(bucket) + 1}", fields[2]))
    return fiction, non_fiction
def stratified_split(domain_sentences, seed=42, dev_ratio=None, test_ratio=None):
    """Create stratified train/dev/test split preserving domain proportions.

    Each domain is shuffled independently and sliced into train/dev/test so
    that every domain contributes roughly the same fractions to each split.

    Args:
        domain_sentences: dict of domain_name -> list of (sent_id, sentence)
        seed: random seed for reproducibility
        dev_ratio: fraction held out for dev; defaults to module DEV_RATIO
        test_ratio: fraction held out for test; defaults to module TEST_RATIO

    Returns:
        train, dev, test lists of (sent_id, sentence)
    """
    if dev_ratio is None:
        dev_ratio = DEV_RATIO
    if test_ratio is None:
        test_ratio = TEST_RATIO
    random.seed(seed)
    train, dev, test = [], [], []
    for domain_name, sentences in domain_sentences.items():
        # Shuffle a copy so the caller's lists are left untouched.
        shuffled = list(sentences)
        random.shuffle(shuffled)
        n = len(shuffled)
        # At least one dev/test item per non-empty domain.
        n_dev = max(1, round(n * dev_ratio)) if n else 0
        n_test = max(1, round(n * test_ratio)) if n else 0
        # BUG FIX: clamp so tiny domains never produce a negative train
        # count (previously n == 1 gave n_train == -1, which mislabeled
        # the slices and printed wrong counts).
        if n_dev + n_test > n:
            n_dev = min(n_dev, n)
            n_test = n - n_dev
        n_train = n - n_dev - n_test
        train.extend(shuffled[:n_train])
        dev.extend(shuffled[n_train:n_train + n_dev])
        test.extend(shuffled[n_train + n_dev:])
        print(f" {domain_name}: {n_train} train / {n_dev} dev / {n_test} test (total: {n})")
    return train, dev, test
def save_split(sentences, filepath):
    """Write (sent_id, sentence) pairs to *filepath*, one tab-separated pair per line."""
    lines = [f"{sent_id}\t{sentence}\n" for sent_id, sentence in sentences]
    with open(filepath, "w", encoding="utf-8") as out:
        out.writelines(lines)
def main():
    """Build the combined multi-domain dataset and write stratified splits.

    Reads per-domain sentence files from the directory above this script,
    assigns domain-prefixed sent_ids, splits each domain with the module
    ratios, writes sentences_{train,dev,test}.txt, and prints a summary.
    Missing input files are skipped with a warning (best-effort).
    """
    base_dir = dirname(dirname(__file__))
    # Simple two-column domains: filename -> sent_id prefix.
    sources = {
        "vlc": ("sentences_vlc.txt", "vlc-"),
        "uvn": ("sentences_uvn.txt", "uvn-"),
        "uvw": ("sentences_uvw.txt", "uvw-"),
    }
    domain_sentences = {}
    for domain, (filename, prefix) in sources.items():
        filepath = join(base_dir, filename)
        if not isfile(filepath):
            print(f"Warning: {filepath} not found, skipping {domain}")
            continue
        sents = load_sentences_with_prefix(filepath, prefix)
        # Keep only (sent_id, sentence); drop any trailing source field.
        domain_sentences[domain] = [(s[0], s[1]) for s in sents]
        # BUG FIX: report the actual source filename (the message previously
        # printed the literal placeholder text "(unknown)").
        print(f"Loaded {len(domain_sentences[domain])} sentences from {filename}")
    # UVB (books) uses a three-column format and is split fiction/non-fiction.
    uvb_filepath = join(base_dir, "sentences_uvb.txt")
    if isfile(uvb_filepath):
        fiction, non_fiction = load_uvb_sentences(uvb_filepath)
        domain_sentences["uvb-fiction"] = fiction
        domain_sentences["uvb-nonfiction"] = non_fiction
        print(f"Loaded {len(fiction)} fiction + {len(non_fiction)} non-fiction sentences from sentences_uvb.txt")
    else:
        print(f"Warning: {uvb_filepath} not found, skipping books domain")
    total = sum(len(v) for v in domain_sentences.values())
    print(f"\nTotal sentences across all domains: {total}")
    print("\nCreating stratified train/dev/test split...")
    train, dev, test = stratified_split(domain_sentences)
    print("\nSplit sizes:")
    print(f" Train: {len(train)} ({100*len(train)/total:.1f}%)")
    print(f" Dev: {len(dev)} ({100*len(dev)/total:.1f}%)")
    print(f" Test: {len(test)} ({100*len(test)/total:.1f}%)")
    print(f" Total: {len(train) + len(dev) + len(test)}")
    save_split(train, join(base_dir, "sentences_train.txt"))
    save_split(dev, join(base_dir, "sentences_dev.txt"))
    save_split(test, join(base_dir, "sentences_test.txt"))
    print("\nSaved to:")
    print(f" {join(base_dir, 'sentences_train.txt')}")
    print(f" {join(base_dir, 'sentences_dev.txt')}")
    print(f" {join(base_dir, 'sentences_test.txt')}")
    # Per-split domain distribution, recovered from sent_id prefixes.
    # uvb-f-/uvb-n- are checked before no other prefix can shadow them,
    # so lookup order is safe.
    prefix_to_domain = [
        ("vlc-", "legal"),
        ("uvn-", "news"),
        ("uvw-", "wikipedia"),
        ("uvb-f-", "fiction"),
        ("uvb-n-", "non-fiction"),
    ]
    print("\nDomain distribution per split:")
    for split_name, split_data in [("Train", train), ("Dev", dev), ("Test", test)]:
        domain_counts = {}
        for sent_id, _ in split_data:
            domain = next(
                (name for pref, name in prefix_to_domain if sent_id.startswith(pref)),
                "unknown",
            )
            domain_counts[domain] = domain_counts.get(domain, 0) + 1
        counts_str = ", ".join(f"{d}: {c}" for d, c in sorted(domain_counts.items()))
        print(f" {split_name}: {counts_str}")


if __name__ == "__main__":
    main()