#!/usr/bin/env python3
"""
Extract cognate pairs from 4 Tier 1 CLDF repositories.
Sources:
1. lexibank/iecor — IE-CoR (Indo-European Cognate Relationships)
Heggarty et al. 2024, Scientific Data (Nature)
License: CC-BY-4.0
2. lexibank/kitchensemitic — Kitchen et al. 2009, Proc. R. Soc. B
License: CC-BY-NC-4.0 (excluded at runtime; see main())
3. lexibank/robbeetstriangulation — Robbeets et al. 2021, Nature
License: CC-BY-4.0
4. lexibank/savelyevturkic — Savelyev & Robbeets 2020, J. Language Evolution
License: CC-BY-4.0
All data is extracted from the CLDF tables (languages, forms, cognates);
nothing is hardcoded.
Output: one 14-column TSV staging file per source, plus a combined file.
"""
import csv
import os
import sys
import unicodedata
from collections import defaultdict
from itertools import combinations
from pathlib import Path
# ── Sound-class mapping (simplified, after List 2012) ──
# Reference: List, J.-M. (2012). "SCA: Phonetic alignment based on sound classes."
# New Directions in Logic, Language, and Computation, Springer.
SCA_MAP = {
# Vowels (kept distinct as A/E/I/O/U, not merged into one class)
'a': 'A', 'e': 'E', 'i': 'I', 'o': 'O', 'u': 'U',
'ɑ': 'A', 'æ': 'A', 'ɐ': 'A', 'ə': 'E', 'ɛ': 'E',
'ɪ': 'I', 'ɨ': 'I', 'ɔ': 'O', 'ʊ': 'U', 'ʉ': 'U',
'ɯ': 'U', 'ø': 'O', 'œ': 'O', 'y': 'U', 'ɤ': 'O',
'ɒ': 'O', 'ʌ': 'A',
# Stops
'p': 'P', 'b': 'P', 't': 'T', 'd': 'T', 'k': 'K', 'g': 'K',
'q': 'K', 'ɢ': 'K', 'ʔ': 'H', 'c': 'K', 'ɟ': 'K',
'ʈ': 'T', 'ɖ': 'T',
# Fricatives
'f': 'P', 'v': 'P', 's': 'S', 'z': 'S', 'ʃ': 'S', 'ʒ': 'S',
'x': 'K', 'ɣ': 'K', 'h': 'H', 'ɦ': 'H', 'θ': 'T', 'ð': 'T',
'ç': 'K', 'ʝ': 'K', 'χ': 'K', 'ʁ': 'R', 'ħ': 'H', 'ʕ': 'H',
'ɸ': 'P', 'β': 'P', 'ʂ': 'S', 'ʐ': 'S',
# Nasals
'm': 'M', 'n': 'N', 'ŋ': 'N', 'ɲ': 'N', 'ɳ': 'N', 'ɴ': 'N',
# Liquids
'l': 'L', 'r': 'R', 'ɾ': 'R', 'ɹ': 'R', 'ɻ': 'R', 'ɬ': 'L',
'ɮ': 'L', 'ʎ': 'L', 'ɭ': 'L', 'ʟ': 'L',
# Glides
'w': 'W', 'j': 'Y', 'ʋ': 'W', 'ɰ': 'W',
# Affricates (common ones; collapsed into S in this simplified scheme)
'ʦ': 'S', 'ʧ': 'S', 'ʤ': 'S', 'ʣ': 'S',
}
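# Note the deliberate many-to-one collapse: voicing and place contrasts
# within a class are discarded, e.g. SCA_MAP['b'] == SCA_MAP['p'] == 'P',
# so a b/p correspondence costs nothing in the distance function below.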
def ipa_to_sca(ipa: str) -> str:
"""Convert IPA string to SCA encoding."""
if not ipa or ipa == '-':
return '-'
result = []
# NFC normalize
ipa = unicodedata.normalize('NFC', ipa)
for ch in ipa:
base = ch.lower()
cat = unicodedata.category(ch)
# Skip combining marks, suprasegmentals, brackets, whitespace
if cat.startswith('M') or ch in 'ˈˌːˑ[]/()\u0361\u035c' or cat == 'Zs':
continue
if base in SCA_MAP:
result.append(SCA_MAP[base])
# Skip unknown characters silently (diacritics, tone marks, etc.)
return ''.join(result)
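# Illustrative encodings (made-up IPA strings, not taken from the datasets):
#   ipa_to_sca('bruːdər')  -> 'PRUTER'   (length mark ː is skipped)
#   ipa_to_sca('ˈwʌtər')   -> 'WATER'    (stress mark ˈ is skipped)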
def sca_distance(sca_a: str, sca_b: str) -> float:
"""
Normalized SCA-weighted Levenshtein distance.
Returns similarity score in [0.0, 1.0].
Reference: List (2012), gap penalty = 0.5.
"""
if sca_a == '-' or sca_b == '-' or not sca_a or not sca_b:
return 0.0
n, m = len(sca_a), len(sca_b)
gap = 0.5
# DP matrix
dp = [[0.0] * (m + 1) for _ in range(n + 1)]
for i in range(n + 1):
dp[i][0] = i * gap
for j in range(m + 1):
dp[0][j] = j * gap
for i in range(1, n + 1):
for j in range(1, m + 1):
if sca_a[i-1] == sca_b[j-1]:
cost = 0.0
else:
cost = 1.0
dp[i][j] = min(
dp[i-1][j] + gap,
dp[i][j-1] + gap,
dp[i-1][j-1] + cost,
)
max_len = max(n, m)
if max_len == 0:
return 1.0
return round(1.0 - dp[n][m] / max_len, 4)
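# Worked example (hypothetical SCA strings):
#   sca_distance('PRUTER', 'PRATER') -> one substitution (U vs A), cost 1.0;
#   max length is 6, so the score is round(1.0 - 1.0/6, 4) = 0.8333.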
def load_cldf_source(repo_dir: str, source_name: str):
"""
Load a CLDF repo and extract cognate pairs.
Reads:
- cldf/languages.csv → language ID → ISO mapping
- cldf/forms.csv → form ID → (language, word, IPA, concept)
- cldf/cognates.csv → form ID → cognate set membership
Returns list of 14-column rows.
All data comes from the downloaded CSV files.
"""
cldf_dir = Path(repo_dir) / 'cldf'
# 1. Load languages: ID → ISO code
lang_map = {} # Language_ID → ISO
lang_names = {} # Language_ID → name
with open(cldf_dir / 'languages.csv', encoding='utf-8') as f:
for row in csv.DictReader(f):
lid = row['ID']
iso = row.get('ISO639P3code', '')
name = row.get('Name', '')
lang_map[lid] = iso if iso else lid # fall back to internal ID
lang_names[lid] = name
# 2. Load forms: Form_ID → metadata
forms = {} # Form_ID → dict
with open(cldf_dir / 'forms.csv', encoding='utf-8') as f:
for row in csv.DictReader(f):
fid = row['ID']
lid = row['Language_ID']
iso = lang_map.get(lid, lid)
            # Word = original orthographic form (Value column, falling back to Form)
word = row.get('Value', '') or row.get('Form', '')
# IPA resolution priority:
# 1. phon_form (phonetic transcription, e.g. IE-CoR)
# 2. Phonemic (phonemic transcription, e.g. IE-CoR)
# 3. Form (CLDF normalized form — used when Form ≠ Value,
# which indicates phonological encoding, e.g. Kitchen Semitic)
# 4. Value as fallback (if nothing else available)
phon = row.get('phon_form', '').strip()
phonemic = row.get('Phonemic', '').strip()
form_val = row.get('Form', '').strip()
value_val = row.get('Value', '').strip()
if phon:
ipa = phon
elif phonemic:
ipa = phonemic
elif form_val and form_val != value_val:
# Form differs from Value → likely a phonological encoding
ipa = form_val
else:
# Form == Value: use it but flag that IPA may be orthographic
ipa = form_val if form_val else value_val
concept = row.get('Parameter_ID', '')
forms[fid] = {
'iso': iso,
'word': word,
'ipa': ipa,
'concept': concept,
'lang_id': lid,
}
# 3. Load cognates: group forms by cognate set
cogsets = defaultdict(list) # Cognateset_ID → [(Form_ID, doubt)]
with open(cldf_dir / 'cognates.csv', encoding='utf-8') as f:
for row in csv.DictReader(f):
fid = row['Form_ID']
csid = row['Cognateset_ID']
doubt = row.get('Doubt', 'false')
if fid in forms:
cogsets[csid].append((fid, doubt))
# 4. Generate pairwise cognate pairs from cognate sets
pairs = []
seen = set()
for csid, members in cogsets.items():
if len(members) < 2:
continue
for (fid_a, doubt_a), (fid_b, doubt_b) in combinations(members, 2):
fa = forms[fid_a]
fb = forms[fid_b]
# Skip pairs from the same language
if fa['iso'] == fb['iso']:
continue
# Skip if missing IPA
if not fa['ipa'] or not fb['ipa']:
continue
# Canonical ordering (alphabetic by ISO)
if fa['iso'] > fb['iso']:
fa, fb = fb, fa
fid_a, fid_b = fid_b, fid_a
doubt_a, doubt_b = doubt_b, doubt_a
            # Dedup: keep only the first pair per (lang A, lang B, concept);
            # later cognate sets repeating the same triple are dropped.
key = (fa['iso'], fb['iso'], fa['concept'])
if key in seen:
continue
seen.add(key)
# SCA encoding and scoring
sca_a = ipa_to_sca(fa['ipa'])
sca_b = ipa_to_sca(fb['ipa'])
score = sca_distance(sca_a, sca_b)
# Confidence: "certain" if neither is doubtful
if doubt_a == 'true' or doubt_b == 'true':
confidence = 'doubtful'
else:
confidence = 'certain'
pairs.append({
'Lang_A': fa['iso'],
'Word_A': fa['word'],
'IPA_A': fa['ipa'],
'Lang_B': fb['iso'],
'Word_B': fb['word'],
'IPA_B': fb['ipa'],
'Concept_ID': fa['concept'],
'Relationship': 'expert_cognate',
'Score': str(score),
'Source': source_name,
'Relation_Detail': f'cognateset_{csid}',
'Donor_Language': '-',
'Confidence': confidence,
'Source_Record_ID': f'{source_name}:{csid}:{fid_a}+{fid_b}',
})
return pairs
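# Shape of each returned row (values are illustrative only, not real data;
# the score matches the worked example above):
#   {'Lang_A': 'deu', 'Word_A': 'Bruder', 'IPA_A': 'bruːdər',
#    'Lang_B': 'eng', 'Word_B': 'brother', 'IPA_B': 'brʌðər',
#    'Relationship': 'expert_cognate', 'Score': '0.8333', ...}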
def write_staging_tsv(pairs, output_path):
"""Write pairs to 14-column TSV staging file."""
COLUMNS = [
'Lang_A', 'Word_A', 'IPA_A', 'Lang_B', 'Word_B', 'IPA_B',
'Concept_ID', 'Relationship', 'Score', 'Source',
'Relation_Detail', 'Donor_Language', 'Confidence', 'Source_Record_ID',
]
with open(output_path, 'w', encoding='utf-8', newline='') as f:
writer = csv.DictWriter(f, fieldnames=COLUMNS, delimiter='\t',
extrasaction='ignore')
writer.writeheader()
for pair in pairs:
writer.writerow(pair)
print(f' Wrote {len(pairs):,} pairs to {output_path}')
def main():
base = Path(__file__).parent.parent / 'sources_tier1'
staging = Path(__file__).parent.parent / 'staging_tier1'
staging.mkdir(exist_ok=True)
# NOTE: kitchensemitic EXCLUDED — license is CC-BY-NC-4.0, incompatible
# with our dataset's CC-BY-SA-4.0 license. Flagged by adversarial audit.
sources = [
('iecor', 'iecor'),
# ('kitchensemitic', 'kitchensemitic'), # EXCLUDED: CC-BY-NC-4.0
('robbeetstriangulation', 'robbeetstriangulation'),
('savelyevturkic', 'savelyevturkic'),
]
all_pairs = []
for repo_name, source_name in sources:
repo_dir = base / repo_name
if not repo_dir.exists():
print(f'SKIP: {repo_dir} not found')
continue
print(f'\nExtracting from {repo_name}...')
pairs = load_cldf_source(str(repo_dir), source_name)
# Write per-source staging file
write_staging_tsv(pairs, staging / f'cognate_pairs_{source_name}.tsv')
# Stats
langs = set()
for p in pairs:
langs.add(p['Lang_A'])
langs.add(p['Lang_B'])
certain = sum(1 for p in pairs if p['Confidence'] == 'certain')
doubtful = sum(1 for p in pairs if p['Confidence'] == 'doubtful')
print(f' Languages: {len(langs)}')
print(f' Certain: {certain:,}, Doubtful: {doubtful:,}')
all_pairs.extend(pairs)
# Write combined staging file
write_staging_tsv(all_pairs, staging / 'cognate_pairs_tier1_combined.tsv')
# Summary
all_langs = set()
for p in all_pairs:
all_langs.add(p['Lang_A'])
all_langs.add(p['Lang_B'])
    print('\n=== TOTAL ===')
print(f'Total pairs: {len(all_pairs):,}')
print(f'Total languages: {len(all_langs)}')
print(f'Certain: {sum(1 for p in all_pairs if p["Confidence"] == "certain"):,}')
print(f'Doubtful: {sum(1 for p in all_pairs if p["Confidence"] == "doubtful"):,}')
if __name__ == '__main__':
main()