Datasets:
#!/usr/bin/env python3
"""Parser for the UT Austin LRC (Linguistics Research Center) PIE lexicon.
Extracts Proto-Indo-European etyma with reconstructed forms and glosses
from the online lexicon at https://lrc.la.utexas.edu/lex
The LRC presents a multi-page index of PIE roots organized alphabetically,
with each root linking to a detail page.
Uses only stdlib (urllib, html.parser, re).
"""
from __future__ import annotations

import logging
import re
import urllib.error
import urllib.request
from html.parser import HTMLParser
from typing import Any

logger = logging.getLogger(__name__)

class LRCIndexParser(HTMLParser):
    """Parse the LRC lexicon index page to extract root links and glosses."""

    def __init__(self) -> None:
        super().__init__()
        self.in_link = False
        self.link_href = ""
        self.link_text = ""
        self.entries: list[dict] = []
        self.in_table = False
        self.in_cell = False
        self.cell_text = ""
        self.current_row: list[str] = []
        self.rows: list[list[str]] = []

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        attr_dict = dict(attrs)
        if tag == "a":
            href = attr_dict.get("href", "")
            if href:
                self.in_link = True
                self.link_href = href
                self.link_text = ""
        elif tag == "table":
            self.in_table = True
        elif tag == "tr" and self.in_table:
            self.current_row = []
        elif tag in ("td", "th") and self.in_table:
            self.in_cell = True
            self.cell_text = ""

    def handle_endtag(self, tag: str) -> None:
        if tag == "a" and self.in_link:
            self.in_link = False
            text = self.link_text.strip()
            if text:
                self.entries.append({
                    "text": text,
                    "href": self.link_href,
                })
        elif tag == "table":
            self.in_table = False
        elif tag == "tr" and self.in_table:
            if self.current_row:
                self.rows.append(self.current_row)
        elif tag in ("td", "th") and self.in_cell:
            self.in_cell = False
            self.current_row.append(self.cell_text.strip())

    def handle_data(self, data: str) -> None:
        if self.in_link:
            self.link_text += data
        if self.in_cell:
            self.cell_text += data
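
# Quick illustration on synthetic markup (the live LRC page layout may
# differ; this only shows how table rows are accumulated):
#
#     p = LRCIndexParser()
#     p.feed('<table><tr><td>*bher-</td><td>to carry</td></tr></table>')
#     p.rows   # [['*bher-', 'to carry']]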

def _clean_pie_form(form: str) -> str:
    """Clean a PIE reconstructed form."""
    form = form.strip()
    # Remove leading asterisk (reconstruction marker)
    form = re.sub(r"^\*+", "", form)
    # Remove trailing punctuation
    form = re.sub(r"[.,;:]+$", "", form)
    # Remove parenthetical variants
    form = re.sub(r"\s*\([^)]*\)\s*$", "", form)
    return form.strip()
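
# Illustrative inputs and outputs (hypothetical forms, not taken from the site):
#     _clean_pie_form("*bher-")             -> "bher-"
#     _clean_pie_form("**wed- (also wod-)") -> "wed-"
#     _clean_pie_form("ghel-;")             -> "ghel-"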

def _extract_from_index_page(html: str) -> list[dict]:
    """Extract entries from the LRC index page HTML."""
    parser = LRCIndexParser()
    parser.feed(html)
    entries: list[dict] = []

    # Strategy 1: extract from table rows.
    for row in parser.rows:
        if len(row) < 2:
            continue
        # Skip header rows. Match whole cells, so a gloss that merely
        # contains a word like "root" is not thrown away.
        if any(c.lower() in ("root", "meaning", "gloss", "#") for c in row):
            continue
        form = _clean_pie_form(row[0])
        gloss = row[1].strip()
        # Sometimes the gloss is in column 2 (after a POS column).
        if len(row) >= 3 and not gloss:
            gloss = row[2].strip()
        if form and gloss and len(form) < 50:
            entries.append({
                "word": form,
                "transliteration": form,
                "gloss": gloss,
            })
    # Strategy 2: extract from link text patterns. Try the quoted-gloss
    # patterns first, so quotes are stripped from the gloss instead of
    # being swallowed by the more permissive dash pattern.
    if not entries:
        for link_entry in parser.entries:
            text = link_entry["text"]
            # Patterns: "*root 'gloss'", '*root "gloss"', or "*root - gloss"
            m = (
                re.match(r"^\*?(.+?)\s+'(.+?)'", text)
                or re.match(r"^\*?(.+?)\s+\"(.+?)\"", text)
                or re.match(r"^\*?(.+?)\s+[-–—]\s+(.+)$", text)
            )
            if m:
                form = _clean_pie_form(m.group(1))
                gloss = m.group(2).strip()
                if form and gloss:
                    entries.append({
                        "word": form,
                        "transliteration": form,
                        "gloss": gloss,
                    })
    # Strategy 3: regex fallback on the raw text.
    if not entries:
        text = re.sub(r"<[^>]+>", " ", html)
        # Pattern: *form 'meaning'
        for m in re.finditer(
            r"\*([a-zA-ZÀ-žḀ-ỿəɛɪɔʊ\-]+)"
            r"\s+['\"]?([A-Za-z][A-Za-z\s,'-]{2,60}?)['\"]?"
            r"(?=[,;.\n]|$)",
            text,
        ):
            form = _clean_pie_form(m.group(1))
            gloss = m.group(2).strip()
            gloss = re.sub(r"[,;:\s]+$", "", gloss)
            if form and gloss and len(form) < 40:
                entries.append({
                    "word": form,
                    "transliteration": form,
                    "gloss": gloss,
                })
    return entries
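
# Shapes each strategy is meant to handle (hypothetical examples, since the
# exact LRC markup is not guaranteed):
#     Strategy 1: <tr><td>*bher-</td><td>to carry</td></tr>
#     Strategy 2: <a href="/lex/b">*bher- 'to carry'</a>
#     Strategy 3: plain text such as: *bher 'to carry', *wed 'water'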

def _fetch_page(url: str) -> str:
    """Fetch a single page, returning an empty string on failure."""
    try:
        req = urllib.request.Request(url, headers={"User-Agent": "PhaiPhon/1.0"})
        with urllib.request.urlopen(req, timeout=30) as resp:
            return resp.read().decode("utf-8", errors="replace")
    except (urllib.error.URLError, urllib.error.HTTPError, OSError) as exc:
        logger.warning("LRC: failed to download %s: %s", url, exc)
        return ""

def parse(url: str, **kwargs: Any) -> list[dict]:
    """Download and parse the LRC PIE lexicon.

    Args:
        url: Base URL to the LRC lexicon, e.g.
            https://lrc.la.utexas.edu/lex
        **kwargs:
            max_pages: Maximum number of subpages to fetch. Defaults to a
                value high enough that every discovered pagination link is
                followed.

    Returns:
        List of dicts with keys: word, transliteration, gloss.
        Returns an empty list if the URL is unreachable.
    """
    # Follow all pagination links by default; callers can cap the crawl
    # by passing a smaller max_pages.
    max_pages = kwargs.get("max_pages", 9999)

    logger.info("LRC: downloading index from %s", url)
    html = _fetch_page(url)
    if not html:
        return []
    entries = _extract_from_index_page(html)
    # If the index page has pagination links, follow them.
    # Look for page links like ?page=2, /lex/b, or /lex?letter=b.
    page_pattern = re.compile(
        r'href="([^"]*(?:page=\d+|/lex/[a-z]|/lex\?letter=[a-z])[^"]*)"',
        re.IGNORECASE,
    )
    subpage_urls: set[str] = set()
    for m in page_pattern.finditer(html):
        href = m.group(1)
        # Make the URL absolute.
        if href.startswith("/"):
            # Extract the base domain.
            domain_match = re.match(r"(https?://[^/]+)", url)
            if domain_match:
                href = domain_match.group(1) + href
        elif href.startswith("?"):
            href = url.rstrip("/") + href
        elif not href.startswith("http"):
            href = url.rstrip("/") + "/" + href
        subpage_urls.add(href)
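
    # Note: the branching above hand-rolls URL resolution. urllib.parse.urljoin
    # covers the same cases (with slightly different handling of bare relative
    # paths), so the loop could also be written roughly as:
    #     from urllib.parse import urljoin
    #     subpage_urls = {urljoin(url, m.group(1)) for m in page_pattern.finditer(html)}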
    # Fetch subpages (up to max_pages).
    pages_fetched = 0
    for subpage_url in sorted(subpage_urls):
        if pages_fetched >= max_pages:
            break
        logger.info("LRC: downloading subpage %s", subpage_url)
        subpage_html = _fetch_page(subpage_url)
        if subpage_html:
            sub_entries = _extract_from_index_page(subpage_html)
            entries.extend(sub_entries)
        pages_fetched += 1
    # Deduplicate on (word, gloss).
    seen: set[tuple[str, str]] = set()
    unique: list[dict] = []
    for e in entries:
        key = (e["word"], e["gloss"])
        if key not in seen:
            seen.add(key)
            unique.append(e)

    logger.info("LRC: extracted %d unique entries total", len(unique))
    return unique
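
# Example of the returned shape (illustrative data, not actual LRC content):
#     [{"word": "bher-", "transliteration": "bher-", "gloss": "to carry"}, ...]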
if __name__ == "__main__":
import sys
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
test_url = (
sys.argv[1] if len(sys.argv) > 1
else "https://lrc.la.utexas.edu/lex"
)
results = parse(test_url)
print(f"\nExtracted {len(results)} entries:")
for entry in results[:15]:
print(f" *{entry['word']:25s} {entry['gloss']}")
if len(results) > 15:
print(f" ... and {len(results) - 15} more")