|
|
|
|
|
""" |
|
|
geonames_fetch.py — Fetch countries, provinces, and cities from GeoNames. |
|
|
|
|
|
Outputs JSON files usable by csv_repair.py's real city mode: |
|
|
- report/countries.json (list of country names) |
|
|
- report/provinces_by_country.json (country -> [admin1 names]) |
|
|
- report/cities_by_country.json (country -> [top city names by population]) |
|
|
|
|
|
Sources (preferring official GeoNames dumps for reliability): |
|
|
- Countries: https://www.geonames.org/countries/ (HTML) or |
|
|
http://download.geonames.org/export/dump/countryInfo.txt |
|
|
- Provinces: http://download.geonames.org/export/dump/admin1CodesASCII.txt |
|
|
- Cities: http://download.geonames.org/export/dump/cities500.txt |
|
|
|
|
|
Usage examples: |
|
|
python geonames_fetch.py --outdir report |
|
|
python geonames_fetch.py --outdir report --max-cities 30 |
|
|
python geonames_fetch.py --prefer-html-countries |
|
|
""" |
|
|
|
|
|
from __future__ import annotations |
|
|
|
|
|
import argparse |
|
|
import json |
|
|
import os |
|
|
import re |
|
|
from typing import Dict, List, Tuple |
|
|
|
|
|
import urllib.request |
|
|
import io |
|
|
import zipfile |
|
|
import unicodedata |
|
|
from html.parser import HTMLParser |
|
|
from typing import Optional |
|
|
|
|
|
|
|
|
# GeoNames endpoints. The download.geonames.org dumps are tab-separated text
# files; the HTML countries page is only used as an optional alternate source.
COUNTRIES_HTML_URL = "https://www.geonames.org/countries/"
COUNTRYINFO_URL = "http://download.geonames.org/export/dump/countryInfo.txt"
ADMIN1_URL = "http://download.geonames.org/export/dump/admin1CodesASCII.txt"
CITIES500_URL = "http://download.geonames.org/export/dump/cities500.txt"
# Fallback archive used when the plain-text cities dump cannot be fetched.
CITIES500_ZIP_URL = "http://download.geonames.org/export/dump/cities500.zip"
|
|
|
|
|
|
|
|
def fetch_url(url: str, timeout: int = 30) -> bytes:
    """Download *url* and return the raw response body as bytes.

    Network errors and timeouts propagate to the caller.
    """
    response = urllib.request.urlopen(url, timeout=timeout)
    try:
        return response.read()
    finally:
        response.close()
|
|
|
|
|
|
|
|
def parse_countries_from_html(html: str) -> List[str]:
    """Extract country names from the GeoNames countries overview page.

    Scans anchor tags whose href looks like a two-letter country path; if
    that yields suspiciously few hits (< 50), additionally scans
    ``/countries/...`` links. Returns a de-duplicated list sorted
    case-insensitively.
    """
    found: List[str] = []

    def _collect(pattern: str) -> None:
        # Append each non-empty, previously unseen link text.
        for match in re.finditer(pattern, html, re.IGNORECASE):
            text = match.group(1).strip()
            if text and text not in found:
                found.append(text)

    _collect(r"<a[^>]+href=\"/\w{2}/?[^\"]*\"[^>]*>([^<]+)</a>")
    if len(found) < 50:
        # Primary pattern looks incomplete — try the /countries/ style links.
        _collect(r"<a[^>]+href=\"/countries/[^\"]+\"[^>]*>([^<]+)</a>")

    return sorted(set(found), key=lambda s: s.casefold())
|
|
|
|
|
|
|
|
def parse_countryinfo(text: str) -> Tuple[List[str], Dict[str, str]]:
    """Parse countryInfo.txt into (sorted names, ISO2 -> name mapping).

    Comment lines (leading ``#``) and rows with fewer than five
    tab-separated fields are skipped. Column 0 holds the ISO2 code and
    column 4 the country name. The returned name list is de-duplicated and
    sorted case-insensitively.
    """
    names: List[str] = []
    iso2_to_name: Dict[str, str] = {}
    for raw in text.splitlines():
        if not raw or raw.startswith("#"):
            continue
        fields = raw.split("\t")
        if len(fields) < 5:
            continue
        code = fields[0].strip()
        country = fields[4].strip()
        if code and country:
            iso2_to_name[code] = country
            names.append(country)
    return sorted(set(names), key=lambda s: s.casefold()), iso2_to_name
|
|
|
|
|
|
|
|
def parse_admin1(text: str, iso2_to_name: Dict[str, str]) -> Dict[str, List[str]]:
    """Group admin1CodesASCII.txt subdivision names by country name.

    Each row needs at least four tab-separated fields; column 0 is a code
    of the form ``CC.XX`` and column 1 the subdivision name. Codes whose
    ISO2 prefix is not present in *iso2_to_name* are ignored. Per-country
    lists are de-duplicated and sorted case-insensitively.
    """
    grouped: Dict[str, List[str]] = {}
    for row in text.splitlines():
        if not row:
            continue
        cols = row.split("\t")
        if len(cols) < 4:
            continue
        code = cols[0].strip()
        division = cols[1].strip()
        if "." not in code:
            continue
        country = iso2_to_name.get(code.split(".", 1)[0])
        if not country:
            continue
        grouped.setdefault(country, []).append(division)

    return {
        country: sorted(set(divisions), key=lambda s: s.casefold())
        for country, divisions in grouped.items()
    }
|
|
|
|
|
|
|
|
def _slugify_country(name: str) -> str:
    """Turn a country name into a URL slug (lowercase ASCII, hyphenated).

    Accents are stripped via NFKD decomposition, everything outside
    ``[a-z0-9]``, whitespace, and hyphens is removed, and runs of
    whitespace collapse into single hyphens.
    """
    decomposed = unicodedata.normalize("NFKD", name)
    ascii_text = "".join(ch for ch in decomposed if not unicodedata.combining(ch)).lower()
    cleaned = re.sub(r"[^a-z0-9\s-]", "", ascii_text)
    return re.sub(r"\s+", "-", cleaned).strip("-")
|
|
|
|
|
|
|
|
class _SubdivTableParser(HTMLParser):
    """Collect the cell text of the GeoNames ``<table id="subdivtable1">``.

    Nesting depth is tracked so that tables nested inside the target table
    do not end it prematurely. Cell text is whitespace-collapsed; rows whose
    cells are all empty are dropped. Parsed rows accumulate in ``self.rows``.
    """

    def __init__(self) -> None:
        super().__init__()
        self.in_target_table = False    # currently inside subdivtable1
        self.target_table_depth = 0     # <table> nesting depth within it
        self.in_tr = False
        self.in_cell = False
        self.current_cell_text: List[str] = []
        self.current_row: List[str] = []
        self.rows: List[List[str]] = []

    def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]):
        kind = tag.lower()
        if kind == "table":
            table_id = ""
            for key, value in attrs:
                if key.lower() == "id":
                    table_id = value or ""  # last id attribute wins
            if table_id.lower() == "subdivtable1":
                self.in_target_table = True
                self.target_table_depth = 1
            elif self.in_target_table:
                # A nested table inside the target — deepen, don't restart.
                self.target_table_depth += 1
            return
        if not self.in_target_table:
            return
        if kind == "tr":
            self.in_tr = True
            self.current_row = []
        elif kind in ("td", "th"):
            self.in_cell = True
            self.current_cell_text = []

    def handle_endtag(self, tag: str):
        kind = tag.lower()
        if kind == "table":
            if self.in_target_table:
                self.target_table_depth -= 1
                if self.target_table_depth <= 0:
                    self.in_target_table = False
            return
        if not self.in_target_table:
            return
        if kind in ("td", "th") and self.in_cell:
            # Collapse internal whitespace runs into single spaces.
            merged = re.sub(r"\s+", " ", "".join(self.current_cell_text).strip())
            self.current_row.append(merged)
            self.in_cell = False
            self.current_cell_text = []
        elif kind == "tr" and self.in_tr:
            if any(cell.strip() for cell in self.current_row):
                self.rows.append(self.current_row)
            self.in_tr = False
            self.current_row = []

    def handle_data(self, data: str):
        if self.in_target_table and self.in_cell:
            self.current_cell_text.append(data)
|
|
|
|
|
|
|
|
def parse_admin_divisions_html(html: str) -> List[str]:
    """Extract subdivision names from a GeoNames administrative-division page.

    Feeds the page through ``_SubdivTableParser``, locates the header row
    containing "Name of Subdivision" (defaulting to the first row), finds
    that column, and collects the values beneath it. Returns a de-duplicated,
    case-insensitively sorted list; empty when the table or column is absent.
    """
    table = _SubdivTableParser()
    table.feed(html)
    if not table.rows:
        return []

    # Find the header row among the first few rows; default to row 0.
    header_row = 0
    for idx, cells in enumerate(table.rows[:5]):
        if "name of subdivision" in " ".join(cells).lower():
            header_row = idx
            break

    # Locate the subdivision-name column within the header row.
    name_col = None
    for pos, heading in enumerate(table.rows[header_row]):
        if "name of subdivision" in heading.strip().lower():
            name_col = pos
            break
    if name_col is None:
        return []

    collected: List[str] = []
    for cells in table.rows[header_row + 1:]:
        if name_col >= len(cells):
            continue
        value = cells[name_col].strip()
        # Skip blanks and placeholder cells made only of dashes/whitespace.
        if value and not re.fullmatch(r"[-\s]*", value):
            collected.append(value)

    return sorted(set(collected), key=lambda s: s.casefold())
|
|
|
|
|
|
|
|
def fetch_provinces_from_html(iso2_to_name: Dict[str, str]) -> Dict[str, List[str]]:
    """Scrape each country's administrative-division page for subdivisions.

    Best-effort: any network or parse failure for a country results in an
    empty list for that country rather than aborting the whole run.
    """
    provinces: Dict[str, List[str]] = {}
    for iso2, country in iso2_to_name.items():
        slug = _slugify_country(country)
        url = f"https://www.geonames.org/{iso2}/administrative-division-{slug}.html"
        try:
            page = fetch_url(url).decode("utf-8", errors="replace")
            provinces[country] = parse_admin_divisions_html(page)
        except Exception:
            # Deliberate best-effort: missing or odd pages yield empty lists.
            provinces[country] = []
    return provinces
|
|
|
|
|
|
|
|
def parse_cities500(text: str, iso2_to_name: Dict[str, str], max_cities: int) -> Dict[str, List[str]]:
    """Group cities500.txt rows by country, keeping the top N by population.

    Rows need at least 15 tab-separated fields: name at index 1, ISO2
    country code at index 8, population at index 14 (blank or non-numeric
    populations count as 0). Unknown country codes are skipped. Within a
    country, duplicate city names are dropped and population ties keep
    their original file order (stable sort).
    """
    ranked: Dict[str, List[Tuple[int, str]]] = {}
    for row in text.splitlines():
        if not row or row.startswith("#"):
            continue
        cols = row.split("\t")
        if len(cols) < 15:
            continue
        city = cols[1].strip()
        country = iso2_to_name.get(cols[8].strip())
        if not country or not city:
            continue
        raw_pop = cols[14].strip()
        try:
            population = int(raw_pop) if raw_pop else 0
        except ValueError:
            population = 0
        ranked.setdefault(country, []).append((population, city))

    top: Dict[str, List[str]] = {}
    for country, pairs in ranked.items():
        # Sort only on population so ties preserve insertion order.
        pairs.sort(key=lambda pair: pair[0], reverse=True)
        picked: List[str] = []
        seen = set()
        for _, city in pairs:
            if city in seen:
                continue
            seen.add(city)
            picked.append(city)
            if len(picked) >= max_cities:
                break
        top[country] = picked
    return top
|
|
|
|
|
|
|
|
def safe_makedirs(path: str) -> None:
    """Create *path* (including parents) if needed; no-op for an empty path.

    ``os.makedirs(..., exist_ok=True)`` is already idempotent, so the
    previous ``os.path.exists`` pre-check was redundant and race-prone
    (TOCTOU); only the empty-path guard is kept, since ``os.makedirs("")``
    would raise FileNotFoundError.
    """
    if path:
        os.makedirs(path, exist_ok=True)
|
|
|
|
|
|
|
|
def main() -> int:
    """Fetch GeoNames data and write countries/provinces/cities JSON files.

    Downloads the countryInfo, admin1, and cities500 dumps (optionally
    preferring HTML sources for countries/provinces), then writes three
    JSON files into ``--outdir``. Returns 0 on success; network failures
    on the mandatory dumps propagate as exceptions.
    """
    ap = argparse.ArgumentParser(description="Fetch GeoNames countries, provinces, and cities into JSON files")
    ap.add_argument("--outdir", default="report", help="Directory to write JSON outputs")
    ap.add_argument("--max-cities", type=int, default=25, help="Top N cities per country by population")
    ap.add_argument("--prefer-html-countries", action="store_true", help="Prefer parsing countries from HTML page")
    ap.add_argument("--provinces-from-html", action="store_true", help="Fetch provinces per country from GeoNames HTML pages instead of admin1 dump")
    args = ap.parse_args()

    safe_makedirs(args.outdir)

    # Countries: the tab-separated dump is authoritative and also yields the
    # ISO2 -> name map needed by the admin1 and cities parsers.
    countryinfo_text = fetch_url(COUNTRYINFO_URL).decode("utf-8", errors="replace")
    countries, iso2_to_name = parse_countryinfo(countryinfo_text)

    if args.prefer_html_countries:
        # Best-effort: silently keep the dump-derived list on failure or
        # when the HTML parse looks incomplete (clearly fewer countries).
        try:
            html_text = fetch_url(COUNTRIES_HTML_URL).decode("utf-8", errors="replace")
            html_countries = parse_countries_from_html(html_text)
            if len(html_countries) >= len(countries) - 5:
                countries = html_countries
        except Exception:
            pass

    # Provinces (admin1 divisions).
    admin1_text = fetch_url(ADMIN1_URL).decode("utf-8", errors="replace")
    if args.provinces_from_html:
        provinces_by_country = fetch_provinces_from_html(iso2_to_name)
        # Backfill countries whose HTML scrape came back empty from the dump.
        admin1_fallback = parse_admin1(admin1_text, iso2_to_name)
        for c in admin1_fallback:
            if not provinces_by_country.get(c):
                provinces_by_country[c] = admin1_fallback[c]
    else:
        provinces_by_country = parse_admin1(admin1_text, iso2_to_name)

    # Cities: prefer the plain-text dump; fall back to the zip archive.
    try:
        cities_text = fetch_url(CITIES500_URL).decode("utf-8", errors="replace")
    except Exception:
        zip_bytes = fetch_url(CITIES500_ZIP_URL)
        with zipfile.ZipFile(io.BytesIO(zip_bytes)) as zf:
            members = zf.namelist()
            member_name = next(
                (n for n in members if n.lower().endswith(".txt") and "cities500" in n.lower()),
                None,
            )
            if member_name is None:
                if not members:
                    # Previously this fell through to zf.open(None) and died
                    # with a confusing TypeError; fail with a clear message.
                    raise RuntimeError("cities500.zip contains no members")
                member_name = members[0]
            with zf.open(member_name) as f:
                cities_text = f.read().decode("utf-8", errors="replace")
    cities_by_country = parse_cities500(cities_text, iso2_to_name, args.max_cities)

    # Write outputs (non-ASCII names are kept readable via ensure_ascii=False).
    outputs = (
        ("countries.json", countries),
        ("provinces_by_country.json", provinces_by_country),
        ("cities_by_country.json", cities_by_country),
    )
    for fname, payload in outputs:
        out_path = os.path.join(args.outdir, fname)
        with open(out_path, "w", encoding="utf-8") as f:
            json.dump(payload, f, indent=2, ensure_ascii=False)
        print(f"Wrote: {out_path}")
    return 0
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
raise SystemExit(main()) |
|
|
|