#!/usr/bin/env python3 """ geonames_fetch.py — Fetch countries, provinces, and cities from GeoNames. Outputs JSON files usable by csv_repair.py's real city mode: - report/countries.json (list of country names) - report/provinces_by_country.json (country -> [admin1 names]) - report/cities_by_country.json (country -> [top city names by population]) Sources (preferring official GeoNames dumps for reliability): - Countries: https://www.geonames.org/countries/ (HTML) or http://download.geonames.org/export/dump/countryInfo.txt - Provinces: http://download.geonames.org/export/dump/admin1CodesASCII.txt - Cities: http://download.geonames.org/export/dump/cities500.txt Usage examples: python geonames_fetch.py --outdir report python geonames_fetch.py --outdir report --max-cities 30 python geonames_fetch.py --prefer-html-countries """ from __future__ import annotations import argparse import json import os import re from typing import Dict, List, Tuple import urllib.request import io import zipfile import unicodedata from html.parser import HTMLParser from typing import Optional COUNTRIES_HTML_URL = "https://www.geonames.org/countries/" COUNTRYINFO_URL = "http://download.geonames.org/export/dump/countryInfo.txt" ADMIN1_URL = "http://download.geonames.org/export/dump/admin1CodesASCII.txt" CITIES500_URL = "http://download.geonames.org/export/dump/cities500.txt" CITIES500_ZIP_URL = "http://download.geonames.org/export/dump/cities500.zip" def fetch_url(url: str, timeout: int = 30) -> bytes: with urllib.request.urlopen(url, timeout=timeout) as resp: return resp.read() def parse_countries_from_html(html: str) -> List[str]: # Extract the main countries table rows; anchor text typically is country name # Look for links to country pages like "/countries//" or "//" # Fallback to table cells containing country names. countries: List[str] = [] # Attempt to capture anchor text inside the countries table # This regex finds table rows with a link where the text is the country name. for m in re.finditer(r"]+href=\"/\w{2}/?[^\"]*\"[^>]*>([^<]+)", html, re.IGNORECASE): name = m.group(1).strip() if name and name not in countries: countries.append(name) # If too few extracted, try a broader pattern within the countries page if len(countries) < 50: for m in re.finditer(r"]+href=\"/countries/[^\"]+\"[^>]*>([^<]+)", html, re.IGNORECASE): name = m.group(1).strip() if name and name not in countries: countries.append(name) # Deduplicate and sort case-insensitively countries = sorted(set(countries), key=lambda x: x.casefold()) return countries def parse_countryinfo(text: str) -> Tuple[List[str], Dict[str, str]]: # Returns (country_names, iso2_to_name) names: List[str] = [] iso2_to_name: Dict[str, str] = {} for line in text.splitlines(): if not line or line.startswith("#"): continue parts = line.split("\t") # Format ref: https://download.geonames.org/export/dump/readme.txt # columns: ISO, ISO3, ISO-Numeric, fips, Country, Capital, Area(in sq km), Population, Continent, # tld, CurrencyCode, CurrencyName, Phone, PostalCodeFormat, PostalCodeRegex, Languages, geonameId, neighbours, EquivalentFipsCode if len(parts) >= 5: iso2 = parts[0].strip() name = parts[4].strip() if iso2 and name: iso2_to_name[iso2] = name names.append(name) names = sorted(set(names), key=lambda x: x.casefold()) return names, iso2_to_name def parse_admin1(text: str, iso2_to_name: Dict[str, str]) -> Dict[str, List[str]]: # admin1CodesASCII.txt: codenameasciinamegeonameid by_country: Dict[str, List[str]] = {} for line in text.splitlines(): if not line: continue parts = line.split("\t") if len(parts) >= 4: code = parts[0].strip() # e.g., US.NY or IN.KA name = parts[1].strip() # Extract ISO2 from code prefix if "." in code: iso2 = code.split(".", 1)[0] country = iso2_to_name.get(iso2) if country: by_country.setdefault(country, []).append(name) # Deduplicate and sort entries for each country for c, lst in by_country.items(): by_country[c] = sorted(set(lst), key=lambda x: x.casefold()) return by_country def _slugify_country(name: str) -> str: s = unicodedata.normalize("NFKD", name) s = "".join(c for c in s if not unicodedata.combining(c)) s = s.lower() s = re.sub(r"[^a-z0-9\s-]", "", s) s = re.sub(r"\s+", "-", s).strip("-") return s class _SubdivTableParser(HTMLParser): def __init__(self) -> None: super().__init__() self.in_target_table = False self.target_table_depth = 0 self.in_tr = False self.in_cell = False self.current_cell_text: List[str] = [] self.current_row: List[str] = [] self.rows: List[List[str]] = [] def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]): if tag.lower() == "table": attrs_dict = {k.lower(): (v or "") for k, v in attrs} if attrs_dict.get("id", "").lower() == "subdivtable1": self.in_target_table = True self.target_table_depth = 1 elif self.in_target_table: self.target_table_depth += 1 elif self.in_target_table and tag.lower() == "tr": self.in_tr = True self.current_row = [] elif self.in_target_table and tag.lower() in ("td", "th"): self.in_cell = True self.current_cell_text = [] def handle_endtag(self, tag: str): if self.in_target_table and tag.lower() in ("td", "th") and self.in_cell: text = "".join(self.current_cell_text).strip() self.current_row.append(re.sub(r"\s+", " ", text)) self.in_cell = False self.current_cell_text = [] elif self.in_target_table and tag.lower() == "tr" and self.in_tr: # Only keep non-empty rows if any(cell.strip() for cell in self.current_row): self.rows.append(self.current_row) self.in_tr = False self.current_row = [] elif tag.lower() == "table": if self.in_target_table: self.target_table_depth -= 1 if self.target_table_depth <= 0: self.in_target_table = False def handle_data(self, data: str): if self.in_target_table and self.in_cell: self.current_cell_text.append(data) def parse_admin_divisions_html(html: str) -> List[str]: parser = _SubdivTableParser() parser.feed(html) rows = parser.rows if not rows: return [] # Find header row: the first row that contains "Name of Subdivision" header_idx = None for i, row in enumerate(rows[:5]): joined = " ".join(row).lower() if "name of subdivision" in joined: header_idx = i break if header_idx is None: # Sometimes header is the first row with th; fall back to first row header_idx = 0 headers = [c.strip() for c in rows[header_idx]] # Determine the column index for "Name of Subdivision" col_idx = None for j, h in enumerate(headers): if h.strip().lower() == "name of subdivision" or "name of subdivision" in h.strip().lower(): col_idx = j break if col_idx is None: # Could not find the exact header; bail out return [] names: List[str] = [] for r in rows[header_idx + 1:]: if col_idx < len(r): val = r[col_idx].strip() if val and not re.fullmatch(r"[-\s]*", val): names.append(val) names = sorted(set(names), key=lambda x: x.casefold()) return names def fetch_provinces_from_html(iso2_to_name: Dict[str, str]) -> Dict[str, List[str]]: out: Dict[str, List[str]] = {} for iso2, country in iso2_to_name.items(): slug = _slugify_country(country) url = f"https://www.geonames.org/{iso2}/administrative-division-{slug}.html" names: List[str] = [] try: html_bytes = fetch_url(url) html_text = html_bytes.decode("utf-8", errors="replace") names = parse_admin_divisions_html(html_text) except Exception: names = [] out[country] = names return out def parse_cities500(text: str, iso2_to_name: Dict[str, str], max_cities: int) -> Dict[str, List[str]]: # cities500.txt columns: tab-separated; population at index 14, country code at index 8, name at index 1 by_country: Dict[str, List[Tuple[int, str]]] = {} for line in text.splitlines(): if not line or line.startswith("#"): continue parts = line.split("\t") if len(parts) >= 15: name = parts[1].strip() iso2 = parts[8].strip() pop_str = parts[14].strip() try: pop = int(pop_str) if pop_str else 0 except ValueError: pop = 0 country = iso2_to_name.get(iso2) if country and name: by_country.setdefault(country, []).append((pop, name)) # For each country, sort by population desc and keep top N unique names out: Dict[str, List[str]] = {} for country, entries in by_country.items(): entries.sort(key=lambda x: x[0], reverse=True) seen = set() names: List[str] = [] for pop, nm in entries: if nm not in seen: names.append(nm) seen.add(nm) if len(names) >= max_cities: break out[country] = names return out def safe_makedirs(path: str) -> None: if path and not os.path.exists(path): os.makedirs(path, exist_ok=True) def main() -> int: ap = argparse.ArgumentParser(description="Fetch GeoNames countries, provinces, and cities into JSON files") ap.add_argument("--outdir", default="report", help="Directory to write JSON outputs") ap.add_argument("--max-cities", type=int, default=25, help="Top N cities per country by population") ap.add_argument("--prefer-html-countries", action="store_true", help="Prefer parsing countries from HTML page") ap.add_argument("--provinces-from-html", action="store_true", help="Fetch provinces per country from GeoNames HTML pages instead of admin1 dump") args = ap.parse_args() safe_makedirs(args.outdir) # Fetch country mappings countries: List[str] = [] iso2_to_name: Dict[str, str] = {} countryinfo_bytes = fetch_url(COUNTRYINFO_URL) countryinfo_text = countryinfo_bytes.decode("utf-8", errors="replace") countries, iso2_to_name = parse_countryinfo(countryinfo_text) if args.prefer_html_countries: try: html_bytes = fetch_url(COUNTRIES_HTML_URL) html_text = html_bytes.decode("utf-8", errors="replace") html_countries = parse_countries_from_html(html_text) # If HTML yielded enough, prefer it; otherwise keep countryInfo list if len(html_countries) >= len(countries) - 5: countries = html_countries except Exception: pass # Fetch provinces (admin1) admin1_bytes = fetch_url(ADMIN1_URL) admin1_text = admin1_bytes.decode("utf-8", errors="replace") if args.provinces_from_html: provinces_by_country = fetch_provinces_from_html(iso2_to_name) # If some countries yielded empty from HTML, fill from admin1 dump admin1_fallback = parse_admin1(admin1_text, iso2_to_name) for c in admin1_fallback: if not provinces_by_country.get(c): provinces_by_country[c] = admin1_fallback[c] else: provinces_by_country = parse_admin1(admin1_text, iso2_to_name) # Fetch cities (top by population) — try .txt first, then .zip fallback try: cities_bytes = fetch_url(CITIES500_URL) cities_text = cities_bytes.decode("utf-8", errors="replace") except Exception: zip_bytes = fetch_url(CITIES500_ZIP_URL) with zipfile.ZipFile(io.BytesIO(zip_bytes)) as zf: member_name = None for n in zf.namelist(): if n.lower().endswith(".txt") and "cities500" in n.lower(): member_name = n break if member_name is None and zf.namelist(): member_name = zf.namelist()[0] with zf.open(member_name) as f: cities_text = f.read().decode("utf-8", errors="replace") cities_by_country = parse_cities500(cities_text, iso2_to_name, args.max_cities) # Write JSON outputs with open(os.path.join(args.outdir, "countries.json"), "w", encoding="utf-8") as f: json.dump(countries, f, indent=2, ensure_ascii=False) with open(os.path.join(args.outdir, "provinces_by_country.json"), "w", encoding="utf-8") as f: json.dump(provinces_by_country, f, indent=2, ensure_ascii=False) with open(os.path.join(args.outdir, "cities_by_country.json"), "w", encoding="utf-8") as f: json.dump(cities_by_country, f, indent=2, ensure_ascii=False) print(f"Wrote: {os.path.join(args.outdir, 'countries.json')}") print(f"Wrote: {os.path.join(args.outdir, 'provinces_by_country.json')}") print(f"Wrote: {os.path.join(args.outdir, 'cities_by_country.json')}") return 0 if __name__ == "__main__": raise SystemExit(main())