|
|
--- |
|
|
license: odbl |
|
|
language: |
|
|
- id |
|
|
tags: |
|
|
- sports |
|
|
- location |
|
|
- latitude
|
|
- longitude
|
|
--- |
|
|
|
|
|
|
|
|
# This is the initial dataset we scraped from OpenStreetMap
|
|
|
|
|
|
|
|
## This dataset has not been cleaned yet — be aware!
|
|
```python |
|
|
# requirements |
|
|
!pip install requests |
|
|
``` |
|
|
|
|
|
## script |
|
|
```python |
|
|
import csv |
|
|
import time |
|
|
import requests |
|
|
from urllib.parse import quote |
|
|
|
|
|
# Destination CSV for the scraped places.
OUT_CSV = "jabodetabek_sports_osm.csv"

# Bounding box as (south, west, north, east) in decimal degrees,
# covering the Jabodetabek (Greater Jakarta) area.
BBOX = (-6.80, 106.30, -5.90, 107.20)

# Public Overpass API endpoint used for the OSM query.
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
# Wikidata entity-data endpoint; {qid} is filled with an item id such as Q123.
WIKIDATA_ENTITY_URL = "https://www.wikidata.org/wiki/Special:EntityData/{qid}.json"

# When True, elements carrying a `wikidata` tag trigger one extra HTTP
# request each to resolve their P18 (image) claim.
FETCH_WIKIDATA_IMAGES = True

# Overpass/Wikimedia etiquette: identify the client and a contact address.
HEADERS = {"User-Agent": "jabodetabek-sports-scraper/1.0 (contact: yourname@example.com)"}
|
|
|
|
|
def osm_browse_link(osm_type: str, osm_id: int) -> str:
    """Return the public openstreetmap.org browse URL for one element."""
    url = f"https://www.openstreetmap.org/{osm_type}/{osm_id}"
    return url
|
|
|
|
|
def commons_file_url(filename: str, width: int = 1600) -> str:
    """Turn a Wikimedia Commons file name into a direct FilePath URL.

    Accepts names with or without a leading "File:" prefix (any case)
    and percent-encodes the file name; `width` is passed through as a
    thumbnail-size query parameter.
    """
    name = filename.strip()
    if name.lower().startswith("file:"):
        # Drop everything up to and including the first colon.
        _, _, name = name.partition(":")
    return f"https://commons.wikimedia.org/wiki/Special:FilePath/{quote(name)}?width={width}"
|
|
|
|
|
def extract_image_link(tags: dict) -> str:
    """Best-effort image URL for an OSM element, or "" if none is found.

    Sources are tried in order of cost:
      1. the ``image`` tag (a ready URL, or a Commons file name),
      2. the ``wikimedia_commons`` tag,
      3. the Wikidata P18 (image) claim — one extra HTTP request per
         element, gated by FETCH_WIKIDATA_IMAGES.
    """
    # 1) Explicit image tag: either a full URL or a Commons file name.
    img = tags.get("image")
    if img:
        if img.startswith("http"):
            return img
        return commons_file_url(img)

    # 2) Direct Commons file reference.
    wm = tags.get("wikimedia_commons")
    if wm:
        return commons_file_url(wm)

    # 3) Wikidata lookup (network call); only for plausible Q-ids.
    qid = tags.get("wikidata")
    if FETCH_WIKIDATA_IMAGES and qid and qid.upper().startswith("Q"):
        try:
            r = requests.get(WIKIDATA_ENTITY_URL.format(qid=qid), headers=HEADERS, timeout=30)
            if r.status_code == 200:
                data = r.json()
                ent = data.get("entities", {}).get(qid.upper(), {})
                claims = ent.get("claims", {})
                p18 = claims.get("P18", [])
                if p18:
                    filename = p18[0]["mainsnak"]["datavalue"]["value"]
                    return commons_file_url(filename)
        except (requests.RequestException, ValueError, KeyError, IndexError, TypeError):
            # Best effort: a missing image must never abort the scrape.
            # Swallow only the failures this path can actually produce —
            # network errors, a non-JSON body (ValueError), or an
            # unexpected claim structure — instead of every Exception.
            pass

    return ""
|
|
|
|
|
def compose_address(tags: dict) -> str:
    """Assemble a human-readable address from OSM ``addr:*`` tags.

    A ready-made ``addr:full`` wins outright; otherwise the street
    (plus house number), locality, city, province and postcode parts
    are joined with commas, skipping whatever is missing.
    """
    if "addr:full" in tags:
        return tags["addr:full"]

    pieces = []

    street = tags.get("addr:street")
    number = tags.get("addr:housenumber")
    if street:
        pieces.append(f"{street} {number}" if number else street)

    # Locality → city → province, finest to coarsest.
    area_keys = (
        "addr:neighbourhood", "addr:suburb", "addr:village",
        "addr:city", "addr:municipality", "addr:county",
        "addr:province", "addr:state",
    )
    pieces.extend(tags[key] for key in area_keys if tags.get(key))

    postcode = tags.get("addr:postcode")
    if postcode:
        pieces.append(postcode)

    return ", ".join(pieces)
|
|
|
|
|
def build_types(tags: dict) -> str:
    """Summarise the sport-related tags as comma-joined "key:value" pairs.

    Only the ``leisure``, ``amenity`` and ``sport`` keys are reported,
    in that fixed order.
    """
    labels = [
        f"{key}:{tags[key]}"
        for key in ("leisure", "amenity", "sport")
        if key in tags
    ]
    return ", ".join(labels)
|
|
|
|
|
def fetch_overpass(bbox):
    """Query the Overpass API for sport-related OSM elements inside *bbox*.

    *bbox* is (south, west, north, east) in decimal degrees. Returns the
    raw list under the response's "elements" key — nodes, ways and
    relations with their tags, plus a computed "center" for ways and
    relations (from ``out center``). Raises ``requests.HTTPError`` on a
    non-2xx response.
    """
    s, w, n, e = bbox

    # leisure values that denote sports facilities
    leisure_regex = "^(sports_centre|fitness_centre|stadium|pitch|swimming_pool|track)$"

    query = f"""
    [out:json][timeout:180];
    (
    node["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
    way["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
    relation["leisure"~"{leisure_regex}"]({s},{w},{n},{e});

    // Any feature explicitly tagged with sport=*, but avoid retail shops
    node["sport"]["shop"!~".*"]({s},{w},{n},{e});
    way["sport"]["shop"!~".*"]({s},{w},{n},{e});
    relation["sport"]["shop"!~".*"]({s},{w},{n},{e});
    );
    out center tags;
    """
    # Overpass expects the QL program as the form field "data".
    r = requests.post(OVERPASS_URL, data={"data": query}, headers=HEADERS, timeout=180)
    r.raise_for_status()
    return r.json().get("elements", [])
|
|
|
|
|
def element_coords(el) -> tuple[float, float]:
    """Extract a representative (lat, lon) pair for an Overpass element.

    Nodes carry coordinates directly; ways and relations rely on the
    "center" object produced by ``out center``. Either component may be
    None when Overpass omitted it.
    """
    source = el if el["type"] == "node" else (el.get("center") or {})
    return source.get("lat"), source.get("lon")
|
|
|
|
|
def main():
    """Scrape sports places in Jabodetabek from Overpass and write OUT_CSV."""
    elements = fetch_overpass(BBOX)
    seen = set()
    rows = []

    for el in elements:
        el_type = el.get("type")
        el_id = el.get("id")
        tags = el.get("tags", {}) or {}

        # The same element can show up more than once in an Overpass union.
        key = (el_type, el_id)
        if key in seen:
            continue
        seen.add(key)

        lat, lon = element_coords(el)
        if lat is None or lon is None:
            continue

        # Filter BEFORE the expensive work: extract_image_link may issue a
        # Wikidata HTTP request per element, so reject non-sporty elements
        # first instead of after fetching their image link.
        likely_sporty = (
            "leisure" in tags and tags["leisure"] in
            {"sports_centre", "fitness_centre", "stadium", "pitch", "swimming_pool", "track"}
        ) or ("sport" in tags)
        if not likely_sporty:
            continue

        rows.append({
            "name": tags.get("name") or "(Unnamed)",
            "address": compose_address(tags),
            "lat": lat,
            "lng": lon,
            "types": build_types(tags),
            "osm_link": osm_browse_link(el_type, el_id),
            "image_link": extract_image_link(tags),
            "osm_type": el_type,
            "osm_id": el_id,
        })

    fieldnames = ["name", "address", "lat", "lng", "types", "osm_link", "image_link", "osm_type", "osm_id"]
    with open(OUT_CSV, "w", newline="", encoding="utf-8") as f:
        w = csv.DictWriter(f, fieldnames=fieldnames)
        w.writeheader()
        w.writerows(rows)

    print(f"Saved {len(rows)} places to {OUT_CSV}")


if __name__ == "__main__":
    main()
|
|
|
|
|
``` |