Shiowo2 commited on
Commit
4609f08
·
verified ·
1 Parent(s): 1d49a71

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +154 -3
README.md CHANGED
@@ -1,3 +1,154 @@
1
- ---
2
- license: cc
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: odbl
3
+ language:
4
+ - id
5
+ tags:
6
+ - sports
7
+ - location
8
+ - latitude
9
+ - longitude
10
+ ---
11
+
12
+
13
+ # This is the initial dataset we scraped from OpenStreetMap; see the license (ODbL) above.
14
+
15
+
16
+ ## This dataset has not been `cleaned` yet — be aware!
17
+ ```python
18
+ # requirements
19
+ !pip install requests
20
+ ```
21
+
22
+ ## script
23
+ ```python
24
+ import csv
25
+ import time
26
+ import requests
27
+ from urllib.parse import quote
28
# Output file for the scraped rows.
OUT_CSV = "jabodetabek_sports_osm.csv"
# Bounding box as (south, west, north, east) in decimal degrees,
# covering the greater Jakarta (Jabodetabek) area.
BBOX = (-6.80, 106.30, -5.90, 107.20)
# Public Overpass API endpoint.
OVERPASS_URL = "https://overpass-api.de/api/interpreter"
# Template for fetching a Wikidata entity's JSON (used to resolve P18 images).
WIKIDATA_ENTITY_URL = "https://www.wikidata.org/wiki/Special:EntityData/{qid}.json"
# Set to False to skip the per-element Wikidata image lookup (extra HTTP calls).
FETCH_WIKIDATA_IMAGES = True
# Identifying User-Agent per Overpass/Wikimedia etiquette; replace the contact.
HEADERS = {"User-Agent": "jabodetabek-sports-scraper/1.0 (contact: yourname@example.com)"}
34
def osm_browse_link(osm_type: str, osm_id: int) -> str:
    """Return the canonical openstreetmap.org browse URL for an element."""
    return "/".join(("https://www.openstreetmap.org", osm_type, str(osm_id)))
36
def commons_file_url(filename: str, width: int = 1600) -> str:
    """Turn a Wikimedia Commons file name into a Special:FilePath thumbnail URL.

    Accepts names with or without a leading "File:" namespace prefix
    (matched case-insensitively); *width* selects the rendered size.
    """
    name = filename.strip()
    if name.lower().startswith("file:"):
        # Drop the namespace prefix; keep everything after the first colon.
        _, _, name = name.partition(":")
    return f"https://commons.wikimedia.org/wiki/Special:FilePath/{quote(name)}?width={width}"
41
def extract_image_link(tags: dict) -> str:
    """Best-effort image URL for an OSM element's tags.

    Resolution order: the `image` tag (used verbatim when it already looks
    like a URL, otherwise treated as a Commons file name), then the
    `wikimedia_commons` tag, then the Wikidata item's P18 claim — a network
    lookup guarded by FETCH_WIKIDATA_IMAGES. Returns "" when nothing is found
    or the Wikidata lookup fails.
    """
    image_tag = tags.get("image")
    if image_tag:
        if image_tag.startswith("http"):
            return image_tag
        return commons_file_url(image_tag)

    commons_tag = tags.get("wikimedia_commons")
    if commons_tag:
        return commons_file_url(commons_tag)

    qid = tags.get("wikidata")
    if FETCH_WIKIDATA_IMAGES and qid and qid.upper().startswith("Q"):
        try:
            resp = requests.get(
                WIKIDATA_ENTITY_URL.format(qid=qid), headers=HEADERS, timeout=30
            )
            if resp.status_code == 200:
                entity = resp.json().get("entities", {}).get(qid.upper(), {})
                p18 = entity.get("claims", {}).get("P18", [])
                if p18:
                    return commons_file_url(p18[0]["mainsnak"]["datavalue"]["value"])
        except Exception:
            # Best-effort enrichment only: any network/parse failure simply
            # means "no image", never an aborted scrape.
            pass
    return ""
65
def compose_address(tags: dict) -> str:
    """Assemble a human-readable address from OSM `addr:*` tags.

    Prefers `addr:full` verbatim when present; otherwise joins street (with
    house number), locality, city and province parts — most specific first —
    ending with the postcode. Returns "" when no address tags exist.
    """
    if "addr:full" in tags:
        return tags["addr:full"]

    pieces = []
    street = tags.get("addr:street")
    number = tags.get("addr:housenumber")
    if street:
        pieces.append(f"{street} {number}" if number else street)

    # Locality -> city -> province, in fixed most-specific-first order.
    locality_keys = (
        "addr:neighbourhood", "addr:suburb", "addr:village",
        "addr:city", "addr:municipality", "addr:county",
        "addr:province", "addr:state",
    )
    pieces.extend(tags[key] for key in locality_keys if tags.get(key))

    if tags.get("addr:postcode"):
        pieces.append(tags["addr:postcode"])
    return ", ".join(pieces)
87
def build_types(tags: dict) -> str:
    """Summarize an element's category tags as comma-separated "key:value" pairs.

    Considers the `leisure`, `amenity` and `sport` tags, in that order.
    """
    return ", ".join(
        f"{key}:{tags[key]}" for key in ("leisure", "amenity", "sport") if key in tags
    )
96
def fetch_overpass(bbox):
    """Query the Overpass API for sports-related OSM elements inside *bbox*.

    *bbox* is (south, west, north, east) in decimal degrees. Returns the raw
    Overpass "elements" list (nodes, ways and relations; ways/relations carry
    an Overpass-computed "center"). Raises requests.HTTPError on a non-2xx
    response.

    NOTE(review): the original README lost the query literal — the line read
    `query = f` with no string attached, a NameError at runtime. The Overpass
    QL below is reconstructed from the leisure regex defined here and the
    tags/fields the rest of the script consumes (leisure/sport tags, centers
    for ways) — confirm it matches the data that was actually scraped.
    """
    s, w, n, e = bbox
    leisure_regex = "^(sports_centre|fitness_centre|stadium|pitch|swimming_pool|track)$"
    query = f"""
[out:json][timeout:180];
(
  node["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
  way["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
  relation["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
  node["sport"]({s},{w},{n},{e});
  way["sport"]({s},{w},{n},{e});
  relation["sport"]({s},{w},{n},{e});
);
out center;
"""
    r = requests.post(OVERPASS_URL, data={"data": query}, headers=HEADERS, timeout=180)
    r.raise_for_status()
    return r.json().get("elements", [])
103
def element_coords(el) -> tuple[float, float]:
    """Return (lat, lon) for an Overpass element.

    Nodes carry coordinates directly; ways/relations use the computed
    "center" object. Missing values yield (None, None) components.
    """
    if el["type"] != "node":
        center = el.get("center") or {}
        return center.get("lat"), center.get("lon")
    return el.get("lat"), el.get("lon")
108
def main():
    """Fetch sports-related OSM elements for Jabodetabek and write OUT_CSV.

    Pipeline: dedupe elements by (type, id), drop those without coordinates,
    filter to likely-sporty elements, then build one CSV row per keeper.

    Fix vs. the original: the `likely_sporty` filter now runs BEFORE
    `extract_image_link`, which can issue a Wikidata HTTP request per
    element — the original resolved images for elements it was about to
    discard, wasting network calls. The CSV output is unchanged.
    """
    sporty_leisure = {
        "sports_centre", "fitness_centre", "stadium",
        "pitch", "swimming_pool", "track",
    }
    seen = set()
    rows = []
    for el in fetch_overpass(BBOX):
        el_type = el.get("type")
        el_id = el.get("id")
        key = (el_type, el_id)
        if key in seen:
            continue
        seen.add(key)

        lat, lon = element_coords(el)
        if lat is None or lon is None:
            # Ways/relations can lack a computed center; nothing to plot.
            continue

        tags = el.get("tags", {}) or {}
        likely_sporty = tags.get("leisure") in sporty_leisure or "sport" in tags
        if not likely_sporty:
            continue

        rows.append({
            "name": tags.get("name") or "(Unnamed)",
            "address": compose_address(tags),
            "lat": lat,
            "lng": lon,
            "types": build_types(tags),
            "osm_link": osm_browse_link(el_type, el_id),
            "image_link": extract_image_link(tags),
            "osm_type": el_type,
            "osm_id": el_id,
        })

    fieldnames = ["name", "address", "lat", "lng", "types",
                  "osm_link", "image_link", "osm_type", "osm_id"]
    with open(OUT_CSV, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)
    print(f"Saved {len(rows)} places to {OUT_CSV}")

if __name__ == "__main__":
    main()
154
+ ```