# places/places_dataset.py
# Author: piemonte — uploaded using huggingface_hub (commit 714c51a, verified)
"""Places Dataset Loading Script for Hugging Face"""
import csv
import json
import os
import datasets
from typing import Dict, List, Any
# BibTeX entry surfaced through datasets.DatasetInfo.citation.
_CITATION = """\
@dataset{places_dataset_2025,
title={Places Dataset},
author={patrick piemonte},
year={2025},
publisher={Hugging Face}
}
"""
# Human-readable summary shown on the dataset card / DatasetInfo.
_DESCRIPTION = """\
This dataset contains information about close to 70,000 places with associated metadata including
locations, attribution tags, and contact details. The data includes geographic coordinates,
place descriptions, categorization through attribution tags, and social media presence information.
"""
# No dedicated homepage for this dataset.
_HOMEPAGE = ""
# Creative Commons Attribution 4.0 International.
_LICENSE = "cc-by-4.0"
# Table name -> CSV filename, resolved relative to the dataset repository root
# by dl_manager.download_and_extract().
_URLS = {
    "place": "place.csv",
    "location": "location.csv",
    "place_contact": "place_contact.csv",
    "tag": "tag.csv",
    "place_tag": "place_tag.csv",
}
class PlacesDataset(datasets.GeneratorBasedBuilder):
    """Places dataset with multiple related tables.

    Configurations:
      - "place", "location", "place_contact", "tag", "place_tag": typed rows
        loaded from the corresponding CSV table;
      - "denormalized": one row per place, joined with its location, primary
        tag, and contact records;
      - "all" (default): one example per table, with that table's rows
        serialized as a single JSON string.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="all", version=VERSION, description="Load all tables"),
        datasets.BuilderConfig(name="place", version=VERSION, description="Main places table"),
        datasets.BuilderConfig(name="location", version=VERSION, description="Geographic coordinates"),
        datasets.BuilderConfig(name="place_contact", version=VERSION, description="Contact information"),
        datasets.BuilderConfig(name="tag", version=VERSION, description="Categorization tags"),
        datasets.BuilderConfig(name="place_tag", version=VERSION, description="Place-tag relationships"),
        datasets.BuilderConfig(name="denormalized", version=VERSION, description="Denormalized view with places, locations, and primary tags"),
    ]

    DEFAULT_CONFIG_NAME = "all"

    # Columns coerced to int / float by _coerce_value() in the single-table configs.
    _INT_COLUMNS = ("radius_in_meters", "z_priority")
    _FLOAT_COLUMNS = ("latitude", "longitude", "horizontal_accuracy", "altitude", "vertical_accuracy")

    def _info(self):
        """Return the DatasetInfo (feature schema) for the active config."""
        if self.config.name == "place":
            features = datasets.Features({
                "id": datasets.Value("string"),
                "created_at": datasets.Value("string"),
                "name": datasets.Value("string"),
                "description": datasets.Value("string"),
                "address": datasets.Value("string"),
                "address_formatted": datasets.Value("string"),
                "cross_street": datasets.Value("string"),
                "locality": datasets.Value("string"),
                "administrative_area": datasets.Value("string"),
                "postal_code": datasets.Value("string"),
                "country_code": datasets.Value("string"),
                "verified": datasets.Value("bool"),
                "flagged": datasets.Value("bool"),
                "place_contact_id": datasets.Value("string"),
                "location_id": datasets.Value("string"),
                "author_id": datasets.Value("string"),
                "owner_id": datasets.Value("string"),
                "locale_id": datasets.Value("string"),
                "primary_tag_id": datasets.Value("string"),
                "country": datasets.Value("string"),
                "sublocality": datasets.Value("string"),
                "subadministrative_area": datasets.Value("string"),
                "updated_at": datasets.Value("string"),
                "radius_in_meters": datasets.Value("int32"),
                "stamp_id": datasets.Value("string"),
                "z_priority": datasets.Value("int32"),
                "clustering_category": datasets.Value("string"),
                "places_token_id": datasets.Value("string"),
                "nano_id": datasets.Value("string"),
                "slug": datasets.Value("string"),
            })
        elif self.config.name == "location":
            features = datasets.Features({
                "id": datasets.Value("string"),
                "latitude": datasets.Value("float64"),
                "longitude": datasets.Value("float64"),
                "horizontal_accuracy": datasets.Value("float32"),
                "altitude": datasets.Value("float32"),
                "vertical_accuracy": datasets.Value("float32"),
                "geom": datasets.Value("string"),
                "geog": datasets.Value("string"),
            })
        elif self.config.name == "place_contact":
            features = datasets.Features({
                "id": datasets.Value("string"),
                "instagram": datasets.Value("string"),
                # NOTE: the platform handle is stored under the column "x"
                # (formerly Twitter); the denormalized view exposes it as
                # "twitter".
                "x": datasets.Value("string"),
                "website": datasets.Value("string"),
            })
        elif self.config.name == "tag":
            features = datasets.Features({
                "id": datasets.Value("string"),
                "created_at": datasets.Value("string"),
                "name": datasets.Value("string"),
                "slug": datasets.Value("string"),
                "private_tag": datasets.Value("bool"),
                "tag_type": datasets.Value("string"),
                "image_asset_id": datasets.Value("string"),
                "theme_asset_id": datasets.Value("string"),
                "search_tokens": datasets.Value("string"),
                "keywords": datasets.Value("string"),
                "radius_in_meters": datasets.Value("int32"),
                "content_rating": datasets.Value("string"),
                "stamp_id": datasets.Value("string"),
                "hidden_tag": datasets.Value("bool"),
            })
        elif self.config.name == "place_tag":
            features = datasets.Features({
                "id": datasets.Value("string"),
                "tag_id": datasets.Value("string"),
                "place_id": datasets.Value("string"),
                "created_at": datasets.Value("string"),
            })
        elif self.config.name == "denormalized":
            features = datasets.Features({
                # Place fields
                "place_id": datasets.Value("string"),
                "name": datasets.Value("string"),
                "description": datasets.Value("string"),
                "address": datasets.Value("string"),
                "address_formatted": datasets.Value("string"),
                "locality": datasets.Value("string"),
                "administrative_area": datasets.Value("string"),
                "postal_code": datasets.Value("string"),
                "country_code": datasets.Value("string"),
                "country": datasets.Value("string"),
                "verified": datasets.Value("bool"),
                # Location fields
                "latitude": datasets.Value("float64"),
                "longitude": datasets.Value("float64"),
                "horizontal_accuracy": datasets.Value("float32"),
                "altitude": datasets.Value("float32"),
                # Primary tag fields
                "primary_tag_name": datasets.Value("string"),
                "primary_tag_slug": datasets.Value("string"),
                "primary_tag_type": datasets.Value("string"),
                # Contact fields
                "website": datasets.Value("string"),
                "instagram": datasets.Value("string"),
                "twitter": datasets.Value("string"),
            })
        else:  # "all" config
            features = datasets.Features({
                "table_name": datasets.Value("string"),
                "data": datasets.Value("string"),  # JSON string of the table's rows
            })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the CSV files and declare the single "train" split."""
        if self.config.name in ("all", "denormalized"):
            # These configs join / bundle every table, so fetch them all.
            downloaded_files = dl_manager.download_and_extract(_URLS)
            gen_kwargs = {"filepaths": downloaded_files}
        else:
            downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
            gen_kwargs = {"filepath": downloaded_file, "table_name": self.config.name}
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=gen_kwargs,
            ),
        ]

    def _generate_examples(self, filepath=None, table_name=None, filepaths=None):
        """Dispatch to the generator matching the active config.

        Args:
            filepath: Path to a single table CSV (single-table configs).
            table_name: Name of that table (accepted for gen_kwargs
                compatibility; the config name is used instead).
            filepaths: Mapping of table name -> CSV path ("all"/"denormalized").
        """
        if self.config.name == "denormalized":
            yield from self._generate_denormalized(filepaths)
        elif self.config.name != "all":
            yield from self._generate_table(filepath)
        else:
            yield from self._generate_all(filepaths)

    # ------------------------------------------------------------------ #
    # Helpers                                                            #
    # ------------------------------------------------------------------ #

    @staticmethod
    def _read_table_by_id(filepath):
        """Load a CSV file into a dict keyed by each row's "id" column."""
        with open(filepath, encoding="utf-8") as f:
            return {row["id"]: row for row in csv.DictReader(f)}

    @staticmethod
    def _parse_coordinate(raw, low, high, label, place_id):
        """Parse one coordinate cell; return None when missing, unparseable,
        or outside [low, high] (out-of-range values print a warning)."""
        if not raw:
            return None
        try:
            value = float(raw)
        except ValueError:
            return None
        if not (low <= value <= high):
            print(f"Warning: Invalid {label} {value} for place {place_id}")
            return None
        return value

    @staticmethod
    def _safe_float(raw):
        """Best-effort float conversion: None for empty or unparseable input."""
        if not raw:
            return None
        try:
            return float(raw)
        except ValueError:
            # Previously an unparseable accuracy/altitude cell crashed the
            # build; treat it like a missing value instead.
            return None

    def _generate_denormalized(self, filepaths):
        """Yield one joined row per place (location, primary tag, contact)."""
        places = self._read_table_by_id(filepaths["place"])
        locations = self._read_table_by_id(filepaths["location"])
        tags = self._read_table_by_id(filepaths["tag"])
        contacts = self._read_table_by_id(filepaths["place_contact"])

        for idx, (place_id, place) in enumerate(places.items()):
            result = {
                "place_id": place_id,
                "name": place.get("name"),
                "description": place.get("description"),
                "address": place.get("address"),
                "address_formatted": place.get("address_formatted"),
                "locality": place.get("locality"),
                "administrative_area": place.get("administrative_area"),
                "postal_code": place.get("postal_code"),
                "country_code": place.get("country_code"),
                "country": place.get("country"),
                # CSV stores booleans as "true"/"false"; missing -> False.
                "verified": place.get("verified") == "true",
            }

            # Location join (left outer).
            loc = locations.get(place["location_id"]) if place.get("location_id") else None
            if loc is not None:
                result.update({
                    "latitude": self._parse_coordinate(loc.get("latitude"), -90, 90, "latitude", place_id),
                    "longitude": self._parse_coordinate(loc.get("longitude"), -180, 180, "longitude", place_id),
                    "horizontal_accuracy": self._safe_float(loc.get("horizontal_accuracy")),
                    "altitude": self._safe_float(loc.get("altitude")),
                })
            else:
                result.update({
                    "latitude": None,
                    "longitude": None,
                    "horizontal_accuracy": None,
                    "altitude": None,
                })

            # Primary tag join (left outer).
            tag = tags.get(place["primary_tag_id"]) if place.get("primary_tag_id") else None
            result.update({
                "primary_tag_name": tag.get("name") if tag else None,
                "primary_tag_slug": tag.get("slug") if tag else None,
                "primary_tag_type": tag.get("tag_type") if tag else None,
            })

            # Contact join (left outer).
            contact = contacts.get(place["place_contact_id"]) if place.get("place_contact_id") else None
            result.update({
                "website": contact.get("website") if contact else None,
                "instagram": contact.get("instagram") if contact else None,
                # BUG FIX: the place_contact table stores the handle in column
                # "x" (see its feature schema); reading "twitter" was always None.
                "twitter": contact.get("x") if contact else None,
            })

            yield idx, result

    def _generate_table(self, filepath):
        """Yield typed rows from a single table CSV.

        "true"/"false" become bools, "" becomes None, known numeric columns
        are coerced, and out-of-range coordinates are dropped with a warning.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(csv.DictReader(f)):
                for key, value in row.items():
                    row[key] = self._coerce_value(key, value, idx)
                yield idx, row

    @classmethod
    def _coerce_value(cls, key, value, idx):
        """Convert one raw CSV cell to its typed representation."""
        if value in ("true", "false"):
            return value == "true"
        if value == "":
            return None
        if key in cls._INT_COLUMNS:
            try:
                return int(value) if value else None
            except ValueError:
                return None
        if key in cls._FLOAT_COLUMNS:
            if not value:
                return None
            try:
                val = float(value)
            except ValueError:
                return None
            if key == "latitude" and not (-90 <= val <= 90):
                print(f"Warning: Invalid latitude {val} in row {idx}")
                return None
            if key == "longitude" and not (-180 <= val <= 180):
                print(f"Warning: Invalid longitude {val} in row {idx}")
                return None
            return val
        # Any other column stays a plain string.
        return value

    @staticmethod
    def _generate_all(filepaths):
        """Yield one example per table, with its rows serialized as JSON."""
        for idx, (table_name, filepath) in enumerate(filepaths.items()):
            with open(filepath, encoding="utf-8") as f:
                rows = list(csv.DictReader(f))
            yield idx, {
                "table_name": table_name,
                "data": json.dumps(rows),
            }
# Alternative simple loading helper that bypasses the `datasets` builder entirely
def load_places_as_dict(data_dir: str) -> Dict[str, List[Dict[str, Any]]]:
    """Read every known CSV table found under *data_dir*.

    Args:
        data_dir: Directory containing the CSV files.

    Returns:
        Mapping of table name to a list of row dictionaries. Tables whose
        CSV file does not exist are simply omitted from the result.
    """
    loaded: Dict[str, List[Dict[str, Any]]] = {}
    for name, filename in _URLS.items():
        path = os.path.join(data_dir, filename)
        if not os.path.exists(path):
            continue  # tolerate partially-populated data directories
        with open(path, encoding="utf-8") as handle:
            loaded[name] = [dict(record) for record in csv.DictReader(handle)]
    return loaded