Commit ·
884f942
1
Parent(s): 590df96
data-update (#3)
Browse files- improved data refresh process and updated monumental statuses (aa4d0af14bdb7f392c58213cc3b5e17726f29868)
- update-analysis.ipynb without output cells (c251b378d987e3226e28965aafe9ab019bae213f)
- Fix dotenv dependency (84084e367c32533382ccdc22612dd8cdd4bfc4fb)
Co-authored-by: Ben Verhees <benverhees@users.noreply.huggingface.co>
- .gitignore +3 -1
- README.md +3 -2
- monumenten.parquet +2 -2
- process_data.py +0 -229
- requirements.txt +7 -4
- run_pipeline.py +935 -0
- update-analysis.ipynb +277 -0
.gitignore
CHANGED
|
@@ -7,4 +7,6 @@
|
|
| 7 |
*.jsonl
|
| 8 |
*.jsonl.gz
|
| 9 |
*.jsonl.gz.part
|
| 10 |
-
*.jsonl.gz.part.
|
|
|
|
|
|
|
|
|
| 7 |
*.jsonl
|
| 8 |
*.jsonl.gz
|
| 9 |
*.jsonl.gz.part
|
| 10 |
+
*.jsonl.gz.part.
|
| 11 |
+
*.sqlite
|
| 12 |
+
*.log
|
README.md
CHANGED
|
@@ -61,8 +61,8 @@ size_categories:
|
|
| 61 |
|
| 62 |
For the statuses of whether an addressable unit is a national monument (`is_rijksmonument`), we looked at two sources:
|
| 63 |
|
| 64 |
-
|
| 65 |
-
|
| 66 |
|
| 67 |
**Please keep in mind that the Cultural Heritage Agency (_RCE_ in the dataset) is the official source for monumental statuses**. However, in our
|
| 68 |
data analyses on the national monumental statuses of addressable units in Rotterdam, we concluded that:
|
|
@@ -77,6 +77,7 @@ Overall, for us, het Kadaster seemed more up to date than the Cultural Heritage
|
|
| 77 |
---
|
| 78 |
|
| 79 |
## Special thanks
|
|
|
|
| 80 |
to het Kadaster and the Cultural Heritage Agency for their APIs and datasets.
|
| 81 |
|
| 82 |
---
|
|
|
|
| 61 |
|
| 62 |
For the statuses of whether an addressable unit is a national monument (`is_rijksmonument`), we looked at two sources:
|
| 63 |
|
| 64 |
+
- Cultural Heritage Agency
|
| 65 |
+
- Het Kadaster
|
| 66 |
|
| 67 |
**Please keep in mind that the Cultural Heritage Agency (_RCE_ in the dataset) is the official source for monumental statuses**. However, in our
|
| 68 |
data analyses on the national monumental statuses of addressable units in Rotterdam, we concluded that:
|
|
|
|
| 77 |
---
|
| 78 |
|
| 79 |
## Special thanks
|
| 80 |
+
|
| 81 |
to het Kadaster and the Cultural Heritage Agency for their APIs and datasets.
|
| 82 |
|
| 83 |
---
|
monumenten.parquet
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a2b607e43b7e068dc57fb33e816aaac364f0daf8cf1fd0376a3d40f3ab71b966
|
| 3 |
+
size 129082892
|
process_data.py
DELETED
|
@@ -1,229 +0,0 @@
|
|
| 1 |
-
import asyncio
|
| 2 |
-
import glob
|
| 3 |
-
import json
|
| 4 |
-
import logging
|
| 5 |
-
import multiprocessing
|
| 6 |
-
import os
|
| 7 |
-
import xml.etree.ElementTree as ET
|
| 8 |
-
from datetime import datetime
|
| 9 |
-
from typing import List, Optional, Set
|
| 10 |
-
|
| 11 |
-
import pandas as pd
|
| 12 |
-
from datasets import Dataset
|
| 13 |
-
from dotenv import load_dotenv
|
| 14 |
-
from monumenten import MonumentenClient
|
| 15 |
-
|
| 16 |
-
load_dotenv()
|
| 17 |
-
|
| 18 |
-
# Configure logging
|
| 19 |
-
logging.basicConfig(level=logging.INFO)
|
| 20 |
-
logger = logging.getLogger(__name__)
|
| 21 |
-
|
| 22 |
-
# Define constants
|
| 23 |
-
XML_DIRECTORY = "vbo_xmls/"
|
| 24 |
-
INTERMEDIATE_CSV_PATH = "verblijfsobjecten_ids.csv"
|
| 25 |
-
FINAL_CSV_PATH = "monumenten.csv"
|
| 26 |
-
|
| 27 |
-
HF_REPO_ID = "woonstadrotterdam/monumenten"
|
| 28 |
-
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
def is_valid_identificatie(id_value: str) -> bool:
|
| 32 |
-
"""
|
| 33 |
-
Validate if the ID is a proper verblijfsobject ID.
|
| 34 |
-
Valid IDs must be 16 characters long, consist of digits,
|
| 35 |
-
and have '01' at positions 4-5 (0-indexed).
|
| 36 |
-
Example: 'xxxx01xxxxxxxxxx' where x is a digit (e.g., '0304010000269586').
|
| 37 |
-
"""
|
| 38 |
-
if id_value is None:
|
| 39 |
-
return False
|
| 40 |
-
return len(id_value) == 16 and id_value.isdigit() and id_value[4:6] == "01"
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
def extract_identificaties(xml_path: str) -> List[str]:
|
| 44 |
-
"""
|
| 45 |
-
Extract all valid identificatie values from a single XML file using iterative parsing.
|
| 46 |
-
"""
|
| 47 |
-
identificaties = []
|
| 48 |
-
try:
|
| 49 |
-
context = ET.iterparse(xml_path, events=("end",))
|
| 50 |
-
for event, elem in context:
|
| 51 |
-
if elem.tag.endswith("identificatie"):
|
| 52 |
-
id_value = elem.text
|
| 53 |
-
if is_valid_identificatie(id_value):
|
| 54 |
-
identificaties.append(id_value)
|
| 55 |
-
elem.clear() # Free memory
|
| 56 |
-
|
| 57 |
-
if identificaties:
|
| 58 |
-
logger.debug(
|
| 59 |
-
f"Found {len(identificaties)} valid identificatie values in {xml_path}"
|
| 60 |
-
)
|
| 61 |
-
return identificaties
|
| 62 |
-
except Exception as e:
|
| 63 |
-
logger.error(f"Error parsing XML file {xml_path}: {e}")
|
| 64 |
-
return []
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
def get_xml_files() -> List[str]:
|
| 68 |
-
"""
|
| 69 |
-
Get list of XML files from the specified directory.
|
| 70 |
-
"""
|
| 71 |
-
xml_files = glob.glob(os.path.join(XML_DIRECTORY, "*.xml"))
|
| 72 |
-
if not xml_files:
|
| 73 |
-
logger.error(f"No XML files found in {XML_DIRECTORY}")
|
| 74 |
-
else:
|
| 75 |
-
logger.info(f"Found {len(xml_files)} XML files in {XML_DIRECTORY}")
|
| 76 |
-
return xml_files
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
def process_files_parallel(xml_files: List[str]) -> Set[str]:
|
| 80 |
-
"""
|
| 81 |
-
Process XML files in parallel using multiprocessing.
|
| 82 |
-
Returns a set of unique identificaties.
|
| 83 |
-
"""
|
| 84 |
-
unique_identificaties = set()
|
| 85 |
-
|
| 86 |
-
logger.info(f"Starting parallel processing of {len(xml_files)} XML files...")
|
| 87 |
-
with multiprocessing.Pool() as pool:
|
| 88 |
-
results = pool.imap_unordered(extract_identificaties, xml_files)
|
| 89 |
-
for i, file_identificaties in enumerate(results):
|
| 90 |
-
unique_identificaties.update(file_identificaties)
|
| 91 |
-
if (i + 1) % 100 == 0: # Log progress every 100 files
|
| 92 |
-
logger.info(
|
| 93 |
-
f"Processed {i + 1}/{len(xml_files)} files. "
|
| 94 |
-
f"Current unique identificaties: {len(unique_identificaties)}"
|
| 95 |
-
)
|
| 96 |
-
|
| 97 |
-
logger.info(
|
| 98 |
-
f"All files processed. Total unique identificaties found: {len(unique_identificaties)}"
|
| 99 |
-
)
|
| 100 |
-
return unique_identificaties
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
def create_identificaties_dataframe(unique_ids: Set[str]) -> Optional[pd.DataFrame]:
|
| 104 |
-
"""
|
| 105 |
-
Create and save DataFrame from unique identificaties.
|
| 106 |
-
Returns the DataFrame or None if no valid identificaties found.
|
| 107 |
-
"""
|
| 108 |
-
if not unique_ids:
|
| 109 |
-
logger.info("No valid identificaties found.")
|
| 110 |
-
return None
|
| 111 |
-
|
| 112 |
-
df = pd.DataFrame(list(unique_ids), columns=["bag_verblijfsobject_id"])
|
| 113 |
-
logger.info(f"Created DataFrame with {len(df)} unique valid identificaties.")
|
| 114 |
-
|
| 115 |
-
# Save intermediate results
|
| 116 |
-
df.to_csv(INTERMEDIATE_CSV_PATH, index=False)
|
| 117 |
-
logger.info(f"Saved DataFrame to {INTERMEDIATE_CSV_PATH}")
|
| 118 |
-
|
| 119 |
-
# Display info
|
| 120 |
-
print("\nFirst few rows of the extracted identificaties DataFrame:")
|
| 121 |
-
print(df.head())
|
| 122 |
-
print("\nIdentificaties DataFrame Info:")
|
| 123 |
-
df.info()
|
| 124 |
-
|
| 125 |
-
return df
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
async def process_with_monumenten_client(df: pd.DataFrame) -> Optional[pd.DataFrame]:
|
| 129 |
-
"""
|
| 130 |
-
Process the DataFrame using MonumentenClient.
|
| 131 |
-
Returns processed DataFrame or None if processing fails.
|
| 132 |
-
"""
|
| 133 |
-
if df.empty:
|
| 134 |
-
logger.warning("Empty DataFrame provided to MonumentenClient.")
|
| 135 |
-
return None
|
| 136 |
-
|
| 137 |
-
logger.info(f"Processing {len(df)} identificaties with MonumentenClient...")
|
| 138 |
-
try:
|
| 139 |
-
async with MonumentenClient() as client:
|
| 140 |
-
result_df = await client.process_from_df(
|
| 141 |
-
df=df, verblijfsobject_id_col="bag_verblijfsobject_id"
|
| 142 |
-
)
|
| 143 |
-
logger.info("Finished processing with MonumentenClient.")
|
| 144 |
-
return result_df
|
| 145 |
-
except Exception as e:
|
| 146 |
-
logger.error(f"Error processing with MonumentenClient: {e}")
|
| 147 |
-
return None
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
def save_final_results(result_df: Optional[pd.DataFrame]) -> None:
|
| 151 |
-
"""
|
| 152 |
-
Save the final results to CSV if valid data is present.
|
| 153 |
-
"""
|
| 154 |
-
if result_df is not None and not result_df.empty:
|
| 155 |
-
result_df.to_csv(FINAL_CSV_PATH, index=False)
|
| 156 |
-
logger.info(f"Successfully saved final monumenten data to {FINAL_CSV_PATH}")
|
| 157 |
-
print(f"\nFinal data saved to {FINAL_CSV_PATH}")
|
| 158 |
-
print(result_df.head())
|
| 159 |
-
# Push to Hugging Face
|
| 160 |
-
if push_to_huggingface(result_df):
|
| 161 |
-
print(f"\nData successfully pushed to Hugging Face dataset: {HF_REPO_ID}")
|
| 162 |
-
else:
|
| 163 |
-
print("\nFailed to push data to Hugging Face. Check logs for details.")
|
| 164 |
-
elif result_df is not None and result_df.empty:
|
| 165 |
-
logger.info("Processing resulted in an empty DataFrame. Nothing to save.")
|
| 166 |
-
print("\nProcessing resulted in an empty DataFrame.")
|
| 167 |
-
else:
|
| 168 |
-
logger.warning("No valid data to save. Process did not complete successfully.")
|
| 169 |
-
print("\nProcess did not complete successfully or returned no data.")
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
def push_to_huggingface(result_df: pd.DataFrame) -> bool:
|
| 173 |
-
"""
|
| 174 |
-
Push the final results to Hugging Face datasets hub using datasets.push_to_hub
|
| 175 |
-
with a custom split name.
|
| 176 |
-
Returns True if successful, False otherwise.
|
| 177 |
-
"""
|
| 178 |
-
if not HF_TOKEN:
|
| 179 |
-
logger.error("No Hugging Face token found in environment variables (HF_TOKEN)")
|
| 180 |
-
return False
|
| 181 |
-
|
| 182 |
-
if result_df.empty:
|
| 183 |
-
logger.warning(
|
| 184 |
-
"Result DataFrame is empty. Skipping push of main dataset to Hugging Face."
|
| 185 |
-
)
|
| 186 |
-
else:
|
| 187 |
-
logger.info(
|
| 188 |
-
f"Converting DataFrame with {len(result_df)} rows to Hugging Face Dataset."
|
| 189 |
-
)
|
| 190 |
-
|
| 191 |
-
hf_dataset_single = Dataset.from_pandas(result_df)
|
| 192 |
-
|
| 193 |
-
hf_dataset_single.push_to_hub(
|
| 194 |
-
repo_id=HF_REPO_ID,
|
| 195 |
-
commit_message=f"Update monumenten dataset",
|
| 196 |
-
token=HF_TOKEN,
|
| 197 |
-
)
|
| 198 |
-
logger.info(f"Successfully pushed dataset dictionary to {HF_REPO_ID}")
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
async def main() -> Optional[pd.DataFrame]:
|
| 202 |
-
"""
|
| 203 |
-
Main function orchestrating the entire process.
|
| 204 |
-
Returns the final processed DataFrame or None if processing fails.
|
| 205 |
-
"""
|
| 206 |
-
# Get XML files
|
| 207 |
-
xml_files = get_xml_files()
|
| 208 |
-
if not xml_files:
|
| 209 |
-
return None
|
| 210 |
-
|
| 211 |
-
# Process files and get unique identificaties
|
| 212 |
-
unique_identificaties = process_files_parallel(xml_files)
|
| 213 |
-
|
| 214 |
-
# Create DataFrame from unique identificaties
|
| 215 |
-
df = create_identificaties_dataframe(unique_identificaties)
|
| 216 |
-
if df is None:
|
| 217 |
-
return None
|
| 218 |
-
|
| 219 |
-
# Process with MonumentenClient
|
| 220 |
-
result_df = await process_with_monumenten_client(df)
|
| 221 |
-
return result_df
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
if __name__ == "__main__":
|
| 225 |
-
# Run main process
|
| 226 |
-
result_dataframe = asyncio.run(main())
|
| 227 |
-
|
| 228 |
-
# Save results
|
| 229 |
-
save_final_results(result_dataframe)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
CHANGED
|
@@ -1,4 +1,7 @@
|
|
| 1 |
-
huggingface-hub
|
| 2 |
-
dotenv
|
| 3 |
-
monumenten==
|
| 4 |
-
datasets
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
huggingface-hub>=0.31
|
| 2 |
+
python-dotenv>=1
|
| 3 |
+
monumenten==1.1.0
|
| 4 |
+
datasets>=3
|
| 5 |
+
requests>=2
|
| 6 |
+
tqdm>=4
|
| 7 |
+
pandas>=2
|
run_pipeline.py
ADDED
|
@@ -0,0 +1,935 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Combined pipeline: process XML + MonumentenClient -> monumenten.csv,
|
| 4 |
+
then add postcodes via Kadaster -> CSV -> Parquet, with optional Hugging Face push.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import argparse
|
| 8 |
+
import asyncio
|
| 9 |
+
import glob
|
| 10 |
+
import json
|
| 11 |
+
import logging
|
| 12 |
+
import multiprocessing
|
| 13 |
+
import os
|
| 14 |
+
import signal
|
| 15 |
+
import sqlite3
|
| 16 |
+
import sys
|
| 17 |
+
import threading
|
| 18 |
+
import time
|
| 19 |
+
import xml.etree.ElementTree as ET
|
| 20 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 21 |
+
from datetime import datetime
|
| 22 |
+
from logging.handlers import RotatingFileHandler
|
| 23 |
+
from pathlib import Path
|
| 24 |
+
from typing import Dict, Iterable, List, Optional, Sequence, Set, Tuple
|
| 25 |
+
|
| 26 |
+
import pandas as pd
|
| 27 |
+
import requests
|
| 28 |
+
from datasets import Dataset
|
| 29 |
+
from dotenv import load_dotenv
|
| 30 |
+
from huggingface_hub import HfApi
|
| 31 |
+
from tqdm import tqdm
|
| 32 |
+
|
| 33 |
+
load_dotenv()
|
| 34 |
+
|
| 35 |
+
# --------------------- Unified configuration ---------------------
|
| 36 |
+
|
| 37 |
+
# Paths (stage 1 output = stage 2 input)
|
| 38 |
+
XML_DIRECTORY = "vbo_xmls/"
|
| 39 |
+
INTERMEDIATE_CSV_PATH = "verblijfsobjecten_ids.csv"
|
| 40 |
+
MONUMENTEN_CSV = "monumenten.csv"
|
| 41 |
+
OUTPUT_CSV = "monumenten_with_postcodes.csv"
|
| 42 |
+
OUTPUT_PARQUET = "monumenten.parquet"
|
| 43 |
+
ID_COL = "bag_verblijfsobject_id"
|
| 44 |
+
|
| 45 |
+
# Postcode stage (Kadaster)
|
| 46 |
+
KADASTER_ENDPOINT = "https://data.kkg.kadaster.nl/service/sparql"
|
| 47 |
+
BATCH_SIZE = 1000
|
| 48 |
+
MAX_RETRIES = 5
|
| 49 |
+
RETRY_DELAY = 10
|
| 50 |
+
MAX_WORKERS = 6
|
| 51 |
+
CHUNK_SIZE = 10000
|
| 52 |
+
CHECKPOINT_INTERVAL = 10
|
| 53 |
+
SQLITE_DB = "postcode_cache.sqlite"
|
| 54 |
+
CHECKPOINT_FILE = "postcode_update_checkpoint.json"
|
| 55 |
+
RESET_CACHE = False
|
| 56 |
+
|
| 57 |
+
# Hugging Face
|
| 58 |
+
HF_REPO_ID = "woonstadrotterdam/monumenten"
|
| 59 |
+
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 60 |
+
|
| 61 |
+
# Global flag for graceful shutdown
|
| 62 |
+
shutdown_requested = False
|
| 63 |
+
|
| 64 |
+
# --------------------- Logging ---------------------
|
| 65 |
+
|
| 66 |
+
logging.basicConfig(
|
| 67 |
+
level=logging.INFO,
|
| 68 |
+
format="%(asctime)s %(levelname)s:%(name)s:%(message)s",
|
| 69 |
+
handlers=[logging.StreamHandler(sys.stdout)],
|
| 70 |
+
)
|
| 71 |
+
logger = logging.getLogger(__name__)
|
| 72 |
+
try:
|
| 73 |
+
_fh = RotatingFileHandler(
|
| 74 |
+
"monumenten.log", maxBytes=5 * 1024 * 1024, backupCount=3
|
| 75 |
+
)
|
| 76 |
+
_fh.setLevel(logging.INFO)
|
| 77 |
+
_fh.setFormatter(logging.Formatter("%(asctime)s %(levelname)s:%(name)s:%(message)s"))
|
| 78 |
+
logger.addHandler(_fh)
|
| 79 |
+
except Exception as _e:
|
| 80 |
+
logger.debug(f"Could not attach file logger: {_e}")
|
| 81 |
+
|
| 82 |
+
# Thread-local storage for requests session
|
| 83 |
+
thread_local = threading.local()
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def signal_handler(signum, frame):
    """Handle SIGINT/SIGTERM by requesting a graceful shutdown.

    Sets the module-wide ``shutdown_requested`` flag, which the long-running
    fetch loops poll so they can stop at a safe point instead of being killed
    mid-batch.
    """
    global shutdown_requested
    # Log first so the operator sees the shutdown was acknowledged.
    logger.info(f"Received signal {signum}. Starting graceful shutdown...")
    shutdown_requested = True
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# ===================== Stage 2: Postcode (Kadaster + SQLite) =====================
|
| 93 |
+
|
| 94 |
+
def get_session() -> requests.Session:
    """Return this thread's cached ``requests.Session``, creating it lazily.

    Each worker thread gets its own session (stored in ``thread_local``) so
    HTTP connection pools are never shared across threads. The adapter keeps
    a single connection and disables urllib3 retries; retrying is handled
    explicitly by the caller.
    """
    session = getattr(thread_local, "session", None)
    if session is None:
        session = requests.Session()
        session.headers.update(
            {
                "User-Agent": "Monumenten-Postcode-Updater/1.0",
                "Accept": "application/sparql-results+json",
                "Content-Type": "application/x-www-form-urlencoded",
            }
        )
        # One pooled connection per thread; max_retries=0 because backoff
        # logic lives in the query function, not the transport layer.
        http_adapter = requests.adapters.HTTPAdapter(
            pool_connections=1, pool_maxsize=1, max_retries=0
        )
        for scheme in ("http://", "https://"):
            session.mount(scheme, http_adapter)
        thread_local.session = session
    return session
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def ensure_columns(conn: sqlite3.Connection, table: str, columns: Dict[str, str]):
    """Add any columns missing from *table* (lightweight schema migration).

    *columns* maps column name -> SQLite type. Columns that already exist are
    left untouched; the change is committed before returning.

    NOTE(review): the table and column names are interpolated directly into
    SQL, so they must come from trusted internal constants only.
    """
    info_rows = conn.execute(f"PRAGMA table_info({table});").fetchall()
    present = {row[1] for row in info_rows}  # row[1] is the column name
    for name, sql_type in columns.items():
        if name not in present:
            conn.execute(f"ALTER TABLE {table} ADD COLUMN {name} {sql_type};")
    conn.commit()
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def init_db(db_path: str) -> sqlite3.Connection:
    """Open (or create) the postcode cache database and prepare its schema.

    Applies speed-oriented PRAGMAs (WAL journal, no fsync, in-memory temp
    store) — acceptable because this file is a rebuildable cache — then
    creates the ``addresses`` table if needed, backfills any missing columns
    via ``ensure_columns``, and indexes ``identificatie``.

    Returns the open connection.
    """
    conn = sqlite3.connect(db_path)
    # Cache-style database: favour write throughput over durability.
    for pragma in (
        "PRAGMA journal_mode=WAL;",
        "PRAGMA synchronous=OFF;",
        "PRAGMA temp_store=MEMORY;",
    ):
        conn.execute(pragma)
    conn.execute(
        """
        CREATE TABLE IF NOT EXISTS addresses (
            identificatie TEXT PRIMARY KEY,
            postcode TEXT,
            huisnummer TEXT,
            straatnaam TEXT,
            plaatsnaam TEXT
        )
        """
    )
    expected_schema = {
        "identificatie": "TEXT",
        "postcode": "TEXT",
        "huisnummer": "TEXT",
        "straatnaam": "TEXT",
        "plaatsnaam": "TEXT",
    }
    ensure_columns(conn, "addresses", expected_schema)
    conn.execute("CREATE INDEX IF NOT EXISTS idx_ident ON addresses(identificatie);")
    conn.commit()
    return conn
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def upsert_results(conn: sqlite3.Connection, results: List[Dict]) -> None:
    """Insert-or-replace a batch of address rows into ``addresses``.

    Each dict in *results* may carry ``identificatie``, ``postcode``,
    ``huisnummer_volledig``, ``straatnaam`` and ``plaatsnaam``; missing keys
    default to "". No-op on an empty batch. All rows go in one transaction
    (``with conn`` commits on success, rolls back on error).
    """
    if not results:
        return
    rows = [
        (
            record.get("identificatie", ""),
            record.get("postcode", ""),
            record.get("huisnummer_volledig", ""),
            record.get("straatnaam", ""),
            record.get("plaatsnaam", ""),
        )
        for record in results
    ]
    with conn:
        conn.executemany(
            "INSERT OR REPLACE INTO addresses (identificatie, postcode, huisnummer, straatnaam, plaatsnaam) VALUES (?, ?, ?, ?, ?)",
            rows,
        )
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def fetch_mapping_for_ids(
    conn: sqlite3.Connection, ids: Sequence[str]
) -> Dict[str, Tuple[str, str, str, str]]:
    """Look up cached address data for *ids* in the SQLite cache.

    Returns ``{identificatie: (straatnaam, huisnummer, postcode, plaatsnaam)}``
    with NULL columns normalised to "". IDs not present in the cache are
    simply omitted. Queries in chunks of 500 ids to stay well under SQLite's
    bound-parameter limit.
    """
    mapping: Dict[str, Tuple[str, str, str, str]] = {}
    if not ids:
        return mapping
    chunk_size = 500
    for start in range(0, len(ids), chunk_size):
        chunk = ids[start : start + chunk_size]
        marks = ",".join("?" * len(chunk))
        query = (
            "SELECT identificatie, straatnaam, huisnummer, postcode, plaatsnaam "
            f"FROM addresses WHERE identificatie IN ({marks})"
        )
        for ident, street, number, zipcode, place in conn.execute(query, list(chunk)):
            mapping[ident] = (street or "", number or "", zipcode or "", place or "")
    return mapping
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def build_query(ids: List[str]) -> str:
    """Build the SPARQL query that fetches main-address data for *ids*.

    Each BAG verblijfsobject id is expanded to its IRI and injected into a
    VALUES block; for every id the query returns postcode, full house number
    (number + optional letter + optional "-toevoeging"), street and place,
    with the bare id recovered from the IRI via STRAFTER.
    """
    iri_prefix = "https://bag.basisregistraties.overheid.nl/id/verblijfsobject/"
    iri_values = "\n    ".join(f"<{iri_prefix}{vid}>" for vid in ids)
    return f"""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX imx: <http://modellen.geostandaarden.nl/def/imx-geo#>

SELECT DISTINCT
  ?identificatie
  ?postcode
  ?huisnummer_volledig
  ?straatnaam
  ?plaatsnaam
WHERE {{
  VALUES ?verblijfsobjectIri {{
    {iri_values}
  }}

  ?adres prov:wasDerivedFrom ?verblijfsobjectIri ;
         imx:isHoofdadres true .

  OPTIONAL {{ ?adres imx:postcode ?postcode . }}
  OPTIONAL {{ ?adres imx:huisnummer ?huisnummer . }}
  OPTIONAL {{ ?adres imx:huisletter ?huisletter . }}
  OPTIONAL {{ ?adres imx:huisnummertoevoeging ?huisnummertoevoeging . }}
  OPTIONAL {{ ?adres imx:straatnaam ?straatnaam . }}
  OPTIONAL {{ ?adres imx:plaatsnaam ?plaatsnaam . }}

  BIND(STRAFTER(STR(?verblijfsobjectIri), "{iri_prefix}") AS ?identificatie)
  BIND(CONCAT(
    STR(?huisnummer),
    IF(BOUND(?huisletter), ?huisletter, ""),
    IF(BOUND(?huisnummertoevoeging), CONCAT("-", ?huisnummertoevoeging), "")
  ) AS ?huisnummer_volledig)
}}
ORDER BY ?identificatie
""".strip()
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def query_kadaster(ids: List[str]) -> List[Dict]:
    """POST one batch of verblijfsobject ids to the Kadaster SPARQL endpoint.

    Retries up to MAX_RETRIES times on transient HTTP statuses
    (429/500/502/503/504, honouring a numeric Retry-After header when present)
    and on request exceptions, with exponential backoff based on RETRY_DELAY.
    Returns a list of flat result dicts, or [] when shutdown was requested,
    all attempts failed, or an unexpected error occurred.
    """
    session = get_session()
    for attempt in range(MAX_RETRIES):
        try:
            if shutdown_requested:
                return []
            payload = {"query": build_query(ids), "format": "json"}
            resp = session.post(KADASTER_ENDPOINT, data=payload, timeout=(30, 90))
            if resp.status_code in (429, 500, 502, 503, 504):
                # Transient server-side failure: back off and retry.
                retry_after = resp.headers.get("Retry-After")
                if retry_after and retry_after.isdigit():
                    wait_seconds = int(retry_after)
                else:
                    wait_seconds = RETRY_DELAY * (2 ** attempt)
                logger.warning(
                    f"{resp.status_code} for batch {ids[:3]}..., waiting {wait_seconds}s"
                )
                time.sleep(wait_seconds)
                continue
            resp.raise_for_status()
            bindings = resp.json().get("results", {}).get("bindings", [])
            parsed: List[Dict] = []
            for binding in bindings:
                parsed.append(
                    {
                        "identificatie": binding.get("identificatie", {}).get("value", ""),
                        "postcode": binding.get("postcode", {}).get("value", ""),
                        "huisnummer_volledig": binding.get("huisnummer_volledig", {}).get(
                            "value", ""
                        ),
                        "straatnaam": binding.get("straatnaam", {}).get("value", ""),
                        "plaatsnaam": binding.get("plaatsnaam", {}).get("value", ""),
                    }
                )
            return parsed
        except requests.exceptions.RequestException as e:
            if attempt < MAX_RETRIES - 1:
                delay = RETRY_DELAY * (2 ** attempt)
                logger.warning(
                    f"Request error (attempt {attempt+1}/{MAX_RETRIES}) {e}; sleeping {delay}s"
                )
                time.sleep(delay)
                continue
            logger.error(f"All attempts failed for batch {ids[:3]}...")
            return []
        except Exception as e:
            # Non-network failure (e.g. JSON decode): do not retry.
            logger.error(f"Unexpected error for batch {ids[:3]}...: {e}")
            return []
    return []
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def load_checkpoint(path: str) -> Dict:
    """Load the prefetch checkpoint from *path*.

    Returns a dict with a single key ``"completed_batches"`` holding a set of
    batch indices that were already fetched. On a missing file or any
    read/parse error, returns an empty set so the pipeline simply re-fetches.
    """
    if Path(path).exists():
        try:
            # Use a context manager so the handle is closed promptly; the
            # original `json.load(open(path, "r"))` leaked the file handle.
            with open(path, "r") as f:
                obj = json.load(f)
            completed = set(obj.get("completed_batches", []))
            return {"completed_batches": completed}
        except Exception as e:
            logger.warning(f"Failed to load checkpoint: {e}")
    return {"completed_batches": set()}
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def save_checkpoint(path: str, completed_batches: Iterable[int]):
    """Persist the set of completed batch indices to *path* as JSON.

    Indices are de-duplicated and written in sorted order so the checkpoint
    file is stable and diff-friendly.
    """
    unique_sorted = sorted(int(index) for index in set(completed_batches))
    data = {"completed_batches": unique_sorted}
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
    logger.info(f"Checkpoint saved: {len(data['completed_batches'])} batches completed")
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def prefetch_to_sqlite(conn: sqlite3.Connection, all_ids: List[str]):
    """Fetch address data for *all_ids* from Kadaster into the SQLite cache.

    Splits the ids into BATCH_SIZE batches, skips batches already recorded in
    the checkpoint (after spot-checking up to 10 of their ids against the
    cache), fans the remaining batches out over MAX_WORKERS threads, upserts
    each batch's results, and checkpoints every CHECKPOINT_INTERVAL completed
    batches. Honours the global shutdown flag between completed futures, so
    an interrupted run can resume from the checkpoint.
    """
    logger.info("Prefetching address data into SQLite cache...")
    batches = [
        (i // BATCH_SIZE, all_ids[i : i + BATCH_SIZE])
        for i in range(0, len(all_ids), BATCH_SIZE)
    ]
    completed = load_checkpoint(CHECKPOINT_FILE)["completed_batches"]

    with tqdm(total=len(batches), desc="Prefetch batches") as pbar:
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            pending = {}
            for batch_index, batch_ids in batches:
                if batch_index in completed:
                    # Checkpoint says done — verify a small sample is really
                    # cached before trusting it; otherwise re-submit.
                    sample_ids = batch_ids[:10]
                    marks = ",".join(["?"] * len(sample_ids))
                    count_row = conn.execute(
                        f"SELECT COUNT(1) FROM addresses WHERE identificatie IN ({marks})",
                        list(sample_ids),
                    ).fetchone()
                    if count_row and int(count_row[0]) == len(sample_ids):
                        pbar.update(1)
                        continue
                pending[executor.submit(query_kadaster, batch_ids)] = batch_index

            since_last_save = 0
            for future in as_completed(pending):
                if shutdown_requested:
                    break
                batch_index = pending[future]
                try:
                    batch_results = future.result()
                except Exception as e:
                    logger.warning(f"Batch {batch_index} raised: {e}")
                    batch_results = []
                upsert_results(conn, batch_results)
                completed.add(batch_index)
                since_last_save += 1
                pbar.update(1)
                if since_last_save >= CHECKPOINT_INTERVAL:
                    save_checkpoint(CHECKPOINT_FILE, completed)
                    since_last_save = 0

    save_checkpoint(CHECKPOINT_FILE, completed)
    logger.info("Prefetch stage completed (or interrupted).")
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def update_csv_from_db(
    conn: sqlite3.Connection, input_file: str, output_file: str
):
    """Stream input_file in CHUNK_SIZE chunks, filling empty address columns
    (straatnaam, huisnummer, postcode, plaatsnaam) from the SQLite cache, and
    write the enriched rows to output_file.

    Only cells that are currently NaN or "" are overwritten, so any address
    data already present in the input CSV is preserved.
    """
    logger.info("Updating CSV using SQLite cache...")
    first_chunk = True
    for chunk_idx, chunk in enumerate(
        pd.read_csv(input_file, chunksize=CHUNK_SIZE, dtype={"bag_verblijfsobject_id": "str"})
    ):
        logger.info(f"Processing chunk {chunk_idx + 1}")
        # Guarantee the four address columns exist so the fill logic is safe.
        for col in ["straatnaam", "huisnummer", "postcode", "plaatsnaam"]:
            if col not in chunk.columns:
                chunk[col] = ""
        # Without the id column there is nothing to look up; skip the chunk.
        if "bag_verblijfsobject_id" not in chunk.columns:
            continue
        ids = chunk["bag_verblijfsobject_id"].fillna("").astype(str).tolist()
        mapping = fetch_mapping_for_ids(conn, ids)
        if mapping:
            # mapping: id -> (straatnaam, huisnummer, postcode, plaatsnaam)
            map_df = pd.DataFrame.from_dict(
                mapping,
                orient="index",
                columns=["straatnaam_new", "huisnummer_new", "postcode_new", "plaatsnaam_new"],
            )
            map_df.index.name = "bag_verblijfsobject_id"
            map_df = map_df.reset_index()
            chunk = chunk.merge(map_df, on="bag_verblijfsobject_id", how="left")
            # Fill only missing/empty cells; never clobber existing values.
            for src, dst in [
                ("straatnaam_new", "straatnaam"),
                ("huisnummer_new", "huisnummer"),
                ("postcode_new", "postcode"),
                ("plaatsnaam_new", "plaatsnaam"),
            ]:
                mask = (chunk[dst].isna()) | (chunk[dst] == "")
                chunk.loc[mask, dst] = chunk.loc[mask, src].fillna("")
            chunk.drop(
                columns=["straatnaam_new", "huisnummer_new", "postcode_new", "plaatsnaam_new"],
                inplace=True,
            )
        # First chunk writes the header; subsequent chunks append without it.
        if first_chunk:
            chunk.to_csv(output_file, index=False, mode="w")
            first_chunk = False
        else:
            chunk.to_csv(output_file, index=False, mode="a", header=False)
    logger.info(f"CSV update completed. Output saved to {output_file}")
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def add_postcode_main(reset_cache: bool = False) -> bool:
    """Run postcode stage: MONUMENTEN_CSV -> Kadaster + SQLite -> OUTPUT_CSV. Returns True on success.

    Args:
        reset_cache: when True, delete the SQLite cache and checkpoint first so
            every id is re-fetched from the Kadaster API.

    Returns:
        True when the enriched CSV was written; False when the input is
        missing or a shutdown was requested during prefetch.
    """
    # Install handlers so Ctrl-C / SIGTERM flip `shutdown_requested` instead of
    # killing the process mid-batch.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    if not Path(MONUMENTEN_CSV).exists():
        logger.error(f"Input file {MONUMENTEN_CSV} not found")
        return False

    if reset_cache:
        try:
            if Path(SQLITE_DB).exists():
                Path(SQLITE_DB).unlink()
                logger.info("Removed existing SQLite cache for full refresh")
            if Path(CHECKPOINT_FILE).exists():
                Path(CHECKPOINT_FILE).unlink()
                logger.info("Removed existing checkpoint for full refresh")
        except Exception as e:
            logger.warning(f"Failed to reset cache/checkpoint: {e}")

    conn = init_db(SQLITE_DB)

    # First pass over the CSV: count rows and collect the distinct ids that
    # need address data, without holding the whole file in memory.
    total_rows = 0
    unique_ids = set()
    logger.info("Counting total rows and collecting IDs...")
    for chunk in pd.read_csv(
        MONUMENTEN_CSV, chunksize=CHUNK_SIZE, dtype={"bag_verblijfsobject_id": "str"}
    ):
        total_rows += len(chunk)
        if "bag_verblijfsobject_id" in chunk.columns:
            unique_ids.update(chunk["bag_verblijfsobject_id"].dropna().unique())
    logger.info(f"Total rows: {total_rows:,}")
    logger.info(f"Unique IDs to process: {len(unique_ids):,}")

    id_list = list(unique_ids)
    prefetch_to_sqlite(conn, id_list)

    # A partial cache is fine (checkpointed), but don't write a partial CSV.
    if shutdown_requested:
        logger.info("Shutdown requested. Exiting before CSV update.")
        return False

    update_csv_from_db(conn, MONUMENTEN_CSV, OUTPUT_CSV)
    logger.info("Postcode stage done.")
    return True
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
# ===================== Parquet and HF push =====================
|
| 443 |
+
|
| 444 |
+
def csv_to_parquet(csv_path: str, parquet_path: str) -> bool:
    """Stream CSV to Parquet in chunks to avoid loading 10M+ rows into memory.

    Args:
        csv_path: source CSV path.
        parquet_path: destination Parquet path (overwritten).

    Returns:
        True on success, False when pyarrow is missing, the CSV does not
        exist, or writing fails.
    """
    try:
        import pyarrow as pa
        import pyarrow.parquet as pq
    except ImportError:
        logger.error("pyarrow is required for Parquet output. pip install pyarrow")
        return False

    if not Path(csv_path).exists():
        logger.error(f"CSV file not found: {csv_path}")
        return False

    logger.info(f"Writing Parquet from {csv_path} to {parquet_path} (streaming)...")
    schema = None
    writer = None
    try:
        for chunk_idx, chunk in enumerate(
            pd.read_csv(
                csv_path,
                chunksize=CHUNK_SIZE,
                dtype={"bag_verblijfsobject_id": str},
            )
        ):
            table = pa.Table.from_pandas(chunk, preserve_index=False)
            # The first chunk fixes the schema for the whole file; later chunks
            # must be compatible or write_table raises.
            if schema is None:
                schema = table.schema
                writer = pq.ParquetWriter(parquet_path, schema)
            writer.write_table(table)
            if (chunk_idx + 1) % 100 == 0:
                # NOTE(review): approximate count — the last chunk may be
                # smaller than CHUNK_SIZE.
                logger.info(f"Parquet: wrote {(chunk_idx + 1) * CHUNK_SIZE:,} rows")
        if writer:
            writer.close()
        logger.info(f"Parquet saved to {parquet_path}")
        return True
    except Exception as e:
        logger.error(f"Error writing Parquet: {e}")
        # Best-effort close so the file handle is released; the partial
        # Parquet file is left on disk.
        if writer:
            try:
                writer.close()
            except Exception:
                pass
        return False
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def push_to_huggingface(result_df: pd.DataFrame) -> bool:
    """Push to HF: sort by postcode, huisnummer; Dataset.from_pandas -> push_to_hub; create date tag.

    Requires the module-level HF_TOKEN; pushes to HF_REPO_ID and then tags the
    repo with today's date (YYYY-MM-DD). Tag creation failure is non-fatal.

    Returns:
        True when the dataset was pushed (even if tagging failed), else False.
    """
    if not HF_TOKEN:
        logger.error("No Hugging Face token (HF_TOKEN)")
        return False
    if result_df.empty:
        logger.warning("Result DataFrame is empty. Skipping push.")
        return False
    try:
        logger.info(f"Converting {len(result_df):,} rows to Hugging Face Dataset.")
        hf_dataset = Dataset.from_pandas(result_df)
        hf_dataset.push_to_hub(
            repo_id=HF_REPO_ID,
            commit_message=f"Update monumenten dataset ({len(result_df):,} rows)",
            token=HF_TOKEN,
        )
        logger.info(f"Successfully pushed to {HF_REPO_ID}")
        # Date tag makes each refresh addressable (e.g. revision="2024-01-31").
        current_date_tag = datetime.now().strftime("%Y-%m-%d")
        try:
            api = HfApi(token=HF_TOKEN)
            api.create_tag(
                repo_id=HF_REPO_ID,
                repo_type="dataset",
                tag=current_date_tag,
            )
            logger.info(f"Created tag '{current_date_tag}'")
        except Exception as e:
            # Tagging can fail (e.g. tag already exists today) without
            # invalidating the push itself.
            logger.warning(f"Could not create tag '{current_date_tag}': {e}")
        return True
    except Exception as e:
        logger.error(f"Error pushing to Hugging Face: {e}")
        return False
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
# ===================== Stage 1: Process (XML + MonumentenClient) =====================
|
| 524 |
+
|
| 525 |
+
def is_valid_identificatie(id_value: str) -> bool:
    """Return True when *id_value* looks like a valid BAG object id.

    A valid id is exactly 16 digits whose object-type code (characters at
    index 4-5) is one of "01", "02" or "03".
    """
    if id_value is None:
        return False
    if len(id_value) != 16 or not id_value.isdigit():
        return False
    return id_value[4:6] in ("01", "02", "03")
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
def extract_identificaties(xml_path: str) -> List[str]:
    """Stream-parse one XML file and collect every valid BAG identificatie."""
    found: List[str] = []
    try:
        for _event, node in ET.iterparse(xml_path, events=("end",)):
            if node.tag.endswith("identificatie"):
                value = node.text
                if is_valid_identificatie(value):
                    found.append(value)
            # Free the element immediately to keep memory flat on large files.
            node.clear()
        return found
    except Exception as exc:
        logger.error(f"Error parsing XML file {xml_path}: {exc}")
        return []
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def get_xml_files() -> List[str]:
    """Return the paths of all *.xml files in XML_DIRECTORY, logging the outcome."""
    found = glob.glob(os.path.join(XML_DIRECTORY, "*.xml"))
    if found:
        logger.info(f"Found {len(found)} XML files in {XML_DIRECTORY}")
    else:
        logger.error(f"No XML files found in {XML_DIRECTORY}")
    return found
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
def process_files_parallel(xml_files: List[str]) -> Set[str]:
    """Extract identificaties from every XML file using a process pool.

    Returns the union of valid ids found across all files; order of file
    completion is not deterministic (imap_unordered) but the result set is.
    """
    collected: Set[str] = set()
    logger.info(f"Starting parallel processing of {len(xml_files)} XML files...")
    with multiprocessing.Pool() as pool:
        done = 0
        for file_ids in pool.imap_unordered(extract_identificaties, xml_files):
            collected.update(file_ids)
            done += 1
            if done % 100 == 0:
                logger.info(
                    f"Processed {done}/{len(xml_files)} files. Unique: {len(collected)}"
                )
    logger.info(f"Total unique identificaties: {len(collected)}")
    return collected
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
def create_identificaties_dataframe(unique_ids: Set[str]) -> Optional[pd.DataFrame]:
    """Persist the unique ids to INTERMEDIATE_CSV_PATH and return them as a DataFrame.

    Returns None (after logging) when no valid ids were collected.
    """
    if not unique_ids:
        logger.info("No valid identificaties found.")
        return None
    frame = pd.DataFrame(list(unique_ids), columns=["bag_verblijfsobject_id"])
    logger.info(f"Created DataFrame with {len(frame)} unique valid identificaties.")
    frame.to_csv(INTERMEDIATE_CSV_PATH, index=False)
    logger.info(f"Saved to {INTERMEDIATE_CSV_PATH}")
    return frame
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
def get_processed_ids_set() -> Set[str]:
    """Return the set of ids already present in MONUMENTEN_CSV.

    Reads only the id column (``usecols``) so the full, wide results CSV is
    never loaded into memory just to extract one column.  Any read failure
    (missing column, parse error) degrades to an empty set with a warning,
    matching the behavior of the previous full-file read.

    Returns:
        Set of processed bag_verblijfsobject_id strings; empty when the file
        is absent or unreadable.
    """
    if not os.path.exists(MONUMENTEN_CSV):
        return set()
    try:
        processed_df = pd.read_csv(
            MONUMENTEN_CSV,
            usecols=["bag_verblijfsobject_id"],
            dtype={"bag_verblijfsobject_id": str},
        )
        processed_ids = set(processed_df["bag_verblijfsobject_id"].unique())
        logger.info(f"Loaded {len(processed_ids)} processed IDs from {MONUMENTEN_CSV}")
        return processed_ids
    except Exception as e:
        logger.warning(f"Could not load from {MONUMENTEN_CSV}: {e}")
        return set()
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
def get_unprocessed_ids_df() -> Optional[pd.DataFrame]:
    """Return a DataFrame of source ids not yet present in MONUMENTEN_CSV.

    Returns the full source DataFrame when nothing has been processed yet, and
    None when the source list is missing/invalid or when every id has already
    been processed.  The remaining ids are also written to unprocessed_ids.csv
    for inspection.
    """
    if not os.path.exists(INTERMEDIATE_CSV_PATH):
        logger.warning(f"Source IDs file not found: {INTERMEDIATE_CSV_PATH}")
        return None
    source_df = pd.read_csv(
        INTERMEDIATE_CSV_PATH, dtype={"bag_verblijfsobject_id": str}
    )
    if "bag_verblijfsobject_id" not in source_df.columns:
        logger.error("Source file missing column 'bag_verblijfsobject_id'")
        return None
    source_ids = set(source_df["bag_verblijfsobject_id"].unique())
    logger.info(f"Source IDs: {len(source_ids)}")
    processed_ids = get_processed_ids_set()
    # Nothing processed yet: every source id still needs processing.
    if not processed_ids:
        return source_df
    unprocessed_ids = source_ids - processed_ids
    logger.info(f"Unprocessed: {len(unprocessed_ids)}")
    if not unprocessed_ids:
        logger.info("All IDs have been processed!")
        return None
    unprocessed_df = pd.DataFrame(
        list(unprocessed_ids), columns=["bag_verblijfsobject_id"]
    )
    unprocessed_df.to_csv("unprocessed_ids.csv", index=False)
    logger.info("Saved unprocessed IDs to unprocessed_ids.csv")
    return unprocessed_df
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
def load_existing_progress() -> Optional[pd.DataFrame]:
    """Return previously saved results from MONUMENTEN_CSV, or None if absent/empty/unreadable."""
    if not os.path.exists(MONUMENTEN_CSV):
        return None
    try:
        previous = pd.read_csv(
            MONUMENTEN_CSV, dtype={"bag_verblijfsobject_id": str}
        )
    except Exception as exc:
        logger.warning(f"Could not load from {MONUMENTEN_CSV}: {exc}")
        return None
    if previous.empty:
        return None
    logger.info(
        f"Loaded existing progress from {MONUMENTEN_CSV}: {len(previous)} rows"
    )
    return previous
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
async def process_with_monumenten_client_resume(
    df: pd.DataFrame,
) -> Optional[pd.DataFrame]:
    """Enrich the ids in *df* with monument data via MonumentenClient, in batches.

    Existing rows from MONUMENTEN_CSV are loaded first; each successful batch
    is appended, deduplicated on bag_verblijfsobject_id, and the whole result
    is re-saved to MONUMENTEN_CSV so a crash loses at most one batch.  Rows
    the client returns for ids outside the current batch are discarded.

    Returns the accumulated (deduplicated) DataFrame, or None when *df* is
    empty or nothing was ever accumulated.
    """
    if df.empty:
        logger.warning("Empty DataFrame provided to MonumentenClient.")
        return None

    from monumenten import MonumentenClient

    # ~100 batches for large inputs, ~10 for small ones.
    total_rows = len(df)
    if total_rows >= 10_000:
        batch_size = max(1, total_rows // 100)
        num_batches = 100
    else:
        batch_size = max(1, total_rows // 10)
        num_batches = 10
    logger.info(
        f"Processing {total_rows} unprocessed in {num_batches} batches of ~{batch_size}"
    )

    existing_results = load_existing_progress()
    processed_df = existing_results if existing_results is not None else pd.DataFrame()

    try:
        async with MonumentenClient() as client:
            for batch_num in range(num_batches):
                start_idx = batch_num * batch_size
                end_idx = min(start_idx + batch_size, total_rows)
                if start_idx >= total_rows:
                    break
                current_batch = df.iloc[start_idx:end_idx].copy()
                logger.info(
                    f"Batch {batch_num + 1}/{num_batches}: rows {start_idx + 1}-{end_idx}"
                )
                try:
                    # NOTE(review): start_time is assigned but never used.
                    start_time = time.time()
                    batch_result = await client.process_from_df(
                        df=current_batch,
                        verblijfsobject_id_col="bag_verblijfsobject_id",
                    )
                    if batch_result is not None and not batch_result.empty:
                        # Keep only rows for ids that were actually in this batch.
                        allowed_ids = set(
                            current_batch["bag_verblijfsobject_id"].astype(str).unique()
                        )
                        out_of_batch_mask = (
                            ~batch_result["bag_verblijfsobject_id"]
                            .astype(str)
                            .isin(allowed_ids)
                        )
                        if out_of_batch_mask.any():
                            batch_result = batch_result.loc[~out_of_batch_mask].copy()
                        if batch_result.empty:
                            continue
                        processed_df = pd.concat(
                            [processed_df, batch_result], ignore_index=True
                        )
                        processed_df = processed_df.drop_duplicates(
                            subset=["bag_verblijfsobject_id"], keep="first"
                        )
                        processed_df["bag_verblijfsobject_id"] = processed_df[
                            "bag_verblijfsobject_id"
                        ].astype(str)
                        # Persist after every batch so progress survives a crash.
                        processed_df.to_csv(MONUMENTEN_CSV, index=False)
                        logger.info(
                            f"Batch {batch_num + 1} done. Total rows: {len(processed_df)}"
                        )
                except Exception as e:
                    # A failed batch is skipped; what we have so far is saved.
                    logger.error(f"Error processing batch {batch_num + 1}: {e}")
                    if not processed_df.empty:
                        processed_df = processed_df.drop_duplicates(
                            subset=["bag_verblijfsobject_id"], keep="first"
                        )
                        processed_df.to_csv(MONUMENTEN_CSV, index=False)
                    continue

            processed_df = processed_df.drop_duplicates(
                subset=["bag_verblijfsobject_id"], keep="first"
            )
            logger.info(f"Final dataset: {len(processed_df)} unique IDs")
            return processed_df
    except Exception as e:
        # Client-level failure (e.g. connection setup): save and return partial data.
        logger.error(f"Error with MonumentenClient: {e}")
        if not processed_df.empty:
            processed_df = processed_df.drop_duplicates(
                subset=["bag_verblijfsobject_id"], keep="first"
            )
            processed_df.to_csv(MONUMENTEN_CSV, index=False)
        return processed_df if not processed_df.empty else None
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
def save_final_results(result_df: Optional[pd.DataFrame]) -> None:
    """Save stage 1 results to MONUMENTEN_CSV (no HF push; that's in pipeline)."""
    if result_df is None:
        logger.warning("No valid data to save.")
        return
    if result_df.empty:
        logger.info("Processing resulted in empty DataFrame.")
        return
    deduped = result_df.drop_duplicates(
        subset=["bag_verblijfsobject_id"], keep="first"
    )
    deduped["bag_verblijfsobject_id"] = deduped["bag_verblijfsobject_id"].astype(str)
    deduped.to_csv(MONUMENTEN_CSV, index=False)
    logger.info(f"Saved final monumenten data to {MONUMENTEN_CSV}")
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
def cleanup_existing_duplicates() -> None:
    """Deduplicate MONUMENTEN_CSV in place on bag_verblijfsobject_id."""
    if not os.path.exists(MONUMENTEN_CSV):
        return
    try:
        frame = pd.read_csv(MONUMENTEN_CSV, dtype={"bag_verblijfsobject_id": str})
        before = len(frame)
        deduped = frame.drop_duplicates(
            subset=["bag_verblijfsobject_id"], keep="first"
        )
        removed = before - len(deduped)
        if removed > 0:
            deduped.to_csv(MONUMENTEN_CSV, index=False)
            logger.info(f"Removed {removed} duplicates")
    except Exception as exc:
        logger.error(f"Error cleaning duplicates: {exc}")
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
def remove_ids_not_in_source() -> None:
    """Drop rows from MONUMENTEN_CSV whose id no longer occurs in the source id list."""
    if not os.path.exists(MONUMENTEN_CSV) or not os.path.exists(INTERMEDIATE_CSV_PATH):
        return
    try:
        results_df = pd.read_csv(
            MONUMENTEN_CSV, dtype={"bag_verblijfsobject_id": str}
        )
        source_df = pd.read_csv(
            INTERMEDIATE_CSV_PATH, dtype={"bag_verblijfsobject_id": str}
        )
        valid_ids = set(source_df["bag_verblijfsobject_id"].unique())
        kept = results_df[results_df["bag_verblijfsobject_id"].isin(valid_ids)].copy()
        dropped = len(results_df) - len(kept)
        if dropped > 0:
            kept.to_csv(MONUMENTEN_CSV, index=False)
            logger.info(f"Removed {dropped} rows not in source")
    except Exception as exc:
        logger.error(f"Error filtering non-source IDs: {exc}")
|
| 788 |
+
|
| 789 |
+
|
| 790 |
+
def verify_data_integrity() -> None:
    """Log total vs unique-id row counts for MONUMENTEN_CSV as a sanity check."""
    if not os.path.exists(MONUMENTEN_CSV):
        return
    try:
        frame = pd.read_csv(MONUMENTEN_CSV, dtype={"bag_verblijfsobject_id": str})
        row_count = len(frame)
        id_count = frame["bag_verblijfsobject_id"].nunique()
        logger.info(f"Data integrity: total={row_count}, unique={id_count}")
    except Exception as exc:
        logger.error(f"Error verifying: {exc}")
|
| 800 |
+
|
| 801 |
+
|
| 802 |
+
async def process_main() -> Optional[pd.DataFrame]:
    """Stage 1 driver: reconcile existing results, then resume or start fresh.

    When INTERMEDIATE_CSV_PATH exists, only still-unprocessed ids are fed to
    the MonumentenClient (resume path).  Otherwise — or when the resume path
    errors — the XML dump is parsed from scratch to (re)build the id list.
    """
    # Reconcile the results file with the source id list before resuming.
    remove_ids_not_in_source()
    cleanup_existing_duplicates()
    verify_data_integrity()

    if os.path.exists(INTERMEDIATE_CSV_PATH):
        logger.info(f"Found {INTERMEDIATE_CSV_PATH}, using resume logic...")
        try:
            unprocessed_df = get_unprocessed_ids_df()
            if unprocessed_df is None:
                logger.info("All IDs processed.")
                return load_existing_progress()
            logger.info(f"Found {len(unprocessed_df)} IDs to process")
            return await process_with_monumenten_client_resume(unprocessed_df)
        except Exception as e:
            # Resume failed: fall through to a full rebuild from the XML files.
            logger.error(f"Resume error: {e}")

    xml_files = get_xml_files()
    if not xml_files:
        return None
    unique_identificaties = process_files_parallel(xml_files)
    df = create_identificaties_dataframe(unique_identificaties)
    if df is None:
        return None
    return await process_with_monumenten_client_resume(df)
|
| 827 |
+
|
| 828 |
+
|
| 829 |
+
async def process_until_complete() -> Optional[pd.DataFrame]:
    """Repeatedly run process_main() until no unprocessed ids remain.

    Retries up to 100 attempts, sleeping briefly between incomplete runs (5s)
    and after errors (10s).  On KeyboardInterrupt the partial result (if any)
    is saved and returned.
    """
    attempt = 1
    max_attempts = 100
    while attempt <= max_attempts:
        logger.info(f"=== Attempt {attempt}/{max_attempts} ===")
        try:
            result_df = await process_main()
            if result_df is not None and not result_df.empty:
                # Completion check: are there still ids the result doesn't cover?
                unprocessed_df = get_unprocessed_ids_df()
                if unprocessed_df is None:
                    logger.info("All IDs processed successfully.")
                    return result_df
                logger.info(f"Incomplete: {len(unprocessed_df)} IDs remain. Restarting...")
                await asyncio.sleep(5)
                attempt += 1
                continue
            attempt += 1
        except KeyboardInterrupt:
            logger.info("Interrupted. Saving progress...")
            # result_df may not exist yet if interrupted during the first attempt,
            # hence the locals() guard.
            if "result_df" in locals() and result_df is not None and not result_df.empty:
                save_final_results(result_df)
            return result_df if "result_df" in locals() else None
        except Exception as e:
            logger.error(f"Attempt {attempt} error: {e}")
            await asyncio.sleep(10)
            attempt += 1
    logger.error(f"Max attempts ({max_attempts}) reached.")
    return None
|
| 857 |
+
|
| 858 |
+
|
| 859 |
+
# ===================== CLI and entry =====================
|
| 860 |
+
|
| 861 |
+
def run_full(push: bool, reset_postcode_cache: bool) -> None:
    """Run the whole pipeline: stage 1 (process), stage 2 (postcode), Parquet, optional push.

    Args:
        push: when True, upload the sorted Parquet result to Hugging Face.
        reset_postcode_cache: when True, clear the SQLite cache and checkpoint
            before the postcode stage.
    """
    result = asyncio.run(process_until_complete())
    if result is not None:
        save_final_results(result)
    if not Path(MONUMENTEN_CSV).exists():
        logger.error("Stage 1 did not produce monumenten.csv. Skipping stage 2.")
        return
    if not add_postcode_main(reset_cache=reset_postcode_cache):
        return
    if not csv_to_parquet(OUTPUT_CSV, OUTPUT_PARQUET):
        return
    if push:
        df = pd.read_parquet(OUTPUT_PARQUET)
        # NOTE(review): sorts by straatnaam/huisnummer here while
        # run_postcode_only sorts by postcode/huisnummer — confirm which
        # ordering is intended for the published dataset.
        if "straatnaam" in df.columns and "huisnummer" in df.columns:
            df = df.sort_values(by=["straatnaam", "huisnummer"]).reset_index(drop=True)
        push_to_huggingface(df)
|
| 877 |
+
|
| 878 |
+
|
| 879 |
+
def run_process_only() -> None:
    """Run only stage 1 (XML parsing + MonumentenClient) and persist its results."""
    outcome = asyncio.run(process_until_complete())
    if outcome is not None:
        save_final_results(outcome)
    logger.info("Process stage done.")
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def run_postcode_only(push: bool, reset_postcode_cache: bool) -> None:
    """Run stage 2 (postcode enrichment) plus Parquet export and optional HF push."""
    if not Path(MONUMENTEN_CSV).exists():
        logger.error(f"{MONUMENTEN_CSV} not found. Run stage 'process' first.")
        return
    if not add_postcode_main(reset_cache=reset_postcode_cache):
        return
    if not csv_to_parquet(OUTPUT_CSV, OUTPUT_PARQUET):
        return
    if not push:
        return
    frame = pd.read_parquet(OUTPUT_PARQUET)
    if "postcode" in frame.columns and "huisnummer" in frame.columns:
        frame = frame.sort_values(by=["postcode", "huisnummer"]).reset_index(drop=True)
    push_to_huggingface(frame)
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
def main():
    """CLI entry point: parse arguments and dispatch to the requested stage."""
    parser = argparse.ArgumentParser(
        description="Monumenten pipeline: process XML + postcodes -> Parquet (optional HF push)"
    )
    parser.add_argument(
        "--stage",
        choices=["full", "process", "postcode"],
        default="full",
        help="full (default): process then postcode then Parquet; process: only stage 1; postcode: only stage 2 + Parquet",
    )
    parser.add_argument(
        "--push",
        action="store_true",
        help="After writing Parquet, push to Hugging Face (requires HF_TOKEN)",
    )
    parser.add_argument(
        "--reset-postcode-cache",
        action="store_true",
        help="Force full postcode refresh (clear SQLite cache and checkpoint)",
    )
    args = parser.parse_args()

    # Dispatch table keeps the stage selection flat and extensible.
    dispatch = {
        "full": lambda: run_full(
            push=args.push, reset_postcode_cache=args.reset_postcode_cache
        ),
        "process": run_process_only,
        "postcode": lambda: run_postcode_only(
            push=args.push, reset_postcode_cache=args.reset_postcode_cache
        ),
    }
    dispatch[args.stage]()
    logger.info("Done.")
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
update-analysis.ipynb
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "87cab8dc",
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"source": [
|
| 8 |
+
"## Update Analysis\n",
|
| 9 |
+
"\n",
|
| 10 |
+
"This Notebook contains the analysis of the update from the old dataset to the new one."
|
| 11 |
+
]
|
| 12 |
+
},
|
| 13 |
+
{
|
| 14 |
+
"cell_type": "code",
|
| 15 |
+
"execution_count": null,
|
| 16 |
+
"id": "962fa8db",
|
| 17 |
+
"metadata": {},
|
| 18 |
+
"outputs": [],
|
| 19 |
+
"source": [
|
| 20 |
+
"import pandas as pd\n",
|
| 21 |
+
"import numpy as np"
|
| 22 |
+
]
|
| 23 |
+
},
|
| 24 |
+
{
|
| 25 |
+
"cell_type": "code",
|
| 26 |
+
"execution_count": null,
|
| 27 |
+
"id": "16823168",
|
| 28 |
+
"metadata": {},
|
| 29 |
+
"outputs": [],
|
| 30 |
+
"source": [
|
| 31 |
+
"# Login using e.g. `huggingface-cli login` to access this dataset\n",
|
| 32 |
+
"old = df = pd.read_parquet(\"hf://datasets/woonstadrotterdam/monumenten/monumenten.parquet\")"
|
| 33 |
+
]
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"cell_type": "code",
|
| 37 |
+
"execution_count": null,
|
| 38 |
+
"id": "8a11d0e5",
|
| 39 |
+
"metadata": {},
|
| 40 |
+
"outputs": [],
|
| 41 |
+
"source": [
|
| 42 |
+
"old"
|
| 43 |
+
]
|
| 44 |
+
},
|
| 45 |
+
{
|
| 46 |
+
"cell_type": "code",
|
| 47 |
+
"execution_count": null,
|
| 48 |
+
"id": "8bfe4964",
|
| 49 |
+
"metadata": {},
|
| 50 |
+
"outputs": [],
|
| 51 |
+
"source": [
|
| 52 |
+
"new = pd.read_csv('monumenten.csv', dtype={'bag_verblijfsobject_id': 'str'})"
|
| 53 |
+
]
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"cell_type": "code",
|
| 57 |
+
"execution_count": null,
|
| 58 |
+
"id": "e0c4b452",
|
| 59 |
+
"metadata": {},
|
| 60 |
+
"outputs": [],
|
| 61 |
+
"source": [
|
| 62 |
+
"new"
|
| 63 |
+
]
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"cell_type": "code",
|
| 67 |
+
"execution_count": null,
|
| 68 |
+
"id": "026d6b45",
|
| 69 |
+
"metadata": {},
|
| 70 |
+
"outputs": [],
|
| 71 |
+
"source": [
|
| 72 |
+
"def difference_report(old, new, id_col=\"bag_verblijfsobject_id\", columns=None, treat_na_equal=True):\n",
|
| 73 |
+
" \"\"\"\n",
|
| 74 |
+
" Compare two DataFrames that share an ID column and (mostly) the same other columns.\n",
|
| 75 |
+
" Returns a long 'difference report' with:\n",
|
| 76 |
+
" - rows missing in one DF (column='ALL')\n",
|
| 77 |
+
" - cell-level changes for common IDs\n",
|
| 78 |
+
" - change_type identifying NA transitions and other value changes\n",
|
| 79 |
+
"\n",
|
| 80 |
+
" Params\n",
|
| 81 |
+
" ------\n",
|
| 82 |
+
" id_col : str\n",
|
| 83 |
+
" Name of the ID column.\n",
|
| 84 |
+
" columns : list[str] | None\n",
|
| 85 |
+
" Columns (excluding id_col) to compare. If None, uses intersection of old/new.\n",
|
| 86 |
+
" treat_na_equal : bool\n",
|
| 87 |
+
" If True, NaN == NaN (no diff). If False, NaN vs NaN will be reported as diff.\n",
|
| 88 |
+
" \"\"\"\n",
|
| 89 |
+
" # Basic checks\n",
|
| 90 |
+
" if old[id_col].duplicated().any() or new[id_col].duplicated().any():\n",
|
| 91 |
+
" raise ValueError(\"Duplicate IDs detected. Ensure unique IDs before comparing.\")\n",
|
| 92 |
+
"\n",
|
| 93 |
+
" # Choose comparable columns (intersection by default, preserving old order)\n",
|
| 94 |
+
" if columns is None:\n",
|
| 95 |
+
" columns = [c for c in old.columns if c != id_col and c in new.columns]\n",
|
| 96 |
+
" else:\n",
|
| 97 |
+
" # ensure the id column isn't in 'columns'\n",
|
| 98 |
+
" columns = [c for c in columns if c != id_col]\n",
|
| 99 |
+
"\n",
|
| 100 |
+
" # Align by ID (index) and columns\n",
|
| 101 |
+
" a = old.set_index(id_col)[columns]\n",
|
| 102 |
+
" b = new.set_index(id_col)[columns]\n",
|
| 103 |
+
"\n",
|
| 104 |
+
" # Missing IDs\n",
|
| 105 |
+
" only_in_old = a.index.difference(b.index)\n",
|
| 106 |
+
" only_in_new = b.index.difference(a.index)\n",
|
| 107 |
+
"\n",
|
| 108 |
+
" missing1 = pd.DataFrame({\n",
|
| 109 |
+
" id_col: only_in_old,\n",
|
| 110 |
+
" \"column\": \"ALL\",\n",
|
| 111 |
+
" \"old\": \"Row not in new\",\n",
|
| 112 |
+
" \"new\": pd.NA,\n",
|
| 113 |
+
" \"change_type\": \"row_removed\"\n",
|
| 114 |
+
" })\n",
|
| 115 |
+
"\n",
|
| 116 |
+
" missing2 = pd.DataFrame({\n",
|
| 117 |
+
" id_col: only_in_new,\n",
|
| 118 |
+
" \"column\": \"ALL\",\n",
|
| 119 |
+
" \"old\": pd.NA,\n",
|
| 120 |
+
" \"new\": \"Row is new\",\n",
|
| 121 |
+
" \"change_type\": \"row_added\"\n",
|
| 122 |
+
" })\n",
|
| 123 |
+
"\n",
|
| 124 |
+
" # Compare common IDs\n",
|
| 125 |
+
" common_idx = a.index.intersection(b.index)\n",
|
| 126 |
+
" a2 = a.loc[common_idx, columns].sort_index()\n",
|
| 127 |
+
" b2 = b.loc[common_idx, columns].sort_index()\n",
|
| 128 |
+
"\n",
|
| 129 |
+
" # Build masks explicitly to capture NA transitions\n",
|
| 130 |
+
" a_na = a2.isna()\n",
|
| 131 |
+
" b_na = b2.isna()\n",
|
| 132 |
+
" both_na = a_na & b_na\n",
|
| 133 |
+
" na_xor = a_na ^ b_na\n",
|
| 134 |
+
" neq_non_na = (a2 != b2) & ~a_na & ~b_na\n",
|
| 135 |
+
"\n",
|
| 136 |
+
" if treat_na_equal:\n",
|
| 137 |
+
" mask = na_xor | neq_non_na\n",
|
| 138 |
+
" else:\n",
|
| 139 |
+
" mask = na_xor | both_na | neq_non_na\n",
|
| 140 |
+
"\n",
|
| 141 |
+
" # Stack with dropna=False so NA values are preserved in the result\n",
|
| 142 |
+
" diff_cells_idx = mask.stack(future_stack=True).pipe(lambda s: s[s].index)\n",
|
| 143 |
+
"\n",
|
| 144 |
+
" vals_old = a2.stack(future_stack=True)\n",
|
| 145 |
+
" vals_new = b2.stack(future_stack=True)\n",
|
| 146 |
+
"\n",
|
| 147 |
+
" diffs_long = (\n",
|
| 148 |
+
" pd.concat({\"old\": vals_old, \"new\": vals_new}, axis=1)\n",
|
| 149 |
+
" .loc[diff_cells_idx]\n",
|
| 150 |
+
" .reset_index()\n",
|
| 151 |
+
" .rename(columns={\"level_0\": id_col, \"level_1\": \"column\"})\n",
|
| 152 |
+
" )\n",
|
| 153 |
+
"\n",
|
| 154 |
+
" # Change type labeling\n",
|
| 155 |
+
" is_old_na = diffs_long[\"old\"].isna()\n",
|
| 156 |
+
" is_new_na = diffs_long[\"new\"].isna()\n",
|
| 157 |
+
" diffs_long[\"change_type\"] = np.where(\n",
|
| 158 |
+
" is_old_na & ~is_new_na, \"from_null\",\n",
|
| 159 |
+
" np.where(~is_old_na & is_new_na, \"to_null\", \"value_changed\")\n",
|
| 160 |
+
" )\n",
|
| 161 |
+
"\n",
|
| 162 |
+
" # Combine missing + changed\n",
|
| 163 |
+
" out = pd.concat([missing1, missing2, diffs_long], ignore_index=True)\n",
|
| 164 |
+
"\n",
|
| 165 |
+
" # Nice ordering\n",
|
| 166 |
+
" out = out.sort_values([id_col, \"column\"], kind=\"mergesort\").reset_index(drop=True)\n",
|
| 167 |
+
" return out"
|
| 168 |
+
]
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"cell_type": "code",
|
| 172 |
+
"execution_count": null,
|
| 173 |
+
"id": "eefc6d96",
|
| 174 |
+
"metadata": {},
|
| 175 |
+
"outputs": [],
|
| 176 |
+
"source": [
|
| 177 |
+
"# Harmonize integer-like numeric columns to avoid int vs float diffs\n",
|
| 178 |
+
"for col in [\"rijksmonument_nummer\"]:\n",
|
| 179 |
+
" if col in old.columns:\n",
|
| 180 |
+
" old[col] = pd.to_numeric(old[col], errors=\"coerce\").astype(\"Int64\")\n",
|
| 181 |
+
" if col in new.columns:\n",
|
| 182 |
+
" new[col] = pd.to_numeric(new[col], errors=\"coerce\").astype(\"Int64\")\n",
|
| 183 |
+
"\n",
|
| 184 |
+
"# Keep all columns that exist in both (include beschermd_gezicht_naam, grondslag, rijksmonument_url/nummer). Drop only old-only cols.\n",
|
| 185 |
+
"drop_cols = [c for c in old.columns if c not in new.columns] # e.g. straatnaam, huisnummer, postcode, plaatsnaam\n",
|
| 186 |
+
"old_for_diff = old.drop(columns=drop_cols, errors=\"ignore\").drop_duplicates().copy()\n",
|
| 187 |
+
"new_for_diff = new.copy()\n",
|
| 188 |
+
"old_for_diff[\"bag_verblijfsobject_id\"] = old_for_diff[\"bag_verblijfsobject_id\"].astype(str)\n",
|
| 189 |
+
"new_for_diff[\"bag_verblijfsobject_id\"] = new_for_diff[\"bag_verblijfsobject_id\"].astype(str)\n",
|
| 190 |
+
"\n",
|
| 191 |
+
"difference = difference_report(old_for_diff, new_for_diff)"
|
| 192 |
+
]
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"cell_type": "code",
|
| 196 |
+
"execution_count": null,
|
| 197 |
+
"id": "a5e21313",
|
| 198 |
+
"metadata": {},
|
| 199 |
+
"outputs": [],
|
| 200 |
+
"source": [
|
| 201 |
+
"# Diagnostic: why would difference be empty?\n",
|
| 202 |
+
"# 1) Ensure ID has same type in both (parquet often has int, CSV has str -> no index overlap)\n",
|
| 203 |
+
"_id = \"bag_verblijfsobject_id\"\n",
|
| 204 |
+
"old_clean = old.drop(columns=[\"beschermd_gezicht_naam\", \"grondslag_gemeentelijk_monument\", \"rijksmonument_url\", \"rijksmonument_nummer\"], errors=\"ignore\").drop_duplicates()\n",
|
| 205 |
+
"new_clean = new.drop(columns=[\"beschermd_gezicht_naam\", \"grondslag_gemeentelijk_monument\", \"rijksmonument_url\", \"rijksmonument_nummer\"], errors=\"ignore\")\n",
|
| 206 |
+
"old_clean[_id] = old_clean[_id].astype(str)\n",
|
| 207 |
+
"new_clean[_id] = new_clean[_id].astype(str)\n",
|
| 208 |
+
"cols = [c for c in old_clean.columns if c != _id and c in new_clean.columns]\n",
|
| 209 |
+
"a_idx = old_clean.set_index(_id)[cols].index\n",
|
| 210 |
+
"b_idx = new_clean.set_index(_id)[cols].index\n",
|
| 211 |
+
"only_old = a_idx.difference(b_idx)\n",
|
| 212 |
+
"only_new = b_idx.difference(a_idx)\n",
|
| 213 |
+
"common = a_idx.intersection(b_idx)\n",
|
| 214 |
+
"print(\"old rows:\", len(old_clean), \" new rows:\", len(new_clean))\n",
|
| 215 |
+
"print(\"comparable columns:\", cols)\n",
|
| 216 |
+
"print(\"IDs only in old:\", len(only_old), \" only in new:\", len(only_new), \" in both:\", len(common))\n",
|
| 217 |
+
"print(\"old ID dtype:\", old[_id].dtype, \" new ID dtype:\", new[_id].dtype)"
|
| 218 |
+
]
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"cell_type": "code",
|
| 222 |
+
"execution_count": null,
|
| 223 |
+
"id": "8992ebb7",
|
| 224 |
+
"metadata": {},
|
| 225 |
+
"outputs": [],
|
| 226 |
+
"source": [
|
| 227 |
+
"counts = difference.loc[\n",
|
| 228 |
+
" lambda df: ~df[\"column\"].isin([\n",
|
| 229 |
+
" \"beschermd_gezicht_naam\",\n",
|
| 230 |
+
" \"grondslag_gemeentelijk_monument\",\n",
|
| 231 |
+
" \"rijksmonument_url\",\n",
|
| 232 |
+
" \"rijksmonument_nummer\"\n",
|
| 233 |
+
" ])\n",
|
| 234 |
+
"].groupby([\"column\", \"old\", \"new\", \"change_type\"], dropna=False).size().reset_index(name=\"count\")"
|
| 235 |
+
]
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"cell_type": "code",
|
| 239 |
+
"execution_count": null,
|
| 240 |
+
"id": "ef420c13",
|
| 241 |
+
"metadata": {},
|
| 242 |
+
"outputs": [],
|
| 243 |
+
"source": [
|
| 244 |
+
"counts"
|
| 245 |
+
]
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"cell_type": "code",
|
| 249 |
+
"execution_count": null,
|
| 250 |
+
"id": "ca126bfb",
|
| 251 |
+
"metadata": {},
|
| 252 |
+
"outputs": [],
|
| 253 |
+
"source": []
|
| 254 |
+
}
|
| 255 |
+
],
|
| 256 |
+
"metadata": {
|
| 257 |
+
"kernelspec": {
|
| 258 |
+
"display_name": ".venv",
|
| 259 |
+
"language": "python",
|
| 260 |
+
"name": "python3"
|
| 261 |
+
},
|
| 262 |
+
"language_info": {
|
| 263 |
+
"codemirror_mode": {
|
| 264 |
+
"name": "ipython",
|
| 265 |
+
"version": 3
|
| 266 |
+
},
|
| 267 |
+
"file_extension": ".py",
|
| 268 |
+
"mimetype": "text/x-python",
|
| 269 |
+
"name": "python",
|
| 270 |
+
"nbconvert_exporter": "python",
|
| 271 |
+
"pygments_lexer": "ipython3",
|
| 272 |
+
"version": "3.13.11"
|
| 273 |
+
}
|
| 274 |
+
},
|
| 275 |
+
"nbformat": 4,
|
| 276 |
+
"nbformat_minor": 5
|
| 277 |
+
}
|