import asyncio
import glob
import json
import logging
import multiprocessing
import os
import xml.etree.ElementTree as ET
from datetime import datetime
from typing import List, Optional, Set
import pandas as pd
from datasets import Dataset
from dotenv import load_dotenv
from monumenten import MonumentenClient
load_dotenv()
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Define constants
XML_DIRECTORY = "vbo_xmls/"  # directory scanned for verblijfsobject XML exports
INTERMEDIATE_CSV_PATH = "verblijfsobjecten_ids.csv"  # checkpoint for extracted IDs
FINAL_CSV_PATH = "monumenten.csv"  # final enriched output
HF_REPO_ID = "woonstadrotterdam/monumenten"  # Hugging Face dataset repo to push to
HF_TOKEN = os.getenv("HF_TOKEN")  # auth token; push is refused if unset
def is_valid_identificatie(id_value: Optional[str]) -> bool:
    """
    Validate that *id_value* is a proper verblijfsobject ID.

    Valid IDs must be 16 characters long, consist of digits,
    and have '01' at positions 4-5 (0-indexed).
    Example: 'xxxx01xxxxxxxxxx' where x is a digit (e.g., '0304010000269586').

    Args:
        id_value: Candidate identifier; may be None (e.g. an XML element
            with no text), which is treated as invalid.

    Returns:
        True if the value matches the verblijfsobject ID pattern.
    """
    # Annotation widened to Optional[str]: the body always handled None,
    # but the old signature claimed a plain str.
    if id_value is None:
        return False
    return len(id_value) == 16 and id_value.isdigit() and id_value[4:6] == "01"
def extract_identificaties(xml_path: str) -> List[str]:
    """
    Stream-parse one XML file and collect every valid identificatie value.

    Iterative parsing is used so the whole document never needs to be held
    in memory; each element is cleared once it has been inspected.

    Args:
        xml_path: Path of the XML file to scan.

    Returns:
        All identificatie values passing validation; empty list on parse error.
    """
    found: List[str] = []
    try:
        for _event, node in ET.iterparse(xml_path, events=("end",)):
            if node.tag.endswith("identificatie"):
                text = node.text
                if is_valid_identificatie(text):
                    found.append(text)
            node.clear()  # Free memory
        if found:
            logger.debug(
                f"Found {len(found)} valid identificatie values in {xml_path}"
            )
        return found
    except Exception as e:
        logger.error(f"Error parsing XML file {xml_path}: {e}")
        return []
def get_xml_files() -> List[str]:
    """
    Discover the XML files to process.

    Returns:
        Paths of all *.xml files found in XML_DIRECTORY (possibly empty).
    """
    found = glob.glob(os.path.join(XML_DIRECTORY, "*.xml"))
    if found:
        logger.info(f"Found {len(found)} XML files in {XML_DIRECTORY}")
    else:
        logger.error(f"No XML files found in {XML_DIRECTORY}")
    return found
def process_files_parallel(xml_files: List[str]) -> Set[str]:
    """
    Fan the XML files out over a multiprocessing pool and merge the results.

    Args:
        xml_files: Paths of the XML files to scan.

    Returns:
        The union of all valid identificaties found across every file.
    """
    seen: Set[str] = set()
    logger.info(f"Starting parallel processing of {len(xml_files)} XML files...")
    with multiprocessing.Pool() as pool:
        # imap_unordered lets us merge results as soon as any worker finishes.
        for done, batch in enumerate(
            pool.imap_unordered(extract_identificaties, xml_files), start=1
        ):
            seen.update(batch)
            if done % 100 == 0:  # Log progress every 100 files
                logger.info(
                    f"Processed {done}/{len(xml_files)} files. "
                    f"Current unique identificaties: {len(seen)}"
                )
    logger.info(
        f"All files processed. Total unique identificaties found: {len(seen)}"
    )
    return seen
def create_identificaties_dataframe(unique_ids: Set[str]) -> Optional[pd.DataFrame]:
    """
    Build a one-column DataFrame from the unique identificaties and checkpoint it.

    Args:
        unique_ids: Set of validated verblijfsobject IDs.

    Returns:
        The DataFrame, or None when the set is empty.
    """
    if not unique_ids:
        logger.info("No valid identificaties found.")
        return None
    frame = pd.DataFrame(list(unique_ids), columns=["bag_verblijfsobject_id"])
    logger.info(f"Created DataFrame with {len(frame)} unique valid identificaties.")
    # Checkpoint so the expensive XML scan need not be repeated on a rerun.
    frame.to_csv(INTERMEDIATE_CSV_PATH, index=False)
    logger.info(f"Saved DataFrame to {INTERMEDIATE_CSV_PATH}")
    # Echo a quick summary to stdout.
    print("\nFirst few rows of the extracted identificaties DataFrame:")
    print(frame.head())
    print("\nIdentificaties DataFrame Info:")
    frame.info()
    return frame
async def process_with_monumenten_client(df: pd.DataFrame) -> Optional[pd.DataFrame]:
    """
    Enrich the identificaties with monument data via MonumentenClient.

    Args:
        df: DataFrame holding a 'bag_verblijfsobject_id' column.

    Returns:
        The enriched DataFrame, or None when *df* is empty or the client fails.
    """
    if df.empty:
        logger.warning("Empty DataFrame provided to MonumentenClient.")
        return None
    logger.info(f"Processing {len(df)} identificaties with MonumentenClient...")
    try:
        # The client manages its own connection lifecycle via async context.
        async with MonumentenClient() as client:
            enriched = await client.process_from_df(
                df=df, verblijfsobject_id_col="bag_verblijfsobject_id"
            )
        logger.info("Finished processing with MonumentenClient.")
        return enriched
    except Exception as e:
        logger.error(f"Error processing with MonumentenClient: {e}")
        return None
def save_final_results(result_df: Optional[pd.DataFrame]) -> None:
    """
    Save the final results to CSV and push them to Hugging Face, if present.

    Args:
        result_df: The enriched DataFrame, an empty DataFrame (nothing to
            save), or None (pipeline did not complete).
    """
    # Guard clauses: handle the two "nothing to save" cases first.
    if result_df is None:
        logger.warning("No valid data to save. Process did not complete successfully.")
        print("\nProcess did not complete successfully or returned no data.")
        return
    if result_df.empty:
        logger.info("Processing resulted in an empty DataFrame. Nothing to save.")
        print("\nProcessing resulted in an empty DataFrame.")
        return
    result_df.to_csv(FINAL_CSV_PATH, index=False)
    logger.info(f"Successfully saved final monumenten data to {FINAL_CSV_PATH}")
    print(f"\nFinal data saved to {FINAL_CSV_PATH}")
    print(result_df.head())
    # Push to Hugging Face
    if push_to_huggingface(result_df):
        print(f"\nData successfully pushed to Hugging Face dataset: {HF_REPO_ID}")
    else:
        print("\nFailed to push data to Hugging Face. Check logs for details.")
def push_to_huggingface(result_df: pd.DataFrame) -> bool:
    """
    Push the final results to the Hugging Face datasets hub.

    Converts *result_df* to a `datasets.Dataset` and uploads it to
    HF_REPO_ID, authenticating with the HF_TOKEN environment variable.

    Args:
        result_df: The enriched monumenten DataFrame to publish.

    Returns:
        True if the dataset was pushed successfully, False otherwise
        (missing token, empty DataFrame, or upload error).
    """
    if not HF_TOKEN:
        logger.error("No Hugging Face token found in environment variables (HF_TOKEN)")
        return False
    if result_df.empty:
        # Nothing was pushed, so report failure to the caller.
        logger.warning(
            "Result DataFrame is empty. Skipping push of main dataset to Hugging Face."
        )
        return False
    logger.info(
        f"Converting DataFrame with {len(result_df)} rows to Hugging Face Dataset."
    )
    try:
        hf_dataset_single = Dataset.from_pandas(result_df)
        hf_dataset_single.push_to_hub(
            repo_id=HF_REPO_ID,
            commit_message="Update monumenten dataset",
            token=HF_TOKEN,
        )
    except Exception as e:
        # Network/auth failures must not crash the pipeline; signal via return.
        logger.error(f"Error pushing dataset to Hugging Face: {e}")
        return False
    logger.info(f"Successfully pushed dataset dictionary to {HF_REPO_ID}")
    # BUG FIX: the original never returned True, so the caller's success
    # check (`if push_to_huggingface(...)`) always reported failure.
    return True
async def main() -> Optional[pd.DataFrame]:
    """
    Orchestrate the full pipeline: discover XMLs, extract IDs, enrich.

    Returns:
        The final processed DataFrame, or None if any stage produced nothing.
    """
    xml_files = get_xml_files()
    if not xml_files:
        return None
    # Scan every file in parallel for unique verblijfsobject IDs.
    ids = process_files_parallel(xml_files)
    # Materialize and checkpoint the IDs as a DataFrame.
    frame = create_identificaties_dataframe(ids)
    if frame is None:
        return None
    # Enrich the IDs with monument data.
    return await process_with_monumenten_client(frame)
if __name__ == "__main__":
    # Run the full async pipeline (scan XMLs -> extract IDs -> enrich).
    result_dataframe = asyncio.run(main())
    # Save to CSV and, when data is present, push to Hugging Face.
    save_final_results(result_dataframe)