#!/usr/bin/env python3
"""
Combined pipeline: process XML + MonumentenClient -> monumenten.csv,
then add postcodes via Kadaster -> CSV -> Parquet.
Entry point; logic lives in pipeline/ modules.
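
Usage:
    python run_pipeline.py                   # full pipeline (default)
    python run_pipeline.py --stage process   # stage 1 only
    python run_pipeline.py --stage postcode  # stage 2 + Parquet only
    python run_pipeline.py --dry-run 100     # process only 100 IDs into *_dry_run files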
"""
import argparse
import asyncio
import logging
from pathlib import Path
import pandas as pd
from pipeline.config import MONUMENTEN_CSV, OUTPUT_CSV, OUTPUT_PARQUET
from pipeline.logging_setup import setup_logging
from pipeline.parquet_hf import csv_to_parquet
from pipeline.postcode import add_postcode_main
from pipeline.process import (
    extract_identificaties,
    get_xml_files,
    process_ids_for_dry_run,
    process_until_complete,
    save_final_results,
)
from pipeline.update_analysis import run_update_analysis

logger = logging.getLogger(__name__)


def _with_suffix(path_str: str, suffix: str) -> Path:
"""Return a new path with the given suffix inserted before the file extension."""
p = Path(path_str)
    return p.with_name(p.stem + suffix + p.suffix)


def run_full() -> None:
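    """Run the complete pipeline: process, then postcodes, then the Parquet export."""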
result = asyncio.run(process_until_complete())
if result is not None:
save_final_results(result)
run_update_analysis(MONUMENTEN_CSV)
if not Path(MONUMENTEN_CSV).exists():
logger.error("Stage 1 did not produce monumenten.csv. Skipping stage 2.")
return
if not add_postcode_main():
return
    csv_to_parquet(OUTPUT_CSV, OUTPUT_PARQUET)


def _extract_ids_up_to(limit: int) -> list[str]:
"""Extract at least `limit` IDs from XML files, stopping early once enough are found."""
xml_files = get_xml_files()
unique_ids: set[str] = set()
for xml_file in xml_files:
unique_ids.update(extract_identificaties(xml_file))
if len(unique_ids) >= limit:
break
    return list(unique_ids)


def run_dry_run(limit: int, suffix: str = "_dry_run") -> None:
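    """Run the full pipeline on at most `limit` IDs, writing to suffixed output files."""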
dry_monumenten_csv = _with_suffix(MONUMENTEN_CSV, suffix)
dry_output_csv = _with_suffix(OUTPUT_CSV, suffix)
dry_output_parquet = _with_suffix(OUTPUT_PARQUET, suffix)
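    # Remove stale outputs from a previous dry run so this one starts clean.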
for path in [dry_monumenten_csv, dry_output_csv, dry_output_parquet]:
path.unlink(missing_ok=True)
ids = _extract_ids_up_to(limit)
if not ids:
return
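    # Build the one-column DataFrame of BAG verblijfsobject IDs that the processing step consumes.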
ids_df = pd.DataFrame(ids[:limit], columns=["bag_verblijfsobject_id"])
logger.info("Dry run: processing %d IDs", len(ids_df))
result = asyncio.run(process_ids_for_dry_run(ids_df, str(dry_monumenten_csv)))
if result is None:
return
if not add_postcode_main(
monumenten_csv_path=str(dry_monumenten_csv),
output_csv_path=str(dry_output_csv),
):
return
    csv_to_parquet(str(dry_output_csv), str(dry_output_parquet))


def run_process_only() -> None:
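    """Run only stage 1: process XML + MonumentenClient into monumenten.csv."""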
result = asyncio.run(process_until_complete())
if result is not None:
save_final_results(result)
run_update_analysis(MONUMENTEN_CSV)
logger.info("Process stage done.")
def run_postcode_only() -> None:
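    """Run only stage 2: add postcodes to an existing monumenten.csv, then write Parquet."""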
if not Path(MONUMENTEN_CSV).exists():
logger.error("%s not found. Run stage 'process' first.", MONUMENTEN_CSV)
return
if not add_postcode_main():
return
    csv_to_parquet(OUTPUT_CSV, OUTPUT_PARQUET)


def main() -> None:
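    """Parse command-line arguments and dispatch to the requested pipeline stage."""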
setup_logging()
parser = argparse.ArgumentParser(
description="Monumenten pipeline: process XML + postcodes -> Parquet"
)
parser.add_argument(
"--stage",
choices=["full", "process", "postcode"],
default="full",
help="full (default): process then postcode then Parquet; process: only stage 1; postcode: only stage 2 + Parquet",
)
parser.add_argument(
"--dry-run",
type=int,
metavar="N",
help="Process only N IDs (writes to *_dry_run CSV/Parquet files)",
)
args = parser.parse_args()
if args.dry_run is not None:
run_dry_run(args.dry_run)
elif args.stage == "full":
run_full()
elif args.stage == "process":
run_process_only()
else:
run_postcode_only()
logger.info("Done.")
if __name__ == "__main__":
main()