#!/usr/bin/env python3
"""
Load person data from TOML files into a DuckDB database.
This script scans the data/person directory for TOML files and loads them
into a local DuckDB database for duplicate detection and processing.
"""
import argparse
import logging
import sys
from datetime import datetime
from pathlib import Path
from typing import Optional

import duckdb
import tomlkit
# Add parent directory to path to import schemas and config
sys.path.insert(0, str(Path(__file__).parent.parent))
from schemas.person import PERSON_SCHEMA
from schemas.membership import MEMBERSHIP_SCHEMA
from schemas.base import transform_value
from config import DATABASE_PATH, PERSON_DATA_DIR
def load_toml_file(file_path: Path) -> dict:
"""Load and parse a TOML file."""
with open(file_path, 'r', encoding='utf-8') as f:
return tomlkit.load(f)
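
# Note: tomlkit.load() returns a TOMLDocument, which supports the dict-style
# access (.get(), .keys()) this script relies on below.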
def get_person_toml_files(data_dir: Path):
"""Recursively find all TOML files in the person data directory (generator)."""
return data_dir.glob('**/*.toml')
def create_persons_table(conn: duckdb.DuckDBPyConnection):
"""Create the persons table using the explicit schema."""
create_sql = PERSON_SCHEMA.get_create_table_sql()
print(f"Creating table '{PERSON_SCHEMA.table_name}' with {len(PERSON_SCHEMA.schema)} columns:")
print(f" Fields: {', '.join(PERSON_SCHEMA.field_order)}")
conn.execute(create_sql)
def create_memberships_table(conn: duckdb.DuckDBPyConnection):
"""Create the memberships table using the explicit schema."""
create_sql = MEMBERSHIP_SCHEMA.get_create_table_sql()
print(f"Creating table '{MEMBERSHIP_SCHEMA.table_name}' with {len(MEMBERSHIP_SCHEMA.schema)} columns:")
print(f" Fields: {', '.join(MEMBERSHIP_SCHEMA.field_order)}")
conn.execute(create_sql)
def load_persons_to_db(
data_dir: Path,
db_path: Path,
export_parquet: bool = False,
    parquet_path: Optional[Path] = None,
    memberships_parquet_path: Optional[Path] = None,
batch_size: int = 1000,
progress_interval: int = 100
):
"""Load all person TOML files into the DuckDB database with batch inserts."""
# Setup error logging
error_log_path = db_path.parent / 'load_errors.log'
logging.basicConfig(
filename=str(error_log_path),
level=logging.ERROR,
format='%(asctime)s - %(message)s',
filemode='w' # Overwrite previous log
)
print(f"Connecting to database: {db_path}")
print(f"Error log: {error_log_path}")
print(f"Batch size: {batch_size}")
conn = duckdb.connect(str(db_path))
# Create tables with explicit schema
create_persons_table(conn)
create_memberships_table(conn)
# Build INSERT statements using schema definitions
insert_person_sql = PERSON_SCHEMA.get_insert_sql()
insert_membership_sql = MEMBERSHIP_SCHEMA.get_insert_sql()
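    # The generated statements presumably take the form
    # "INSERT INTO <table> (col1, col2, ...) VALUES (?, ?, ...)", with one
    # placeholder per field in field_order; the exact text is owned by the
    # schema helpers, not this script.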
# Get all TOML files
print("\nScanning for TOML files...")
toml_files = list(get_person_toml_files(data_dir))
total_files = len(toml_files)
print(f"Found {total_files} files to process")
    # Load data sequentially with batch inserts
print("\nLoading person data...")
loaded_count = 0
error_count = 0
processed_count = 0
memberships_loaded_count = 0
unknown_fields_seen = set()
# Batches for accumulating records
person_batch = []
membership_batch = []
# Track timing for ETA
start_time = datetime.now()
def flush_batches():
"""Helper to insert accumulated batches and commit."""
        nonlocal loaded_count, memberships_loaded_count, error_count
if not person_batch and not membership_batch:
return
        print(f"  Committing batch of {len(person_batch)} persons and "
              f"{len(membership_batch)} memberships...", end='', flush=True)
try:
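            # One explicit transaction per batch: the person rows and their
            # membership rows land together, or the whole batch is rolled back
            # below. (DuckDB otherwise auto-commits each statement.)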
conn.execute("BEGIN TRANSACTION")
# Batch insert persons
if person_batch:
conn.executemany(insert_person_sql, person_batch)
loaded_count += len(person_batch)
# Batch insert memberships
if membership_batch:
conn.executemany(insert_membership_sql, membership_batch)
memberships_loaded_count += len(membership_batch)
conn.execute("COMMIT")
print(f" done!")
        except Exception as e:
            conn.execute("ROLLBACK")
            # Count the whole rolled-back batch as file-level errors so the
            # summary totals stay accurate
            error_count += len(person_batch)
            logging.error(f"Batch insert failed: {e}")
            print("\n  Warning: Batch insert failed and was rolled back, see error log")
person_batch.clear()
membership_batch.clear()
# Process files sequentially
try:
for toml_file in toml_files:
try:
# Read and parse TOML file
person_data = load_toml_file(toml_file)
                person_id = person_data.get('id')
                if not person_id:
                    # A missing id would poison both tables; log it as an error
                    raise ValueError("missing required 'id' field")
                memberships = person_data.get('memberships', [])
# Warn about unknown fields (helps catch typos)
for field in person_data.keys():
if field not in PERSON_SCHEMA.schema and field != 'memberships' and field not in unknown_fields_seen:
print(f" Warning: Unknown field '{field}' (will be ignored)")
unknown_fields_seen.add(field)
# Build values list for person
values = [
transform_value(field, person_data.get(field), PERSON_SCHEMA.nested_fields)
for field in PERSON_SCHEMA.field_order
]
person_batch.append(values)
# Add memberships to batch
for idx, membership in enumerate(memberships):
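                    # Synthetic primary key: "<person_id>_m<index>". The values
                    # below must line up with the column order in
                    # MEMBERSHIP_SCHEMA's INSERT statement.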
membership_id = f"{person_id}_m{idx}"
membership_values = [
membership_id,
person_id,
membership.get('party'),
membership.get('region'),
membership.get('province'),
membership.get('locality'),
membership.get('position'),
membership.get('year'),
]
membership_batch.append(membership_values)
except Exception as e:
# File processing failed
error_count += 1
logging.error(f"{toml_file}: {e}")
processed_count += 1
# Flush batches when reaching batch size
if len(person_batch) >= batch_size:
flush_batches()
# Progress indicator
if processed_count % progress_interval == 0 or processed_count == total_files:
pct = (processed_count / total_files * 100) if total_files > 0 else 0
# Show committed + pending records
pending = len(person_batch)
total_loaded = loaded_count + pending
print(f" [{pct:5.1f}%] {processed_count}/{total_files} files | "
f"{total_loaded} loaded ({loaded_count} committed, {pending} pending) | "
f"{error_count} errors")
except KeyboardInterrupt:
print("\n\n*** Interrupted by user (Ctrl+C) ***")
print("Flushing any pending records to database...")
flush_batches()
# Calculate time up to interruption
end_time = datetime.now()
total_seconds = (end_time - start_time).total_seconds()
total_minutes = total_seconds / 60
print(f"\nPartial load completed:")
print(f" Files processed: {processed_count}/{total_files}")
print(f" Records loaded: {loaded_count}")
print(f" Errors: {error_count}")
print(f" Time elapsed: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
conn.close()
sys.exit(1)
# Flush any remaining records
flush_batches()
# Calculate total time
end_time = datetime.now()
total_seconds = (end_time - start_time).total_seconds()
total_minutes = total_seconds / 60
# Show summary
print(f"\n{'='*60}")
print(f"Load complete!")
print(f" Total files processed: {processed_count}")
print(f" Successfully loaded: {loaded_count}")
print(f" Errors: {error_count}")
print(f" Total time: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
if error_count > 0:
print(f" Error details logged to: {error_log_path}")
# Show database stats
persons_count = conn.execute("SELECT COUNT(*) as total FROM persons").fetchone()
memberships_count = conn.execute("SELECT COUNT(*) as total FROM memberships").fetchone()
print(f" Total persons in database: {persons_count[0]}")
print(f" Total memberships in database: {memberships_count[0]}")
print(f"{'='*60}")
# Show sample data
print("\nSample persons (first 5 rows):")
sample = conn.execute("""
SELECT id, first_name, last_name, name_suffix
FROM persons
LIMIT 5
""").fetchall()
for row in sample:
suffix = f" {row[3]}" if row[3] else ""
print(f" {row[0]}: {row[1]} {row[2]}{suffix}")
# Show sample memberships
print("\nSample memberships (first 5 rows):")
memberships_sample = conn.execute("""
SELECT
p.first_name,
p.last_name,
m.party,
m.position,
m.region,
m.year
FROM memberships m
JOIN persons p ON m.person_id = p.id
LIMIT 5
""").fetchall()
for row in memberships_sample:
print(f" {row[0]} {row[1]} - {row[3]} ({row[2]}) in {row[4]}, {row[5]}")
# Export to Parquet if requested
if export_parquet:
print(f"\n{'='*60}")
print("Exporting to Parquet format...")
# Export persons table
print(f"\nExporting persons table...")
print(f" Output: {parquet_path}")
try:
conn.execute(f"COPY persons TO '{parquet_path}' (FORMAT PARQUET)")
if parquet_path.exists():
file_size = parquet_path.stat().st_size
file_size_mb = file_size / (1024 * 1024)
print(f" ✓ Successfully exported persons.parquet ({file_size_mb:.2f} MB)")
else:
print(" ⚠ Export completed but file not found")
except Exception as e:
print(f" ✗ Error exporting persons: {e}")
# Export memberships table
print(f"\nExporting memberships table...")
print(f" Output: {memberships_parquet_path}")
try:
conn.execute(f"COPY memberships TO '{memberships_parquet_path}' (FORMAT PARQUET)")
if memberships_parquet_path.exists():
file_size = memberships_parquet_path.stat().st_size
file_size_mb = file_size / (1024 * 1024)
print(f" ✓ Successfully exported memberships.parquet ({file_size_mb:.2f} MB)")
else:
print(" ⚠ Export completed but file not found")
except Exception as e:
print(f" ✗ Error exporting memberships: {e}")
print(f"{'='*60}")
conn.close()
print(f"\nDatabase saved to: {db_path}")
def main():
"""Main entry point."""
parser = argparse.ArgumentParser(
description='Load person data from TOML files into a DuckDB database',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Use default database path (databases/data.duckdb)
python scripts/load_persons_to_db.py
# Specify custom database path
python scripts/load_persons_to_db.py --db-path /path/to/custom.duckdb
# Use a different data directory
python scripts/load_persons_to_db.py --data-dir /path/to/person/data
# Export to Parquet for Hugging Face dataset viewer
python scripts/load_persons_to_db.py --export-parquet
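  # Tune commit granularity and progress reporting
  python scripts/load_persons_to_db.py --batch-size 500 --progress-interval 50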
"""
)
parser.add_argument(
'--db-path',
type=Path,
default=DATABASE_PATH,
help=f'Path to the DuckDB database (default: {DATABASE_PATH})'
)
parser.add_argument(
'--data-dir',
type=Path,
default=PERSON_DATA_DIR,
help=f'Path to the person data directory (default: {PERSON_DATA_DIR})'
)
parser.add_argument(
'--export-parquet',
action='store_true',
help='Export the persons and memberships tables to Parquet format after loading'
)
parser.add_argument(
'--parquet-path',
type=Path,
default=Path(__file__).parent.parent / 'databases' / 'persons.parquet',
help='Path for the exported persons Parquet file (default: databases/persons.parquet)'
)
parser.add_argument(
'--memberships-parquet-path',
type=Path,
default=Path(__file__).parent.parent / 'databases' / 'memberships.parquet',
help='Path for the exported memberships Parquet file (default: databases/memberships.parquet)'
)
parser.add_argument(
'--batch-size',
type=int,
default=1000,
help='Number of records to insert per batch/transaction (default: 1000)'
)
parser.add_argument(
'--progress-interval',
type=int,
default=100,
help='Show progress every N files (default: 100)'
)
args = parser.parse_args()
# Validate data directory exists
if not args.data_dir.exists():
print(f"Error: Data directory not found: {args.data_dir}")
sys.exit(1)
# Create databases directory if it doesn't exist
args.db_path.parent.mkdir(parents=True, exist_ok=True)
    load_persons_to_db(
        data_dir=args.data_dir,
        db_path=args.db_path,
        export_parquet=args.export_parquet,
        parquet_path=args.parquet_path,
        memberships_parquet_path=args.memberships_parquet_path,
        batch_size=args.batch_size,
        progress_interval=args.progress_interval,
    )
if __name__ == '__main__':
main()