File size: 13,847 Bytes
67e2a15 e423ac0 67e2a15 e71ccc4 67e2a15 e71ccc4 e423ac0 67e2a15 e423ac0 67e2a15 e71ccc4 67e2a15 e71ccc4 67e2a15 e71ccc4 67e2a15 e423ac0 67e2a15 e71ccc4 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e71ccc4 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 e71ccc4 e423ac0 e71ccc4 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e423ac0 67e2a15 e71ccc4 67e2a15 e71ccc4 67e2a15 f072b09 67e2a15 f072b09 67e2a15 e71ccc4 f5363ff e423ac0 f5363ff e423ac0 f5363ff e423ac0 f5363ff e423ac0 f5363ff 67e2a15 f5363ff 67e2a15 f5363ff e423ac0 f5363ff e423ac0 f5363ff 67e2a15 e423ac0 67e2a15 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 |
#!/usr/bin/env python3
"""
Load person data from TOML files into a DuckDB database.
This script scans the data/person directory for TOML files and loads them
into a local DuckDB database for duplicate detection and processing.
"""
import sys
import argparse
from pathlib import Path
import tomlkit
import duckdb
import logging
from datetime import datetime
from typing import Tuple, List, Optional
# Add parent directory to path to import schemas and config
sys.path.insert(0, str(Path(__file__).parent.parent))
from schemas.person import PERSON_SCHEMA
from schemas.membership import MEMBERSHIP_SCHEMA
from schemas.base import transform_value
from config import DATABASE_PATH, PERSON_DATA_DIR
def load_toml_file(file_path: Path) -> dict:
    """Read the TOML document at *file_path* and return its parsed contents."""
    with file_path.open('r', encoding='utf-8') as fh:
        return tomlkit.load(fh)
def get_person_toml_files(data_dir: Path):
    """Lazily yield every TOML file found anywhere under *data_dir*."""
    # rglob('*.toml') is equivalent to glob('**/*.toml'): a recursive scan.
    return data_dir.rglob('*.toml')
def create_persons_table(conn: duckdb.DuckDBPyConnection):
    """Create the persons table from the declarative PERSON_SCHEMA definition."""
    ddl = PERSON_SCHEMA.get_create_table_sql()
    column_count = len(PERSON_SCHEMA.schema)
    print(f"Creating table '{PERSON_SCHEMA.table_name}' with {column_count} columns:")
    print(f" Fields: {', '.join(PERSON_SCHEMA.field_order)}")
    conn.execute(ddl)
def create_memberships_table(conn: duckdb.DuckDBPyConnection):
    """Create the memberships table from the declarative MEMBERSHIP_SCHEMA definition."""
    ddl = MEMBERSHIP_SCHEMA.get_create_table_sql()
    column_count = len(MEMBERSHIP_SCHEMA.schema)
    print(f"Creating table '{MEMBERSHIP_SCHEMA.table_name}' with {column_count} columns:")
    print(f" Fields: {', '.join(MEMBERSHIP_SCHEMA.field_order)}")
    conn.execute(ddl)
def load_persons_to_db(
    data_dir: Path,
    db_path: Path,
    export_parquet: bool = False,
    parquet_path: Optional[Path] = None,
    memberships_parquet_path: Optional[Path] = None,
    batch_size: int = 1000,
    progress_interval: int = 100
) -> None:
    """Load all person TOML files into the DuckDB database with batch inserts.

    Args:
        data_dir: Directory scanned recursively for person ``*.toml`` files.
        db_path: Target DuckDB database file.
        export_parquet: When True, export both tables to Parquet after loading.
        parquet_path: Output path for the persons Parquet file; must be
            provided when ``export_parquet`` is True (dereferenced below).
        memberships_parquet_path: Output path for the memberships Parquet
            file; must be provided when ``export_parquet`` is True.
        batch_size: Number of person records accumulated before each
            transactional batch insert.
        progress_interval: Print a progress line every N processed files.

    Side effects:
        Writes ``load_errors.log`` next to the database (overwritten each
        run), prints progress to stdout, and on Ctrl+C flushes pending
        batches then terminates the process via ``sys.exit(1)``.
    """
    # Setup error logging: per-file parse failures and failed batch inserts
    # are recorded here instead of aborting the whole load.
    error_log_path = db_path.parent / 'load_errors.log'
    logging.basicConfig(
        filename=str(error_log_path),
        level=logging.ERROR,
        format='%(asctime)s - %(message)s',
        filemode='w'  # Overwrite previous log
    )
    print(f"Connecting to database: {db_path}")
    print(f"Error log: {error_log_path}")
    print(f"Batch size: {batch_size}")
    conn = duckdb.connect(str(db_path))
    # Create tables with explicit schema
    create_persons_table(conn)
    create_memberships_table(conn)
    # Build INSERT statements using schema definitions
    insert_person_sql = PERSON_SCHEMA.get_insert_sql()
    insert_membership_sql = MEMBERSHIP_SCHEMA.get_insert_sql()
    # Get all TOML files; materialized into a list so the total is known
    # up front for the percentage display.
    print("\nScanning for TOML files...")
    toml_files = list(get_person_toml_files(data_dir))
    total_files = len(toml_files)
    print(f"Found {total_files} files to process")
    # Load data sequentially with batch inserts (despite what an earlier
    # comment claimed, the loop below is single-threaded).
    print("\nLoading person data...")
    loaded_count = 0              # person rows committed to the database
    error_count = 0               # files that failed to parse/transform
    processed_count = 0           # files attempted, success or failure
    memberships_loaded_count = 0  # membership rows committed
    unknown_fields_seen = set()   # each unknown field name is warned about once per run
    # Batches for accumulating records between commits
    person_batch = []
    membership_batch = []
    # Track timing for the elapsed-time summary
    start_time = datetime.now()

    def flush_batches():
        """Helper to insert accumulated batches and commit.

        Both tables are written inside one transaction so a person row and
        its membership rows land (or roll back) together.
        """
        nonlocal loaded_count, memberships_loaded_count
        if not person_batch and not membership_batch:
            return
        batch_size_to_commit = len(person_batch)
        print(f" Committing batch of {batch_size_to_commit} records...", end='', flush=True)
        try:
            conn.execute("BEGIN TRANSACTION")
            # Batch insert persons
            if person_batch:
                conn.executemany(insert_person_sql, person_batch)
                loaded_count += len(person_batch)
            # Batch insert memberships
            if membership_batch:
                conn.executemany(insert_membership_sql, membership_batch)
                memberships_loaded_count += len(membership_batch)
            conn.execute("COMMIT")
            print(f" done!")
        except Exception as e:
            conn.execute("ROLLBACK")
            logging.error(f"Batch insert failed: {e}")
            print(f"\n Warning: Batch insert failed, see error log")
        # NOTE: a failed batch is logged and then discarded below — the load
        # continues with subsequent files rather than retrying.
        person_batch.clear()
        membership_batch.clear()

    # Process files sequentially
    try:
        for toml_file in toml_files:
            try:
                # Read and parse TOML file
                person_data = load_toml_file(toml_file)
                person_id = person_data.get('id')
                memberships = person_data.get('memberships', [])
                # Warn about unknown fields (helps catch typos)
                for field in person_data.keys():
                    if field not in PERSON_SCHEMA.schema and field != 'memberships' and field not in unknown_fields_seen:
                        print(f" Warning: Unknown field '{field}' (will be ignored)")
                        unknown_fields_seen.add(field)
                # Build values list for person, in schema column order
                values = [
                    transform_value(field, person_data.get(field), PERSON_SCHEMA.nested_fields)
                    for field in PERSON_SCHEMA.field_order
                ]
                person_batch.append(values)
                # Add memberships to batch; synthetic ids are "<person_id>_m<index>"
                for idx, membership in enumerate(memberships):
                    membership_id = f"{person_id}_m{idx}"
                    membership_values = [
                        membership_id,
                        person_id,
                        membership.get('party'),
                        membership.get('region'),
                        membership.get('province'),
                        membership.get('locality'),
                        membership.get('position'),
                        membership.get('year'),
                    ]
                    membership_batch.append(membership_values)
            except Exception as e:
                # File processing failed; log it and keep going
                error_count += 1
                logging.error(f"{toml_file}: {e}")
            processed_count += 1
            # Flush batches when reaching batch size
            if len(person_batch) >= batch_size:
                flush_batches()
            # Progress indicator
            if processed_count % progress_interval == 0 or processed_count == total_files:
                pct = (processed_count / total_files * 100) if total_files > 0 else 0
                # Show committed + pending records
                pending = len(person_batch)
                total_loaded = loaded_count + pending
                print(f" [{pct:5.1f}%] {processed_count}/{total_files} files | "
                      f"{total_loaded} loaded ({loaded_count} committed, {pending} pending) | "
                      f"{error_count} errors")
    except KeyboardInterrupt:
        # Graceful Ctrl+C: persist whatever is already batched, report
        # partial results, then exit with a non-zero status.
        print("\n\n*** Interrupted by user (Ctrl+C) ***")
        print("Flushing any pending records to database...")
        flush_batches()
        # Calculate time up to interruption
        end_time = datetime.now()
        total_seconds = (end_time - start_time).total_seconds()
        total_minutes = total_seconds / 60
        print(f"\nPartial load completed:")
        print(f" Files processed: {processed_count}/{total_files}")
        print(f" Records loaded: {loaded_count}")
        print(f" Errors: {error_count}")
        print(f" Time elapsed: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
        conn.close()
        sys.exit(1)
    # Flush any remaining records
    flush_batches()
    # Calculate total time
    end_time = datetime.now()
    total_seconds = (end_time - start_time).total_seconds()
    total_minutes = total_seconds / 60
    # Show summary
    print(f"\n{'='*60}")
    print(f"Load complete!")
    print(f" Total files processed: {processed_count}")
    print(f" Successfully loaded: {loaded_count}")
    print(f" Errors: {error_count}")
    print(f" Total time: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
    if error_count > 0:
        print(f" Error details logged to: {error_log_path}")
    # Show database stats (may exceed this run's counts if the table
    # already held rows — depends on the schema's CREATE TABLE behavior)
    persons_count = conn.execute("SELECT COUNT(*) as total FROM persons").fetchone()
    memberships_count = conn.execute("SELECT COUNT(*) as total FROM memberships").fetchone()
    print(f" Total persons in database: {persons_count[0]}")
    print(f" Total memberships in database: {memberships_count[0]}")
    print(f"{'='*60}")
    # Show sample data
    print("\nSample persons (first 5 rows):")
    sample = conn.execute("""
        SELECT id, first_name, last_name, name_suffix
        FROM persons
        LIMIT 5
    """).fetchall()
    for row in sample:
        suffix = f" {row[3]}" if row[3] else ""
        print(f" {row[0]}: {row[1]} {row[2]}{suffix}")
    # Show sample memberships
    print("\nSample memberships (first 5 rows):")
    memberships_sample = conn.execute("""
        SELECT
            p.first_name,
            p.last_name,
            m.party,
            m.position,
            m.region,
            m.year
        FROM memberships m
        JOIN persons p ON m.person_id = p.id
        LIMIT 5
    """).fetchall()
    for row in memberships_sample:
        print(f" {row[0]} {row[1]} - {row[3]} ({row[2]}) in {row[4]}, {row[5]}")
    # Export to Parquet if requested
    if export_parquet:
        print(f"\n{'='*60}")
        print("Exporting to Parquet format...")
        # Export persons table
        # NOTE(review): the path is interpolated directly into the COPY
        # statement — a path containing a single quote would break the SQL.
        print(f"\nExporting persons table...")
        print(f" Output: {parquet_path}")
        try:
            conn.execute(f"COPY persons TO '{parquet_path}' (FORMAT PARQUET)")
            if parquet_path.exists():
                file_size = parquet_path.stat().st_size
                file_size_mb = file_size / (1024 * 1024)
                print(f" ✓ Successfully exported persons.parquet ({file_size_mb:.2f} MB)")
            else:
                print(" ⚠ Export completed but file not found")
        except Exception as e:
            print(f" ✗ Error exporting persons: {e}")
        # Export memberships table
        print(f"\nExporting memberships table...")
        print(f" Output: {memberships_parquet_path}")
        try:
            conn.execute(f"COPY memberships TO '{memberships_parquet_path}' (FORMAT PARQUET)")
            if memberships_parquet_path.exists():
                file_size = memberships_parquet_path.stat().st_size
                file_size_mb = file_size / (1024 * 1024)
                print(f" ✓ Successfully exported memberships.parquet ({file_size_mb:.2f} MB)")
            else:
                print(" ⚠ Export completed but file not found")
        except Exception as e:
            print(f" ✗ Error exporting memberships: {e}")
        print(f"{'='*60}")
    conn.close()
    print(f"\nDatabase saved to: {db_path}")
def main():
    """Main entry point: parse CLI options, validate paths, run the loader."""
    # Repository root, used to anchor the default Parquet output paths.
    repo_root = Path(__file__).parent.parent
    arg_parser = argparse.ArgumentParser(
        description='Load person data from TOML files into a DuckDB database',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Use default database path (databases/data.duckdb)
  python scripts/load_persons_to_db.py
  # Specify custom database path
  python scripts/load_persons_to_db.py --db-path /path/to/custom.duckdb
  # Use a different data directory
  python scripts/load_persons_to_db.py --data-dir /path/to/person/data
  # Export to Parquet for Hugging Face dataset viewer
  python scripts/load_persons_to_db.py --export-parquet
"""
    )
    arg_parser.add_argument(
        '--db-path',
        type=Path,
        default=DATABASE_PATH,
        help=f'Path to the DuckDB database (default: {DATABASE_PATH})'
    )
    arg_parser.add_argument(
        '--data-dir',
        type=Path,
        default=PERSON_DATA_DIR,
        help=f'Path to the person data directory (default: {PERSON_DATA_DIR})'
    )
    arg_parser.add_argument(
        '--export-parquet',
        action='store_true',
        help='Export the persons and memberships tables to Parquet format after loading'
    )
    arg_parser.add_argument(
        '--parquet-path',
        type=Path,
        default=repo_root / 'databases' / 'persons.parquet',
        help='Path for the exported persons Parquet file (default: databases/persons.parquet)'
    )
    arg_parser.add_argument(
        '--memberships-parquet-path',
        type=Path,
        default=repo_root / 'databases' / 'memberships.parquet',
        help='Path for the exported memberships Parquet file (default: databases/memberships.parquet)'
    )
    arg_parser.add_argument(
        '--batch-size',
        type=int,
        default=1000,
        help='Number of records to insert per batch/transaction (default: 1000)'
    )
    arg_parser.add_argument(
        '--progress-interval',
        type=int,
        default=100,
        help='Show progress every N files (default: 100)'
    )
    opts = arg_parser.parse_args()
    # Validate data directory exists before doing any work
    if not opts.data_dir.exists():
        print(f"Error: Data directory not found: {opts.data_dir}")
        sys.exit(1)
    # Create databases directory if it doesn't exist
    opts.db_path.parent.mkdir(parents=True, exist_ok=True)
    load_persons_to_db(
        opts.data_dir,
        opts.db_path,
        export_parquet=opts.export_parquet,
        parquet_path=opts.parquet_path,
        memberships_parquet_path=opts.memberships_parquet_path,
        batch_size=opts.batch_size,
        progress_interval=opts.progress_interval,
    )
# Run the loader only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|