"""Compact v1 ``all_revisions`` Parquet metadata files.

This rewrites table-level Parquet fragments under
``data/v1/all_revisions/<table>/`` into fewer ``compact-*.parquet`` files.
Raw ``archives/`` are never touched, and ``latest_revisions`` is intentionally
left for ``rebuild_latest_snapshot.py`` to regenerate after compaction. Rows
are kept in source-file order rather than globally re-sorted, so consumers
must use explicit sort keys when order matters.
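
A minimal programmatic sketch (the repository path is illustrative)::

    from pathlib import Path

    summary = compact_metadata_v1(Path("/path/to/repo"))
    print(summary["files_before"], "->", summary["files_after"])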
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import concurrent.futures |
| import json |
| import os |
| import shutil |
| import sys |
| import time |
| import uuid |
| from pathlib import Path |
| from typing import Any |
|
|
| import pyarrow as pa |
| import pyarrow.dataset as ds |
| import pyarrow.parquet as pq |
| from tqdm.auto import tqdm |
|
|
| from parquet_writer import ( |
| PARQUET_WRITE_KWARGS, |
| TableSchema, |
| _atomic_write_parquet, |
| load_schemas, |
| ) |
|
|
|
|
| EXCLUDED_TABLES = {"latest_revisions"} |
| DEFAULT_COMPACT_WORKERS = min(4, max(1, os.cpu_count() or 1)) |
|
|
|
|
| def _log(msg: str) -> None: |
| print(msg, file=sys.stderr, flush=True) |
|
|
|
|
| def _parquet_files(root: Path) -> list[Path]: |
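    """Return every ``*.parquet`` file under ``root``, sorted for deterministic order."""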
    return sorted(p for p in root.rglob("*.parquet") if p.is_file())


def _count_rows(files: list[Path]) -> int:
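    """Count total rows across ``files`` via a pyarrow dataset; zero when empty."""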
    if not files:
        return 0
    return int(ds.dataset([str(p) for p in files], format="parquet").count_rows())


def _fallback_schema(files: list[Path]) -> pa.Schema:
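    """Fall back to the first file's schema when no declared schema is available."""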
    if not files:
        raise ValueError("cannot infer schema without parquet files")
    return pq.read_schema(files[0])


def _align_table(table: pa.Table, schema: pa.Schema) -> pa.Table:
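    """Align ``table`` to ``schema``: null-fill missing columns, drop extras, cast."""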
    for field in schema:
        if field.name in table.column_names:
            continue
        table = table.append_column(
            field.name,
            pa.nulls(table.num_rows, type=field.type),
        )
    table = table.select(schema.names)
    return table.cast(schema, safe=False)


def _write_compacted_table(
    *,
    table_name: str,
    table_dir: Path,
    target_dir: Path,
    table_schema: TableSchema | None,
    target_rows: int,
    batch_size: int,
) -> dict[str, Any]:
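    """Stream one table's fragments into ``target_dir`` as ``compact-*.parquet`` files.

    Batches are buffered until roughly ``target_rows`` rows accumulate, then
    flushed as a single schema-aligned, atomically written file. Raises
    ``RuntimeError`` if the rewritten row count does not match the source.
    """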
    source_files = _parquet_files(table_dir)
    source_rows = _count_rows(source_files)
    file_schema = table_schema.file_schema() if table_schema else _fallback_schema(source_files)

    target_dir.mkdir(parents=True, exist_ok=True)
    dataset = ds.dataset([str(p) for p in source_files], format="parquet")
    scanner = dataset.scanner(batch_size=batch_size)

    written_files = 0
    written_rows = 0
    buffered: list[pa.RecordBatch] = []
    buffered_rows = 0

    def flush() -> None:
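        """Write the buffered batches out as the next sequentially numbered file."""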
        nonlocal written_files, written_rows, buffered, buffered_rows
        if not buffered:
            return
        table = pa.Table.from_batches(buffered)
        table = _align_table(table, file_schema)
        target = target_dir / f"compact-{written_files:05d}.parquet"
        _atomic_write_parquet(table, target, **PARQUET_WRITE_KWARGS)
        written_files += 1
        written_rows += table.num_rows
        buffered = []
        buffered_rows = 0

    for batch in scanner.to_batches():
        if batch.num_rows <= 0:
            continue
        buffered.append(batch)
        buffered_rows += batch.num_rows
        if buffered_rows >= target_rows:
            flush()
    flush()  # write the final partial file, if any rows remain buffered

    if written_rows != source_rows:
        raise RuntimeError(
            f"{table_name}: row count changed during compaction: "
            f"source={source_rows}, written={written_rows}"
        )

    return {
        "table": table_name,
        "source_files": len(source_files),
        "source_rows": source_rows,
        "written_files": written_files,
        "written_rows": written_rows,
        "compacted": True,
    }


def compact_metadata_v1(
    repo_root: Path,
    *,
    schema_version: str = "v1",
    schemas_dir: Path | None = None,
    target_rows: int = 1_000_000,
    batch_size: int = 65_536,
    min_files: int = 2,
    workers: int = DEFAULT_COMPACT_WORKERS,
    keep_backup: bool = False,
) -> dict[str, Any]:
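    """Compact every eligible ``all_revisions`` table under ``repo_root``.

    Output is staged under ``.scratch/metadata-compaction/<tx_id>`` first; only
    after every table compacts successfully are the originals moved to backup
    and the staged directories renamed into place. On failure, the scratch
    directory is kept for inspection.
    """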
    repo_root = repo_root.resolve()
    all_revisions = repo_root / "data" / schema_version / "all_revisions"
    if not all_revisions.exists():
        raise FileNotFoundError(all_revisions)

    schemas_path = schemas_dir or repo_root / "schemas" / schema_version
    schemas = load_schemas(schemas_path) if schemas_path.exists() else {}

    tx_id = f"metadata-compact-{int(time.time())}-{uuid.uuid4().hex[:8]}"
    tx_root = repo_root / ".scratch" / "metadata-compaction" / tx_id
    staging_root = tx_root / "staging" / "all_revisions"
    backup_root = tx_root / "backup" / "all_revisions"

    table_dirs = [
        p
        for p in sorted(all_revisions.iterdir())
        if p.is_dir() and p.name not in EXCLUDED_TABLES
    ]

    summaries: list[dict[str, Any]] = []
    compacted_tables: list[str] = []
    summary_by_table: dict[str, dict[str, Any]] = {}

    try:
        # Tables below the min-files threshold are recorded as-is, not rewritten.
        work_items: list[tuple[Path, list[Path]]] = []
        for table_dir in table_dirs:
            files = _parquet_files(table_dir)
            if len(files) < min_files:
                rows = _count_rows(files)
                summary_by_table[table_dir.name] = {
                    "table": table_dir.name,
                    "source_files": len(files),
                    "source_rows": rows,
                    "written_files": len(files),
                    "written_rows": rows,
                    "compacted": False,
                }
            else:
                work_items.append((table_dir, files))

        # Largest tables first, so the slowest work starts earliest.
        work_items.sort(
            key=lambda item: sum(p.stat().st_size for p in item[1]),
            reverse=True,
        )
        max_workers = min(max(1, workers), max(len(work_items), 1))
        desc = f"compacting all_revisions ({max_workers} workers)"
        bar = tqdm(
            total=len(work_items),
            desc=desc,
            unit="table",
            file=sys.stderr,
            mininterval=1.0,
            dynamic_ncols=True,
        )
        try:
            if max_workers == 1:
                for table_dir, _files in work_items:
                    summary = _write_compacted_table(
                        table_name=table_dir.name,
                        table_dir=table_dir,
                        target_dir=staging_root / table_dir.name,
                        table_schema=schemas.get(table_dir.name),
                        target_rows=target_rows,
                        batch_size=batch_size,
                    )
                    summary_by_table[table_dir.name] = summary
                    bar.update(1)
            else:
                with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as ex:
                    futures = {
                        ex.submit(
                            _write_compacted_table,
                            table_name=table_dir.name,
                            table_dir=table_dir,
                            target_dir=staging_root / table_dir.name,
                            table_schema=schemas.get(table_dir.name),
                            target_rows=target_rows,
                            batch_size=batch_size,
                        ): table_dir.name
                        for table_dir, _files in work_items
                    }
                    for future in concurrent.futures.as_completed(futures):
                        table_name = futures[future]
                        summary_by_table[table_name] = future.result()
                        bar.update(1)
        finally:
            bar.close()

        summaries = [
            summary_by_table[table_dir.name]
            for table_dir in table_dirs
            if table_dir.name in summary_by_table
        ]
        compacted_tables = [
            summary["table"]
            for summary in summaries
            if summary.get("compacted")
        ]

        # Swap staged output into place; originals go to backup first so a
        # failure mid-swap leaves every table recoverable.
        for table in compacted_tables:
            source = all_revisions / table
            backup = backup_root / table
            staged = staging_root / table
            backup.parent.mkdir(parents=True, exist_ok=True)
            if backup.exists():
                shutil.rmtree(backup)
            source.rename(backup)
            staged.rename(source)

        total_before = sum(int(s["source_files"]) for s in summaries)
        total_after = sum(int(s["written_files"]) for s in summaries)
        total_rows = sum(int(s["written_rows"]) for s in summaries)
        result = {
            "ok": True,
            "tx_id": tx_id,
            "tables": summaries,
            "compacted_tables": compacted_tables,
            "files_before": total_before,
            "files_after": total_after,
            "rows": total_rows,
            "backup_path": str(backup_root) if keep_backup else None,
        }
        if keep_backup:
            shutil.rmtree(staging_root.parent, ignore_errors=True)
        else:
            shutil.rmtree(tx_root, ignore_errors=True)
        return result
    except Exception:
        _log(f"compaction failed; transaction scratch kept at {tx_root}")
        raise


def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--repo-root", default=".")
    parser.add_argument("--schema-version", default="v1")
    parser.add_argument("--schemas-dir", default=None)
    parser.add_argument("--target-rows", type=int, default=1_000_000)
    parser.add_argument("--batch-size", type=int, default=65_536)
    parser.add_argument("--min-files", type=int, default=2)
    parser.add_argument(
        "--workers",
        type=int,
        default=DEFAULT_COMPACT_WORKERS,
        help=(
            "Parallel table compaction workers. Defaults to min(4, CPU count), "
            f"currently {DEFAULT_COMPACT_WORKERS}."
        ),
    )
    parser.add_argument("--keep-backup", action="store_true")
    parser.add_argument("--json", action="store_true")
    return parser.parse_args(argv)


def main(argv: list[str] | None = None) -> int:
    args = parse_args(argv)
    summary = compact_metadata_v1(
        Path(args.repo_root),
        schema_version=args.schema_version,
        schemas_dir=Path(args.schemas_dir) if args.schemas_dir else None,
        target_rows=max(1, args.target_rows),
        batch_size=max(1, args.batch_size),
        min_files=max(1, args.min_files),
        workers=max(1, args.workers),
        keep_backup=args.keep_backup,
    )
    if args.json:
        print(json.dumps(summary, indent=2, sort_keys=True))
    else:
        print(
            "compaction: "
            f"files_before={summary['files_before']} "
            f"files_after={summary['files_after']} "
            f"rows={summary['rows']}"
        )
        for table in summary["tables"]:
            if table["compacted"]:
                print(
                    f"  {table['table']}: "
                    f"{table['source_files']} -> {table['written_files']} files, "
                    f"{table['written_rows']} rows"
                )
    return 0


if __name__ == "__main__":
    raise SystemExit(main())