"""Rebuild data/v1/latest from compact all_revisions parquet files.

This is intended for destructive full rebuilds where chunked ingest writes
``all_revisions`` only. It computes the latest set revision per set key across
all archive revisions, writes ``latest_revisions/part.parquet``, and
materializes the latest table views as compact Parquet shards.
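
Example invocation (the script filename below is illustrative)::

    python rebuild_latest.py --repo-root . --schema-version v1 --workers 8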
"""

from __future__ import annotations

import argparse
import concurrent.futures
import os
import shutil
import sys
from pathlib import Path
from typing import Any

import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.dataset as ds
import pyarrow.parquet as pq
from tqdm.auto import tqdm

from parquet_writer import (
    LATEST_TABLE_SPECS,
    PARQUET_WRITE_KWARGS,
    _atomic_write_parquet,
    _coerce_ms,
    derive_set_key,
    load_schemas,
    write_latest_revisions,
)

DEFAULT_LATEST_REBUILD_WORKERS = min(8, max(1, os.cpu_count() or 1))

def _log(msg: str) -> None:
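    """Print a progress line to stderr, flushing immediately."""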
    print(msg, file=sys.stderr, flush=True)


def _read_rows(root: Path, columns: list[str]) -> list[dict[str, Any]]:
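    """Load the requested columns from every Parquet shard under ``root``.

    Returns an empty list when the directory is missing or contains no shards.
    """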
    files = sorted(root.rglob("*.parquet")) if root.exists() else []
    if not files:
        return []
    return ds.dataset([str(p) for p in files], format="parquet").to_table(
        columns=columns
    ).to_pylist()


def build_latest_revisions(repo_root: Path, schemas: dict, schema_version: str) -> dict[str, dict]:
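    """Compute the winning set revision per set key and persist latest_revisions.

    Scans ``all_revisions/archive_revisions`` and ``all_revisions/set_revisions``,
    keeps the set revision with the newest archive ingest timestamp for each set
    key, and rewrites ``all_revisions/latest_revisions/part.parquet`` from
    scratch. Returns the mapping of set key to its latest-revision summary row.
    """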
    all_rev = repo_root / "data" / schema_version / "all_revisions"
    archive_rows = _read_rows(
        all_rev / "archive_revisions",
        ["archive_revision_id", "ingested_at"],
    )
    set_rows = _read_rows(
        all_rev / "set_revisions",
        ["set_revision_id", "archive_revision_id"],
    )
    if not archive_rows or not set_rows:
        raise SystemExit("archive_revisions and set_revisions must both be present")

    archive_ts = {
        row["archive_revision_id"]: _coerce_ms(row["ingested_at"])
        for row in archive_rows
    }
    latest: dict[str, dict[str, Any]] = {}
    for sr in set_rows:
        srid = sr["set_revision_id"]
        set_key = derive_set_key(sr)
        ts = int(archive_ts.get(sr["archive_revision_id"]) or 0)
        existing = latest.get(set_key)
        if existing is None:
            latest[set_key] = {
                "set_key": set_key,
                "set_revision_id": srid,
                "first_seen_at": ts,
                "last_updated_at": ts,
                "revision_count": 1,
            }
            continue
        existing["first_seen_at"] = min(int(existing["first_seen_at"]), ts)
        existing["revision_count"] = int(existing["revision_count"]) + 1
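        # Newest archive ingest timestamp wins; ties break on the lexically
        # greater set_revision_id, keeping the winner deterministic.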
        if (ts, srid) > (int(existing["last_updated_at"]), existing["set_revision_id"]):
            existing["set_revision_id"] = srid
            existing["last_updated_at"] = ts
    latest_revisions_dir = (
        repo_root / "data" / schema_version / "all_revisions" / "latest_revisions"
    )
    if latest_revisions_dir.exists():
        shutil.rmtree(latest_revisions_dir)
    write_latest_revisions(
        latest,
        latest_revisions_dir / "part.parquet",
        schemas,
    )
    return latest


def _filter_latest(table: pa.Table, latest_srid_values: pa.Array, latest_dir: str) -> pa.Table:
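    """Keep only rows whose set_revision_id is among the latest revisions.

    The ``logical_files`` view additionally drops video rows, while
    ``logical_files_video`` keeps only video rows.
    """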
    mask = pc.is_in(table["set_revision_id"], value_set=latest_srid_values)
    table = table.filter(mask)
    if table.num_rows == 0:
        return table
    if latest_dir == "logical_files":
        return table.filter(pc.not_equal(table["media_kind"], "video"))
    if latest_dir == "logical_files_video":
        return table.filter(pc.equal(table["media_kind"], "video"))
    return table


def _rebuild_latest_source_file(
    *,
    source: Path,
    source_idx: int,
    latest_root: Path,
    latest_dir: str,
    schema: pa.Schema,
    latest_srid_values: pa.Array,
) -> int:
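    """Filter one all_revisions shard down to latest rows and write a latest shard.

    Returns 1 when an output file was written and 0 when the shard has no
    ``set_revision_id`` column or no surviving rows.
    """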
    table = pq.read_table(source)
    if "set_revision_id" not in table.column_names:
        return 0
    table = _filter_latest(table, latest_srid_values, latest_dir)
    if table.num_rows == 0:
        return 0
    schema_names = schema.names
    table = table.select(schema_names)
    target = latest_root / latest_dir / f"part-{source_idx:05d}.parquet"
    _atomic_write_parquet(table, target, **PARQUET_WRITE_KWARGS)
    return 1


def rebuild_latest_tables(
    repo_root: Path,
    schemas: dict,
    schema_version: str,
    latest: dict[str, dict],
    workers: int = DEFAULT_LATEST_REBUILD_WORKERS,
) -> dict[str, int]:
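    """Re-materialize ``data/<schema_version>/latest`` from the all_revisions shards.

    Rewrites each table defined in LATEST_TABLE_SPECS as one output shard per
    non-empty source shard, filtered to the winning set revisions, processing
    shards sequentially or on a thread pool depending on ``workers``. Returns a
    mapping of latest table directory to the number of files written.
    """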
    all_rev = repo_root / "data" / schema_version / "all_revisions"
    latest_root = repo_root / "data" / schema_version / "latest"
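    # Destructive rebuild: drop any existing latest/ tree before re-creating it.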
    if latest_root.exists():
        shutil.rmtree(latest_root)
    latest_root.mkdir(parents=True, exist_ok=True)

    latest_srid_values = pa.array(
        sorted({row["set_revision_id"] for row in latest.values()}),
        type=pa.string(),
    )
    summary: dict[str, int] = {}
    for schema_table, latest_dir, _row_filter in LATEST_TABLE_SPECS:
        if schema_table not in schemas:
            continue
        source_files = sorted((all_rev / schema_table).rglob("*.parquet"))
        if not source_files:
            summary[latest_dir] = 0
            continue
        schema = schemas[schema_table].file_schema()
        written = 0
        max_workers = min(max(1, workers), len(source_files))
        desc = f"latest/{latest_dir}"
        if max_workers > 1:
            desc = f"{desc} ({max_workers} workers)"
        bar = tqdm(total=len(source_files), desc=desc, unit="file")
        try:
            if max_workers == 1:
                for idx, source in enumerate(source_files):
                    written += _rebuild_latest_source_file(
                        source=source,
                        source_idx=idx,
                        latest_root=latest_root,
                        latest_dir=latest_dir,
                        schema=schema,
                        latest_srid_values=latest_srid_values,
                    )
                    bar.update(1)
            else:
                with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as ex:
                    futures = [
                        ex.submit(
                            _rebuild_latest_source_file,
                            source=source,
                            source_idx=idx,
                            latest_root=latest_root,
                            latest_dir=latest_dir,
                            schema=schema,
                            latest_srid_values=latest_srid_values,
                        )
                        for idx, source in enumerate(source_files)
                    ]
                    for future in concurrent.futures.as_completed(futures):
                        written += future.result()
                        bar.update(1)
        finally:
            bar.close()
        summary[latest_dir] = written
    return summary


def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
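    """Parse command-line options for the rebuild."""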
    parser = argparse.ArgumentParser(
        description="Rebuild compact data/v1/latest from all_revisions."
    )
    parser.add_argument("--repo-root", default=".")
    parser.add_argument("--schema-version", default="v1")
    parser.add_argument("--schemas-dir", default=None)
    parser.add_argument(
        "--workers",
        type=int,
        default=DEFAULT_LATEST_REBUILD_WORKERS,
        help=(
            "Parallel source Parquet files per latest table. Defaults to "
            f"min(8, CPU count), currently {DEFAULT_LATEST_REBUILD_WORKERS}."
        ),
    )
    return parser.parse_args(argv)


def main(argv: list[str] | None = None) -> int:
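    """CLI entry point: build latest_revisions, then rebuild the latest table shards."""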
    args = parse_args(argv)
    repo_root = Path(args.repo_root).resolve()
    schemas_dir = (
        Path(args.schemas_dir).resolve()
        if args.schemas_dir
        else repo_root / "schemas" / args.schema_version
    )
    schemas = load_schemas(schemas_dir)
    _log(f"loaded {len(schemas)} schema(s) from {schemas_dir}")
    latest = build_latest_revisions(repo_root, schemas, args.schema_version)
    _log(f"latest_revisions: {len(latest):,} set key(s)")
    summary = rebuild_latest_tables(
        repo_root,
        schemas,
        args.schema_version,
        latest,
        workers=max(1, args.workers),
    )
    _log("latest files: " + ", ".join(f"{k}={v}" for k, v in summary.items()))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
|