| |
| """Ingest one or more .osz archives into the compact v1 store. |
| |
| End-to-end pipeline (PER CHUNK): |
| 1. Invoke the Rust ``osu_indexer`` binary on a chunk of archives; |
| the indexer copies the raw archive to ``archives/sha256/``, hashes each |
| member without writing extracted blob files, parses every .osu / .osb, |
| resolves references, computes rosu-pp difficulty attributes, and emits one |
| NDJSON line per row tagged with ``_table``. |
| 2. Group rows by ``_table``. |
| 3. Atomically write compact all-revisions Parquet files with |
| ``archive_revisions`` deferred. |
| 4. Atomically commit ``archive_revisions`` as the chunk commit marker. |
| Only after this row exists for an archive is the rest of its data |
| guaranteed to be on disk. |
| |
| Crash-safety guarantees: |
| - Archive copies are tmp+rename atomic in the indexer. |
| - Every parquet write is tmp+rename atomic via parquet_writer._atomic_write_parquet. |
| - The chunk commit marker is ``archive_revisions``: if a chunk crashes |
| before commit, ``--skip-already-ingested`` (default on) re-runs that |
| chunk on the next invocation. Re-running the same chunk_batch_id is |
| idempotent at every step. |
| - Across chunks, only the in-progress chunk's work is at risk on Ctrl+C. |
| - On startup we sweep orphan ``*.tmp.<pid>`` files older than |
| ``--gc-tmp-files-age-min`` (default 60 minutes) so leaks don't grow |
| unbounded. |
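| |
| Typical invocation (illustrative; flag defaults are documented in --help):: |
| |
|     python ingest_osz.py path/to/osz_dir --repo-root . --chunk-size 1000 |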
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import contextlib |
| import datetime as _dt |
| import hashlib |
| import json |
| import os |
| import re |
| import secrets |
| import subprocess |
| import sys |
| import tempfile |
| import time |
| from pathlib import Path |
|
|
| import pyarrow.dataset as ds |
| from tqdm.auto import tqdm |
|
|
| from parquet_writer import ( |
| commit_archive_revisions, |
| group_rows_by_table, |
| load_schemas, |
| write_all_revisions_tables, |
| ) |
|
|
|
|
| def _log(msg: str) -> None: |
| """Print to stderr with ``flush=True``. |
| |
| When stderr is not a TTY, Python can switch it to block buffering, and |
| progress lines for long-running steps stop appearing in real time. |
| Forcing a flush per line keeps the log live without changing the host |
| terminal's buffering policy. |
| """ |
| print(msg, file=sys.stderr, flush=True) |
|
|
|
|
| def _default_batch_id() -> str: |
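|     """Return e.g. ``20260101T120000Z-a1b2c3`` (illustrative): UTC stamp + 6 random hex chars.""" |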
| stamp = _dt.datetime.now(_dt.timezone.utc).strftime("%Y%m%dT%H%M%SZ") |
| return f"{stamp}-{secrets.token_hex(3)}" |
|
|
|
|
| # SetThreadExecutionState flags used by keep_awake(): ES_CONTINUOUS keeps the |
| # requested state in force until cleared; ES_SYSTEM_REQUIRED resets the system |
| # idle timer so the machine does not go to sleep. |
| _ES_CONTINUOUS = 0x80000000 |
| _ES_SYSTEM_REQUIRED = 0x00000001 |
|
|
|
|
| def _process_alive(pid: int) -> bool: |
| """Best-effort check whether process ``pid`` is still running. |
| |
| Used by :func:`repo_lock` to detect stale lock files left behind by a |
| crashed previous run. False positives (treating a dead process as alive) |
| only block the user with a clear "remove the lock" instruction; false |
| negatives (treating a live process as dead) would let two writers race, |
| which is the actual hazard — so the implementation errs on the side of |
| "alive" whenever it cannot definitively prove the process has exited. |
| """ |
| if pid <= 0: |
| return False |
| if sys.platform.startswith("win"): |
| try: |
| import ctypes |
| |
|             # use_last_error=True so ctypes.get_last_error() below reports the |
|             # OpenProcess failure rather than some earlier Win32 call. |
| kernel32 = ctypes.WinDLL("kernel32", use_last_error=True) |
|             # PROCESS_QUERY_LIMITED_INFORMATION (0x1000) is enough to query |
|             # liveness and works for processes owned by other users. |
| handle = kernel32.OpenProcess(0x1000, False, pid) |
| if not handle: |
| err = ctypes.get_last_error() |
|                 # OpenProcess failed: ERROR_INVALID_PARAMETER (87) means the |
|                 # PID does not exist; any other error (e.g. access denied) |
|                 # means the process is there, so err on the side of "alive". |
| return err != 87 |
| exit_code = ctypes.c_ulong(0) |
| ok = kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) |
| kernel32.CloseHandle(handle) |
| if not ok: |
| return True |
|             # 259 is STILL_ACTIVE; any other exit code means the process ended. |
| return exit_code.value == 259 |
| except (ImportError, OSError, AttributeError): |
| return True |
| |
| try: |
| os.kill(pid, 0) |
| return True |
| except ProcessLookupError: |
| return False |
| except PermissionError: |
|         # Signalling was denied, which still proves the process exists. |
| return True |
| except OSError: |
| return True |
|
|
|
|
| def _try_atomic_create_lock(lock_path: Path, payload: str) -> bool: |
| """Atomically create the lock file with ``payload``. |
| |
| Returns ``True`` on success, ``False`` if the file already exists. |
| ``O_CREAT | O_EXCL`` is the kernel's only race-free way to take an |
| exclusive lock against a concurrent process — a separate ``exists()`` |
| check followed by ``write_text()`` has a TOCTOU window where two |
| launches both see "no lock" and both write, producing two writers |
| that each believe they hold it. |
| """ |
| try: |
| fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY) |
| except FileExistsError: |
| return False |
| with os.fdopen(fd, "w", encoding="utf-8") as f: |
| f.write(payload) |
| return True |
|
|
|
|
| @contextlib.contextmanager |
| def repo_lock(repo_root: Path, force: bool = False): |
| """Hold an exclusive single-writer lock on ``repo_root`` for the with body. |
| |
| Stores ``<pid>\\n<utc-iso-timestamp>\\n`` in ``repo_root/.ingest.lock``. |
| On entry, refuses to proceed if the lock already exists and points to a |
| PID that's still alive — two concurrent ingests against the same repo |
| would race on orphan-parquet cleanup and output file names, producing |
| duplicate rows or lost work. |
| |
| Acquisition uses ``O_CREAT | O_EXCL`` so two simultaneous launches can't |
| both pass the existence check before either writes. Stale locks (PID no |
| longer alive) are cleaned up automatically with a one-line stderr notice. |
| Pass ``force=True`` (CLI ``--force-lock``) to override even a live-PID |
| lock — only safe if the user has manually confirmed the other process |
| is gone. |
| |
| Released on exit even if the body raises. The lock file lives at the |
| repo root rather than under ``data/`` so that ``rm -rf data/`` for a |
| fresh restart doesn't accidentally remove an active lock. |
| |
| Yields ``had_stale_lock: bool`` so the body can decide whether paranoid |
| crash-recovery sweeps (e.g. orphan tmp file GC) need to run. Lock file |
| presence at startup means the previous run did not unwind cleanly: |
| either it was hard-killed (OS crash, SIGKILL, power loss) before its |
| ``finally`` could unlink the file, or the user is overriding a live |
| lock with ``--force-lock``. A clean exit (including caught Ctrl+C and |
| propagated exceptions) always unlinks the lock, so its absence on the |
| next startup is a positive proof of clean shutdown. |
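| |
|     Example (illustrative; mirrors the call in ``main``):: |
| |
|         with repo_lock(repo_root, force=args.force_lock) as had_stale_lock: |
|             with keep_awake(enabled=args.keep_awake): |
|                 ...  # GC, hashing pass, chunked ingest |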
| """ |
| repo_root.mkdir(parents=True, exist_ok=True) |
| lock_path = repo_root / ".ingest.lock" |
| payload = ( |
| f"{os.getpid()}\n" |
| f"{_dt.datetime.now(_dt.timezone.utc).isoformat()}\n" |
| ) |
| had_stale_lock = False |
|
|
| if not _try_atomic_create_lock(lock_path, payload): |
|         # The lock file already exists: a stale leftover or a live concurrent writer. |
| had_stale_lock = True |
| existing_pid: int | None = None |
| existing_started: str = "?" |
| try: |
| content = lock_path.read_text(encoding="utf-8").strip().splitlines() |
| existing_pid = int(content[0]) |
| if len(content) > 1: |
| existing_started = content[1] |
| except (OSError, ValueError, IndexError): |
| pass |
|
|
| if existing_pid is not None and _process_alive(existing_pid): |
| if not force: |
| raise SystemExit( |
| f"error: ingest lock at {lock_path} is held by PID " |
| f"{existing_pid} (started {existing_started}). " |
| f"Refusing to launch a second writer against {repo_root} — " |
| f"two concurrent ingests would corrupt the dataset " |
| f"(orphan-parquet cleanup and output file names are not " |
| f"multi-writer safe). If you are certain that PID is gone, " |
| f"remove the lock file or pass --force-lock." |
| ) |
| print( |
| f"warning: --force-lock overriding existing lock " |
| f"(PID {existing_pid}, started {existing_started}); this is " |
| f"unsafe if the other process is still writing.", |
| file=sys.stderr, |
| ) |
| elif existing_pid is not None: |
| print( |
| f"info: removing stale ingest lock at {lock_path} " |
| f"(PID {existing_pid} no longer alive; was started " |
| f"{existing_started})", |
| file=sys.stderr, |
| ) |
| else: |
| print( |
| f"info: removing unparseable ingest lock at {lock_path}", |
| file=sys.stderr, |
| ) |
|
|         # Stale or force-overridden lock: remove it, then retake atomically. |
| try: |
| lock_path.unlink() |
| except OSError: |
| pass |
|         # Retake atomically; if another process slipped in between the unlink |
|         # and this create, refuse rather than run two writers. |
| if not _try_atomic_create_lock(lock_path, payload): |
| raise SystemExit( |
| f"error: lock at {lock_path} was re-acquired by another " |
| f"process between cleanup and retry; refusing to proceed." |
| ) |
|
|
| try: |
| yield had_stale_lock |
| finally: |
| try: |
| lock_path.unlink() |
| except OSError: |
| pass |
|
|
|
|
| @contextlib.contextmanager |
| def keep_awake(enabled: bool = True): |
| """Inhibit system sleep for the duration of the ``with`` block. |
| |
| On Windows: ``SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)`` |
| on entry, ``SetThreadExecutionState(ES_CONTINUOUS)`` on exit (clears the |
| flags so the system can sleep again on its normal idle policy). |
| |
| On non-Windows or when ``enabled=False``: no-op. |
| |
| The display is NOT kept awake — only the system. Phase 17 runs unattended, |
| so we want to let the screen turn off but keep the box awake. |
| """ |
| if not enabled or not sys.platform.startswith("win"): |
| yield |
| return |
|
|
| try: |
| import ctypes |
| except ImportError: |
| print( |
| "warning: ctypes unavailable; system may sleep during the run", |
| file=sys.stderr, |
| ) |
| yield |
| return |
|
|
| try: |
| kernel32 = ctypes.windll.kernel32 |
| except (AttributeError, OSError) as e: |
| print( |
| f"warning: SetThreadExecutionState unavailable ({e}); " |
| f"system may sleep during the run", |
| file=sys.stderr, |
| ) |
| yield |
| return |
|
|
| prev = kernel32.SetThreadExecutionState(_ES_CONTINUOUS | _ES_SYSTEM_REQUIRED) |
| if prev == 0: |
|         # SetThreadExecutionState returns the previous state on success and 0 |
|         # on failure; on failure, fall back to the normal sleep policy. |
| print( |
| "warning: SetThreadExecutionState failed; system may sleep during the run", |
| file=sys.stderr, |
| ) |
| yield |
| return |
|
|
| print( |
| "keep-awake: system sleep inhibited (ES_SYSTEM_REQUIRED). " |
| "Display can still sleep on its normal timer.", |
| file=sys.stderr, |
| ) |
| try: |
| yield |
| finally: |
| try: |
| kernel32.SetThreadExecutionState(_ES_CONTINUOUS) |
| print("keep-awake: released; normal sleep policy restored.", file=sys.stderr) |
| except Exception: |
|             # Best-effort release; never let cleanup mask the body's outcome. |
| pass |
|
|
|
|
| def parse_args(argv: list[str] | None = None) -> argparse.Namespace: |
| p = argparse.ArgumentParser( |
| prog="ingest_osz", |
| description="Ingest .osz archives into the compact v1 store.", |
| ) |
| p.add_argument( |
| "archives", |
| nargs="+", |
| help=".osz files (or directories of .osz files) to ingest", |
| ) |
| p.add_argument( |
| "--repo-root", |
| default=".", |
| help="root of the osu-everything repo (default: cwd)", |
| ) |
| p.add_argument( |
| "--schema-version", |
| default="v1", |
| help="schema version directory under data/", |
| ) |
| p.add_argument( |
| "--schemas-dir", |
| default=None, |
| help="override schema-loading directory (default: <repo>/schemas/<schema-version>)", |
| ) |
| p.add_argument( |
| "--rosu-indexer", |
| default="./target/release/osu_indexer", |
| help="path to the compiled Rust indexer binary", |
| ) |
|
|
| p.add_argument( |
| "--archives-dir", |
| default=None, |
| help="absolute directory where archive .osz CAS copies are written " |
| "(default: <repo-root>/archives)", |
| ) |
|
|
| p.add_argument( |
| "--indexer-workers", |
| type=int, |
| default=None, |
| help="number of archives the Rust indexer processes concurrently per " |
| "chunk (forwarded as --workers). Defaults to the indexer's own " |
| "default of min(4, available_parallelism). Higher values keep both " |
| "input and CAS drives busy continuously and parallelize rosu-pp " |
| "across cores; on a single HDD spindle, going past 4 workers " |
| "thrashes seeks.", |
| ) |
|
|
| p.add_argument( |
| "--write-all-revisions", |
| dest="write_all_revisions", |
| action="store_true", |
| default=True, |
| ) |
| p.add_argument( |
| "--no-write-all-revisions", |
| dest="write_all_revisions", |
| action="store_false", |
| ) |
|
|
| p.add_argument( |
| "--physical-partitioning", |
| choices=("schema", "none"), |
| default="none", |
| help="physical all_revisions partition layout. 'schema' preserves the " |
| "schema partition dirs; 'none' writes one parquet per table per chunk " |
| "while retaining partition columns in the file body.", |
| ) |
|
|
| p.add_argument( |
| "--ingest-batch-id", |
| default=None, |
| help="parent batch id; chunks get '-chunk-NNNN' suffix when >1 chunk " |
| "(defaults to UTC ISO-8601 + random suffix)", |
| ) |
| p.add_argument( |
| "--limit", |
| type=int, |
| default=None, |
| help="stop after N archives (useful for smoke tests)", |
| ) |
| p.add_argument( |
| "--dry-run", |
| action="store_true", |
| help="run the indexer but do not write Parquet", |
| ) |
|
|
| p.add_argument( |
| "--skip-difficulty", |
| action="store_true", |
| help="forward to osu_indexer: skip the rosu-pp difficulty pass", |
| ) |
| p.add_argument( |
| "--keep-ndjson", |
| type=Path, |
| default=None, |
| help="path to keep the indexer's NDJSON output for inspection " |
| "(default: write to a temp file and delete after; in chunked mode " |
| "the per-chunk NDJSON gets a '.<chunk_idx>' suffix)", |
| ) |
|
|
| |
| p.add_argument( |
| "--skip-already-ingested", |
| dest="skip_already_ingested", |
| action="store_true", |
| default=True, |
| help="hash each input archive at startup and skip those whose " |
| "archive_sha256 is already in archive_revisions/ (default on)", |
| ) |
| p.add_argument( |
| "--no-skip-already-ingested", |
| dest="skip_already_ingested", |
| action="store_false", |
| ) |
| p.add_argument( |
| "--archive-sha-cache", |
| dest="archive_sha_cache", |
| action="store_true", |
| default=True, |
| help="cache (path, mtime, size, sha256) for input archives in " |
| ".scratch/input_archive_shas.json so the pre-chunk hashing pass " |
| "skips files that haven't changed since last run (default on)", |
| ) |
| p.add_argument( |
| "--no-archive-sha-cache", |
| dest="archive_sha_cache", |
| action="store_false", |
| help="force-rehash every input archive at startup, ignoring the cache", |
| ) |
| p.add_argument( |
| "--chunk-size", |
| type=int, |
| default=1000, |
| help="archives per indexer invocation; smaller = finer resume " |
| "granularity, larger = fewer parquet files (default 1000)", |
| ) |
| p.add_argument( |
| "--gc-tmp-files", |
| dest="gc_tmp_files", |
| action="store_true", |
| default=True, |
| help="enable orphan *.tmp.<pid> file GC on startup (default on). " |
| "By default the walk only runs when a stale .ingest.lock is " |
| "detected at startup (= previous run was hard-killed); on clean " |
| "exits and Ctrl+C the walk is skipped because atomic tmp+rename + " |
| "the indexer's signal-aware drain leave no orphans. Pass " |
| "--always-gc-tmp-files to walk unconditionally.", |
| ) |
| p.add_argument( |
| "--no-gc-tmp-files", |
| dest="gc_tmp_files", |
| action="store_false", |
| help="never walk for orphan tmp files. Use only when you've " |
| "manually confirmed the repo is clean — leftover tmp files will " |
| "stay on disk forever as wasted space until cleaned by hand.", |
| ) |
| p.add_argument( |
| "--always-gc-tmp-files", |
| dest="always_gc_tmp_files", |
| action="store_true", |
| default=False, |
| help="walk archives/ and data/ for orphan *.tmp.<pid> files on every " |
| "startup, regardless of whether the previous run shut down cleanly.", |
| ) |
| p.add_argument( |
| "--always-crash-recovery", |
| dest="always_crash_recovery", |
| action="store_true", |
| default=False, |
| help="run orphan-Parquet cleanup on every startup, regardless of " |
| "whether the previous run shut down cleanly.", |
| ) |
| p.add_argument( |
| "--gc-tmp-files-age-min", |
| type=int, |
| default=60, |
| help="minimum age in minutes for tmp files to be eligible for GC " |
| "(default 60); set to 0 to delete all orphans", |
| ) |
| p.add_argument( |
| "--cleanup-orphan-parquets", |
| dest="cleanup_orphan_parquets", |
| action="store_true", |
| default=True, |
| help="on startup, delete all_revisions/<table>/.../part-<batch>.parquet " |
| "files whose batch_id has no archive_revisions commit marker — " |
| "fixes duplicate rows after a crashed chunk (default on)", |
| ) |
| p.add_argument( |
| "--no-cleanup-orphan-parquets", |
| dest="cleanup_orphan_parquets", |
| action="store_false", |
| ) |
|
|
| p.add_argument( |
| "--quiet-indexer", |
| action="store_true", |
| help="forward --quiet to the Rust indexer, suppressing the per-archive " |
| "progress line. The chunk-level ETA/timing summary is still printed.", |
| ) |
|
|
| p.add_argument( |
| "--keep-awake", |
| dest="keep_awake", |
| action="store_true", |
| default=True, |
| help="on Windows, prevent system sleep for the duration of the run " |
| "via SetThreadExecutionState (ES_SYSTEM_REQUIRED). Display sleep is " |
| "not inhibited. Default on; no-op on non-Windows.", |
| ) |
| p.add_argument( |
| "--no-keep-awake", |
| dest="keep_awake", |
| action="store_false", |
| ) |
|
|
| p.add_argument( |
| "--force-lock", |
| dest="force_lock", |
| action="store_true", |
| default=False, |
| help="override an existing repo lock even if the holding PID is still " |
| "alive. UNSAFE: two concurrent ingests against the same repo will " |
| "corrupt the dataset. Only pass this if you have manually confirmed " |
| "the previous process is gone.", |
| ) |
|
|
| return p.parse_args(argv) |
|
|
|
|
| def _resolve_indexer_path(indexer: Path, repo_root: Path) -> Path | None: |
| """Find the indexer binary by trying cwd- and repo-root-relative, |
| with and without a Windows .exe suffix.""" |
| candidates: list[Path] = [] |
| if indexer.is_absolute(): |
| candidates.append(indexer) |
| else: |
| candidates.extend([Path.cwd() / indexer, repo_root / indexer]) |
| if sys.platform.startswith("win"): |
| candidates += [c.with_suffix(c.suffix + ".exe") for c in list(candidates)] |
| for c in candidates: |
| if c.exists(): |
| return c.resolve() |
| return None |
|
|
|
|
| def collect_archives(inputs: list[str]) -> list[Path]: |
| out: list[Path] = [] |
| for arg in inputs: |
| p = Path(arg) |
| if p.is_dir(): |
| out.extend(sorted(p.rglob("*.osz"))) |
| elif p.is_file() and p.suffix.lower() == ".osz": |
| out.append(p) |
| else: |
| print(f"warning: skipping non-.osz argument {arg!r}", file=sys.stderr) |
| return out |
|
|
|
|
| def _format_duration(seconds: float) -> str: |
| """Compact human-readable duration ('42s', '7.3m', '4.2h').""" |
| if seconds < 60: |
| return f"{seconds:.0f}s" |
| if seconds < 3600: |
| return f"{seconds / 60:.1f}m" |
| return f"{seconds / 3600:.1f}h" |
|
|
|
|
| def _format_size(num_bytes: float) -> str: |
| """Compact human-readable byte size ('312 KB', '47.2 MB', '3.1 GB').""" |
| n = float(num_bytes) |
| for unit in ("B", "KB", "MB", "GB", "TB"): |
| if n < 1024 or unit == "TB": |
| return f"{n:.1f} {unit}" if unit != "B" else f"{int(n)} B" |
| n /= 1024 |
| return f"{n:.1f} TB" |
|
|
|
|
| def already_ingested_shas(archive_revisions_dir: Path) -> set[str]: |
| """Read existing all_revisions/archive_revisions/ parquets and return the |
| set of archive_sha256 values currently committed. |
| |
| Returns an empty set if the directory is empty or missing. Iterates |
| batches under a tqdm bar so the user sees progress while a full-corpus |
| archive_revisions/ (tens of thousands of rows across many parquets) is |
| being read. |
| """ |
| if not archive_revisions_dir.exists(): |
| return set() |
| parquets = list(archive_revisions_dir.rglob("*.parquet")) |
| if not parquets: |
| return set() |
| dataset = ds.dataset([str(p) for p in parquets], format="parquet") |
| scanner = dataset.scanner(columns=["archive_sha256"]) |
| total = scanner.count_rows() |
| out: set[str] = set() |
| bar = tqdm( |
| total=total, |
| desc=f"reading committed archive_sha256s ({len(parquets)} parquet(s))", |
| unit="row", |
| unit_scale=True, |
| file=sys.stderr, |
| mininterval=1.0, |
| dynamic_ncols=True, |
| ) |
| try: |
| for batch in scanner.to_batches(): |
| out.update(batch.column("archive_sha256").to_pylist()) |
| bar.update(batch.num_rows) |
| finally: |
| bar.close() |
| return out |
|
|
|
|
| def sha256_of_file(path: Path) -> str: |
| h = hashlib.sha256() |
| with path.open("rb") as f: |
| for chunk in iter(lambda: f.read(1 << 20), b""): |
| h.update(chunk) |
| return h.hexdigest() |
|
|
|
|
| # Sidecar cache for the pre-chunk hashing pass (see filter_already_ingested): |
| # maps str(resolved input path) -> {"mtime_ns", "size_bytes", "sha256"} so |
| # unchanged archives are not re-hashed on every startup. |
| _ARCHIVE_SHA_CACHE_BASENAME = "input_archive_shas.json" |
|
|
|
|
| def _archive_sha_cache_path(repo_root: Path) -> Path: |
| return repo_root / ".scratch" / _ARCHIVE_SHA_CACHE_BASENAME |
|
|
|
|
| def _load_archive_sha_cache(cache_path: Path) -> dict[str, dict]: |
| if not cache_path.exists(): |
| return {} |
| try: |
| data = json.loads(cache_path.read_text(encoding="utf-8")) |
| except (OSError, json.JSONDecodeError) as e: |
| _log(f" warning: ignoring unreadable sha cache at {cache_path}: {e}") |
| return {} |
| if not isinstance(data, dict): |
| return {} |
| return data |
|
|
|
|
| def _save_archive_sha_cache(cache_path: Path, cache: dict[str, dict]) -> None: |
| cache_path.parent.mkdir(parents=True, exist_ok=True) |
| tmp = cache_path.with_suffix(cache_path.suffix + f".tmp.{os.getpid()}") |
| try: |
| tmp.write_text(json.dumps(cache), encoding="utf-8") |
| os.replace(tmp, cache_path) |
| except OSError as e: |
| _log(f" warning: could not save sha cache to {cache_path}: {e}") |
| try: |
| tmp.unlink() |
| except OSError: |
| pass |
|
|
|
|
| def filter_already_ingested( |
| archives: list[Path], |
| known_shas: set[str], |
| cache_path: Path | None = None, |
| ) -> tuple[list[Path], list[Path], list[tuple[Path, Path]]]: |
| """Hash each archive and partition into (to_ingest, already_done, duplicates). |
| |
| The hashing pass also de-duplicates the *input* list by content SHA-256. |
| If ``osu_archives/`` accidentally contains two files whose bytes are |
| identical (e.g. ``1.osz`` plus ``copy_of_1.osz``), the duplicate would |
| otherwise produce two ``archive_revisions`` rows with the same primary |
| key in the same chunk. We keep the first occurrence (sorted glob order |
| is already deterministic) and report the rest as ``duplicates``. |
| |
| Always hashes — including when ``known_shas`` is empty, so duplicate |
| inputs in a fresh run are still caught. To opt out of the hashing cost |
| on small/trusted inputs, pass ``--no-skip-already-ingested`` to |
| ``ingest_osz.py``; that path skips this entire function. |
| |
| With ``cache_path`` set, an on-disk sidecar maps |
| ``str(path.resolve()) -> {mtime_ns, size_bytes, sha256}``. Inputs whose |
| (path, mtime_ns, size_bytes) match a cache entry skip the full hash; new |
| or modified files are hashed once and added to the cache. Saved every |
| 5,000 hashes and again at function exit so a Ctrl+C mid-pass keeps the |
| work done so far. The first full-corpus pass on HDD still takes ~50 min |
| (~12 archives/sec); subsequent passes finish in seconds. |
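| |
|     Illustrative cache entry (path and values invented for the example):: |
| |
|         {"D:/osz/123 artist - title.osz": |
|             {"mtime_ns": 1712345678901234567, |
|              "size_bytes": 20481234, |
|              "sha256": "3b0c44..."}} |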
| """ |
| cache: dict[str, dict] = ( |
| _load_archive_sha_cache(cache_path) if cache_path is not None else {} |
| ) |
| cache_dirty = False |
| cache_hits = 0 |
| save_every = 5000 |
| hashes_since_save = 0 |
|
|
| def _persist() -> None: |
| nonlocal cache_dirty, hashes_since_save |
| if cache_path is None or not cache_dirty: |
| return |
| _save_archive_sha_cache(cache_path, cache) |
| cache_dirty = False |
| hashes_since_save = 0 |
|
|
| to_ingest: list[Path] = [] |
| already_done: list[Path] = [] |
| duplicates: list[tuple[Path, Path]] = [] |
| seen_in_input: dict[str, Path] = {} |
| try: |
| for p in tqdm( |
| archives, |
| total=len(archives), |
| desc="hashing archives", |
| unit="archive", |
| file=sys.stderr, |
| mininterval=1.0, |
| dynamic_ncols=True, |
| ): |
| cache_key: str | None = None |
| mtime_ns: int | None = None |
| size_bytes: int | None = None |
| try: |
| stat = p.stat() |
| size_bytes = stat.st_size |
| mtime_ns = stat.st_mtime_ns |
| cache_key = str(p.resolve()) |
| except OSError: |
| |
| |
| pass |
| entry = cache.get(cache_key) if cache_key is not None else None |
| if ( |
| isinstance(entry, dict) |
| and entry.get("mtime_ns") == mtime_ns |
| and entry.get("size_bytes") == size_bytes |
| and isinstance(entry.get("sha256"), str) |
| ): |
| sha = entry["sha256"] |
| cache_hits += 1 |
| else: |
| sha = sha256_of_file(p) |
| if cache_key is not None: |
| cache[cache_key] = { |
| "mtime_ns": mtime_ns, |
| "size_bytes": size_bytes, |
| "sha256": sha, |
| } |
| cache_dirty = True |
| hashes_since_save += 1 |
| if hashes_since_save >= save_every: |
| _persist() |
| if sha in seen_in_input: |
| duplicates.append((p, seen_in_input[sha])) |
| elif sha in known_shas: |
| already_done.append(p) |
| seen_in_input[sha] = p |
| else: |
| to_ingest.append(p) |
| seen_in_input[sha] = p |
| finally: |
| _persist() |
| if cache_path is not None: |
| _log( |
| f" sha cache: {cache_hits:,}/{len(archives):,} hit(s); " |
| f"{len(archives) - cache_hits:,} fresh hash(es) " |
| f"({cache_path})" |
| ) |
| return to_ingest, already_done, duplicates |
|
|
|
|
| _TMP_PATTERN = re.compile(r"\.tmp\.\d+$") |
| _PART_BATCH_ID_PATTERN = re.compile(r"^part-(.+)\.parquet$") |
|
|
|
|
| def committed_batch_ids(archive_revisions_dir: Path) -> set[str]: |
| """Return the set of ``batch_id``s with a committed ``archive_revisions`` |
| parquet file on disk. |
| |
| Each AR file is named ``part-<batch_id>.parquet``; its existence is the |
| commit marker for that batch. Used by :func:`cleanup_orphan_parquets` |
| to identify all_revisions parquets from crashed chunks. |
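| |
|     For example (illustrative), ``part-20260101T120000Z-a1b2c3-chunk-0002.parquet`` |
|     yields batch_id ``20260101T120000Z-a1b2c3-chunk-0002``. |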
| """ |
| if not archive_revisions_dir.exists(): |
| return set() |
| out: set[str] = set() |
| bar = tqdm( |
| desc="scanning archive_revisions/", |
| unit="file", |
| file=sys.stderr, |
| mininterval=1.0, |
| dynamic_ncols=True, |
| ) |
| try: |
| for f in archive_revisions_dir.rglob("*.parquet"): |
| bar.update(1) |
| m = _PART_BATCH_ID_PATTERN.match(f.name) |
| if m: |
| out.add(m.group(1)) |
| finally: |
| bar.close() |
| return out |
|
|
|
|
| def committed_batch_ids_for_parent( |
| archive_revisions_dir: Path, |
| parent_batch_id: str, |
| ) -> list[str]: |
| """Return committed ingest_batch_id values matching a parent batch. |
| |
| This reads row data instead of relying on ``part-<batch>.parquet`` |
| filenames so the guard still works after metadata compaction rewrites old |
| commit-marker files into ``compact-*.parquet``. |
| """ |
|
|
| files = sorted(archive_revisions_dir.rglob("*.parquet")) if archive_revisions_dir.exists() else [] |
| if not files: |
| return [] |
| prefix = f"{parent_batch_id}-chunk-" |
| dataset = ds.dataset([str(p) for p in files], format="parquet") |
| if "ingest_batch_id" not in dataset.schema.names: |
| return [] |
| table = dataset.to_table(columns=["ingest_batch_id"]) |
| out = { |
| str(batch_id) |
| for batch_id in table["ingest_batch_id"].to_pylist() |
| if batch_id |
| and (str(batch_id) == parent_batch_id or str(batch_id).startswith(prefix)) |
| } |
| return sorted(out) |
|
|
|
|
| def cleanup_orphan_parquets( |
| all_revisions_root: Path, |
| committed: set[str], |
| ) -> dict[str, int]: |
| """Delete ``part-<batch_id>.parquet`` files in non-archive_revisions |
| tables whose ``batch_id`` is not in ``committed``. |
| |
| These are remnants of crashed chunks: their tables were written but |
| the chunk's ``archive_revisions`` row never made it to disk, so |
| skip-already-ingested will re-process the chunk and write fresh |
| parquets. Without cleanup the resume produces duplicate rows. |
| |
| AR itself is never touched. Returns a per-table count of deletions. |
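| |
|     Example (illustrative): if ``beatmaps/part-X-chunk-0007.parquet`` exists but |
|     ``archive_revisions/part-X-chunk-0007.parquet`` does not, the beatmaps file |
|     came from a crashed chunk and is deleted here. |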
| """ |
| out: dict[str, int] = {} |
| if not all_revisions_root.exists(): |
| return out |
| table_dirs = [ |
| d for d in all_revisions_root.iterdir() |
| if d.is_dir() and d.name != "archive_revisions" |
| ] |
| for table_dir in tqdm( |
| table_dirs, |
| total=len(table_dirs), |
| desc="scanning all_revisions tables", |
| unit="table", |
| file=sys.stderr, |
| mininterval=1.0, |
| dynamic_ncols=True, |
| ): |
| n = 0 |
| for parquet in table_dir.rglob("*.parquet"): |
| m = _PART_BATCH_ID_PATTERN.match(parquet.name) |
| if not m: |
| continue |
| if m.group(1) not in committed: |
| try: |
| parquet.unlink() |
| n += 1 |
| except OSError: |
| pass |
| if n: |
| out[table_dir.name] = n |
| return out |
|
|
|
|
| def cleanup_orphan_tmp_files( |
| repo_root: Path, |
| max_age_seconds: int, |
| extra_roots: list[tuple[str, Path]] | None = None, |
| ) -> dict[str, int]: |
| """Delete orphan ``*.tmp.<pid>`` files under archives/ and data/. |
| |
| Files are eligible when their mtime is older than ``max_age_seconds``; |
| very young tmp files might belong to a concurrent run, so we skip them. |
| Returns ``{subdir: count_deleted}``. |
| |
| ``extra_roots`` is an optional list of ``(label, absolute_path)`` pairs |
| to also walk, used when ``--archives-dir`` points outside ``repo_root``. |
| |
| Pass ``max_age_seconds=0`` to delete unconditionally (dangerous if any |
| other process is mid-write — only safe when we're the sole writer). |
| |
| Each subdir's rglob is wrapped in an indeterminate-total tqdm spinner |
| because we don't know the file count up-front and a full-corpus repo |
| can hold millions of entries — without the bar a user on HDD just sees |
| silence for 5-10 minutes. |
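| |
|     A matched name looks like ``part-<batch>.parquet.tmp.12345`` (illustrative); |
|     the trailing digits are the PID of the process that was writing it. |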
| """ |
| standard_subs = ("archives", "data") |
| out: dict[str, int] = {s: 0 for s in standard_subs} |
| extras = list(extra_roots or []) |
| for label, _ in extras: |
| out.setdefault(label, 0) |
|
|
|     # De-duplicate walk targets by resolved path so an --archives-dir that |
|     # lives inside repo_root is not scanned twice. |
| seen: set[Path] = set() |
| targets: list[tuple[str, Path]] = [] |
| for label, path in ( |
| [(s, repo_root / s) for s in standard_subs] + extras |
| ): |
| try: |
| key = Path(path).resolve() |
| except OSError: |
| key = Path(path) |
| if key in seen: |
| continue |
| seen.add(key) |
| targets.append((label, path)) |
|
|
| now = time.time() |
| for sub, root in targets: |
| if not root.exists(): |
| continue |
| bar = tqdm( |
| desc=f"scanning {sub}/", |
| unit="file", |
| file=sys.stderr, |
| mininterval=1.0, |
| dynamic_ncols=True, |
| ) |
| try: |
| for f in root.rglob("*"): |
| bar.update(1) |
| if not f.is_file(): |
| continue |
| if _TMP_PATTERN.search(f.name) is None: |
| continue |
| try: |
| age = now - f.stat().st_mtime |
| except OSError: |
| continue |
| if age < max_age_seconds: |
| continue |
| try: |
| f.unlink() |
| out[sub] += 1 |
| except OSError: |
| pass |
| finally: |
| bar.close() |
| return out |
|
|
|
|
| # Wall-clock cap for a single osu_indexer invocation (30 minutes). A chunk that |
| # exceeds it is treated as hung: subprocess.run raises TimeoutExpired, the chunk |
| # never commits, and the next resume retries it. |
| INDEXER_TIMEOUT_SECONDS = 1800 |
|
|
|
|
| def run_indexer( |
| indexer: Path, |
| archives: list[Path], |
| archives_dir: Path, |
| blobs_dir: Path, |
| out_file: Path, |
| batch_id: str, |
| schema_version: str, |
| skip_difficulty: bool, |
| quiet: bool = False, |
| workers: int | None = None, |
| timeout_seconds: int | None = INDEXER_TIMEOUT_SECONDS, |
| ) -> None: |
| """Invoke the Rust indexer; raise CalledProcessError on non-zero exit. |
| |
| The indexer streams a per-archive progress line to its own stderr, which |
| we let pass through unbuffered so the user sees real-time progress |
| during long chunks. Pass ``quiet=True`` to suppress (the indexer's |
| summary line still prints). |
| |
| ``workers`` overrides the indexer's parallelism; ``None`` lets the |
| indexer pick its own default (``min(4, available_parallelism)``). |
| Larger values keep input + CAS drives busy in parallel and parallelize |
| the per-beatmap rosu-pp pass across cores. |
| |
| A wall-time ``timeout_seconds`` (default ``INDEXER_TIMEOUT_SECONDS``) |
| bounds the call so a deadlocked subprocess cannot hang the orchestrator |
| forever; pass ``None`` to disable. On timeout, the call site sees |
| ``subprocess.TimeoutExpired`` propagate up; the chunk's |
| ``archive_revisions`` commit has not happened yet, so resume re-runs it. |
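| |
|     Illustrative assembled command (paths and counts invented):: |
| |
|         osu_indexer --archives-dir <repo>/archives --blobs-dir <repo>/blobs \ |
|             --out-file chunk-0000.ndjson --ingest-batch-id <batch>-chunk-0000 \ |
|             --schema-version v1 --no-write-blobs --workers 8 a.osz b.osz |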
| """ |
| cmd: list[str] = [ |
| str(indexer), |
| "--archives-dir", str(archives_dir), |
| "--blobs-dir", str(blobs_dir), |
| "--out-file", str(out_file), |
| "--ingest-batch-id", batch_id, |
| "--schema-version", schema_version, |
| ] |
| if skip_difficulty: |
| cmd.append("--skip-difficulty") |
| cmd.append("--no-write-blobs") |
| if quiet: |
| cmd.append("--quiet") |
| if workers is not None: |
| cmd.extend(["--workers", str(workers)]) |
| cmd.extend(str(a) for a in archives) |
| _log( |
| f" indexer: {len(archives)} archive(s); batch_id={batch_id}" |
| ) |
| try: |
| subprocess.run(cmd, check=True, timeout=timeout_seconds) |
| except subprocess.TimeoutExpired as e: |
| _log( |
| f" indexer TIMEOUT after {e.timeout}s on batch_id={batch_id} " |
| f"({len(archives)} archive(s)); chunk will not commit, " |
| f"resume the run to retry." |
| ) |
| raise |
|
|
|
|
| def _process_chunk( |
| chunk: list[Path], |
| chunk_batch_id: str, |
| args: argparse.Namespace, |
| indexer_path: Path, |
| schemas: dict, |
| repo_root: Path, |
| archives_dir: Path, |
| blobs_dir: Path, |
| all_revisions_root: Path, |
| keep_ndjson: Path | None, |
| ) -> dict: |
| """Run the full per-chunk pipeline. Returns counts/paths/timings for reporting. |
| |
| Each step is timed separately (``*_seconds`` keys in the returned dict) |
| so a long-running chunk's bottleneck is visible without re-running. |
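| |
|     Illustrative return value (numbers invented; exact keys depend on flags):: |
| |
|         {"chunk_batch_id": "...-chunk-0003", "n_archives": 1000, |
|          "indexer_seconds": 812.4, "group_seconds": 41.0, |
|          "parquet_seconds": 95.2, "ar_commit_seconds": 1.3, |
|          "n_rows": 1534201, "written_files": 12, |
|          "archive_revisions_committed": 1, "wall_seconds": 950.6} |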
| """ |
|
|
|     # Decide where the indexer's NDJSON output lands: a caller-supplied path |
|     # kept for inspection, or a scratch temp file removed in the finally below. |
| if keep_ndjson is not None: |
| ndjson_path = keep_ndjson |
| ndjson_path.parent.mkdir(parents=True, exist_ok=True) |
| cleanup_ndjson = False |
| else: |
| scratch_dir = repo_root / ".scratch" |
| scratch_dir.mkdir(parents=True, exist_ok=True) |
| tmp = tempfile.NamedTemporaryFile( |
| prefix=f"ingest-{chunk_batch_id}-", |
| suffix=".ndjson", |
| dir=str(scratch_dir), |
| delete=False, |
| ) |
| tmp.close() |
| ndjson_path = Path(tmp.name) |
| cleanup_ndjson = True |
|
|
| chunk_start = time.perf_counter() |
| summary: dict = {"chunk_batch_id": chunk_batch_id, "n_archives": len(chunk)} |
|
|
| try: |
| t0 = time.perf_counter() |
| run_indexer( |
| indexer=indexer_path, |
| archives=chunk, |
| archives_dir=archives_dir, |
| blobs_dir=blobs_dir, |
| out_file=ndjson_path, |
| batch_id=chunk_batch_id, |
| schema_version=args.schema_version, |
| skip_difficulty=args.skip_difficulty, |
| quiet=args.quiet_indexer, |
| workers=args.indexer_workers, |
| ) |
| summary["indexer_seconds"] = time.perf_counter() - t0 |
|
|
| try: |
| summary["ndjson_bytes"] = ndjson_path.stat().st_size |
| except OSError: |
| summary["ndjson_bytes"] = 0 |
|
|
| t0 = time.perf_counter() |
| rows_by_table = group_rows_by_table(ndjson_path) |
| summary["group_seconds"] = time.perf_counter() - t0 |
| n_rows = sum(len(rs) for rs in rows_by_table.values()) |
| summary["n_rows"] = n_rows |
| summary["archive_revisions_emitted"] = len(rows_by_table.get("archive_revisions", [])) |
| summary["set_revisions"] = len(rows_by_table.get("set_revisions", [])) |
| summary["beatmaps"] = len(rows_by_table.get("beatmaps", [])) |
|
|
|         # Guard against silent data loss: an indexer that exits 0 without |
|         # emitting archive_revisions rows must not commit, otherwise the chunk |
|         # would look ingested while containing nothing. |
| if rows_by_table.get("archive_revisions") is None or not rows_by_table.get("archive_revisions"): |
| raise RuntimeError( |
| f"indexer exited 0 but produced no archive_revisions rows for " |
| f"chunk {chunk_batch_id} ({len(chunk)} input archive(s)); " |
| f"refusing to commit an empty chunk. NDJSON at {ndjson_path}" |
| ) |
|
|
| if args.dry_run: |
| print(" dry-run: skipping Parquet writes", file=sys.stderr) |
| summary["wall_seconds"] = time.perf_counter() - chunk_start |
| return summary |
|
|
|         # Write every table except archive_revisions first; AR is deferred so |
|         # that its presence can serve as the chunk's commit marker. |
| if args.write_all_revisions: |
| t0 = time.perf_counter() |
| written = write_all_revisions_tables( |
| rows_by_table, |
| schemas, |
| all_revisions_root, |
| chunk_batch_id, |
| defer_tables=("archive_revisions",), |
| physical_partitioning=args.physical_partitioning, |
| ) |
| summary["parquet_seconds"] = time.perf_counter() - t0 |
| summary["written_files"] = sum(len(ps) for ps in written.values()) |
|
|
|         # Commit archive_revisions last. Once its parquet exists the chunk is |
|         # durable and will be skipped by --skip-already-ingested on resume. |
| if args.write_all_revisions: |
| t0 = time.perf_counter() |
| ar_paths = commit_archive_revisions( |
| rows_by_table, |
| schemas, |
| all_revisions_root, |
| chunk_batch_id, |
| physical_partitioning=args.physical_partitioning, |
| ) |
| summary["ar_commit_seconds"] = time.perf_counter() - t0 |
| summary["archive_revisions_committed"] = len(ar_paths) |
|
|
| finally: |
| if cleanup_ndjson: |
| try: |
| ndjson_path.unlink() |
| except OSError: |
| pass |
|
|
| summary["wall_seconds"] = time.perf_counter() - chunk_start |
| return summary |
|
|
|
|
| def main(argv: list[str] | None = None) -> int: |
| args = parse_args(argv) |
|
|
| repo_root = Path(args.repo_root).resolve() |
| if args.schemas_dir is not None: |
| schemas_dir = Path(args.schemas_dir).resolve() |
| else: |
| schemas_dir = repo_root / "schemas" / args.schema_version |
| if not schemas_dir.exists(): |
| print( |
| f"error: {schemas_dir} not found - run from repo root or pass --repo-root", |
| file=sys.stderr, |
| ) |
| return 2 |
|
|
| indexer_path = _resolve_indexer_path(Path(args.rosu_indexer), repo_root) |
| if indexer_path is None: |
| print( |
| f"error: indexer binary not found at {args.rosu_indexer} " |
| f"(searched cwd and {repo_root}) - build it with " |
| f"`cargo build --release -p osu_indexer`", |
| file=sys.stderr, |
| ) |
| return 2 |
|
|
| archives = collect_archives(args.archives) |
| if args.limit: |
| archives = archives[: args.limit] |
| if not archives: |
| print("error: no .osz archives to ingest", file=sys.stderr) |
| return 2 |
|
|
| parent_batch_id = args.ingest_batch_id or _default_batch_id() |
| archives_dir = ( |
| Path(args.archives_dir).resolve() |
| if args.archives_dir |
| else repo_root / "archives" |
| ) |
| blobs_dir = repo_root / "blobs" |
| all_revisions_root = repo_root / "data" / args.schema_version / "all_revisions" |
|
|
| if archives_dir != repo_root / "archives": |
| _log(f"archives dir: {archives_dir}") |
|
|
|     # Everything long-running runs under the single-writer repo lock and, on |
|     # Windows, the keep-awake guard. |
| try: |
| with repo_lock(repo_root, force=args.force_lock) as had_stale_lock: |
| with keep_awake(enabled=args.keep_awake): |
| return _run_main( |
| args=args, |
| repo_root=repo_root, |
| archives=archives, |
| parent_batch_id=parent_batch_id, |
| archives_dir=archives_dir, |
| blobs_dir=blobs_dir, |
| all_revisions_root=all_revisions_root, |
| schemas_dir=schemas_dir, |
| indexer_path=indexer_path, |
| had_stale_lock=had_stale_lock, |
| ) |
| except KeyboardInterrupt: |
|         # The context managers above have already unwound (lock released, sleep |
|         # policy restored); the in-flight chunk never wrote its commit marker. |
| _log( |
| "\ningest_osz: ctrl+c received; in-flight chunk did not commit, " |
| "resume the run to retry it." |
| ) |
| return 130 |
|
|
|
|
| def _run_main( |
| args: argparse.Namespace, |
| repo_root: Path, |
| archives: list[Path], |
| parent_batch_id: str, |
| archives_dir: Path, |
| blobs_dir: Path, |
| all_revisions_root: Path, |
| schemas_dir: Path, |
| indexer_path: Path, |
| had_stale_lock: bool, |
| ) -> int: |
| """Body of ``main`` extracted so the wake-lock context manager wraps every |
| long-running step (GC, hashing pass, chunked ingest) without nesting |
| half the function under an extra indent.""" |
|
|
| schemas = load_schemas(schemas_dir) |
| _log(f"loaded {len(schemas)} table schema(s) from {schemas_dir}") |
|
|
|     # Orphan tmp-file GC is gated on evidence of an unclean shutdown: a stale |
|     # .ingest.lock at startup means the previous run was hard-killed mid-write. |
|     # Clean exits (including Ctrl+C) leave no *.tmp.<pid> files behind because |
|     # every write is tmp+rename atomic, so the potentially slow full-tree walk |
|     # is skipped unless --always-gc-tmp-files forces it. |
| if args.gc_tmp_files and (args.always_gc_tmp_files or had_stale_lock): |
| reason = ( |
| "forced by --always-gc-tmp-files" |
| if args.always_gc_tmp_files |
| else "previous run left a stale lock" |
| ) |
| _log( |
| f"scanning for orphan *.tmp.<pid> files older than " |
| f"{args.gc_tmp_files_age_min} min ({reason})" |
| ) |
|         # Also walk a custom --archives-dir when it points outside repo_root. |
| extra_roots = [] |
| if archives_dir != repo_root / "archives": |
| extra_roots.append((f"archives ({archives_dir})", archives_dir)) |
| gc_summary = cleanup_orphan_tmp_files( |
| repo_root, |
| max_age_seconds=args.gc_tmp_files_age_min * 60, |
| extra_roots=extra_roots, |
| ) |
| gc_total = sum(gc_summary.values()) |
| _log(f" removed {gc_total} orphan tmp file(s) ({gc_summary})") |
| elif args.gc_tmp_files: |
| _log( |
| "skipping orphan tmp file scan " |
| "(previous run shut down cleanly; pass --always-gc-tmp-files " |
| "to force a paranoid walk)" |
| ) |
|
|
| ar_dir = all_revisions_root / "archive_revisions" |
|
|
|     # Orphan-parquet cleanup is likewise gated on an unclean previous shutdown, |
|     # unless --always-crash-recovery forces it. |
| should_run_recovery = had_stale_lock or args.always_crash_recovery |
| if should_run_recovery: |
| recovery_reason = ( |
| "forced by --always-crash-recovery" |
| if args.always_crash_recovery |
| else "previous run left a stale lock" |
| ) |
| _log(f"running crash-recovery suite ({recovery_reason})") |
|
|
| if args.cleanup_orphan_parquets: |
| _log("scanning all_revisions/ for orphan parquet files") |
| committed = committed_batch_ids(ar_dir) |
| orphan_summary = cleanup_orphan_parquets( |
| all_revisions_root, committed |
| ) |
| orphan_total = sum(orphan_summary.values()) |
| _log( |
| f" removed {orphan_total} orphan parquet(s) " |
| f"({len(orphan_summary)} table(s); " |
| f"{len(committed)} committed batch_id(s))" |
| ) |
| else: |
| _log( |
| "skipping crash-recovery suite " |
| "(previous run shut down cleanly; pass --always-crash-recovery " |
| "to force orphan-parquet cleanup)" |
| ) |
|
|
|     # Pre-chunk hashing pass: drop inputs whose content is already committed |
|     # and de-duplicate byte-identical input files. |
| if args.skip_already_ingested: |
| known = already_ingested_shas(ar_dir) |
| if known: |
| _log( |
| f"hashing {len(archives):,} input(s) against " |
| f"{len(known):,} known archive_sha256(s)" |
| ) |
| else: |
| _log( |
| f"hashing {len(archives):,} input(s) (dedup-only; " |
| f"no committed archives yet)" |
| ) |
| cache_path = ( |
| _archive_sha_cache_path(repo_root) if args.archive_sha_cache else None |
| ) |
| archives, already_done, duplicates = filter_already_ingested( |
| archives, known, cache_path=cache_path |
| ) |
| if already_done: |
| _log(f" skipped {len(already_done):,} already-ingested archive(s)") |
| if duplicates: |
| _log( |
| f" warning: dropped {len(duplicates):,} duplicate input archive(s) " |
| f"(same content SHA-256 as another input). First few:" |
| ) |
| for dup, kept in duplicates[:5]: |
| try: |
| dup_rel = dup.relative_to(repo_root) |
| except ValueError: |
| dup_rel = dup |
| try: |
| kept_rel = kept.relative_to(repo_root) |
| except ValueError: |
| kept_rel = kept |
| _log(f" {dup_rel} == {kept_rel}") |
| if len(duplicates) > 5: |
| _log(f" ... and {len(duplicates) - 5} more") |
| if not archives: |
| _log("all input archives are already ingested; nothing to do") |
| return 0 |
|
|
| if not args.dry_run: |
| existing_batches = committed_batch_ids_for_parent(ar_dir, parent_batch_id) |
| if existing_batches: |
| preview = ", ".join(existing_batches[:5]) |
| if len(existing_batches) > 5: |
| preview += f", ... ({len(existing_batches)} total)" |
| raise RuntimeError( |
| f"ingest_batch_id {parent_batch_id!r} already has committed " |
| f"archive_revisions parquet(s): {preview}. Refusing to ingest " |
| "new archives with a reused batch id because it would overwrite " |
| "existing part-<batch>.parquet files. Choose a fresh BATCH_ID." |
| ) |
|
|
| chunk_size = max(1, args.chunk_size) |
| chunks = [archives[i : i + chunk_size] for i in range(0, len(archives), chunk_size)] |
| multi_chunk = len(chunks) > 1 |
|
|
| _log( |
| f"ingest_osz: {len(archives)} archive(s); repo={repo_root}; " |
| f"parent_batch={parent_batch_id}; " |
| f"{len(chunks)} chunk(s) of up to {chunk_size}; dry_run={args.dry_run}" |
| ) |
|
|
| overall_start = time.perf_counter() |
| cumulative_archives = 0 |
| total_archives = len(archives) |
| chunk_summaries: list[dict] = [] |
| for chunk_idx, chunk in enumerate(chunks): |
| if multi_chunk: |
| chunk_batch_id = f"{parent_batch_id}-chunk-{chunk_idx:04d}" |
| else: |
| chunk_batch_id = parent_batch_id |
|
|
|         # In chunked mode each chunk gets its own NDJSON path (see --keep-ndjson help). |
| if args.keep_ndjson is not None: |
| if multi_chunk: |
| keep_ndjson = args.keep_ndjson.with_suffix( |
| f".chunk-{chunk_idx:04d}{args.keep_ndjson.suffix}" |
| ) |
| else: |
| keep_ndjson = args.keep_ndjson |
| else: |
| keep_ndjson = None |
|
|
| _log( |
| f"\n[{chunk_idx + 1}/{len(chunks)}] chunk_batch_id={chunk_batch_id} " |
| f"({len(chunk)} archive(s))" |
| ) |
|
|
| summary = _process_chunk( |
| chunk=chunk, |
| chunk_batch_id=chunk_batch_id, |
| args=args, |
| indexer_path=indexer_path, |
| schemas=schemas, |
| repo_root=repo_root, |
| archives_dir=archives_dir, |
| blobs_dir=blobs_dir, |
| all_revisions_root=all_revisions_root, |
| keep_ndjson=keep_ndjson, |
| ) |
| chunk_summaries.append(summary) |
| cumulative_archives += len(chunk) |
|
|
| if not args.dry_run: |
| elapsed = time.perf_counter() - overall_start |
| rate = cumulative_archives / elapsed if elapsed > 0 else 0.0 |
| remaining = max(total_archives - cumulative_archives, 0) |
| eta_seconds = remaining / rate if rate > 0 else 0.0 |
| wall = summary.get("wall_seconds", 0.0) |
| ndjson_b = summary.get("ndjson_bytes", 0) |
| pct = 100.0 * cumulative_archives / total_archives if total_archives else 100.0 |
| _log( |
| f" chunk {chunk_idx + 1}/{len(chunks)} done in " |
| f"{_format_duration(wall)} | " |
| f"{cumulative_archives:,}/{total_archives:,} ({pct:.1f}%) | " |
| f"{rate:.2f} ar/s | ETA {_format_duration(eta_seconds)}" |
| ) |
| _log( |
| f" timings: indexer {summary.get('indexer_seconds', 0):.1f}s | " |
| f"group {summary.get('group_seconds', 0):.1f}s | " |
| f"parquet {summary.get('parquet_seconds', 0):.1f}s | " |
| f"ar-commit {summary.get('ar_commit_seconds', 0):.1f}s" |
| ) |
| _log( |
| f" output: {summary.get('written_files', 0)} all_rev parquet(s), " |
| f"AR={summary.get('archive_revisions_committed', 0)}, " |
| f"NDJSON={_format_size(ndjson_b)} " |
| f"({summary.get('n_rows', 0):,} rows)" |
| ) |
|
|
| overall_elapsed = time.perf_counter() - overall_start |
| if cumulative_archives: |
| overall_rate = cumulative_archives / overall_elapsed if overall_elapsed > 0 else 0.0 |
| _log( |
| f"\ningest_osz: done. {len(chunks)} chunk(s), " |
| f"{cumulative_archives:,} archive(s) in " |
| f"{_format_duration(overall_elapsed)} " |
| f"({overall_rate:.2f} ar/s)." |
| ) |
| else: |
| _log(f"\ningest_osz: done. {len(chunks)} chunk(s) processed.") |
| return 0 |
|
|
|
|
| if __name__ == "__main__": |
| raise SystemExit(main()) |
|
|