| """Schema-driven Parquet writer for compact v1 metadata tables. |
| |
| Used by ``python/ingest_osz.py``. Consumes the NDJSON stream produced by |
| ``crates/osu_indexer/`` (one row per line, tagged with ``_table``). |
| |
| Key contracts: |
| - Compact all-revisions writes use one Parquet file per table per chunk. |
| - Rows are sorted within each output group by the schema's ``sort_keys``. |
| - Every parquet is written with |
| ``compression='zstd', use_dictionary=True, |
| use_content_defined_chunking=True, write_page_index=True`` |
| for Xet-friendly delta uploads. |
| """ |
|
|
| from __future__ import annotations |
|
|
| import concurrent.futures |
| import json |
| import os |
| import sys |
| from collections import defaultdict |
| from dataclasses import dataclass |
| from pathlib import Path |
| from typing import Any, Callable, Iterator |
|
|
| import pyarrow as pa |
| import pyarrow.parquet as pq |
|
|
| from tqdm.auto import tqdm |
|
|
| try: |
| import orjson |
| except ImportError: |
| orjson = None |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| _BAR_MININTERVAL = 1.0 |
|
|
|
|
| def _log(msg: str) -> None: |
| """Print ``msg`` to stderr with ``flush=True``. |
| |
    If stderr ends up block-buffered (it can when redirected to a file or
    pipe, depending on the Python version), a one-off "doing X" line for a
    long-running step stops appearing in real time. Forcing flush per call
    keeps the log live without touching the stream's buffering policy.
    ``tqdm`` handles its own flushing.
| """ |
| print(msg, file=sys.stderr, flush=True) |
|
|
|
|
| def _loads_ndjson_row(raw_line: bytes | str) -> dict[str, Any]: |
| if orjson is not None: |
| return orjson.loads(raw_line) |
| if isinstance(raw_line, bytes): |
| raw_line = raw_line.decode("utf-8") |
| return json.loads(raw_line) |
|
|
|
|
| def _tqdm(iterable=None, *, total=None, desc=None, unit="it", **kwargs): |
| """Project-wide tqdm wrapper with sensible defaults. |
| |
| ``mininterval=1.0`` keeps the bar feeling live in a TTY without flooding |
| a tee'd log file (one redraw per second instead of ten). ``leave=True`` |
| preserves the final state in captured logs. |
| """ |
| return tqdm( |
| iterable, |
| total=total, |
| desc=desc, |
| unit=unit, |
| file=sys.stderr, |
| mininterval=_BAR_MININTERVAL, |
| dynamic_ncols=True, |
| leave=True, |
| **kwargs, |
| ) |
|
|
|
|
| |
| |
| |
|
|
|
|
| @dataclass(frozen=True) |
| class TableSchema: |
| name: str |
| columns: tuple[dict[str, Any], ...] |
| primary_key: tuple[str, ...] |
| partition_keys: tuple[str, ...] |
| sort_keys: tuple[str, ...] |
| arrow_schema: pa.Schema |
|
|
| def file_schema(self) -> pa.Schema: |
| """Schema for the on-disk parquet. |
| |
| Partition columns stay in the file body so HF ``load_dataset`` exposes |
| them even when it reads files from explicit ``data_files`` globs instead |
| of reconstructing Hive path partitions. |
| """ |
| return self.arrow_schema |
|
|
|
|
| def parse_arrow_type(spec: str) -> pa.DataType: |
| s = spec.strip() |
| if s.startswith("list<") and s.endswith(">"): |
| return pa.list_(parse_arrow_type(s[len("list<") : -1])) |
| primitives = { |
| "string": pa.string(), |
| "int8": pa.int8(), |
| "int16": pa.int16(), |
| "int32": pa.int32(), |
| "int64": pa.int64(), |
| "float32": pa.float32(), |
| "float64": pa.float64(), |
| "bool": pa.bool_(), |
| "timestamp[ms]": pa.timestamp("ms"), |
| } |
| if s in primitives: |
| return primitives[s] |
| raise ValueError(f"unknown type spec: {spec!r}") |
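
# Illustrative spec strings (not taken from any schema file): the grammar is
# the primitive names above plus arbitrarily nested ``list<...>``.
#
#     parse_arrow_type("int64")              == pa.int64()
#     parse_arrow_type("list<float32>")      == pa.list_(pa.float32())
#     parse_arrow_type("list<list<string>>") == pa.list_(pa.list_(pa.string()))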
|
|
|
|
| PARTITION_FIELD_TYPES: dict[str, pa.DataType] = { |
| "ruleset": pa.string(), |
| "key_count": pa.int32(), |
| } |
|
|
|
|
| def load_schemas(schemas_dir: Path) -> dict[str, TableSchema]: |
| out: dict[str, TableSchema] = {} |
| for path in sorted(schemas_dir.glob("*.schema.json")): |
| with path.open(encoding="utf-8") as f: |
| doc = json.load(f) |
| for tname, tspec in doc.get("tables", {}).items(): |
| cols = tspec["columns"] |
| column_names = {c["name"] for c in cols} |
| fields = [ |
| pa.field( |
| c["name"], |
| parse_arrow_type(c["type"]), |
| nullable=bool(c.get("nullable", True)), |
| ) |
| for c in cols |
| ] |
| for key in tspec.get("partition_keys", []): |
| if key in column_names: |
| continue |
| field_type = PARTITION_FIELD_TYPES.get(key) |
| if field_type is None: |
| raise ValueError( |
| f"partition key {key!r} in table {tname!r} has no column " |
| "definition and no registered Arrow type" |
| ) |
| fields.append(pa.field(key, field_type, nullable=False)) |
| out[tname] = TableSchema( |
| name=tname, |
| columns=tuple(cols), |
| primary_key=tuple(tspec.get("primary_key", [])), |
| partition_keys=tuple(tspec.get("partition_keys", [])), |
| sort_keys=tuple(tspec.get("sort_keys", [])), |
| arrow_schema=pa.schema(fields), |
| ) |
| return out |
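
# Minimal sketch of the ``*.schema.json`` shape ``load_schemas`` expects; the
# table and column names below are hypothetical, only the keys matter:
#
#     {
#       "tables": {
#         "example_table": {
#           "columns": [
#             {"name": "set_revision_id", "type": "string", "nullable": false},
#             {"name": "time_ms", "type": "int64"}
#           ],
#           "primary_key": ["set_revision_id", "time_ms"],
#           "partition_keys": ["ruleset"],
#           "sort_keys": ["set_revision_id", "time_ms"]
#         }
#       }
#     }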
|
|
|
|
| |
| |
| |
| |
|
|
|
|
| def iter_ndjson(path: Path) -> Iterator[tuple[str, dict[str, Any]]]: |
| """Stream ``(table_name, row_dict)`` pairs; the ``_table`` key is removed.""" |
| with path.open("rb") as f: |
| for line in f: |
| line = line.strip() |
| if not line: |
| continue |
| row = _loads_ndjson_row(line) |
| table = row.pop("_table", None) |
| if table is None: |
| raise ValueError(f"row missing _table: {row!r}") |
| yield table, row |
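
# Illustrative input line (field names besides ``_table`` are examples only):
#
#     {"_table": "beatmaps", "set_revision_id": "...", "beatmap_uid": "..."}
#
# yields ("beatmaps", {"set_revision_id": "...", "beatmap_uid": "..."}).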
|
|
|
|
| def _parse_ndjson_chunk(raw_chunk: bytes) -> tuple[dict[str, list[dict[str, Any]]], int]: |
| """Parse one complete-line NDJSON byte chunk. |
| |
| Kept at module top level so ``ProcessPoolExecutor`` can pickle it. |
| """ |
| by_table: dict[str, list[dict[str, Any]]] = defaultdict(list) |
| rows = 0 |
| for raw_line in raw_chunk.splitlines(): |
| line = raw_line.strip() |
| if not line: |
| continue |
| row = _loads_ndjson_row(line) |
| table = row.pop("_table", None) |
| if table is None: |
| raise ValueError(f"row missing _table: {row!r}") |
| by_table[table].append(row) |
| rows += 1 |
| return dict(by_table), rows |
|
|
|
|
| def _iter_complete_ndjson_chunks(path: Path, chunk_bytes: int) -> Iterator[bytes]: |
| """Yield byte chunks that end on NDJSON line boundaries.""" |
| carry = b"" |
| with path.open("rb") as f: |
| while True: |
| block = f.read(chunk_bytes) |
| if not block: |
| break |
| data = carry + block |
| split_at = data.rfind(b"\n") |
| if split_at < 0: |
| carry = data |
| continue |
| yield data[: split_at + 1] |
| carry = data[split_at + 1 :] |
| if carry: |
| yield carry |
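
# Worked example with chunk_bytes=4 over the bytes b"a\nbb\nc": the reads are
# b"a\nbb" and b"\nc", the yields are b"a\n", then b"bb\n", then the trailing
# carry b"c" (a file without a final newline still flushes its last line).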
|
|
|
|
| def _merge_grouped_rows( |
| target: dict[str, list[dict[str, Any]]], |
| grouped: dict[str, list[dict[str, Any]]], |
| ) -> None: |
| for table, rows in grouped.items(): |
| target[table].extend(rows) |
|
|
|
|
| def group_rows_by_table(ndjson_path: Path) -> dict[str, list[dict[str, Any]]]: |
| """Parse the indexer's NDJSON output into ``{table_name: [row_dict, ...]}``. |
| |
    Renders a tqdm bar against the file's byte size: the NDJSON for a
    chunk_size=1000 batch is typically 2-5 GB and can take tens of seconds
    to parse on slow disks, so a known-total bar gives an accurate ETA. The
    file is read in binary mode as complete-line chunks, so byte progress
    comes for free from each chunk's length instead of ``f.tell()``.
| """ |
| try: |
| total_bytes = ndjson_path.stat().st_size |
| except OSError: |
| total_bytes = 0 |
|
|
| |
| |
| |
| |
| worker_default = 1 |
| workers = max(1, int(os.environ.get("OSU_NDJSON_PARSE_WORKERS", worker_default))) |
| chunk_mb = max(1, int(os.environ.get("OSU_NDJSON_PARSE_CHUNK_MB", "8"))) |
| chunk_bytes = chunk_mb << 20 |
| min_parallel_bytes = int(os.environ.get("OSU_NDJSON_PARSE_MIN_MB", "64")) << 20 |
| backend = os.environ.get("OSU_NDJSON_PARSE_BACKEND", "process").strip().lower() |
| use_parallel = workers > 1 and total_bytes >= min_parallel_bytes |
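
    # Illustrative sizing (not a recommendation): OSU_NDJSON_PARSE_WORKERS=4
    # against a 3 GiB file clears the 64 MiB parallel floor, and the default
    # 8 MiB chunks mean roughly 3 * 1024 / 8 = 384 parse tasks for the pool.
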
| desc = f"parsing {ndjson_path.name}" |
| if use_parallel: |
| desc += f" ({workers} {backend} workers)" |
|
|
| bar = _tqdm( |
| total=total_bytes or None, |
| desc=desc, |
| unit="B", |
| unit_scale=True, |
| unit_divisor=1024, |
| ) |
| by_table: dict[str, list[dict[str, Any]]] = defaultdict(list) |
| rows = 0 |
| try: |
| if not use_parallel: |
| for chunk in _iter_complete_ndjson_chunks(ndjson_path, chunk_bytes): |
| grouped, chunk_rows = _parse_ndjson_chunk(chunk) |
| _merge_grouped_rows(by_table, grouped) |
| rows += chunk_rows |
| bar.update(len(chunk)) |
| else: |
| if backend == "thread": |
| executor_cls = concurrent.futures.ThreadPoolExecutor |
| elif backend == "process": |
| executor_cls = concurrent.futures.ProcessPoolExecutor |
| else: |
| raise ValueError( |
| "OSU_NDJSON_PARSE_BACKEND must be 'process' or 'thread', " |
| f"got {backend!r}" |
| ) |
| chunk_iter = iter(_iter_complete_ndjson_chunks(ndjson_path, chunk_bytes)) |
| pending: list[tuple[concurrent.futures.Future, int]] = [] |
|
|
| def submit_next(executor) -> bool: |
| try: |
| chunk = next(chunk_iter) |
| except StopIteration: |
| return False |
| pending.append((executor.submit(_parse_ndjson_chunk, chunk), len(chunk))) |
| return True |
|
|
| with executor_cls(max_workers=workers) as executor: |
| for _ in range(workers * 2): |
| if not submit_next(executor): |
| break |
| while pending: |
| future, nbytes = pending.pop(0) |
| grouped, chunk_rows = future.result() |
| _merge_grouped_rows(by_table, grouped) |
| rows += chunk_rows |
| bar.update(nbytes) |
| submit_next(executor) |
| finally: |
| bar.set_postfix_str(f"{rows:,} rows -> {len(by_table)} table(s)") |
| bar.close() |
| return dict(by_table) |
|
|
|
|
| |
| |
| |
|
|
|
|
| |
| |
| |
| |
| |
| _OSU_DIFFICULTY_HOISTS: tuple[tuple[str, str, str], ...] = ( |
| ("aim", "aim_difficulty", "f64"), |
| ("speed", "speed_difficulty", "f64"), |
| ("flashlight", "flashlight_difficulty", "f64"), |
| ("slider_factor", "slider_factor", "f64"), |
| ("speed_note_count", "speed_note_count", "f64"), |
| ("great_hit_window", "great_hit_window", "f64"), |
| ("ok_hit_window", "ok_hit_window", "f64"), |
| ("meh_hit_window", "meh_hit_window", "f64"), |
| ("ar", "ar", "f64"), |
| ("hp", "hp", "f64"), |
| ("n_circles", "n_circles", "i32"), |
| ("n_sliders", "n_sliders", "i32"), |
| ("n_spinners", "n_spinners", "i32"), |
| ) |
| _TAIKO_DIFFICULTY_HOISTS: tuple[tuple[str, str, str], ...] = ( |
| ("stamina", "stamina", "f64"), |
| ("rhythm", "rhythm", "f64"), |
| ("color", "color", "f64"), |
| ("reading", "reading", "f64"), |
| ("mono_stamina_factor", "mono_stamina_factor", "f64"), |
| ("great_hit_window", "great_hit_window", "f64"), |
| ("ok_hit_window", "ok_hit_window", "f64"), |
| ("is_convert", "is_convert", "bool"), |
| ) |
| _CATCH_DIFFICULTY_HOISTS: tuple[tuple[str, str, str], ...] = ( |
| ("n_fruits", "n_fruits", "i32"), |
| ("n_droplets", "n_droplets", "i32"), |
| ("n_tiny_droplets", "n_tiny_droplets", "i32"), |
| ("is_convert", "is_convert", "bool"), |
| ) |
| |
| |
| |
| |
| _MANIA_DIFFICULTY_HOISTS: tuple[tuple[str, str, str], ...] = ( |
| ("n_objects", "n_objects", "i32"), |
| ("n_hold_notes", "n_hold_notes", "i32"), |
| ("is_convert", "is_convert", "bool"), |
| ) |
|
|
|
|
| def _coerce_hoist(value: Any, kind: str) -> Any: |
| if value is None: |
| return None |
| if kind == "f64": |
| try: |
| return float(value) |
| except (TypeError, ValueError): |
| return None |
| if kind == "i32": |
| try: |
| return int(value) |
| except (TypeError, ValueError): |
| return None |
| if kind == "bool": |
| return bool(value) |
| return value |
|
|
|
|
| def hoist_difficulty_attributes( |
| difficulty_rows: list[dict[str, Any]], |
| ) -> list[dict[str, Any]]: |
| """Denormalize key ``attributes_json`` fields into typed top-level columns. |
| |
| Modifies and returns ``difficulty_rows``. Rows with |
| ``calculation_status='failed'`` keep nulls in every hoisted column. The |
| JSON blob is preserved in place so consumers can still recover any field |
| we don't denormalize. Idempotent: running twice on the same rows is a |
| no-op. |
| """ |
| if not difficulty_rows: |
| return difficulty_rows |
|
|
| for row in difficulty_rows: |
| attrs_text = row.get("attributes_json") or "" |
| try: |
| attrs = json.loads(attrs_text) if attrs_text else {} |
| except (TypeError, json.JSONDecodeError): |
| attrs = {} |
| ruleset = (attrs.get("ruleset") or row.get("ruleset") or "").lower() |
| if ruleset == "osu": |
| hoists = _OSU_DIFFICULTY_HOISTS |
| elif ruleset == "taiko": |
| hoists = _TAIKO_DIFFICULTY_HOISTS |
| elif ruleset == "catch": |
| hoists = _CATCH_DIFFICULTY_HOISTS |
| elif ruleset == "mania": |
| hoists = _MANIA_DIFFICULTY_HOISTS |
| else: |
| hoists = () |
| for json_key, column_name, kind in hoists: |
| if column_name not in row or row[column_name] is None: |
| row[column_name] = _coerce_hoist(attrs.get(json_key), kind) |
| return difficulty_rows |
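
# Illustrative row (numbers made up; the JSON keys mirror the hoist tables
# above):
#
#     {"ruleset": "osu", "attributes_json": '{"ruleset": "osu", "aim": 2.31}'}
#
# gains ``aim_difficulty = 2.31`` while ``attributes_json`` stays intact; a
# failed calculation with an empty blob leaves every hoisted column null.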
|
|
|
|
| def _mania_key_count_from_cs(cs: Any) -> int | None: |
| """Mirror of the Rust indexer's ``mania_key_count`` (round + clamp 1..18).""" |
| if cs is None: |
| return None |
| try: |
| cs_f = float(cs) |
| except (TypeError, ValueError): |
| return None |
| rounded = int(round(cs_f)) |
| if rounded < 1: |
| return 1 |
| if rounded > 18: |
| return 18 |
| return rounded |
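
# Examples: cs=4.0 -> 4, cs=7.2 -> 7, cs=0.4 -> 1 (clamped), cs=25 -> 18
# (clamped), and a non-numeric circle size yields None.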
|
|
|
|
| def _beatmap_revision_key(row: dict[str, Any]) -> tuple[str, str] | None: |
| """Composite key for joining beatmap-scoped rows across tables. |
| |
| Returns ``(set_revision_id, beatmap_uid)`` — the actual primary key. Joining |
| on ``beatmap_uid`` alone is wrong because a single chunk can contain two |
| revisions of the same submitted beatmapset. Any in-memory denormalization |
| must use this composite key to avoid blending values across revisions. |
| """ |
| srid = row.get("set_revision_id") |
| uid = row.get("beatmap_uid") |
| if srid is None or uid is None: |
| return None |
| return (str(srid), str(uid)) |
|
|
|
|
| def enrich_beatmaps( |
| beatmaps: list[dict[str, Any]], |
| hit_objects_common: list[dict[str, Any]], |
| ) -> list[dict[str, Any]]: |
| """Backfill denormalized columns onto every beatmap row. |
| |
| Adds: |
    - ``key_count``: round(circle_size) clamped to 1..18 for mania, otherwise null.
| - ``first_object_time_ms`` / ``last_object_time_ms`` / ``total_length_ms``: |
| derived from ``hit_objects_common`` of this batch (one pass, O(n)). |
| |
| Aggregation key is ``(set_revision_id, beatmap_uid)`` — the schema's |
| primary key. Keying on ``beatmap_uid`` alone would blend bounds across |
| revisions when one chunk contains two ``.osz`` revisions of the same |
| submitted set. |
| |
| Audio metadata (``audio_duration_ms`` / ``audio_sample_rate`` / |
| ``audio_channels``) is filled separately by :func:`backfill_beatmap_audio_metadata` |
| when a caller provides probe results. The compact v1 workflow does not |
| probe extracted blobs, so those fields are normally null. |
| Idempotent: running twice on the same rows is a no-op. |
| """ |
| if not beatmaps: |
| return beatmaps |
|
|
| |
| |
| |
| bounds: dict[tuple[str, str], list[int | None]] = {} |
| for ho in hit_objects_common: |
| key = _beatmap_revision_key(ho) |
| if key is None: |
| continue |
| start = ho.get("time_ms") |
| if start is None: |
| continue |
| try: |
| start = int(start) |
| except (TypeError, ValueError): |
| continue |
| end_raw = ho.get("end_time_ms") |
| try: |
| end = int(end_raw) if end_raw is not None else start |
| except (TypeError, ValueError): |
| end = start |
| bucket = bounds.get(key) |
| if bucket is None: |
| bounds[key] = [start, max(start, end)] |
| else: |
| if bucket[0] is None or start < bucket[0]: |
| bucket[0] = start |
| if bucket[1] is None or end > bucket[1]: |
| bucket[1] = end |
|
|
| for bm in beatmaps: |
| ruleset = (bm.get("ruleset") or "").lower() |
| if "key_count" not in bm or bm["key_count"] is None: |
| bm["key_count"] = ( |
| _mania_key_count_from_cs(bm.get("circle_size")) |
| if ruleset == "mania" |
| else None |
| ) |
| key = _beatmap_revision_key(bm) |
| b = bounds.get(key) if key is not None else None |
| if b is not None: |
| bm.setdefault("first_object_time_ms", b[0]) |
| bm.setdefault("last_object_time_ms", b[1]) |
| if b[0] is not None and b[1] is not None and b[1] >= b[0]: |
| bm.setdefault("total_length_ms", b[1] - b[0]) |
| else: |
| bm.setdefault("total_length_ms", None) |
| else: |
| bm.setdefault("first_object_time_ms", None) |
| bm.setdefault("last_object_time_ms", None) |
| bm.setdefault("total_length_ms", None) |
| return beatmaps |
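
# Worked example for the object-time bounds (values made up): hit objects for
# one (set_revision_id, beatmap_uid) starting at time_ms 1200 and 45000, the
# latter ending at end_time_ms 46500, give first_object_time_ms=1200,
# last_object_time_ms=46500 and total_length_ms=45300.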
|
|
|
|
| def backfill_beatmap_audio_metadata( |
| beatmaps: list[dict[str, Any]], |
| audio_probe_by_sha: dict[str, dict[str, Any]], |
| ) -> list[dict[str, Any]]: |
| """Stamp audio probe results onto each beatmap row from a sha→probe map. |
| |
| ``audio_probe_by_sha`` maps ``audio_blob_sha256`` to a dict carrying at |
| least ``audio_duration_ms`` / ``audio_sample_rate`` / ``audio_channels``; |
| compact v1 normally passes an empty map because extracted blob probing is |
| not part of the maintained workflow. Beatmaps with a null/missing audio |
| reference get nulls. Idempotent. |
| """ |
| if not beatmaps: |
| return beatmaps |
| for bm in beatmaps: |
| sha = bm.get("audio_blob_sha256") |
| if not sha: |
| bm.setdefault("audio_duration_ms", None) |
| bm.setdefault("audio_sample_rate", None) |
| bm.setdefault("audio_channels", None) |
| continue |
| probe = audio_probe_by_sha.get(sha) or {} |
| if "audio_duration_ms" not in bm or bm["audio_duration_ms"] is None: |
| bm["audio_duration_ms"] = probe.get("audio_duration_ms") |
| if "audio_sample_rate" not in bm or bm["audio_sample_rate"] is None: |
| bm["audio_sample_rate"] = probe.get("audio_sample_rate") |
| if "audio_channels" not in bm or bm["audio_channels"] is None: |
| bm["audio_channels"] = probe.get("audio_channels") |
| return beatmaps |
|
|
|
|
| |
| _HIT_OBJECTS_TABLES_FOR_AUDIO_BACKFILL: tuple[str, ...] = ( |
| "hit_objects_common", |
| "hit_objects_osu", |
| "hit_objects_taiko", |
| "hit_objects_catch", |
| "hit_objects_mania", |
| ) |
|
|
|
|
| def backfill_hit_objects_audio_blob( |
| rows_by_table: dict[str, list[dict[str, Any]]], |
| ) -> None: |
| """Stamp ``audio_blob_sha256`` from beatmaps onto every hit-objects row. |
| |
    Audio-conditioned models that stream ``hit_objects_*`` partition-pruned by
| ``key_count`` / ``ruleset`` would otherwise need a join to ``beatmaps`` to |
| locate the audio blob. Denormalize so the join is unnecessary. |
| |
| The mapping key is ``(set_revision_id, beatmap_uid)`` — the schema's |
| primary key. Keying on ``beatmap_uid`` alone is wrong because a single |
| chunk can contain two ``.osz`` revisions of the same submitted set with |
| different audio blobs; the second revision's sha would silently overwrite |
| the first, and hit objects from revision A would be stamped with revision |
| B's audio. That would corrupt audio-conditioned modeling pairs. |
| |
| Mutates rows in place. Idempotent: rows already carrying |
| ``audio_blob_sha256`` are not overwritten. |
| """ |
| beatmaps = rows_by_table.get("beatmaps", []) |
| if not beatmaps: |
| return |
| audio_by_key: dict[tuple[str, str], str | None] = {} |
| for bm in beatmaps: |
| key = _beatmap_revision_key(bm) |
| if key is None: |
| continue |
| audio_by_key[key] = bm.get("audio_blob_sha256") |
| if not audio_by_key: |
| return |
| for table_name in _HIT_OBJECTS_TABLES_FOR_AUDIO_BACKFILL: |
| rows = rows_by_table.get(table_name) |
| if not rows: |
| continue |
| for row in rows: |
| if row.get("audio_blob_sha256") is not None: |
| continue |
| key = _beatmap_revision_key(row) |
| if key is None: |
| continue |
| if key in audio_by_key: |
| row["audio_blob_sha256"] = audio_by_key[key] |
|
|
|
|
| def derive_storyboard_source_to_set_revision_map( |
| storyboard_sources: list[dict[str, Any]], |
| ) -> dict[str, str]: |
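    """Map each ``storyboard_source_id`` to its owning ``set_revision_id``."""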
| return {r["storyboard_source_id"]: r["set_revision_id"] for r in storyboard_sources} |
|
|
|
|
| def derive_partition_values( |
| table_name: str, |
| row: dict[str, Any], |
| partition_keys: tuple[str, ...], |
| ) -> dict[str, Any]: |
| """Read physical partition values from a single row.""" |
| values: dict[str, Any] = {} |
| for k in partition_keys: |
| if k in row: |
| values[k] = row[k] |
| else: |
| raise ValueError( |
| f"cannot derive partition value for {k!r} in table {table_name}" |
| ) |
| return values |
|
|
|
|
| def partition_dir_name(key: str, value: Any) -> str: |
| """Physical directory name for a logical partition value. |
| |
| The ``p_`` prefix prevents PyArrow's automatic Hive partition discovery |
| from inventing a second column with the same name as the real in-file |
| column. That keeps ``pq.read_table(<file>)`` and ``ds.dataset(..., |
| partitioning="hive")`` from failing on duplicate logical fields while HF |
| still sees those fields in the Parquet body. |
| """ |
| return f"p_{key}={value}" |
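
# Example: partition_dir_name("ruleset", "osu") == "p_ruleset=osu"; the path
# carries the prefixed name while the in-file column keeps plain ``ruleset``.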
|
|
|
|
| |
| |
| |
|
|
|
|
| def _sort_key_for(row: dict[str, Any], keys: tuple[str, ...]) -> tuple: |
    """Stable sort key: nulls sort first, then numeric values, then strings."""
| out = [] |
| for k in keys: |
| v = row.get(k) |
| if v is None: |
| out.append((0, 0)) |
| elif isinstance(v, bool): |
| out.append((1, int(v))) |
| elif isinstance(v, (int, float)): |
| out.append((1, v)) |
| else: |
| out.append((2, str(v))) |
| return tuple(out) |
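
# Example: sorting rows on one key whose values are None, "b" and 3 orders
# them None, 3, "b" (null tier, then numeric tier, then string tier).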
|
|
|
|
| def rows_to_arrow_table( |
| rows: list[dict[str, Any]], file_schema: pa.Schema |
| ) -> pa.Table: |
| """Convert rows (dicts) to a PyArrow Table conforming to ``file_schema``. |
| |
| Missing keys become nulls. Extra keys are ignored. Type coercion is |
| delegated to PyArrow (ints accepted for timestamp[ms], etc.). |
| """ |
| columns: dict[str, list[Any]] = {name: [] for name in file_schema.names} |
| for row in rows: |
| for name in columns: |
| columns[name].append(row.get(name)) |
| arrays = [] |
| for field in file_schema: |
| arrays.append(pa.array(columns[field.name], type=field.type)) |
| return pa.Table.from_arrays(arrays, schema=file_schema) |
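
# Example (schema and rows illustrative): with a two-column schema
# (a: int64, b: string), rows [{"a": 1, "extra": True}, {"b": "x"}] become a
# table where ``extra`` is dropped, a == [1, null] and b == [null, "x"].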
|
|
|
|
| |
| |
| |
|
|
|
|
| PARQUET_WRITE_KWARGS = dict( |
| compression="zstd", |
| use_dictionary=True, |
| use_content_defined_chunking=True, |
| write_page_index=True, |
| ) |
|
|
|
|
| def _atomic_write_parquet( |
| arrow_table: pa.Table, |
| target: Path, |
| **kwargs: Any, |
| ) -> Path: |
| """Write a parquet file via tmp-sidecar + rename — crash-safe. |
| |
| A SIGINT / power loss between ``pq.write_table`` and ``replace`` leaves |
| a ``<name>.tmp.<pid>`` file on disk (no corrupt ``<name>``); our startup |
| GC pass (``ingest_osz.cleanup_orphan_tmp_files``) sweeps these. The |
| final ``replace`` is atomic on NTFS within a volume (``MoveFileExW`` |
| with ``MOVEFILE_REPLACE_EXISTING``), so readers never observe a |
| truncated parquet. |
| """ |
| target.parent.mkdir(parents=True, exist_ok=True) |
| tmp = target.with_name(f"{target.name}.tmp.{os.getpid()}") |
| pq.write_table(arrow_table, tmp, **kwargs) |
| tmp.replace(target) |
| return target |
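
# Example: writing ``beatmaps/part-000042.parquet`` from pid 31337 stages
# ``beatmaps/part-000042.parquet.tmp.31337`` and renames it over the target
# (batch id and pid here are made up).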
|
|
|
|
| def write_table_partitioned( |
| rows: list[dict[str, Any]], |
| table: TableSchema, |
| output_root: Path, |
| batch_id: str, |
| physical_partition_keys: tuple[str, ...] | None = None, |
| ) -> list[Path]: |
| """Partition rows by partition_keys, sort, and write Parquet files. |
| |
| Returns the list of files written. No-op (returns ``[]``) when ``rows`` |
| is empty — partition directories aren't created for empty tables. |
| Each parquet write is atomic (tmp+rename) so a crash mid-write never |
| leaves a truncated file at the canonical path. |
| """ |
| if not rows: |
| return [] |
|
|
| if physical_partition_keys is None: |
| physical_partition_keys = table.partition_keys |
|
|
| |
| |
| by_partition: dict[tuple, list[dict[str, Any]]] = defaultdict(list) |
| for row in rows: |
| pvs = derive_partition_values( |
| table.name, row, table.partition_keys |
| ) |
| ptuple = tuple(pvs[k] for k in physical_partition_keys) |
| body_row = dict(row) |
| body_row.update(pvs) |
| by_partition[ptuple].append(body_row) |
|
|
| file_schema = table.file_schema() |
| paths_written: list[Path] = [] |
|
|
| for ptuple, prows in by_partition.items(): |
| prows.sort(key=lambda r: _sort_key_for(r, table.sort_keys)) |
|
|
| if physical_partition_keys: |
| parts = [ |
| partition_dir_name(k, v) |
| for k, v in zip(physical_partition_keys, ptuple) |
| ] |
| target_dir = output_root / table.name |
| for p in parts: |
| target_dir = target_dir / p |
| else: |
| target_dir = output_root / table.name |
|
|
| arrow_table = rows_to_arrow_table(prows, file_schema) |
| target_path = target_dir / f"part-{batch_id}.parquet" |
| _atomic_write_parquet(arrow_table, target_path, **PARQUET_WRITE_KWARGS) |
| paths_written.append(target_path) |
|
|
| return paths_written |
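
# Illustrative layout for a table partitioned on ``ruleset`` (placeholders in
# angle brackets):
#
#     <output_root>/<table>/p_ruleset=osu/part-<batch_id>.parquet
#
# With ``physical_partition_keys=()`` the same rows land directly under
# ``<output_root>/<table>/`` while partition columns stay in the file body.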
|
|
|
|
| |
| |
| |
|
|
|
|
| def write_all_revisions_tables( |
| rows_by_table: dict[str, list[dict[str, Any]]], |
| schemas: dict[str, TableSchema], |
| all_revisions_root: Path, |
| batch_id: str, |
| defer_tables: tuple[str, ...] = (), |
| audio_probe_by_sha: dict[str, dict[str, Any]] | None = None, |
| physical_partitioning: str = "schema", |
| ) -> dict[str, list[Path]]: |
| """Write every recognized table from this batch into all_revisions/. |
| |
| ``defer_tables`` lets the caller skip selected tables here so it can |
| write them in a controlled order (e.g. defer ``archive_revisions`` so |
| it lands LAST as a chunk-level commit marker; then a crashed run |
    leaves no ``archive_revisions`` row, and ``ingest_osz``'s skip-already-
    ingested check correctly retries the chunk). The deferred tables can be
| written via :func:`commit_archive_revisions` (or directly via |
| :func:`write_table_partitioned`). |
| |
| Returns ``{table_name: [paths_written, ...]}``. Tables with zero rows |
| in the batch produce zero files. Deferred tables map to ``[]``. |
| """ |
| if physical_partitioning == "schema": |
| physical_keys_by_table: dict[str, tuple[str, ...]] | None = None |
| elif physical_partitioning == "none": |
| physical_keys_by_table = {name: () for name in schemas} |
| else: |
| raise ValueError(f"unknown physical partitioning mode: {physical_partitioning!r}") |
|
|
| |
| |
| |
| if "difficulty_attributes" in rows_by_table: |
| hoist_difficulty_attributes(rows_by_table["difficulty_attributes"]) |
|
|
| |
| |
| if "beatmaps" in rows_by_table: |
| enrich_beatmaps( |
| rows_by_table["beatmaps"], |
| rows_by_table.get("hit_objects_common", []), |
| ) |
| if audio_probe_by_sha: |
| backfill_beatmap_audio_metadata( |
| rows_by_table["beatmaps"], audio_probe_by_sha |
| ) |
|
|
| |
| |
| |
| |
| backfill_hit_objects_audio_blob(rows_by_table) |
|
|
| deferred = set(defer_tables) |
| written: dict[str, list[Path]] = {} |
| eligible = [ |
| (name, rows) |
| for name, rows in rows_by_table.items() |
| if name not in deferred and name in schemas and rows |
| ] |
| table_write_workers = max( |
| 1, |
| int(os.environ.get("OSU_PARQUET_WRITE_WORKERS", "1")), |
| ) |
| table_write_workers = min(table_write_workers, max(len(eligible), 1)) |
| total_rows = sum(len(rs) for _, rs in eligible) |
| bar = _tqdm( |
| total=len(eligible) or None, |
| desc=( |
| f"writing all_revisions ({total_rows:,} rows" |
| f", {table_write_workers} workers)" |
| ), |
| unit="table", |
| ) |
| try: |
| for table_name in rows_by_table: |
| if table_name in deferred: |
| written[table_name] = [] |
| elif table_name not in schemas: |
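                # Emitted by the indexer but not described by any loaded
                # schema: record the table with an empty path list and move on.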
| |
| written[table_name] = [] |
|
|
| def write_one(table_name: str, rows: list[dict[str, Any]]) -> tuple[str, list[Path]]: |
| paths = write_table_partitioned( |
| rows, |
| schemas[table_name], |
| all_revisions_root, |
| batch_id, |
| physical_partition_keys=( |
| None |
| if physical_keys_by_table is None |
| else physical_keys_by_table[table_name] |
| ), |
| ) |
| return table_name, paths |
|
|
| if table_write_workers <= 1 or len(eligible) <= 1: |
| for table_name, rows in eligible: |
| bar.set_postfix_str(f"{table_name} ({len(rows):,} rows)") |
| table_name, paths = write_one(table_name, rows) |
| written[table_name] = paths |
| bar.update(1) |
| else: |
| with concurrent.futures.ThreadPoolExecutor( |
| max_workers=table_write_workers |
| ) as executor: |
| futures = { |
| executor.submit(write_one, table_name, rows): ( |
| table_name, |
| len(rows), |
| ) |
| for table_name, rows in eligible |
| } |
| for future in concurrent.futures.as_completed(futures): |
| table_name, row_count = futures[future] |
| bar.set_postfix_str(f"{table_name} ({row_count:,} rows)") |
| written_name, paths = future.result() |
| written[written_name] = paths |
| bar.update(1) |
| finally: |
| bar.close() |
| return written |
|
|
|
|
| def commit_archive_revisions( |
| rows_by_table: dict[str, list[dict[str, Any]]], |
| schemas: dict[str, TableSchema], |
| all_revisions_root: Path, |
| batch_id: str, |
| physical_partitioning: str = "schema", |
| ) -> list[Path]: |
| """Atomically write the chunk's ``archive_revisions`` parquet. |
| |
| This is the **chunk commit marker**. ``ingest_osz``'s skip-already- |
| ingested treats ``archive_sha256`` rows in ``archive_revisions/`` as |
| proof that the chunk's other tables are already on disk. So this MUST |
| be the last write in a chunk's pipeline. Only call it after |
| ``write_all_revisions_tables(defer_tables=("archive_revisions",))`` has |
| completed. |
| """ |
| rows = rows_by_table.get("archive_revisions", []) |
| if not rows: |
| return [] |
| if physical_partitioning == "schema": |
| physical_partition_keys = None |
| elif physical_partitioning == "none": |
| physical_partition_keys = () |
| else: |
| raise ValueError(f"unknown physical partitioning mode: {physical_partitioning!r}") |
| return write_table_partitioned( |
| rows, |
| schemas["archive_revisions"], |
| all_revisions_root, |
| batch_id, |
| physical_partition_keys=physical_partition_keys, |
| ) |
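
# Hedged sketch of the intended chunk-level call order (variable names are
# illustrative):
#
#     written = write_all_revisions_tables(
#         rows_by_table, schemas, all_revisions_root, batch_id,
#         defer_tables=("archive_revisions",),
#     )
#     ...  # any remaining per-chunk work for this batch
#     commit_archive_revisions(
#         rows_by_table, schemas, all_revisions_root, batch_id,
#     )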
|
|
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| LATEST_TABLE_SPECS: tuple[tuple[str, str, Callable[[dict], bool] | None], ...] = ( |
| ("logical_files", "logical_files", lambda r: (r.get("media_kind") or "") != "video"), |
| ("logical_files", "logical_files_video", lambda r: (r.get("media_kind") or "") == "video"), |
| ("beatmaps", "beatmaps", None), |
| ("hit_objects_common", "hit_objects_common", None), |
| ("hit_objects_osu", "hit_objects_osu", None), |
| ("hit_objects_taiko", "hit_objects_taiko", None), |
| ("hit_objects_catch", "hit_objects_catch", None), |
| ("hit_objects_mania", "hit_objects_mania", None), |
| ("storyboard_sources", "storyboard_sources", None), |
| ("storyboard_elements", "storyboard_elements", None), |
| ("storyboard_commands", "storyboard_commands", None), |
| ("storyboard_variables", "storyboard_variables", None), |
| ("asset_references", "asset_references", None), |
| ("difficulty_attributes", "difficulty_attributes", None), |
| ("colours", "colours", None), |
| ("breaks", "breaks", None), |
| ) |
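
# Each entry reads as (source table, latest-view output name, optional row
# filter); ``logical_files`` is the only split source, fanning out into a
# video view and a non-video view.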
|
|
|
|
| def derive_set_key(set_revision_row: dict[str, Any]) -> str: |
| """Compute the canonical ``set_key`` for a ``set_revisions`` row. |
| |
| The set revision id includes the per-archive SHA suffix. ``set_key`` strips |
| that suffix for known and fingerprinted sets so all revisions of the same |
| logical set share one key. Unknown archives keep their full id because |
| there is no stable cross-archive identity to merge on. |
| """ |
| srid = set_revision_row.get("set_revision_id") or "" |
| if not srid: |
| return srid |
| parts = str(srid).split(":", 2) |
| prefix = parts[0] |
| if prefix == "fingerprint" and len(parts) >= 2: |
| return f"fingerprint:{parts[1]}" |
| if prefix == "unknown": |
| return str(srid) |
| try: |
| return str(int(prefix)) |
| except ValueError: |
| return str(srid) |
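
# Illustrative ids (hashes abbreviated): "123456:aaaa..." -> "123456",
# "fingerprint:fp01:aaaa..." -> "fingerprint:fp01", and "unknown:aaaa..."
# is returned unchanged.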
|
|
|
|
| def _coerce_ms(value: Any) -> int: |
| """Coerce a timestamp-like value (int / datetime) to unix milliseconds. |
| |
    Avoids ``datetime.timestamp()`` because on naive datetimes it goes through
    an OS-level ``mktime`` call that fails on Windows for pre-1970 values
    (and, on some systems, for somewhat later ones). Use direct epoch math
    instead.
| """ |
| if value is None: |
| return 0 |
| if isinstance(value, bool): |
| return int(value) |
| if isinstance(value, int): |
| return value |
| |
| import datetime as _dt |
| if isinstance(value, _dt.datetime): |
| if value.tzinfo is None: |
| epoch = _dt.datetime(1970, 1, 1) |
| else: |
| epoch = _dt.datetime(1970, 1, 1, tzinfo=_dt.timezone.utc) |
| delta = value - epoch |
| return delta.days * 86400_000 + delta.seconds * 1000 + delta.microseconds // 1000 |
| return int(value) |
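
# Worked examples: a naive datetime(1970, 1, 2) is one day past the epoch,
# so the math gives 1 * 86_400_000 = 86400000; datetime(1969, 12, 31) gives
# -86400000, exactly the pre-1970 case ``mktime`` would reject on Windows.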
|
|
|
|
| def write_latest_revisions( |
| new_latest: dict[str, dict[str, Any]], |
| path: Path, |
| schemas: dict[str, TableSchema], |
| ) -> Path: |
| """Write the canonical ``latest_revisions.parquet`` atomically. |
| |
| Single small file (no partitioning per the schema). Atomic semantics |
| are critical here — this file is the COMMIT MARKER for a batch's |
| promotion of latest set_revisions; readers must never see a half- |
| written or empty version. |
| """ |
| table_schema = schemas["latest_revisions"] |
| rows = sorted(new_latest.values(), key=lambda r: r["set_key"]) |
| arrow_table = rows_to_arrow_table(rows, table_schema.file_schema()) |
| return _atomic_write_parquet(arrow_table, path, **PARQUET_WRITE_KWARGS) |
|
|