#!/usr/bin/env python
"""Deep validation for the compact v1 osu! dataset layout.
This is intentionally stricter than ``validate_compact_v1.py``. It checks exact
archive path parity, row identity for the small core tables, referential
integrity from large tables to ``set_revisions`` and ``archive_revisions``,
declared primary keys, and
latest-view row counts. Large tables are scanned as Arrow batches rather than
loaded into Python lists.
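
Minimal invocation (repository path illustrative)::

    python deep_validate_compact_v1.py --repo-root /path/to/dataset --json

Exit status is 0 when all checks pass and 1 otherwise.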
"""

from __future__ import annotations

import argparse
import json
import sys
from pathlib import Path
from typing import Any

import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.dataset as ds
import pyarrow.parquet as pq
from tqdm.auto import tqdm

from parquet_writer import (
LATEST_TABLE_SPECS,
_coerce_ms,
derive_set_key,
load_schemas,
)


def _log(msg: str) -> None:
print(msg, file=sys.stderr, flush=True)


def _files(root: Path, pattern: str = "*.parquet") -> list[Path]:
return sorted(p for p in root.rglob(pattern) if p.is_file()) if root.exists() else []


def _dataset(root: Path) -> ds.Dataset | None:
files = _files(root)
if not files:
return None
return ds.dataset([str(p) for p in files], format="parquet")


def _count_rows(root: Path) -> int:
dataset = _dataset(root)
return int(dataset.count_rows()) if dataset is not None else 0


def _read_rows(root: Path, columns: list[str]) -> list[dict[str, Any]]:
dataset = _dataset(root)
if dataset is None:
return []
return dataset.to_table(columns=columns).to_pylist()


def _true_count(mask: pa.Array) -> int:
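    # Nulls in the mask count as False, so the summed cast is a plain true count.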
mask = pc.fill_null(mask, False)
return int(pc.sum(pc.cast(mask, pa.int64())).as_py() or 0)


def _batch_column_py(batch: pa.RecordBatch, column: str) -> list[Any]:
return batch.column(batch.schema.get_field_index(column)).to_pylist()


def _primary_key_exact_check(
root: Path,
primary_key: tuple[str, ...],
*,
batch_size: int,
) -> dict[str, Any]:
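    """Exact primary-key check: materialise every key tuple in an in-memory set.

    Memory use grows with the number of distinct keys, so callers only route
    tables up to ``exact_row_limit`` rows here (see ``_primary_key_check``).
    """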
dataset = _dataset(root)
result: dict[str, Any] = {
"checked": False,
"mode": "exact",
"rows": 0,
"unique_keys": 0,
"duplicate_rows": 0,
"null_pk_rows": 0,
"missing_columns": [],
}
if dataset is None:
result["checked"] = True
return result
missing = [c for c in primary_key if c not in dataset.schema.names]
if missing:
result["missing_columns"] = missing
return result
seen: set[tuple[Any, ...]] = set()
scanner = dataset.scanner(columns=list(primary_key), batch_size=batch_size)
for batch in scanner.to_batches():
columns = [_batch_column_py(batch, c) for c in primary_key]
for values in zip(*columns):
result["rows"] += 1
if any(v is None for v in values):
result["null_pk_rows"] += 1
continue
key = tuple(values)
if key in seen:
result["duplicate_rows"] += 1
else:
seen.add(key)
result["checked"] = True
result["unique_keys"] = len(seen)
return result


def _primary_key_beatmap_scoped_stream_check(
root: Path,
primary_key: tuple[str, ...],
*,
batch_size: int,
) -> dict[str, Any]:
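    """Streaming primary-key check for keys prefixed by (beatmap_uid, set_revision_id).

    Assumes rows of each (beatmap_uid, set_revision_id) group are stored
    contiguously, so only the current group's key suffixes are held in memory.
    A duplicate split across a group that reappears later is not detected
    directly; instead each such resumption is counted in
    ``non_contiguous_group_rows`` and reported as an error by the caller.
    """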
files = _files(root)
result: dict[str, Any] = {
"checked": False,
"mode": "beatmap_scoped_stream",
"rows": 0,
"unique_keys": None,
"duplicate_rows": 0,
"null_pk_rows": 0,
"missing_columns": [],
"non_contiguous_group_rows": 0,
}
if not files:
result["checked"] = True
result["unique_keys"] = 0
return result
schema = pq.read_schema(files[0])
missing = [c for c in primary_key if c not in schema.names]
if missing:
result["missing_columns"] = missing
return result
if primary_key[:2] != ("beatmap_uid", "set_revision_id"):
result["missing_columns"] = ["beatmap_uid", "set_revision_id"]
return result
current_group: tuple[Any, Any] | None = None
current_suffixes: set[tuple[Any, ...]] = set()
current_group_rows = 0
completed_groups: set[tuple[Any, Any]] = set()
unique_keys = 0

    def finish_group() -> None:
nonlocal current_group, current_suffixes, current_group_rows
if current_group is not None:
completed_groups.add(current_group)
current_group = None
current_suffixes = set()
current_group_rows = 0

    for file in files:
parquet = pq.ParquetFile(file)
file_schema_names = set(parquet.schema_arrow.names)
missing = [c for c in primary_key if c not in file_schema_names]
if missing:
result["missing_columns"] = missing
return result
for batch in parquet.iter_batches(batch_size=batch_size, columns=list(primary_key)):
columns = [_batch_column_py(batch, c) for c in primary_key]
for values in zip(*columns):
result["rows"] += 1
if any(v is None for v in values):
result["null_pk_rows"] += 1
continue
group = (values[0], values[1])
suffix = tuple(values[2:])
if group != current_group:
finish_group()
if group in completed_groups:
result["non_contiguous_group_rows"] += 1
current_group = group
if not suffix:
if current_group_rows > 0:
result["duplicate_rows"] += 1
else:
unique_keys += 1
current_group_rows += 1
continue
if suffix in current_suffixes:
result["duplicate_rows"] += 1
else:
current_suffixes.add(suffix)
unique_keys += 1
current_group_rows += 1
finish_group()
result["checked"] = True
result["unique_keys"] = unique_keys
return result


def _primary_key_check(
root: Path,
primary_key: tuple[str, ...],
*,
row_count: int,
exact_row_limit: int,
batch_size: int,
) -> dict[str, Any]:
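    """Choose the exact in-memory check or the streaming per-beatmap check by size."""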
if not primary_key:
return {
"checked": False,
"mode": "none",
"rows": row_count,
"unique_keys": None,
"duplicate_rows": 0,
"null_pk_rows": 0,
"missing_columns": [],
"skip_reason": "table has no declared primary key",
}
if row_count <= exact_row_limit:
return _primary_key_exact_check(root, primary_key, batch_size=batch_size)
if primary_key[:2] == ("beatmap_uid", "set_revision_id"):
return _primary_key_beatmap_scoped_stream_check(
root,
primary_key,
batch_size=batch_size,
)
return {
"checked": False,
"mode": "skipped",
"rows": row_count,
"unique_keys": None,
"duplicate_rows": 0,
"null_pk_rows": 0,
"missing_columns": [],
"skip_reason": f"row count {row_count} exceeds exact_row_limit {exact_row_limit}",
}


def _invalid_membership_count(
root: Path,
column: str,
valid_values: pa.Array,
*,
batch_size: int,
) -> int:
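    """Count rows whose ``column`` value is absent from ``valid_values``.

    Null values never match the value set, so they are counted as invalid.
    """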
dataset = _dataset(root)
if dataset is None or column not in dataset.schema.names:
return 0
invalid = 0
scanner = dataset.scanner(columns=[column], batch_size=batch_size)
for batch in scanner.to_batches():
values = batch.column(column)
valid = pc.is_in(values, value_set=valid_values)
invalid += batch.num_rows - _true_count(valid)
return invalid


def _filtered_count(
root: Path,
latest_srids: pa.Array,
*,
latest_dir: str | None = None,
batch_size: int,
) -> int:
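    """Recompute the expected row count of a latest view from its all_revisions source.

    For the split file tables, ``logical_files`` keeps rows whose ``media_kind``
    is not ``"video"`` and ``logical_files_video`` keeps only ``"video"`` rows;
    a null ``media_kind`` satisfies neither comparison, so such rows land in
    neither view.
    """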
dataset = _dataset(root)
if dataset is None or "set_revision_id" not in dataset.schema.names:
return 0
columns = ["set_revision_id"]
if latest_dir in {"logical_files", "logical_files_video"}:
columns.append("media_kind")
total = 0
scanner = dataset.scanner(columns=columns, batch_size=batch_size)
for batch in scanner.to_batches():
mask = pc.is_in(batch.column("set_revision_id"), value_set=latest_srids)
if latest_dir == "logical_files":
media_mask = pc.not_equal(batch.column("media_kind"), "video")
mask = pc.and_(mask, media_mask)
elif latest_dir == "logical_files_video":
media_mask = pc.equal(batch.column("media_kind"), "video")
mask = pc.and_(mask, media_mask)
total += _true_count(mask)
return total


def _latest_actual_count_and_invalid(
root: Path,
latest_srids: pa.Array,
*,
batch_size: int,
) -> tuple[int, int]:
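    """Return (total rows, rows referencing a non-latest set_revision_id)."""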
dataset = _dataset(root)
if dataset is None:
return 0, 0
total = 0
invalid = 0
if "set_revision_id" not in dataset.schema.names:
return int(dataset.count_rows()), 0
scanner = dataset.scanner(columns=["set_revision_id"], batch_size=batch_size)
for batch in scanner.to_batches():
total += batch.num_rows
valid = pc.is_in(batch.column("set_revision_id"), value_set=latest_srids)
invalid += batch.num_rows - _true_count(valid)
return total, invalid


def _build_expected_latest(
archive_rows: list[dict[str, Any]],
set_rows: list[dict[str, Any]],
) -> dict[str, dict[str, Any]]:
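    """Recompute the expected latest_revisions state from the core tables.

    For each set_key the winning revision maximises the (ingested_at,
    set_revision_id) pair, so the revision id is a deterministic tie-break
    when two archives share a timestamp.
    """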
archive_ts = {
row["archive_revision_id"]: _coerce_ms(row["ingested_at"])
for row in archive_rows
}
latest: dict[str, dict[str, Any]] = {}
for sr in set_rows:
set_key = derive_set_key(sr)
srid = sr["set_revision_id"]
ts = int(archive_ts.get(sr["archive_revision_id"]) or 0)
existing = latest.get(set_key)
if existing is None:
latest[set_key] = {
"set_key": set_key,
"set_revision_id": srid,
"first_seen_at": ts,
"last_updated_at": ts,
"revision_count": 1,
}
continue
existing["first_seen_at"] = min(int(existing["first_seen_at"]), ts)
existing["revision_count"] = int(existing["revision_count"]) + 1
if (ts, srid) > (int(existing["last_updated_at"]), existing["set_revision_id"]):
existing["set_revision_id"] = srid
existing["last_updated_at"] = ts
return latest


def deep_validate_compact_v1(
repo_root: Path,
*,
schema_version: str = "v1",
batch_size: int = 262_144,
primary_key_exact_row_limit: int = 2_000_000,
) -> dict[str, Any]:
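    """Run every deep check and return a JSON-serialisable summary dict."""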
repo_root = repo_root.resolve()
all_rev = repo_root / "data" / schema_version / "all_revisions"
latest_root = repo_root / "data" / schema_version / "latest"
archives_root = repo_root / "archives"
schemas = load_schemas(repo_root / "schemas" / schema_version)
errors: list[str] = []
warnings: list[str] = []
archive_rows = _read_rows(
all_rev / "archive_revisions",
["archive_revision_id", "archive_sha256", "archive_path", "size_bytes", "ingested_at"],
)
set_rows = _read_rows(
all_rev / "set_revisions",
["set_revision_id", "archive_revision_id"],
)
latest_rows = _read_rows(
all_rev / "latest_revisions",
["set_key", "set_revision_id", "first_seen_at", "last_updated_at", "revision_count"],
)
archive_ids = [str(r["archive_revision_id"]) for r in archive_rows]
archive_sha = [str(r["archive_sha256"]) for r in archive_rows]
archive_paths = [str(r["archive_path"]) for r in archive_rows]
archive_id_set = set(archive_ids)
archive_sha_set = set(archive_sha)
archive_path_set = set(archive_paths)
if len(archive_id_set) != len(archive_ids):
errors.append("archive_revisions.archive_revision_id contains duplicates")
if len(archive_sha_set) != len(archive_sha):
errors.append("archive_revisions.archive_sha256 contains duplicates")
if len(archive_path_set) != len(archive_paths):
errors.append("archive_revisions.archive_path contains duplicates")
osz_files = {
str(p.relative_to(repo_root)).replace("\\", "/")
for p in archives_root.rglob("*.osz")
} if archives_root.exists() else set()
missing_paths = sorted(archive_path_set - osz_files)
extra_paths = sorted(osz_files - archive_path_set)
if missing_paths:
errors.append(f"{len(missing_paths)} archive_path value(s) missing on disk")
if extra_paths:
errors.append(f"{len(extra_paths)} local archive file(s) not referenced by archive_revisions")
bad_archive_paths = 0
bad_archive_sizes = 0
for row in archive_rows:
rel = str(row["archive_path"])
sha = str(row["archive_sha256"])
path = repo_root / rel
parts = Path(rel).parts
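        # Expected CAS layout: archives/sha256/<sha[:2]>/<sha[2:4]>/<sha>.osz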
if (
len(parts) != 5
or parts[0] != "archives"
or parts[1] != "sha256"
or parts[2] != sha[:2]
or parts[3] != sha[2:4]
or parts[4] != f"{sha}.osz"
):
bad_archive_paths += 1
if path.exists() and row.get("size_bytes") is not None and path.stat().st_size != int(row["size_bytes"]):
bad_archive_sizes += 1
if bad_archive_paths:
errors.append(f"{bad_archive_paths} archive_path value(s) do not match sha256 CAS layout")
if bad_archive_sizes:
errors.append(f"{bad_archive_sizes} archive size_bytes value(s) differ from file size")
set_srids = [str(r["set_revision_id"]) for r in set_rows]
set_srid_set = set(set_srids)
if len(set_srid_set) != len(set_srids):
errors.append("set_revisions.set_revision_id contains duplicates")
bad_set_archive_ids = sum(1 for r in set_rows if str(r["archive_revision_id"]) not in archive_id_set)
if bad_set_archive_ids:
errors.append(f"{bad_set_archive_ids} set_revisions row(s) reference unknown archive_revision_id")
latest_by_key = {str(r["set_key"]): r for r in latest_rows}
if len(latest_by_key) != len(latest_rows):
errors.append("latest_revisions.set_key contains duplicates")
expected_latest = _build_expected_latest(archive_rows, set_rows)
if set(latest_by_key) != set(expected_latest):
errors.append(
"latest_revisions set_key mismatch: "
f"actual={len(latest_by_key)}, expected={len(expected_latest)}"
)
latest_mismatches = 0
for key, expected in expected_latest.items():
actual = latest_by_key.get(key)
if actual is None:
latest_mismatches += 1
continue
for col in ("set_revision_id", "revision_count"):
if str(actual[col]) != str(expected[col]):
latest_mismatches += 1
break
if latest_mismatches:
errors.append(f"{latest_mismatches} latest_revisions row(s) differ from recomputed latest state")
latest_srids = {str(r["set_revision_id"]) for r in latest_rows}
latest_srid_values = pa.array(sorted(latest_srids), type=pa.string())
set_srid_values = pa.array(sorted(set_srid_set), type=pa.string())
archive_id_values = pa.array(sorted(archive_id_set), type=pa.string())
table_counts: dict[str, int] = {}
table_invalid_srids: dict[str, int] = {}
table_invalid_archive_ids: dict[str, int] = {}
table_primary_key_checks: dict[str, dict[str, Any]] = {}
    table_dirs = [p for p in sorted(all_rev.iterdir()) if p.is_dir()] if all_rev.exists() else []
for table_dir in tqdm(
table_dirs,
desc="validating all_revisions tables",
unit="table",
file=sys.stderr,
mininterval=1.0,
dynamic_ncols=True,
):
dataset = _dataset(table_dir)
if dataset is None:
continue
row_count = int(dataset.count_rows())
table_counts[table_dir.name] = row_count
schema = schemas.get(table_dir.name)
if schema is not None and schema.primary_key:
pk_check = _primary_key_check(
table_dir,
schema.primary_key,
row_count=row_count,
exact_row_limit=max(0, primary_key_exact_row_limit),
batch_size=batch_size,
)
table_primary_key_checks[table_dir.name] = pk_check
missing_cols = pk_check.get("missing_columns") or []
if missing_cols:
errors.append(
f"{table_dir.name}: primary key column(s) missing: {missing_cols}"
)
duplicate_rows = int(pk_check.get("duplicate_rows") or 0)
if duplicate_rows:
errors.append(
f"{table_dir.name}: primary key has {duplicate_rows} duplicate row(s)"
)
null_pk_rows = int(pk_check.get("null_pk_rows") or 0)
if null_pk_rows:
errors.append(
f"{table_dir.name}: primary key has {null_pk_rows} row(s) with null key fields"
)
non_contiguous = int(pk_check.get("non_contiguous_group_rows") or 0)
if non_contiguous:
                errors.append(
                    f"{table_dir.name}: {non_contiguous} non-contiguous (beatmap_uid, set_revision_id) group resumption(s) detected"
                )
if table_dir.name not in {"archive_revisions", "set_revisions", "latest_revisions"}:
if "set_revision_id" in dataset.schema.names:
invalid = _invalid_membership_count(
table_dir,
"set_revision_id",
set_srid_values,
batch_size=batch_size,
)
table_invalid_srids[table_dir.name] = invalid
if invalid:
errors.append(f"{table_dir.name}: {invalid} row(s) reference unknown set_revision_id")
if "archive_revision_id" in dataset.schema.names:
invalid = _invalid_membership_count(
table_dir,
"archive_revision_id",
archive_id_values,
batch_size=batch_size,
)
table_invalid_archive_ids[table_dir.name] = invalid
if invalid:
errors.append(f"{table_dir.name}: {invalid} row(s) reference unknown archive_revision_id")
expected_latest_dirs = {latest_dir for _schema_table, latest_dir, _row_filter in LATEST_TABLE_SPECS}
actual_latest_dirs = {
p.name for p in latest_root.iterdir()
if p.is_dir() and not p.name.startswith("_")
} if latest_root.exists() else set()
unexpected_latest_dirs = sorted(actual_latest_dirs - expected_latest_dirs)
if unexpected_latest_dirs:
errors.append(f"unexpected latest/ table dir(s): {unexpected_latest_dirs}")
latest_checks: dict[str, dict[str, int]] = {}
for schema_table, latest_dir, _row_filter in tqdm(
LATEST_TABLE_SPECS,
desc="validating latest views",
unit="table",
file=sys.stderr,
mininterval=1.0,
dynamic_ncols=True,
):
if schema_table not in schemas:
continue
source_root = all_rev / schema_table
actual_root = latest_root / latest_dir
expected_count = _filtered_count(
source_root,
latest_srid_values,
latest_dir=latest_dir,
batch_size=batch_size,
)
actual_count, invalid_latest = _latest_actual_count_and_invalid(
actual_root,
latest_srid_values,
batch_size=batch_size,
)
latest_checks[latest_dir] = {
"expected_rows": expected_count,
"actual_rows": actual_count,
"invalid_latest_srids": invalid_latest,
}
if actual_count != expected_count:
errors.append(
f"latest/{latest_dir}: row count mismatch actual={actual_count}, expected={expected_count}"
)
if invalid_latest:
errors.append(f"latest/{latest_dir}: {invalid_latest} row(s) not in latest_revisions")
summary = {
"ok": not errors,
"errors": errors,
"warnings": warnings,
"schemas": len(schemas),
"archive_rows": len(archive_rows),
"archive_files": len(osz_files),
"set_rows": len(set_rows),
"latest_revision_rows": len(latest_rows),
"all_revisions_files": sum(len(_files(p)) for p in table_dirs),
"latest_files": sum(len(_files(latest_root / d)) for d in actual_latest_dirs),
"table_counts": table_counts,
"table_invalid_srids": table_invalid_srids,
"table_invalid_archive_ids": table_invalid_archive_ids,
"table_primary_key_checks": table_primary_key_checks,
"latest_checks": latest_checks,
}
return summary


def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--repo-root", default=".")
parser.add_argument("--schema-version", default="v1")
parser.add_argument("--batch-size", type=int, default=262_144)
parser.add_argument(
"--primary-key-exact-row-limit",
type=int,
default=2_000_000,
help=(
"Use an exact in-memory primary-key set check up to this row count. "
"Larger beatmap-scoped tables use a streaming per-beatmap check."
),
)
parser.add_argument("--json", action="store_true")
return parser.parse_args(argv)


def main(argv: list[str] | None = None) -> int:
args = parse_args(argv)
summary = deep_validate_compact_v1(
Path(args.repo_root),
schema_version=args.schema_version,
batch_size=max(1, args.batch_size),
primary_key_exact_row_limit=max(0, args.primary_key_exact_row_limit),
)
if args.json:
print(json.dumps(summary, indent=2, sort_keys=True))
else:
print(f"ok={summary['ok']}")
print(f"archive_rows={summary['archive_rows']}")
print(f"set_rows={summary['set_rows']}")
print(f"latest_revision_rows={summary['latest_revision_rows']}")
print(f"all_revisions_files={summary['all_revisions_files']}")
print(f"latest_files={summary['latest_files']}")
if summary["errors"]:
print("errors:", file=sys.stderr)
for err in summary["errors"]:
print(f"- {err}", file=sys.stderr)
return 0 if summary["ok"] else 1


if __name__ == "__main__":
raise SystemExit(main())