"""Destructively remove beatmapsets from the compact v1 dataset."""

from __future__ import annotations

import argparse
import concurrent.futures
import json
import os
import shutil
import sqlite3
import sys
import time
import uuid
from pathlib import Path
from typing import Any

import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.dataset as ds
import pyarrow.parquet as pq
from tqdm.auto import tqdm

from parquet_writer import PARQUET_WRITE_KWARGS, _atomic_write_parquet


# Modest default parallelism; override with --workers.
DEFAULT_WORKERS = min(4, max(1, os.cpu_count() or 1))


def _parquet_files(path: Path) -> list[Path]:
    return sorted(p for p in path.rglob("*.parquet") if p.is_file()) if path.exists() else []


def _table(path: Path, columns: list[str]) -> pa.Table:
    files = [str(p) for p in _parquet_files(path)]
    if not files:
        return pa.table({name: [] for name in columns})
    return ds.dataset(files, format="parquet").to_table(columns=columns)


def _hardlink_or_copy(src: Path, dst: Path) -> None:
    dst.parent.mkdir(parents=True, exist_ok=True)
    try:
        os.link(src, dst)
    except OSError:
        shutil.copy2(src, dst)


def _batched(values: list[int], size: int = 500) -> list[list[int]]:
    """Chunk ``values``, e.g. _batched([1, 2, 3], size=2) -> [[1, 2], [3]]."""
    return [values[i : i + size] for i in range(0, len(values), size)]


def _choose_numeric_set_ids(repo_root: Path, schema_version: str, count: int) -> list[int]:
    latest_dir = repo_root / "data" / schema_version / "all_revisions" / "latest_revisions"
    table = _table(latest_dir, ["set_key"])
    set_ids: set[int] = set()
    for value in table.column("set_key").to_pylist():
        if value is None:
            continue
        text = str(value)
        if text.isdecimal():
            set_id = int(text)
            if set_id > 0:
                set_ids.add(set_id)
    if len(set_ids) < count:
        raise SystemExit(f"only {len(set_ids)} numeric set ids available; cannot remove {count}")
    # Remove the highest numeric set ids first.
    return sorted(set_ids, reverse=True)[:count]
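

# set_revisions links beatmapset_id to (set_revision_id, archive_revision_id);
# archive_revisions links archive_revision_id to (archive_path, size_bytes).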
def _selection_details(
    repo_root: Path,
    schema_version: str,
    selected_set_ids: set[int],
) -> dict[str, Any]:
    all_rev = repo_root / "data" / schema_version / "all_revisions"
    set_table = _table(
        all_rev / "set_revisions",
        ["set_revision_id", "archive_revision_id", "beatmapset_id"],
    )
    set_revision_ids: set[str] = set()
    archive_revision_ids: set[str] = set()
    for row in set_table.to_pylist():
        beatmapset_id = row.get("beatmapset_id")
        if beatmapset_id is None or int(beatmapset_id) not in selected_set_ids:
            continue
        set_revision_ids.add(str(row["set_revision_id"]))
        archive_revision_ids.add(str(row["archive_revision_id"]))

    if not set_revision_ids:
        raise SystemExit("selected set ids did not match any set_revisions rows")

    archive_table = _table(
        all_rev / "archive_revisions",
        ["archive_revision_id", "archive_path", "size_bytes"],
    )
    archive_paths: list[str] = []
    archive_bytes = 0
    for row in archive_table.to_pylist():
        archive_revision_id = str(row["archive_revision_id"])
        if archive_revision_id not in archive_revision_ids:
            continue
        archive_paths.append(str(row["archive_path"]))
        archive_bytes += int(row.get("size_bytes") or 0)

    return {
        "set_ids": sorted(selected_set_ids),
        "set_revision_ids": sorted(set_revision_ids),
        "archive_revision_ids": sorted(archive_revision_ids),
        "archive_paths": sorted(archive_paths),
        "archive_bytes": archive_bytes,
    }
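

# Per-file policy: filter on set_revision_id when present, otherwise on
# archive_revision_id; hardlink untouched files, rewrite partially filtered
# files atomically, and drop files whose rows were all removed.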
def _filter_one_file(
    src: Path,
    dst: Path,
    root: Path,
    set_values: pa.Array,
    archive_values: pa.Array,
) -> dict[str, Any]:
    table = pq.read_table(src)
    before = table.num_rows
    removed = 0
    if "set_revision_id" in table.column_names:
        keep = pc.invert(pc.is_in(table["set_revision_id"], value_set=set_values))
        table = table.filter(keep)
        removed = before - table.num_rows
    elif "archive_revision_id" in table.column_names:
        keep = pc.invert(pc.is_in(table["archive_revision_id"], value_set=archive_values))
        table = table.filter(keep)
        removed = before - table.num_rows

    if removed == 0:
        _hardlink_or_copy(src, dst)
    elif table.num_rows > 0:
        dst.parent.mkdir(parents=True, exist_ok=True)
        _atomic_write_parquet(table, dst, **PARQUET_WRITE_KWARGS)

    return {
        "file": str(src.relative_to(root)),
        "rows_before": before,
        "rows_after": table.num_rows,
        "rows_removed": removed,
    }
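

# Each table directory is filtered into staging, then swapped into place with
# two renames; the displaced original lands in the transaction's trash dir.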
def _rewrite_table_dir(
    repo_root: Path,
    table_dir: Path,
    staging_dir: Path,
    trash_dir: Path,
    set_revision_ids: set[str],
    archive_revision_ids: set[str],
    workers: int,
) -> dict[str, Any]:
    files = _parquet_files(table_dir)
    if not files:
        return {
            "table": table_dir.name,
            "files_before": 0,
            "files_after": 0,
            "rows_before": 0,
            "rows_after": 0,
            "rows_removed": 0,
        }

    set_values = pa.array(sorted(set_revision_ids), type=pa.string())
    archive_values = pa.array(sorted(archive_revision_ids), type=pa.string())
    staged_table = staging_dir / table_dir.name
    staged_table.mkdir(parents=True, exist_ok=True)

    summaries: list[dict[str, Any]] = []
    max_workers = min(max(1, workers), len(files))
    bar = tqdm(
        total=len(files),
        desc=f"remove/{table_dir.name}",
        unit="file",
        file=sys.stderr,
        mininterval=1.0,
        dynamic_ncols=True,
    )
    try:
        if max_workers == 1:
            for src in files:
                dst = staged_table / src.relative_to(table_dir)
                summaries.append(
                    _filter_one_file(src, dst, repo_root, set_values, archive_values)
                )
                bar.update(1)
        else:
            with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as ex:
                futures = [
                    ex.submit(
                        _filter_one_file,
                        src,
                        staged_table / src.relative_to(table_dir),
                        repo_root,
                        set_values,
                        archive_values,
                    )
                    for src in files
                ]
                for future in concurrent.futures.as_completed(futures):
                    summaries.append(future.result())
                    bar.update(1)
    finally:
        bar.close()

    table_trash = trash_dir / table_dir.name
    table_trash.parent.mkdir(parents=True, exist_ok=True)
    table_dir.rename(table_trash)
    staged_table.rename(table_dir)

    rows_before = sum(int(s["rows_before"]) for s in summaries)
    rows_after = sum(int(s["rows_after"]) for s in summaries)
    rows_removed = sum(int(s["rows_removed"]) for s in summaries)
    return {
        "table": table_dir.name,
        "files_before": len(files),
        "files_after": len(_parquet_files(table_dir)),
        "rows_before": rows_before,
        "rows_after": rows_after,
        "rows_removed": rows_removed,
    }


def _remove_archive_files(repo_root: Path, archive_paths: list[str]) -> dict[str, int]:
    removed = 0
    missing = 0
    for rel in archive_paths:
        path = repo_root / rel
        if path.exists():
            path.unlink()
            removed += 1
        else:
            missing += 1
    return {"removed": removed, "missing": missing}
def _update_state_db(
    state_db: Path,
    set_ids: list[int],
    *,
    clear_enumerate_state: bool,
) -> dict[str, int]:
    if not state_db.exists():
        return {
            "sets_deleted": 0,
            "attempts_deleted": 0,
            "discoveries_deleted": 0,
            "meta_deleted": 0,
        }
    conn = sqlite3.connect(state_db)
    conn.execute("PRAGMA foreign_keys=ON")
    try:
        attempts_deleted = 0
        discoveries_deleted = 0
        sets_deleted = 0
        for batch in _batched(sorted(set_ids)):
            placeholders = ",".join("?" for _ in batch)
            attempts_deleted += conn.execute(
                f"DELETE FROM mirror_attempts WHERE set_id IN ({placeholders})",
                batch,
            ).rowcount
            discoveries_deleted += conn.execute(
                f"DELETE FROM mirror_discoveries WHERE set_id IN ({placeholders})",
                batch,
            ).rowcount
            sets_deleted += conn.execute(
                f"DELETE FROM sets WHERE set_id IN ({placeholders})",
                batch,
            ).rowcount

        meta_deleted = 0
        if clear_enumerate_state:
            meta_deleted += conn.execute(
                "DELETE FROM meta WHERE key LIKE 'enumerate.high_water.%'"
            ).rowcount
            meta_deleted += conn.execute(
                "DELETE FROM meta WHERE key LIKE 'enumerate.cursor.%'"
            ).rowcount

        conn.commit()
        conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
    finally:
        conn.close()

    return {
        "sets_deleted": sets_deleted,
        "attempts_deleted": attempts_deleted,
        "discoveries_deleted": discoveries_deleted,
        "meta_deleted": meta_deleted,
    }
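

# Orchestration: pick the highest numeric set ids, rewrite every table under
# all_revisions via a staging swap, drop the derived "latest" tree, delete the
# selected archive files, then prune the state database.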
def remove_maps_v1(
    repo_root: Path,
    *,
    schema_version: str,
    count: int,
    state_db: Path | None,
    clear_enumerate_state: bool,
    workers: int,
) -> dict[str, Any]:
    repo_root = repo_root.resolve()
    all_revisions = repo_root / "data" / schema_version / "all_revisions"
    if not all_revisions.exists():
        raise FileNotFoundError(all_revisions)

    selected_set_ids = set(_choose_numeric_set_ids(repo_root, schema_version, count))
    details = _selection_details(repo_root, schema_version, selected_set_ids)
    set_revision_ids = set(details["set_revision_ids"])
    archive_revision_ids = set(details["archive_revision_ids"])

    tx_id = f"remove-maps-{int(time.time())}-{uuid.uuid4().hex[:8]}"
    tx_root = repo_root / ".scratch" / "remove-maps-v1" / tx_id
    staging_root = tx_root / "staging" / "all_revisions"
    trash_root = tx_root / "trash" / "all_revisions"
    staging_root.mkdir(parents=True, exist_ok=True)

    table_summaries: list[dict[str, Any]] = []
    try:
        for table_dir in sorted(p for p in all_revisions.iterdir() if p.is_dir()):
            table_summaries.append(
                _rewrite_table_dir(
                    repo_root,
                    table_dir,
                    staging_root,
                    trash_root,
                    set_revision_ids,
                    archive_revision_ids,
                    workers,
                )
            )

        latest_dir = repo_root / "data" / schema_version / "latest"
        if latest_dir.exists():
            shutil.rmtree(latest_dir)

        archive_delete = _remove_archive_files(repo_root, details["archive_paths"])
        state_summary = (
            _update_state_db(
                state_db,
                list(selected_set_ids),
                clear_enumerate_state=clear_enumerate_state,
            )
            if state_db is not None
            else {
                "sets_deleted": 0,
                "attempts_deleted": 0,
                "discoveries_deleted": 0,
                "meta_deleted": 0,
            }
        )
    finally:
        # Trash and staging are always discarded, so the removal is
        # irreversible even if an error interrupts the run.
        if trash_root.exists():
            shutil.rmtree(trash_root, ignore_errors=True)
        if staging_root.exists():
            shutil.rmtree(staging_root, ignore_errors=True)

    return {
        "ok": True,
        "tx_id": tx_id,
        "requested_count": count,
        "selected_set_count": len(selected_set_ids),
        "selected_set_min": min(selected_set_ids),
        "selected_set_max": max(selected_set_ids),
        "set_revision_count": len(set_revision_ids),
        "archive_revision_count": len(archive_revision_ids),
        "archive_bytes_removed": int(details["archive_bytes"]),
        "archive_files": archive_delete,
        "state": state_summary,
        "tables": table_summaries,
    }
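

# Sketch of programmatic use (paths are illustrative):
#
#   summary = remove_maps_v1(
#       Path("/srv/osu-mirror"),
#       schema_version="v1",
#       count=100,
#       state_db=Path("/srv/osu-mirror/state.sqlite3"),
#       clear_enumerate_state=False,
#       workers=4,
#   )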


def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--repo-root", default=".")
    parser.add_argument("--schema-version", default="v1")
    parser.add_argument("--count", type=int, default=1000)
    parser.add_argument("--state-db", default=None)
    parser.add_argument("--clear-enumerate-state", action="store_true")
    parser.add_argument("--workers", type=int, default=DEFAULT_WORKERS)
    parser.add_argument("--summary-path", default=None)
    parser.add_argument("--json", action="store_true")
    return parser.parse_args(argv)


def main(argv: list[str] | None = None) -> int:
    args = parse_args(argv)
    repo_root = Path(args.repo_root).resolve()
    state_db = Path(args.state_db) if args.state_db else None
    if state_db is not None and not state_db.is_absolute():
        state_db = repo_root / state_db

    summary = remove_maps_v1(
        repo_root,
        schema_version=args.schema_version,
        count=args.count,
        state_db=state_db,
        clear_enumerate_state=args.clear_enumerate_state,
        workers=max(1, args.workers),
    )
    text = json.dumps(summary, indent=2, sort_keys=True)
    if args.summary_path:
        path = Path(args.summary_path)
        if not path.is_absolute():
            path = repo_root / path
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(text + "\n", encoding="utf-8")
    if args.json:
        print(text)
    else:
        print(
            "removed "
            f"{summary['selected_set_count']} set(s), "
            f"{summary['archive_revision_count']} archive revision(s), "
            f"{summary['archive_files']['removed']} local archive file(s)"
        )
    return 0


if __name__ == "__main__":
    raise SystemExit(main())