| |
| """Seed ``osu_fetcher`` state from compact v1 metadata. |
| |
| An update worker should not download the historical ``archives/`` tree just to |
| learn which sets already exist. This helper reads compact |
| ``latest_revisions`` Parquet, extracts numeric beatmapset IDs, and inserts |
| missing rows into the fetcher SQLite DB as already acquired. Existing fetcher |
| rows are left untouched so pending updates, failures, API watermarks, and |
| download audit data survive reruns. |
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import sqlite3 |
| import sys |
| import time |
| from pathlib import Path |
| from typing import Any |
|
|
| import pyarrow.dataset as ds |
|
|
|
|
# SQLite schema for the fetcher state DB. Idempotent: every statement is
# CREATE ... IF NOT EXISTS, so executescript() is safe against an existing DB.
#   meta               -- key/value store (schema_version, API watermarks, ...)
#   sets               -- one row per beatmapset: discovery info + download audit
#   mirror_attempts    -- per-download attempt log, FK -> sets (cascade delete)
#   mirror_discoveries -- what each mirror claims to host, PK (set_id, mirror)
SCHEMA_SQL = """
CREATE TABLE IF NOT EXISTS meta (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at INTEGER NOT NULL
);

CREATE TABLE IF NOT EXISTS sets (
    set_id INTEGER PRIMARY KEY,
    wanted INTEGER NOT NULL DEFAULT 0,
    ranked_status TEXT,
    artist TEXT,
    title TEXT,
    creator TEXT,
    api_last_updated TEXT,
    discovered_at INTEGER,

    download_status TEXT NOT NULL DEFAULT 'pending',
    attempts INTEGER NOT NULL DEFAULT 0,
    last_error TEXT,
    last_mirror TEXT,
    last_attempt_at INTEGER,
    bytes INTEGER,
    sha256 TEXT,
    flavor TEXT,
    saved_path TEXT,
    completed_at INTEGER
);

CREATE INDEX IF NOT EXISTS idx_sets_status ON sets(download_status);
CREATE INDEX IF NOT EXISTS idx_sets_wanted_st ON sets(wanted, download_status);

CREATE TABLE IF NOT EXISTS mirror_attempts (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    set_id INTEGER NOT NULL,
    mirror TEXT NOT NULL,
    attempted_at INTEGER NOT NULL,
    duration_ms INTEGER,
    http_status INTEGER,
    bytes INTEGER,
    outcome TEXT NOT NULL,
    error TEXT,
    FOREIGN KEY(set_id) REFERENCES sets(set_id) ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS idx_attempts_set ON mirror_attempts(set_id);
CREATE INDEX IF NOT EXISTS idx_attempts_mirror ON mirror_attempts(mirror, outcome);

CREATE TABLE IF NOT EXISTS mirror_discoveries (
    set_id INTEGER NOT NULL,
    mirror TEXT NOT NULL,
    claimed_status TEXT NOT NULL,
    has_osu_std INTEGER NOT NULL,
    mirror_last_updated TEXT,
    last_seen_at INTEGER NOT NULL,
    PRIMARY KEY(set_id, mirror)
);

CREATE INDEX IF NOT EXISTS idx_disc_set ON mirror_discoveries(set_id);
CREATE INDEX IF NOT EXISTS idx_disc_mirror ON mirror_discoveries(mirror);
CREATE INDEX IF NOT EXISTS idx_disc_status ON mirror_discoveries(claimed_status, has_osu_std);
"""
|
|
|
|
| def _parquet_files(path: Path) -> list[str]: |
| return [str(p) for p in sorted(path.rglob("*.parquet"))] if path.exists() else [] |
|
|
|
|
def numeric_latest_set_ids(repo_root: Path, schema_version: str = "v1") -> set[int]:
    """Collect every positive, purely-numeric ``set_key`` from the compact
    ``latest_revisions`` Parquet dataset under *repo_root*.

    NULL keys and non-decimal keys are skipped silently. Exits the process
    with an error message when no parquet files are present.
    """
    latest_dir = (
        repo_root / "data" / schema_version / "all_revisions" / "latest_revisions"
    )
    parquet_paths = _parquet_files(latest_dir)
    if not parquet_paths:
        raise SystemExit(f"missing latest_revisions parquet under {latest_dir}")

    # Only the set_key column is needed; avoid materializing the full table.
    keys = (
        ds.dataset(parquet_paths, format="parquet")
        .to_table(columns=["set_key"])
        .column("set_key")
        .to_pylist()
    )
    ids: set[int] = set()
    for raw in keys:
        if raw is None:
            continue
        as_text = str(raw)
        # isdecimal() rejects signs/whitespace, so int() cannot raise here.
        if as_text.isdecimal() and (set_id := int(as_text)) > 0:
            ids.add(set_id)
    return ids
|
|
|
|
def open_state_db(path: Path) -> sqlite3.Connection:
    """Open (creating parent dirs and tables as needed) the fetcher state DB.

    Configures WAL journaling, NORMAL synchronous mode, and foreign keys,
    applies ``SCHEMA_SQL``, then upserts the ``schema_version`` meta row.
    The caller owns the returned connection and must close it.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    conn = sqlite3.connect(path)
    for pragma in (
        "PRAGMA journal_mode=WAL",
        "PRAGMA synchronous=NORMAL",
        "PRAGMA foreign_keys=ON",
    ):
        conn.execute(pragma)
    conn.executescript(SCHEMA_SQL)
    stamp = int(time.time())
    # Refresh the timestamp even when the row already exists.
    conn.execute(
        """
        INSERT INTO meta(key, value, updated_at)
        VALUES('schema_version', '2', ?)
        ON CONFLICT(key) DO UPDATE SET
            value=excluded.value,
            updated_at=excluded.updated_at
        """,
        (stamp,),
    )
    return conn
|
|
|
|
def seed_state(
    repo_root: Path,
    state_db: Path,
    *,
    schema_version: str = "v1",
) -> dict[str, Any]:
    """Mark every numeric set from compact metadata as already acquired.

    Uses INSERT OR IGNORE so rows that already exist in the fetcher DB are
    left completely untouched (pending work, failures, and audit data
    survive reruns). Returns a summary dict suitable for key=value logging.

    NOTE(review): the ``compact://`` marker path omits the ``all_revisions``
    segment that the real on-disk layout uses — presumably intentional as an
    opaque provenance marker; confirm before relying on it as a path.
    """
    known_ids = numeric_latest_set_ids(repo_root, schema_version=schema_version)
    stamp = int(time.time())
    conn = open_state_db(state_db)
    try:
        changes_before = conn.total_changes
        # Lazy row stream; executemany consumes it in one statement loop.
        seed_rows = (
            (
                sid,
                stamp,
                f"compact://data/{schema_version}/latest_revisions/{sid}",
                stamp,
            )
            for sid in sorted(known_ids)
        )
        conn.executemany(
            """
            INSERT OR IGNORE INTO sets(
                set_id, wanted, discovered_at, download_status, saved_path,
                completed_at
            )
            VALUES(?, 0, ?, 'success', ?, ?)
            """,
            seed_rows,
        )
        # total_changes delta counts only rows actually inserted (ignored
        # conflicts do not bump the counter).
        inserted = conn.total_changes - changes_before
        conn.commit()
        conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
    finally:
        conn.close()

    return {
        "known_numeric_sets": len(known_ids),
        "inserted_success_rows": inserted,
        "state_db": str(state_db),
    }
|
|
|
|
def checkpoint_state_db(state_db: Path) -> None:
    """Initialize the DB if absent, then flush the WAL into the main file.

    Used by ``--checkpoint-only`` so workers can ship a single self-contained
    DB file. Schema creation and the meta upsert happen in open_state_db().
    """
    conn = open_state_db(state_db)
    try:
        # Commit the meta upsert from open_state_db(), then TRUNCATE-checkpoint
        # so the -wal sidecar is emptied.
        conn.commit()
        conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
    finally:
        conn.close()
|
|
|
|
| def parse_args(argv: list[str] | None = None) -> argparse.Namespace: |
| parser = argparse.ArgumentParser(description=__doc__) |
| parser.add_argument("--repo-root", default=".") |
| parser.add_argument("--schema-version", default="v1") |
| parser.add_argument("--state-db", default=".fetcher/state.db") |
| parser.add_argument( |
| "--checkpoint-only", |
| action="store_true", |
| help="initialize/checkpoint the DB without reading compact metadata", |
| ) |
| return parser.parse_args(argv) |
|
|
|
|
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: seed the fetcher DB, or just checkpoint it.

    Relative ``--state-db`` paths are anchored at the resolved repo root.
    Always returns 0; fatal conditions raise SystemExit upstream.
    """
    args = parse_args(argv)
    root = Path(args.repo_root).resolve()

    db_path = Path(args.state_db)
    if not db_path.is_absolute():
        db_path = root / db_path

    if args.checkpoint_only:
        checkpoint_state_db(db_path)
        print(f"state_db_checkpointed={db_path}")
        return 0

    summary = seed_state(root, db_path, schema_version=args.schema_version)
    print("\n".join(f"{key}={value}" for key, value in summary.items()))
    return 0
|
|
|
|
# Script entry point: propagate main()'s return code as the process exit status.
if __name__ == "__main__":
    raise SystemExit(main())
|
|