#!/usr/bin/env bash
set -euo pipefail

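# update-maps-v1: end-to-end refresh of the osu! beatmap dataset. Hydrates
# compact metadata and fetcher state from the bucket, fetches new .osz
# archives, ingests them into parquet, compacts metadata, rebuilds the latest
# snapshot, and syncs the results back to the bucket.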
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"
prepare_dataset_root

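# Tunables; every value below can be overridden from the environment.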
BUCKET="${BUCKET:-hf://buckets/lekdan/osu-everything}"
BUCKET_ID="${BUCKET_ID:-lekdan/osu-everything}"
STATE_DB="${STATE_DB:-.fetcher/state.db}"
TOKEN_FILE="${TOKEN_FILE:-.fetcher/osu_token.json}"
INPUT_DIR="${INPUT_DIR:-incoming_osz}"
BATCH_ID="${BATCH_ID:-update-v1-$(date -u +%Y%m%dT%H%M%SZ)}"
CHUNK_SIZE="${CHUNK_SIZE:-1000}"
UPLOAD="${UPLOAD:-1}"
HYDRATE="${HYDRATE:-1}"
FETCH="${FETCH:-1}"
DISCOVER="${DISCOVER:-0}"
CLEAN_INPUT="${CLEAN_INPUT:-1}"
STATUSES="${STATUSES:-ranked,approved,loved,qualified}"
MODE="${MODE:-0}" # ruleset: 0 = osu!, 1 = taiko, 2 = catch, 3 = mania
RANKED_FRONT_PAGES="${RANKED_FRONT_PAGES:-5}"
POPULAR="${POPULAR:-1}"
POPULAR_MIN_FAVOURITES="${POPULAR_MIN_FAVOURITES:-100}"
POPULAR_MIN_PLAYCOUNT="${POPULAR_MIN_PLAYCOUNT:-1000}"
POPULAR_MAX_PAGES="${POPULAR_MAX_PAGES:-}"
DOWNLOAD_CONCURRENCY="${DOWNLOAD_CONCURRENCY:-16}"
DOWNLOAD_LIMIT="${DOWNLOAD_LIMIT:-}"
RETRY_FAILED="${RETRY_FAILED:-1}"
RETRY_MISSING="${RETRY_MISSING:-0}"
POST_DOWNLOAD_RETRY_FAILED="${POST_DOWNLOAD_RETRY_FAILED:-1}"
POST_DOWNLOAD_RETRY_DELAY_SECONDS="${POST_DOWNLOAD_RETRY_DELAY_SECONDS:-30}"
# Worker default: nproc when available, capped at 4.
DEFAULT_WORKERS=4
if command -v nproc >/dev/null 2>&1; then
  DEFAULT_WORKERS="$(nproc)"
  if [ "$DEFAULT_WORKERS" -gt 4 ]; then
    DEFAULT_WORKERS=4
  fi
fi
INGEST_WORKERS="${INGEST_WORKERS:-${WORKERS:-$DEFAULT_WORKERS}}"
INDEXER_WORKERS="${INDEXER_WORKERS:-$INGEST_WORKERS}"
NDJSON_PARSE_WORKERS="${NDJSON_PARSE_WORKERS:-$INGEST_WORKERS}"
NDJSON_PARSE_CHUNK_MB="${NDJSON_PARSE_CHUNK_MB:-64}"
PARQUET_WRITE_WORKERS="${PARQUET_WRITE_WORKERS:-$INGEST_WORKERS}"
COMPACT="${COMPACT:-1}"
COMPACT_TARGET_ROWS="${COMPACT_TARGET_ROWS:-1000000}"
COMPACT_BATCH_SIZE="${COMPACT_BATCH_SIZE:-65536}"
COMPACT_MIN_FILES="${COMPACT_MIN_FILES:-2}"
COMPACT_WORKERS="${COMPACT_WORKERS:-${WORKERS:-4}}"
COMPACT_CLEAN_STALE_SCRATCH="${COMPACT_CLEAN_STALE_SCRATCH:-1}"
LATEST_REBUILD_WORKERS="${LATEST_REBUILD_WORKERS:-${WORKERS:-8}}"
FETCHER_PROGRESS="${FETCHER_PROGRESS:-force}" # auto, force, or off
FETCHER_RPM="${FETCHER_RPM:-480}" # requests per minute, applied to each source
ENABLE_OSUAPI="${ENABLE_OSUAPI:-0}"
DISCOVER_SEARCH_RPM="${DISCOVER_SEARCH_RPM:-$FETCHER_RPM}"

mkdir -p logs .fetcher

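# List running pipeline processes; the [b]racketed first letters keep pgrep
# from matching its own command line.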
active_pipeline_processes() {
  pgrep -af '[o]su_fetcher|[o]su_indexer|[i]ngest_osz|[r]ebuild_latest_snapshot|[c]ompact_metadata_v1|[h]f( sync| buckets sync)' || true
}

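# Remove a leftover lock file, but only when no pipeline process is running;
# otherwise report the live processes and leave the lock alone.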
cleanup_stale_lock_file() {
  local lock_path="$1"
  local label="$2"
  if [ ! -e "$lock_path" ]; then
    return
  fi
  local active
  active="$(active_pipeline_processes)"
  if [ -n "$active" ]; then
    echo "leaving ${label} lock at ${lock_path}; active writer/fetcher process detected"
    echo "$active"
  else
    echo "removing stale ${label} lock at ${lock_path}"
    rm -f "$lock_path"
  fi
}

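# Count archive paths already committed under the current BATCH_ID.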
batch_archive_path_count() {
  "$PYTHON" "$PY_SOURCE_ROOT/python/list_batch_archives.py" --repo-root "$PY_DATASET_ROOT" --batch-id "$BATCH_ID" | wc -l | tr -d ' '
}

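# Abort before any fetch or ingest work if the batch id has already been used.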
refuse_reused_batch_id_for_ingest() {
  local count
  count="$(batch_archive_path_count)"
  if [ "$count" -gt 0 ]; then
    echo "BATCH_ID=${BATCH_ID} already has ${count} committed archive path(s)." >&2
    echo "Refusing to fetch or ingest with a reused batch id because it can overwrite existing part-${BATCH_ID}.parquet files." >&2
    echo "Choose a fresh BATCH_ID, for example: BATCH_ID=update-v1-\$(date -u +%Y%m%dT%H%M%SZ)" >&2
    exit 64
  fi
}

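# Clear stale locks left behind by a previous crashed or killed run.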
cleanup_stale_lock_file "${STATE_DB}.run.lock" "fetcher"
cleanup_stale_lock_file ".ingest.lock" "ingest"

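# Mirror all output (stdout and stderr) to a per-batch log file.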
LOG="logs/update-maps-v1-${BATCH_ID}.log"
exec > >(tee -a "$LOG") 2>&1

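# Record the effective configuration at the top of the log.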
| echo "started_at=$(date -u +%Y-%m-%dT%H:%M:%SZ)" |
| echo "batch_id=${BATCH_ID}" |
| echo "source_root=${SOURCE_ROOT}" |
| echo "dataset_root=${DATASET_ROOT}" |
| echo "bucket=${BUCKET}" |
| echo "state_db=${STATE_DB}" |
| echo "input_dir=${INPUT_DIR}" |
| echo "hydrate=${HYDRATE}" |
| echo "fetch=${FETCH}" |
| echo "discover=${DISCOVER}" |
| echo "upload=${UPLOAD}" |
| echo "popular=${POPULAR}" |
| echo "popular_min_favourites=${POPULAR_MIN_FAVOURITES}" |
| echo "popular_min_playcount=${POPULAR_MIN_PLAYCOUNT}" |
| echo "fetcher_progress=${FETCHER_PROGRESS}" |
| echo "fetcher_rpm=${FETCHER_RPM}" |
| echo "enable_osuapi=${ENABLE_OSUAPI}" |
| echo "discover_search_rpm=${DISCOVER_SEARCH_RPM}" |
| echo "retry_failed=${RETRY_FAILED}" |
| echo "retry_missing=${RETRY_MISSING}" |
| echo "post_download_retry_failed=${POST_DOWNLOAD_RETRY_FAILED}" |
| echo "post_download_retry_delay_seconds=${POST_DOWNLOAD_RETRY_DELAY_SECONDS}" |
| echo "compact=${COMPACT}" |
| echo "ingest_workers=${INGEST_WORKERS}" |
| echo "indexer_workers=${INDEXER_WORKERS}" |
| echo "ndjson_parse_workers=${NDJSON_PARSE_WORKERS}" |
| echo "ndjson_parse_chunk_mb=${NDJSON_PARSE_CHUNK_MB}" |
| echo "parquet_write_workers=${PARQUET_WRITE_WORKERS}" |
| echo "compact_target_rows=${COMPACT_TARGET_ROWS}" |
| echo "compact_batch_size=${COMPACT_BATCH_SIZE}" |
| echo "compact_min_files=${COMPACT_MIN_FILES}" |
| echo "compact_workers=${COMPACT_WORKERS}" |
| echo "latest_rebuild_workers=${LATEST_REBUILD_WORKERS}" |
|
|
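# Enable accelerated Hugging Face transfers and pass the NDJSON/parquet worker
# settings through to the Python ingest tooling.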
export HF_HUB_ENABLE_HF_TRANSFER=1
export HF_XET_HIGH_PERFORMANCE=1
export HF_XET_CLIENT_AC_INITIAL_UPLOAD_CONCURRENCY="${HF_XET_CLIENT_AC_INITIAL_UPLOAD_CONCURRENCY:-4}"
export HF_XET_CLIENT_AC_MAX_UPLOAD_CONCURRENCY="${HF_XET_CLIENT_AC_MAX_UPLOAD_CONCURRENCY:-8}"
export HF_XET_CLIENT_RETRY_MAX_DURATION="${HF_XET_CLIENT_RETRY_MAX_DURATION:-600}"
export OSU_NDJSON_PARSE_WORKERS="$NDJSON_PARSE_WORKERS"
export OSU_NDJSON_PARSE_CHUNK_MB="$NDJSON_PARSE_CHUNK_MB"
export OSU_NDJSON_PARSE_BACKEND="${OSU_NDJSON_PARSE_BACKEND:-process}"
export OSU_PARQUET_WRITE_WORKERS="$PARQUET_WRITE_WORKERS"

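# Arguments shared by every fetcher invocation: one rate limit applied to each
# download source individually.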
fetcher_args=(
  --state-db "$STATE_DB"
  --archives-dir "$INPUT_DIR"
  --osu-token-file "$TOKEN_FILE"
  --osu-rpm "$FETCHER_RPM"
  --nerinyan-rpm "$FETCHER_RPM"
  --catboy-rpm "$FETCHER_RPM"
  --sayobot-rpm "$FETCHER_RPM"
  --nekoha-rpm "$FETCHER_RPM"
  --beatconnect-rpm "$FETCHER_RPM"
  --osudirect-rpm "$FETCHER_RPM"
  --nzbasic-rpm "$FETCHER_RPM"
  --osudl-rpm "$FETCHER_RPM"
  --osuapi-rpm "$FETCHER_RPM"
)

| if [ "$ENABLE_OSUAPI" = "1" ]; then |
| fetcher_args+=(--enable-osuapi) |
| fi |
|
|
| case "$FETCHER_PROGRESS" in |
| auto) |
| fetcher_args+=(--force-progress) |
| ;; |
| 1|true|TRUE|on|ON|force|FORCE) |
| fetcher_args+=(--force-progress) |
| ;; |
| 0|false|FALSE|off|OFF|none|NONE) |
| fetcher_args+=(--no-progress) |
| ;; |
| *) |
| echo "invalid FETCHER_PROGRESS=${FETCHER_PROGRESS}; use auto, force, or off" >&2 |
| exit 1 |
| ;; |
| esac |
|
|
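# Extract a single "key: value" counter from `fetcher status` output,
# defaulting to 0 when the key is absent.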
fetcher_status_count() {
  local key="$1"
  "$FETCHER" "${fetcher_args[@]}" status \
    | awk -v key="${key}:" '$1 == key { print $2; found = 1; exit } END { if (!found) print 0 }'
}

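# Hydrate: pull compact metadata, schemas, and (when present) the fetcher
# state snapshot down from the bucket before doing anything else.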
| if [ "$HYDRATE" = "1" ]; then |
| echo "hydrating compact metadata from bucket" |
| "$HF" sync "${BUCKET}/data" data --delete |
| "$HF" sync "${BUCKET}/schemas" schemas --delete |
| if "$HF" buckets list "${BUCKET_ID}/state/fetcher" >/dev/null 2>&1; then |
| rm -rf .scratch/fetcher-state-download |
| mkdir -p .scratch/fetcher-state-download |
| "$HF" sync "${BUCKET}/state/fetcher" .scratch/fetcher-state-download --delete |
| if [ -f .scratch/fetcher-state-download/state.db ]; then |
| rm -f "$STATE_DB" "${STATE_DB}-shm" "${STATE_DB}-wal" |
| cp .scratch/fetcher-state-download/state.db "$STATE_DB" |
| fi |
| else |
| echo "no bucket fetcher state snapshot found; seeding from compact metadata" |
| fi |
| fi |
|
|
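# Validate the hydrated dataset, then seed the fetcher state database from
# compact metadata so known beatmapsets are not fetched again.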
| "$PYTHON" "$PY_SOURCE_ROOT/python/validate_compact_v1.py" --repo-root "$PY_DATASET_ROOT" --max-data-files 10000 |
| "$PYTHON" "$PY_SOURCE_ROOT/python/seed_fetcher_state.py" --repo-root "$PY_DATASET_ROOT" --state-db "$STATE_DB" |
|
|
| mkdir -p "$INPUT_DIR" |
|
|
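# Fetch phase: enumerate candidate beatmapsets, optionally discover more via
# mirror search, retry failures, and download archives into INPUT_DIR.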
| if [ "$FETCH" = "1" ]; then |
| refuse_reused_batch_id_for_ingest |
|
|
  enumerate_args=(--statuses "$STATUSES" --mode "$MODE")
  if [ "${ENUMERATE_FULL_RESCAN:-0}" = "1" ]; then
    enumerate_args+=(--full-rescan)
  fi
  if [ -n "${ENUMERATE_MAX_PAGES:-}" ]; then
    enumerate_args+=(--max-pages "$ENUMERATE_MAX_PAGES")
  fi

| "$FETCHER" "${fetcher_args[@]}" enumerate "${enumerate_args[@]}" |
|
|
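  # Stateless sweep over the newest ranked pages to pick up very recent sets.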
| if [ "$RANKED_FRONT_PAGES" -gt 0 ]; then |
| "$FETCHER" "${fetcher_args[@]}" enumerate \ |
| --statuses "$STATUSES" \ |
| --mode "$MODE" \ |
| --sort ranked-desc \ |
| --stateless \ |
| --max-pages "$RANKED_FRONT_PAGES" |
| fi |
|
|
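  # Stateless sweep over popular sets of any status, filtered by favourite and
  # play-count thresholds.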
| if [ "$POPULAR" = "1" ]; then |
| popular_args=( |
| --statuses any |
| --mode "$MODE" |
| --sort favourites-desc |
| --min-favourites "$POPULAR_MIN_FAVOURITES" |
| --min-playcount "$POPULAR_MIN_PLAYCOUNT" |
| --stateless |
| ) |
| if [ -n "$POPULAR_MAX_PAGES" ]; then |
| popular_args+=(--max-pages "$POPULAR_MAX_PAGES") |
| fi |
| "$FETCHER" "${fetcher_args[@]}" enumerate "${popular_args[@]}" |
| fi |
|
|
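  # Optional discovery via mirror search; a set must be reported by at least
  # DISCOVER_MIN_QUORUM mirrors before it is accepted.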
| if [ "$DISCOVER" = "1" ]; then |
| discover_args=( |
| --statuses "$STATUSES" |
| --mode "$MODE" |
| --min-quorum "${DISCOVER_MIN_QUORUM:-2}" |
| --nerinyan-search-rpm "$DISCOVER_SEARCH_RPM" |
| --osudirect-search-rpm "$DISCOVER_SEARCH_RPM" |
| --sayobot-search-rpm "$DISCOVER_SEARCH_RPM" |
| --nekoha-search-rpm "$DISCOVER_SEARCH_RPM" |
| ) |
| if [ -n "${DISCOVER_MAX_PAGES:-}" ]; then |
| discover_args+=(--max-pages "$DISCOVER_MAX_PAGES") |
| fi |
| "$FETCHER" "${fetcher_args[@]}" discover "${discover_args[@]}" |
| fi |
|
|
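  # Reset failed rows (and, optionally, missing ones) so the download pass
  # below retries them.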
| if [ "$RETRY_FAILED" = "1" ]; then |
| retry_args=() |
| if [ "$RETRY_MISSING" = "1" ]; then |
| retry_args+=(--include-missing) |
| fi |
| "$FETCHER" "${fetcher_args[@]}" retry "${retry_args[@]}" |
| fi |
|
|
| "$FETCHER" "${fetcher_args[@]}" status |
|
|
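  # Download pending archives with bounded concurrency.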
  download_args=(--concurrency "$DOWNLOAD_CONCURRENCY")
  if [ -n "$DOWNLOAD_LIMIT" ]; then
    download_args+=(--limit "$DOWNLOAD_LIMIT")
  fi
  "$FETCHER" "${fetcher_args[@]}" download "${download_args[@]}"

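  # One extra retry-and-download pass for rows that failed during the main
  # pass, after a short cool-down. Skipped when DOWNLOAD_LIMIT is set, since a
  # limited run intentionally leaves work pending.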
| if [ "$POST_DOWNLOAD_RETRY_FAILED" = "1" ]; then |
| if [ -n "$DOWNLOAD_LIMIT" ]; then |
| echo "post-download failed retry skipped because DOWNLOAD_LIMIT=${DOWNLOAD_LIMIT} is set" |
| else |
| failed_after_download="$(fetcher_status_count failed | tr -d '[:space:]')" |
| if [ "${failed_after_download:-0}" -gt 0 ]; then |
| echo "post-download failed retry: ${failed_after_download} failed row(s) will be reset and retried" |
| if [ "$POST_DOWNLOAD_RETRY_DELAY_SECONDS" -gt 0 ]; then |
| echo "post-download failed retry: sleeping ${POST_DOWNLOAD_RETRY_DELAY_SECONDS}s before retry" |
| sleep "$POST_DOWNLOAD_RETRY_DELAY_SECONDS" |
| fi |
| "$FETCHER" "${fetcher_args[@]}" retry |
| "$FETCHER" "${fetcher_args[@]}" status |
| "$FETCHER" "${fetcher_args[@]}" download "${download_args[@]}" |
| else |
| echo "post-download failed retry: no failed rows" |
| fi |
| fi |
| fi |
|
|
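  # Verify downloaded archives on disk and fix up state for any mismatches.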
| "$FETCHER" "${fetcher_args[@]}" verify --fix |
| "$FETCHER" "${fetcher_args[@]}" status |
| fi |
|
|
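# Everything below keys off how many .osz archives actually arrived.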
input_count="$(find "$INPUT_DIR" -type f -name '*.osz' | wc -l | tr -d ' ')"
echo "input_archive_count=${input_count}"

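# Ingest phase: index the downloaded archives into the dataset, then compact,
# rebuild the latest snapshot, and re-validate.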
| if [ "$input_count" -gt 0 ]; then |
| refuse_reused_batch_id_for_ingest |
|
|
| "$PYTHON" "$PY_SOURCE_ROOT/python/ingest_osz.py" "$INPUT_DIR" \ |
| --repo-root "$PY_DATASET_ROOT" \ |
| --rosu-indexer "$PY_INDEXER" \ |
| --ingest-batch-id "$BATCH_ID" \ |
| --chunk-size "$CHUNK_SIZE" \ |
| --indexer-workers "$INDEXER_WORKERS" \ |
| --skip-already-ingested \ |
| --physical-partitioning none \ |
| --quiet-indexer \ |
| --no-keep-awake |
|
|
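  # Compact freshly written metadata parquet files into larger ones.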
| if [ "$COMPACT" = "1" ]; then |
| if [ "$COMPACT_CLEAN_STALE_SCRATCH" = "1" ]; then |
| rm -rf .scratch/metadata-compaction |
| fi |
| "$PYTHON" "$PY_SOURCE_ROOT/python/compact_metadata_v1.py" \ |
| --repo-root "$PY_DATASET_ROOT" \ |
| --target-rows "$COMPACT_TARGET_ROWS" \ |
| --batch-size "$COMPACT_BATCH_SIZE" \ |
| --min-files "$COMPACT_MIN_FILES" \ |
| --workers "$COMPACT_WORKERS" |
| fi |
|
|
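  # Rebuild the latest-version snapshot and re-validate the whole dataset.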
| "$PYTHON" "$PY_SOURCE_ROOT/python/rebuild_latest_snapshot.py" --repo-root "$PY_DATASET_ROOT" --workers "$LATEST_REBUILD_WORKERS" |
| "$PYTHON" "$PY_SOURCE_ROOT/python/validate_compact_v1.py" --repo-root "$PY_DATASET_ROOT" --max-data-files 10000 |
|
|
  batch_archive_paths="$(batch_archive_path_count)"
  echo "batch_archive_paths=${batch_archive_paths}"
else
  # Nothing new arrived; still validate the hydrated dataset.
  batch_archive_paths=0
  echo "batch_archive_paths=${batch_archive_paths}"
  "$PYTHON" "$PY_SOURCE_ROOT/python/validate_compact_v1.py" --repo-root "$PY_DATASET_ROOT" --max-data-files 10000
fi

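# Checkpoint the fetcher state database so the plain state.db file is
# self-contained (no -wal/-shm sidecars) before it is snapshotted for upload.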
| "$PYTHON" "$PY_SOURCE_ROOT/python/seed_fetcher_state.py" --repo-root "$PY_DATASET_ROOT" --state-db "$STATE_DB" --checkpoint-only |
|
|
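# Upload phase: archives are append-only, data/ and schemas/ mirror the local
# tree, and the checkpointed fetcher state replaces the bucket snapshot.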
| if [ "$UPLOAD" = "1" ]; then |
| archive_file_count=0 |
| if [ -d archives ]; then |
| archive_file_count="$(find archives -type f -name '*.osz' | wc -l | tr -d ' ')" |
| fi |
|
|
| if [ "$archive_file_count" -gt 0 ]; then |
| echo "syncing ${archive_file_count} local archive object(s) to bucket archives/ (append-only, ignore existing)" |
| "$HF" buckets sync archives "${BUCKET}/archives" --ignore-existing --no-delete --quiet |
| else |
| echo "no local archive objects to upload" |
| fi |
|
|
| if [ "$input_count" -gt 0 ]; then |
| "$HF" sync data "${BUCKET}/data" --delete |
| "$HF" sync schemas "${BUCKET}/schemas" --delete |
| fi |
|
|
  rm -rf .scratch/fetcher-state-upload
  mkdir -p .scratch/fetcher-state-upload
  cp "$STATE_DB" .scratch/fetcher-state-upload/state.db
  "$HF" sync .scratch/fetcher-state-upload "${BUCKET}/state/fetcher" --delete

| "$HF" buckets info "$BUCKET_ID" |
| fi |
|
|
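# Remove consumed input archives; set -e guarantees the run only reaches this
# point after every earlier step succeeded.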
| if [ "$CLEAN_INPUT" = "1" ] && [ "$input_count" -gt 0 ]; then |
| rm -rf "$INPUT_DIR" |
| fi |
|
|
| echo "finished_at=$(date -u +%Y-%m-%dT%H:%M:%SZ)" |
|
|