#!/usr/bin/env python
"""List raw archive CAS paths committed by a compact ingest batch."""
from __future__ import annotations
import argparse
from pathlib import Path
import pyarrow.compute as pc
import pyarrow.dataset as ds
def list_batch_archive_paths(
    repo_root: Path,
    batch_id: str,
    *,
    schema_version: str = "v1",
) -> list[str]:
    """Return sorted CAS archive paths recorded for *batch_id*.

    Scans the ``archive_revisions`` parquet dataset under *repo_root* and
    collects every ``archive_path`` whose ``ingest_batch_id`` is either the
    batch id itself or one of its ``-chunk-`` sub-batches. Returns an empty
    list when no parquet files exist yet.
    """
    dataset_dir = (
        repo_root
        / "data"
        / schema_version
        / "all_revisions"
        / "archive_revisions"
    )
    parquet_files = sorted(dataset_dir.rglob("*.parquet"))
    if not parquet_files:
        return []
    revisions = ds.dataset([str(f) for f in parquet_files], format="parquet")
    # Only the two columns we need — keeps the materialized table small.
    table = revisions.to_table(columns=["ingest_batch_id", "archive_path"])
    ids = table["ingest_batch_id"]
    # Match the batch itself or any of its chunked sub-batches.
    selector = pc.or_(
        pc.equal(ids, batch_id),
        pc.starts_with(ids, f"{batch_id}-chunk-"),
    )
    matching = table.filter(selector)
    unique_paths = set()
    for raw in matching["archive_path"].to_pylist():
        # Skip nulls/empties and anything outside the CAS archive prefix.
        if raw and str(raw).startswith("archives/"):
            unique_paths.add(str(raw))
    return sorted(unique_paths)
def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--repo-root", default=".")
parser.add_argument("--schema-version", default="v1")
parser.add_argument("--batch-id", required=True)
return parser.parse_args(argv)
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: print each matching archive path, one per line."""
    options = parse_args(argv)
    paths = list_batch_archive_paths(
        Path(options.repo_root),
        options.batch_id,
        schema_version=options.schema_version,
    )
    for archive_path in paths:
        print(archive_path)
    return 0
# Script entry point: exit the process with main()'s return code.
if __name__ == "__main__":
    raise SystemExit(main())