# Source: nix-cache-dataset / bin / dangling-references.py
# Commit 2334704 (brianmcgee, unverified):
#   fix: ensure system and ca fields are written as varchar in dangling
#   references file
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p "python3.withPackages (ps: [ps.pyarrow])"
"""
Dangling References Analysis
Usage: ./bin/dangling-references.py
"""
import pyarrow.parquet as pq
import pyarrow as pa
from pathlib import Path
import time
import sys
import multiprocessing as mp
NUM_WORKERS = 7  # parallel reader processes (a separate writer process is added on top)
BATCH_SIZE = 50_000  # rows per Arrow record batch when streaming the main parquet file
WRITE_BATCH_SIZE = 10_000 # Write to parquet every N results
def process_row_groups(args):
    """Stream through assigned row groups, send results to queue.

    Args:
        args: tuple of (main_file, row_group_indices, deletion_set,
            column_names, result_queue), packed so it can be mapped over a
            multiprocessing Pool.

    Returns:
        (processed, found): rows scanned and rows that had dangling refs.

    Side effects:
        Puts one dict per matching batch onto ``result_queue``; the dict maps
        column name -> list of values and carries an extra "dangling_refs"
        key listing, per row, the referenced hashes that are in the
        deletion set.
    """
    main_file, row_group_indices, deletion_set, column_names, result_queue = args
    processed = 0
    found = 0
    parquet_file = pq.ParquetFile(main_file)
    for batch in parquet_file.iter_batches(
        batch_size=BATCH_SIZE,
        row_groups=row_group_indices
    ):
        hashes = batch.column("hash").to_pylist()
        refs_list = batch.column("references").to_pylist()
        # Build indices of rows with dangling refs
        matching_indices = []
        dangling_refs_list = []
        for i, (row_hash, refs) in enumerate(zip(hashes, refs_list)):
            if row_hash in deletion_set:
                # Rows slated for deletion themselves are not "parents".
                continue
            if not refs:
                continue
            dangling_refs = [ref for ref in refs if ref in deletion_set]
            if dangling_refs:
                matching_indices.append(i)
                dangling_refs_list.append(dangling_refs)
        if matching_indices:
            # Extract matching rows as dict of columns
            row_data = {}
            for col in column_names:
                values = [batch.column(col)[i].as_py() for i in matching_indices]
                # Decode binary to string for system and ca_algo.
                # Explicit None check: a truthiness test would silently
                # collapse empty bytes (b"") to NULL instead of "".
                if col in ("system", "ca_algo"):
                    values = [v.decode("utf-8") if v is not None else None for v in values]
                row_data[col] = values
            row_data["dangling_refs"] = dangling_refs_list
            result_queue.put(row_data)
            found += len(matching_indices)
        processed += len(hashes)
    return processed, found
def writer_process(result_queue, output_file, output_schema, num_workers):
    """Receive results from queue and write to parquet incrementally.

    Runs in a dedicated process. Each queue item is a dict of
    column_name -> list-of-values; a ``None`` item is a sentinel meaning one
    worker has finished. The loop exits once ``num_workers`` sentinels have
    been received.

    Args:
        result_queue: queue fed by the worker processes.
        output_file: destination parquet path (string).
        output_schema: pyarrow schema for the output table.
        num_workers: number of sentinels to wait for.

    Returns:
        Total number of rows written.
    """
    writer = None
    total_written = 0
    buffer = None  # Dict of column_name -> list of values
    buffer_len = 0
    workers_done = 0
    while workers_done < num_workers:
        item = result_queue.get()
        if item is None:
            # Sentinel: one worker finished
            workers_done += 1
            continue
        # item is a dict of column_name -> list of values
        if buffer is None:
            buffer = {col: [] for col in item.keys()}
        for col, values in item.items():
            buffer[col].extend(values)
        buffer_len += len(item["hash"])
        # Write when buffer is large enough
        if buffer_len >= WRITE_BATCH_SIZE:
            table = pa.Table.from_pydict(buffer, schema=output_schema)
            if writer is None:
                # Writer is created lazily so an all-empty run never opens it.
                writer = pq.ParquetWriter(output_file, output_schema, compression="zstd")
            writer.write_table(table)
            total_written += buffer_len
            buffer = None
            buffer_len = 0
    # Write remaining buffer
    if buffer and buffer_len > 0:
        table = pa.Table.from_pydict(buffer, schema=output_schema)
        if writer is None:
            writer = pq.ParquetWriter(output_file, output_schema, compression="zstd")
        writer.write_table(table)
        total_written += buffer_len
    if writer:
        writer.close()
    else:
        # No results - write a valid zero-row file carrying the full schema.
        # (pa.table({}, schema=...) would raise: an empty dict's column
        # names cannot match a non-empty schema.)
        pq.write_table(output_schema.empty_table(), output_file)
    return total_written
def worker_wrapper(args):
    """Run process_row_groups, then signal completion to the writer.

    The trailing ``None`` sentinel on the result queue tells writer_process
    this worker has no more output; the writer exits after one sentinel per
    worker. The sentinel is sent in a ``finally`` so a crashing worker
    cannot leave the writer process blocked forever on the queue.
    """
    result_queue = args[-1]
    try:
        processed, found = process_row_groups(args)
    finally:
        result_queue.put(None)  # Sentinel — always sent, even on failure
    return processed, found
def main():
    """Find store paths whose references point at hashes slated for deletion.

    Loads two hash lists (the nixos-images set and the previously computed
    dangling-refs set), scans the main narinfo parquet dump in parallel, and
    streams every row that references a deleted hash — plus a new
    "dangling_refs" column — into a fresh parquet file. Prints a summary and
    a sample of affected pnames at the end.
    """
    # NOTE(review): dataset filenames are pinned to one snapshot date —
    # presumably regenerated per run; confirm before reuse.
    base = Path("datasets")
    main_file = base / "narinfos-2026-01-06T01-13Z.parquet"
    deletion_file = base / "narinfos-nixos-images-2026-01-06T01-13Z.parquet"
    dangling_refs_file = base / "narinfos-nixos-images-dangling-refs-2026-01-06T01-13Z.parquet"
    output_file = base / "narinfos-nixos-images-dangling-refs-with-parents-2026-01-06T01-13Z.parquet"
    # Step 1: Load deletion hashes from both files
    print("Loading deletion set...")
    start = time.time()
    deletion_table = pq.read_table(deletion_file, columns=["hash"])
    deletion_hashes = set(h.as_py() for h in deletion_table.column("hash"))
    # Drop the Arrow table promptly to keep peak memory down.
    del deletion_table
    print(f" Loaded {len(deletion_hashes):,} hashes from nixos-images")
    dangling_table = pq.read_table(dangling_refs_file, columns=["hash"])
    dangling_hashes = set(h.as_py() for h in dangling_table.column("hash"))
    del dangling_table
    print(f" Loaded {len(dangling_hashes):,} hashes from dangling-refs")
    # frozenset: the combined set is shared read-only with every worker.
    deletion_set = frozenset(deletion_hashes | dangling_hashes)
    del deletion_hashes, dangling_hashes
    print(f" Total deletion set: {len(deletion_set):,} hashes in {time.time()-start:.1f}s")
    # Step 2: Get row group info and schema
    parquet_file = pq.ParquetFile(main_file)
    num_row_groups = parquet_file.metadata.num_row_groups
    total_rows = parquet_file.metadata.num_rows
    input_schema = parquet_file.schema_arrow
    column_names = [field.name for field in input_schema]
    print(f" Total: {total_rows:,} rows in {num_row_groups} row groups")
    # Build output schema: input columns + dangling_refs
    # Convert system and ca_algo from binary to string for better compatibility
    output_fields = []
    for field in input_schema:
        if field.name in ("system", "ca_algo"):
            output_fields.append(pa.field(field.name, pa.string()))
        else:
            output_fields.append(field)
    output_fields.append(pa.field("dangling_refs", pa.list_(pa.binary())))
    output_schema = pa.schema(output_fields)
    # Distribute row groups across workers (round-robin so each worker gets
    # a similar mix of early/late groups).
    workers = min(NUM_WORKERS, num_row_groups)
    row_groups_per_worker = [[] for _ in range(workers)]
    for i in range(num_row_groups):
        row_groups_per_worker[i % workers].append(i)
    # Create queue for results (Manager queue: shareable across the Pool
    # workers and the separate writer process).
    manager = mp.Manager()
    result_queue = manager.Queue()
    # Start writer process
    writer_proc = mp.Process(
        target=writer_process,
        args=(result_queue, str(output_file), output_schema, workers)
    )
    writer_proc.start()
    # Prepare worker args
    worker_args = [
        (str(main_file), rgs, deletion_set, column_names, result_queue)
        for rgs in row_groups_per_worker
    ]
    # Step 3: Process in parallel
    print(f"Processing with {workers} workers (batch size {BATCH_SIZE:,})...")
    print(f" Streaming results to {output_file}")
    sys.stdout.flush()
    start = time.time()
    with mp.Pool(workers) as pool:
        worker_results = pool.map(worker_wrapper, worker_args)
    # Wait for writer to finish (it exits after receiving one sentinel per
    # worker, so all results are flushed by the time join() returns).
    writer_proc.join()
    # Aggregate stats
    total_processed = sum(r[0] for r in worker_results)
    total_found = sum(r[1] for r in worker_results)
    elapsed = time.time() - start
    print(f" Completed in {elapsed:.1f}s ({elapsed/60:.1f} min)")
    print(f" Rate: {total_processed/elapsed:,.0f} rows/sec")
    # Summary
    print("\n" + "="*50)
    print("SUMMARY")
    print("="*50)
    print(f"Entries with dangling refs: {total_found:,}")
    if total_found > 0:
        # Read back sample for display
        result_table = pq.read_table(output_file)
        # Sample up to 50 entries evenly across the output
        num_rows = len(result_table)
        sample_count = min(50, num_rows)
        step = max(1, num_rows // sample_count)
        sample_indices = [i * step for i in range(sample_count) if i * step < num_rows]
        print(f"\nSample pnames with dangling refs ({len(sample_indices)} of {num_rows:,}):")
        for i in sample_indices:
            pname = result_table.column("pname")[i].as_py()
            num_refs = len(result_table.column("dangling_refs")[i].as_py())
            print(f" {pname}: {num_refs} dangling ref(s)")
    else:
        print("No dangling references found")
# Entry-point guard: required here because multiprocessing workers re-import
# this module and must not re-run main().
if __name__ == "__main__":
    main()