feat: add an analysis of affected store paths if we delete the proposed nixos images
Browse files
README.md
CHANGED
|
@@ -16,6 +16,8 @@ configs:
|
|
| 16 |
data_files: datasets/narinfos-2026-01-06T01-13Z.parquet
|
| 17 |
- config_name: narinfos in the inventory - nixos images only
|
| 18 |
data_files: datasets/narinfos-nixos-images-2026-01-06T01-13Z.parquet
|
|
|
|
|
|
|
| 19 |
- config_name: buildstepoutputs
|
| 20 |
data_files: datasets/buildstepoutputs-2025-12-05-17:38:30Z.csv.zst
|
| 21 |
- config_name: narinfos in inventory but not in buildstepoutputs
|
|
@@ -180,6 +182,13 @@ they are stored elsewhere.
|
|
| 180 |
|
| 181 |
It was compiled using `./duckdb/nixos-images.sql`;
|
| 182 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 183 |
### `datasets/buildstepoutputs-2025-12-05-17:38:30Z.csv.zst`
|
| 184 |
|
| 185 |
This is a dump of the `buildstepoutputs` table taken from [Hydra] on `2025-12-05T17:38:30Z`. It can be thought of as a
|
|
|
|
| 16 |
data_files: datasets/narinfos-2026-01-06T01-13Z.parquet
|
| 17 |
- config_name: narinfos in the inventory - nixos images only
|
| 18 |
data_files: datasets/narinfos-nixos-images-2026-01-06T01-13Z.parquet
|
| 19 |
+
- config_name: store paths affected by the removal of nixos images
|
| 20 |
+
data_files: datasets/narinfos-nixos-images-dangling-refs-2026-01-06T01-13Z.parquet
|
| 21 |
- config_name: buildstepoutputs
|
| 22 |
data_files: datasets/buildstepoutputs-2025-12-05-17:38:30Z.csv.zst
|
| 23 |
- config_name: narinfos in inventory but not in buildstepoutputs
|
|
|
|
| 182 |
|
| 183 |
It was compiled using `./duckdb/nixos-images.sql`;
|
| 184 |
|
| 185 |
+
### `datasets/narinfos-nixos-images-dangling-refs-2026-01-06T01-13Z.parquet`
|
| 186 |
+
|
| 187 |
+
This is a list of store paths which reference one or more of the store paths contained in `datasets/narinfos-nixos-images-2026-01-06T01-13Z.parquet`,
and which would therefore be affected by the removal of those images from https://cache.nixos.org.
|
| 189 |
+
|
| 190 |
+
It was compiled using `bin/dangling-references.py`.
|
| 191 |
+
|
| 192 |
### `datasets/buildstepoutputs-2025-12-05-17:38:30Z.csv.zst`
|
| 193 |
|
| 194 |
This is a dump of the `buildstepoutputs` table taken from [Hydra] on `2025-12-05T17:38:30Z`. It can be thought of as a
|
bin/dangling-references.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env nix-shell
|
| 2 |
+
#!nix-shell -i python3 -p "python3.withPackages (ps: [ps.pyarrow])"
|
| 3 |
+
"""
|
| 4 |
+
Dangling References Analysis
|
| 5 |
+
|
| 6 |
+
Usage: ./bin/dangling-references.py
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import pyarrow.parquet as pq
|
| 10 |
+
import pyarrow as pa
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
import time
|
| 13 |
+
import sys
|
| 14 |
+
import multiprocessing as mp
|
| 15 |
+
|
| 16 |
+
# Tunables for the parallel scan.
NUM_WORKERS = 10
BATCH_SIZE = 50_000
WRITE_BATCH_SIZE = 10_000  # Write to parquet every N results

# Schema of the output parquet file: one row per surviving store path that
# references at least one store path scheduled for deletion.
# - hash: the narinfo hash of the surviving store path
# - pname: its package name
# - dangling_refs: hashes of the to-be-deleted paths it references
OUTPUT_SCHEMA = pa.schema([
    ("hash", pa.binary()),
    ("pname", pa.string()),
    ("dangling_refs", pa.list_(pa.binary())),
])
|
| 25 |
+
|
| 26 |
+
def process_row_groups(args):
    """Stream through assigned row groups, send results to queue.

    ``args`` is a tuple of (parquet path, row-group indices, set of hashes
    being deleted, shared result queue).  Returns (rows_seen, rows_found).
    """
    main_file, row_group_indices, deletion_set, result_queue = args

    rows_seen = 0
    rows_found = 0

    reader = pq.ParquetFile(main_file)
    batches = reader.iter_batches(
        batch_size=BATCH_SIZE,
        columns=["hash", "pname", "references"],
        row_groups=row_group_indices,
    )

    for batch in batches:
        hashes = batch.column("hash").to_pylist()
        pnames = batch.column("pname").to_pylist()
        all_refs = batch.column("references").to_pylist()

        affected = []
        for entry_hash, pname, refs in zip(hashes, pnames, all_refs):
            # Skip entries that are themselves being deleted, and entries
            # with no references at all.
            if entry_hash in deletion_set or not refs:
                continue

            # Any reference pointing at a to-be-deleted entry becomes dangling.
            dangling_refs = [ref for ref in refs if ref in deletion_set]
            if dangling_refs:
                # This entry will be affected by one or more deletions;
                # record it for the output file.
                affected.append((entry_hash, pname, dangling_refs))

        if affected:
            result_queue.put(affected)
            rows_found += len(affected)

        rows_seen += len(hashes)

    return rows_seen, rows_found
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def writer_process(result_queue, output_file, num_workers):
    """Receive results from queue and write to parquet incrementally.

    Runs as a separate process.  Consumes lists of (hash, pname,
    dangling_refs) tuples from ``result_queue``; each worker signals
    completion by putting a single ``None`` sentinel, and the loop exits
    once all ``num_workers`` sentinels have arrived.

    Returns the total number of rows written (discarded when used as a
    ``multiprocessing.Process`` target, but useful for direct calls/tests).
    """
    writer = None
    total_written = 0
    buffer = []
    workers_done = 0

    def flush():
        """Write the buffered rows as one table, opening the writer lazily."""
        nonlocal writer, total_written, buffer
        table = pa.Table.from_pydict(
            {
                "hash": [r[0] for r in buffer],
                "pname": [r[1] for r in buffer],
                "dangling_refs": [r[2] for r in buffer],
            },
            schema=OUTPUT_SCHEMA,
        )
        if writer is None:
            writer = pq.ParquetWriter(output_file, OUTPUT_SCHEMA, compression="zstd")
        writer.write_table(table)
        total_written += len(buffer)
        buffer = []

    while workers_done < num_workers:
        item = result_queue.get()

        if item is None:
            # Sentinel: one worker finished
            workers_done += 1
            continue

        buffer.extend(item)

        # Write when buffer is large enough
        if len(buffer) >= WRITE_BATCH_SIZE:
            flush()

    # Write remaining buffer
    if buffer:
        flush()

    if writer is not None:
        writer.close()
    else:
        # No results at all: still emit a valid (empty) parquet file.
        # NOTE: pa.table({}, schema=OUTPUT_SCHEMA) raises because the empty
        # mapping lacks the schema's columns; Schema.empty_table() is the
        # correct way to build a zero-row table for a non-empty schema.
        pq.write_table(OUTPUT_SCHEMA.empty_table(), output_file)

    return total_written
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def worker_wrapper(args):
    """Run one worker over its row groups, then signal completion.

    Delegates to :func:`process_row_groups` and afterwards puts a single
    ``None`` sentinel on the shared result queue (``args[3]``) so the
    writer process knows this worker is done.

    Returns (rows_processed, rows_found).
    """
    # args = (main_file, row_group_indices, deletion_set, result_queue);
    # only the queue is needed here, so avoid unpacking unused names.
    result_queue = args[3]
    processed, found = process_row_groups(args)
    result_queue.put(None)  # Sentinel for the writer process
    return processed, found
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def main():
    """Compute which store paths would be left with dangling references.

    Loads the set of nixos-image narinfo hashes slated for deletion, then
    streams the full narinfo inventory across NUM_WORKERS processes to find
    surviving entries that reference a deleted hash.  Matches are streamed
    through a queue to a dedicated writer process, which appends them to a
    zstd-compressed parquet file.  Finishes by printing summary statistics
    and a small evenly-spaced sample of affected pnames.
    """
    base = Path("datasets")
    main_file = base / "narinfos-2026-01-06T01-13Z.parquet"
    deletion_file = base / "narinfos-nixos-images-2026-01-06T01-13Z.parquet"
    output_file = base / "narinfos-nixos-images-dangling-refs-2026-01-06T01-13Z.parquet"

    # Step 1: Load deletion hashes
    print("Loading deletion set...")
    start = time.time()
    deletion_table = pq.read_table(deletion_file, columns=["hash"])
    deletion_set = frozenset(h.as_py() for h in deletion_table.column("hash"))
    # Only the hash set is needed from here on; drop the table to free memory.
    del deletion_table
    print(f" Loaded {len(deletion_set):,} deletion hashes in {time.time()-start:.1f}s")

    # Step 2: Get row group info
    parquet_file = pq.ParquetFile(main_file)
    num_row_groups = parquet_file.metadata.num_row_groups
    total_rows = parquet_file.metadata.num_rows
    print(f" Total: {total_rows:,} rows in {num_row_groups} row groups")

    # Distribute row groups across workers (round-robin for even sizing)
    workers = min(NUM_WORKERS, num_row_groups)
    row_groups_per_worker = [[] for _ in range(workers)]
    for i in range(num_row_groups):
        row_groups_per_worker[i % workers].append(i)

    # Create queue for results (Manager queue: shareable with Pool workers)
    manager = mp.Manager()
    result_queue = manager.Queue()

    # Start writer process
    writer_proc = mp.Process(
        target=writer_process,
        args=(result_queue, str(output_file), workers)
    )
    writer_proc.start()

    # Prepare worker args
    worker_args = [
        (str(main_file), rgs, deletion_set, result_queue)
        for rgs in row_groups_per_worker
    ]

    # Step 3: Process in parallel
    print(f"Processing with {workers} workers (batch size {BATCH_SIZE:,})...")
    print(f" Streaming results to {output_file}")
    sys.stdout.flush()
    start = time.time()

    with mp.Pool(workers) as pool:
        worker_results = pool.map(worker_wrapper, worker_args)

    # Wait for writer to finish (it exits after receiving one sentinel
    # per worker, which each worker_wrapper sends when done)
    writer_proc.join()

    # Aggregate stats
    total_processed = sum(r[0] for r in worker_results)
    total_found = sum(r[1] for r in worker_results)

    elapsed = time.time() - start
    print(f" Completed in {elapsed:.1f}s ({elapsed/60:.1f} min)")
    print(f" Rate: {total_processed/elapsed:,.0f} rows/sec")

    # Summary
    print("\n" + "="*50)
    print("SUMMARY")
    print("="*50)
    print(f"Entries with dangling refs: {total_found:,}")

    if total_found > 0:
        # Read back sample for display
        result_table = pq.read_table(output_file)

        # Sample up to 50 entries evenly across the output
        num_rows = len(result_table)
        sample_count = min(50, num_rows)
        step = max(1, num_rows // sample_count)
        sample_indices = [i * step for i in range(sample_count) if i * step < num_rows]

        print(f"\nSample pnames with dangling refs ({len(sample_indices)} of {num_rows:,}):")
        for i in sample_indices:
            pname = result_table.column("pname")[i].as_py()
            num_refs = len(result_table.column("dangling_refs")[i].as_py())
            print(f" {pname}: {num_refs} dangling ref(s)")
    else:
        print("No dangling references found")


if __name__ == "__main__":
    main()
|
datasets/narinfos-nixos-images-dangling-refs-2026-01-06T01-13Z.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:633929171cf5032f11b163bd7b8e45756f7c500111d5016f3afb62ee1212a76d
|
| 3 |
+
size 4816554
|