#!/usr/bin/env python3
"""
verify.py - standalone chain + body verifier for append.page
Common usage (one URL, full check):
python verify.py https://append.page/p/<slug>
Other modes:
python verify.py path/to/page.jsonl # chain only, offline
    curl -sS https://append.page/p/<slug>/raw | \\
        python verify.py /dev/stdin              # chain only
# After unzipping https://append.page/p/<slug>/archive.zip:
python verify.py chain.jsonl --with-bodies bodies.jsonl # chain + bodies, offline
Exit codes:
0 chain (and optionally bodies) intact
1 verification failed (details on stderr)
2 usage error / network failure
What it checks:
1. Each entry's `hash` matches SHA-256(JCS-canonical(entry_minus_hash)).
2. Each entry's `prev_hash` matches the previous entry's `hash`.
3. seq increments by 1 from 0; page slug is constant across the chain.
4. The first entry's `prev_hash` matches the genesis seed
SHA-256("genesis|<slug>|<page_created_at>"). Pass --genesis-at <ISO>
to enforce this; without it the genesis check is skipped with a note.
 5. URL mode + bodies mode: each non-erased entry's body+salt satisfies
    SHA-256(salt || body) == entry.body_commitment. Erased entries skip
    this check (the body is gone). The API still returns salt for
    erased entries, so anyone who privately archived a body before
    erasure can re-verify it offline via --with-bodies with a file
    they assemble themselves. An illustrative entry shape is shown below.
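Illustrative entry shape (field names are the ones this script reads;
values are placeholders, and real entries may carry extra fields, all of
which are covered by the hash):
    {"id": "<entry-id>", "page": "<slug>", "seq": 0,
     "prev_hash": "sha256:<64 hex chars>",
     "body_commitment": "sha256:<64 hex chars>",
     "hash": "sha256:<64 hex chars>"}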
Dependencies: stdlib (json, hashlib, sys, argparse, urllib) plus the
optional `jcs` PyPI package for RFC 8785 canonicalization. Install with:
    pip install jcs
Without `jcs` the script falls back to json.dumps with sort_keys and
compact separators, which is byte-equivalent for our entry shape (string
and integer values only).
"""
import argparse
import hashlib
import json
import sys
import urllib.error
import urllib.request
from typing import Optional
try:
import jcs as _jcs
def canonicalize(obj: dict) -> bytes:
return _jcs.canonicalize(obj)
except ImportError:
# Fallback: byte-equivalent for the v0 entry shape (string + int values only).
# If you start using floats, install jcs.
def canonicalize(obj: dict) -> bytes:
return json.dumps(
obj, sort_keys=True, separators=(",", ":"), ensure_ascii=False
).encode("utf-8")
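# A concrete instance of the byte-equivalence claim (illustrative value):
# both jcs.canonicalize and the fallback serialize {"b": 1, "a": "x"} to
# exactly b'{"a":"x","b":1}': keys sorted, separators compact, no escaping
# differences for plain strings and ints. Floats are where RFC 8785 and
# json.dumps diverge, hence the caveat above.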
def sha256_hex(data: bytes) -> str:
return "sha256:" + hashlib.sha256(data).hexdigest()
def genesis_seed(slug: str, page_created_at_iso: str) -> str:
return sha256_hex(f"genesis|{slug}|{page_created_at_iso}".encode("utf-8"))
def verify_chain(
entries: list[dict], bodies_by_id: Optional[dict[str, dict]] = None
) -> tuple[bool, str]:
if not entries:
return True, "empty chain (0 entries)"
page_slug = entries[0].get("page")
expected_prev_hash: Optional[str] = None # set after we see entry[0]
for i, entry in enumerate(entries):
# 1. recompute the entry's own hash
recorded_hash = entry.get("hash")
if not recorded_hash:
return False, f"entry {i} (id={entry.get('id')}) has no `hash` field"
body = {k: v for k, v in entry.items() if k != "hash"}
recomputed = sha256_hex(canonicalize(body))
if recomputed != recorded_hash:
return (
False,
f"entry {i} (id={entry.get('id')}): recorded hash {recorded_hash} "
f"!= recomputed {recomputed}",
)
# 2. check prev_hash against the previous entry's hash
prev_hash = entry.get("prev_hash")
        if i == 0:
            # The genesis seed can't be verified here: it depends on the
            # page's creation timestamp, which isn't part of the chain.
            # main() enforces it when --genesis-at is supplied.
            pass
else:
if prev_hash != expected_prev_hash:
return (
False,
f"entry {i} (id={entry.get('id')}): prev_hash {prev_hash} "
f"!= entry[{i - 1}].hash {expected_prev_hash}",
)
# 3. seq must increase by 1
seq = entry.get("seq")
if seq != i:
return False, f"entry {i}: seq={seq}, expected {i}"
# 4. page must match across the chain
if entry.get("page") != page_slug:
return False, (
f"entry {i}: page={entry.get('page')!r}, expected {page_slug!r} "
f"(mixed pages in one chain?)"
)
# 5. optional body verification
if bodies_by_id:
entry_id = entry.get("id")
if entry_id in bodies_by_id:
reveal = bodies_by_id[entry_id]
body_text = reveal.get("body")
salt_hex = reveal.get("salt")
if body_text is None or salt_hex is None:
return False, (
f"entry {i} (id={entry_id}): bodies file is missing "
f"`body` or `salt`"
)
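                # Commitment preimage: raw salt bytes immediately followed
                # by the UTF-8 body, i.e. SHA-256(salt || body) per the
                # module docstring.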
salt_bytes = bytes.fromhex(salt_hex)
computed = sha256_hex(salt_bytes + body_text.encode("utf-8"))
if computed != entry.get("body_commitment"):
return False, (
f"entry {i} (id={entry_id}): revealed body_commitment "
f"{computed} != on-chain {entry.get('body_commitment')}"
)
expected_prev_hash = recorded_hash
return True, f"verified {len(entries)} entries, chain intact, head: {expected_prev_hash}"
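# verify_chain can also be used as a library function, e.g. (hypothetical
# file name):
#   entries = [json.loads(l) for l in open("chain.jsonl") if l.strip()]
#   ok, msg = verify_chain(entries)  # bodies_by_id is optional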
def _http_get_jsonl(url: str) -> list[dict]:
"""Download a JSONL endpoint and parse one object per non-empty line."""
with urllib.request.urlopen(url, timeout=30) as resp:
text = resp.read().decode("utf-8")
return [json.loads(line) for line in text.splitlines() if line.strip()]
def _http_post_json(url: str, payload: dict) -> dict:
req = urllib.request.Request(
url,
data=json.dumps(payload).encode("utf-8"),
headers={"content-type": "application/json"},
method="POST",
)
with urllib.request.urlopen(req, timeout=30) as resp:
return json.loads(resp.read().decode("utf-8"))
def fetch_url(base_url: str) -> tuple[list[dict], dict[str, dict]]:
"""
Fetch the chain (one HTTP call) plus all bodies+salts (one bulk POST per
200-id batch). Returns (entries, bodies_by_id) where bodies_by_id maps
entry id -> {body, salt}. Erased entries are omitted from bodies_by_id
(no body to verify against); the chain check still applies to them.
The API does still return salt for erased entries — useful if you
have a private archive of the body and want to recheck offline.
"""
base_url = base_url.rstrip("/")
raw_url = base_url + "/raw"
bodies_url = base_url + "/bodies"
entries = _http_get_jsonl(raw_url)
if not entries:
return entries, {}
ids = [e["id"] for e in entries]
bodies_by_id: dict[str, dict] = {}
for batch_start in range(0, len(ids), 200):
batch = ids[batch_start : batch_start + 200]
resp = _http_post_json(bodies_url, {"ids": batch})
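        # The parsing below assumes the bulk endpoint responds with
        # {"entries": [{"entry": {...}, "body": ..., "salt": ...,
        # "erased": ...}]}; rows missing any of these fields are skipped
        # rather than treated as errors.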
for item in resp.get("entries", []):
entry = item.get("entry", {})
entry_id = entry.get("id")
if not entry_id:
continue
if item.get("erased"):
continue
body = item.get("body")
salt = item.get("salt")
if body is None or salt is None:
continue
bodies_by_id[entry_id] = {"body": body, "salt": salt}
return entries, bodies_by_id
def _load_bodies_file(path: str) -> dict[str, dict]:
"""
Read a --with-bodies file. Auto-detect JSONL (one {entry_id, body, salt}
object per line; what /p/<slug>/bodies.jsonl serves and what's bundled
inside /p/<slug>/archive.zip) vs the legacy JSON shape (a single object
mapping entry_id -> {body, salt}).
Detection: read the first non-blank line; if it parses to an object
that has an "entry_id" key, treat the whole file as JSONL. Otherwise
fall back to json.load on the whole file.
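    Example JSONL row (illustrative values; fields as read below):
        {"entry_id": "abc123", "body": "hello", "salt": "00ff", "erased": false}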
"""
with open(path, "r", encoding="utf-8") as f:
text = f.read()
first_nonblank: Optional[str] = None
for line in text.splitlines():
if line.strip():
first_nonblank = line
break
if first_nonblank is not None:
try:
probe = json.loads(first_nonblank)
except json.JSONDecodeError:
probe = None
if isinstance(probe, dict) and "entry_id" in probe:
# JSONL — one object per line.
out: dict[str, dict] = {}
for line in text.splitlines():
if not line.strip():
continue
row = json.loads(line)
entry_id = row.get("entry_id")
if not entry_id:
continue
                if row.get("erased"):
                    # Erased entries have body=None; skip them. The chain
                    # check still covers them, but there is no body left
                    # to verify.
                    continue
body = row.get("body")
salt = row.get("salt")
if body is None or salt is None:
continue
out[entry_id] = {"body": body, "salt": salt}
return out
# Fallback: legacy JSON object {entry_id: {body, salt}}.
return json.loads(text)
def main() -> int:
ap = argparse.ArgumentParser(
description="Verify an append.page chain (and optionally bodies)."
)
ap.add_argument(
"source",
help=(
"either an append.page URL like https://append.page/p/<slug> "
"(fetches chain + bodies + salts and verifies everything), or a "
"path to a .jsonl file (chain-only unless --with-bodies given)"
),
)
ap.add_argument(
"--with-bodies",
metavar="PATH",
help=(
"Path to bodies file. Two formats are accepted, auto-detected:\n"
" (1) JSONL — one object per line, "
'{"entry_id": "...", "body": "...", "salt": "..."} '
"(this is the format /p/<slug>/bodies.jsonl serves and the "
"format bundled inside /p/<slug>/archive.zip).\n"
" (2) JSON — a single object mapping entry_id -> "
"{body, salt} (the legacy hand-assembled shape).\n"
"Ignored when SOURCE is a URL — bodies are fetched live in "
"that case."
),
)
ap.add_argument(
"--genesis-at",
metavar="ISO8601",
help=(
"page creation timestamp; if supplied, also verifies "
'entry[0].prev_hash == SHA-256("genesis|<slug>|<ts>")'
),
)
args = ap.parse_args()
    is_url = args.source.startswith(("http://", "https://"))
bodies_by_id: Optional[dict[str, dict]] = None
if is_url:
try:
entries, bodies_by_id = fetch_url(args.source)
except urllib.error.URLError as e:
print(f"FAIL: could not fetch {args.source}: {e}", file=sys.stderr)
return 2
else:
with open(args.source, "r", encoding="utf-8") as f:
entries = [json.loads(line) for line in f if line.strip()]
if args.with_bodies:
bodies_by_id = _load_bodies_file(args.with_bodies)
ok, msg = verify_chain(entries, bodies_by_id)
if not ok:
print(f"FAIL: {msg}", file=sys.stderr)
return 1
    if args.genesis_at and entries:
        expected = genesis_seed(entries[0]["page"], args.genesis_at)
        if entries[0]["prev_hash"] != expected:
            print(
                f"FAIL: entry[0].prev_hash {entries[0]['prev_hash']} != "
                f"genesis seed {expected} (slug={entries[0]['page']}, "
                f"genesis_at={args.genesis_at})",
                file=sys.stderr,
            )
            return 1
    elif entries:
        # The module docstring promises a note when the genesis check is
        # skipped; keep that promise instead of staying silent.
        print(
            "note: --genesis-at not given; genesis seed not checked",
            file=sys.stderr,
        )
body_note = ""
if bodies_by_id is not None:
        verified = len(bodies_by_id)
skipped = len(entries) - verified
body_note = (
f"; verified {verified} bodies (commitment matches),"
f" skipped {skipped} (erased or no body)"
)
print(f"OK: {msg}{body_note}")
return 0
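# Typical run (paths and counts illustrative):
#   $ python verify.py chain.jsonl --with-bodies bodies.jsonl
#   OK: verified 12 entries, chain intact, head: sha256:<hex>; verified
#   11 bodies (commitment matches), skipped 1 (erased or no body)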
if __name__ == "__main__":
sys.exit(main())