# Source: UIPress — scripts/stats_html_token_lengths.py
# (uploaded by DesonDai with the upload-large-folder tool; commit 7c33ebc, verified)
"""
Statistics for teacher-forcing HTML token lengths (same encoding as train_compressor).
Uses Qwen3-VL tokenizer with encode(..., add_special_tokens=False) to match
_rebuild_sequence (excluding the trailing eos added in training).
Usage:
PYTHONPATH=. python scripts/stats_html_token_lengths.py
PYTHONPATH=. python scripts/stats_html_token_lengths.py --model_id Qwen/Qwen3-VL-8B-Instruct
"""
from __future__ import annotations
import argparse
import json
import os
import sys
from pathlib import Path
# Route Hugging Face downloads through the mirror unless the caller already
# configured an endpoint. setdefault only writes when the key is absent, so a
# pre-set HF_ENDPOINT / HF_HOME from the environment always wins.
os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")
os.environ.setdefault("HF_HOME", "/root/rivermind-data/huggingface")

# Repository root: this script lives in <root>/scripts/, so go up two levels.
PROJECT_ROOT = Path(__file__).resolve().parent.parent
def collect_lengths(html_paths: list[Path], tokenizer) -> list[int]:
    """Tokenize each HTML file and return the per-file token counts.

    Unreadable files are skipped silently. Encoding uses
    ``add_special_tokens=False`` to mirror the training-time sequence build
    (minus the trailing eos added there).
    """
    token_counts: list[int] = []
    for html_file in html_paths:
        try:
            source = html_file.read_text(encoding="utf-8", errors="ignore")
        except OSError:
            # Missing/unreadable file: skip rather than abort the whole scan.
            continue
        token_counts.append(len(tokenizer.encode(source, add_special_tokens=False)))
    return token_counts
def percentile(sorted_vals: list[int], q: float) -> float:
    """Linear interpolation, q in [0,1]."""
    # Degenerate inputs: empty -> 0.0, single value -> that value.
    if not sorted_vals:
        return 0.0
    count = len(sorted_vals)
    if count == 1:
        return float(sorted_vals[0])
    # Fractional rank between the two neighbouring samples.
    exact = q * (count - 1)
    below = int(exact)
    above = min(below + 1, count - 1)
    frac = exact - below
    return sorted_vals[above] * frac + sorted_vals[below] * (1 - frac)
def main() -> int:
    """Tokenize every *.html under the WebSight and Design2Code GT dirs and
    print per-split length statistics as JSON.

    Returns 0 on success. With --json_out, also writes the summary to disk.
    Fix vs. original: the merged-split p95/p99 locals were computed but never
    used (only the WebSight p99 feeds the recommendation); dead computation
    removed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_id", default="Qwen/Qwen3-VL-8B-Instruct")
    parser.add_argument("--websight_dir", type=Path, default=PROJECT_ROOT / "data" / "websight")
    parser.add_argument("--gt_html_dir", type=Path, default=PROJECT_ROOT / "data" / "gt_html")
    parser.add_argument("--json_out", type=Path, default=None, help="Optional path to write summary JSON.")
    args = parser.parse_args()

    # Imported lazily so argument parsing/--help works without transformers.
    from transformers import AutoTokenizer
    tok = AutoTokenizer.from_pretrained(args.model_id, trust_remote_code=True)

    # Missing directories yield empty splits rather than a crash.
    ws = sorted(args.websight_dir.glob("*.html")) if args.websight_dir.is_dir() else []
    gt = sorted(args.gt_html_dir.glob("*.html")) if args.gt_html_dir.is_dir() else []
    len_ws = collect_lengths(ws, tok)
    len_gt = collect_lengths(gt, tok)
    merged = len_ws + len_gt

    def summarize(name: str, vals: list[int]) -> dict:
        # Empty splits get a minimal record so the JSON shape stays stable.
        if not vals:
            return {"name": name, "n": 0}
        s = sorted(vals)
        return {
            "name": name,
            "n": len(vals),
            "min": s[0],
            "max": s[-1],
            "mean": round(sum(s) / len(s), 1),
            "p50": int(percentile(s, 0.50)),
            "p90": int(percentile(s, 0.90)),
            "p95": int(percentile(s, 0.95)),
            "p99": int(percentile(s, 0.99)),
        }

    out = {
        "model_id": args.model_id,
        "websight": summarize("websight", len_ws),
        "gt_html": summarize("gt_html (Design2Code eval GT)", len_gt),
        "merged": summarize("merged (websight + gt_html)", merged),
        "recommend_max_html_tokens": {},
    }
    if merged:
        # Only the WebSight tail drives the numeric suggestion; the merged
        # distribution is intentionally not used here (see "note" below).
        ws_p99 = int(percentile(sorted(len_ws), 0.99)) if len_ws else 0
        out["recommend_max_html_tokens"] = {
            "websight_only_suggest": min(max(ws_p99 + 128, 1024), 2048),
            "design2code_gt_reality": (
                "gt_html is very long (often 20k–80k+ tokens). Any fixed cap heavily truncates CE on eval-GT."
            ),
            "mixed_train_practical": {
                "8192": "Still truncates most Design2Code HTML but keeps speed reasonable",
                "16384": "Better tail for long pages if VRAM/time allow",
                "2048": "Fine for WebSight-dominated runs; largely drops Design2Code GT tail",
            },
            "note": "See websight vs gt_html blocks; do not use merged p95 alone when mixing splits.",
        }

    print(json.dumps(out, indent=2, ensure_ascii=False))
    if args.json_out:
        args.json_out.parent.mkdir(parents=True, exist_ok=True)
        args.json_out.write_text(json.dumps(out, indent=2, ensure_ascii=False), encoding="utf-8")
        print(f"Wrote {args.json_out}", file=sys.stderr)
    return 0
# Script entry point: propagate main()'s return code as the process exit status.
if __name__ == "__main__":
    raise SystemExit(main())