# UIPress / scripts / step_case_study.py
# (uploaded by DesonDai via upload-large-folder tool, commit bac741f verified)
"""
Case Study: Generate side-by-side visual comparisons.
Selects representative examples and creates HTML comparison pages.
Usage:
python scripts/step_case_study.py
"""
import json
import os
import sys
from pathlib import Path
import numpy as np
from PIL import Image
PROJECT_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(PROJECT_ROOT))
# Methods compared in the case study, ordered by visual-token budget.
# Each key must match a result directory name under results/benchmark/.
METHODS_TO_COMPARE = [
    "deepseek_tiny",
    "deepseek_base",
    "deepseek_large",
    "qwen3_256",
    "qwen3_1k",
    "qwen3_full",
]
# Human-readable labels (with visual-token counts) shown in the HTML table.
METHOD_LABELS = {
    "deepseek_tiny": "DeepSeek-OCR tiny (73 tok)",
    "deepseek_base": "DeepSeek-OCR base (273 tok)",
    "deepseek_large": "DeepSeek-OCR large (421 tok)",
    "qwen3_256": "Qwen3-VL 256 (722 tok)",
    "qwen3_1k": "Qwen3-VL 1k (3043 tok)",
    "qwen3_full": "Qwen3-VL full (6746 tok)",
}
def select_representative_samples(benchmark_dir, n=8, methods=None):
    """Select up to *n* diverse, representative samples by CLIP-score spread.

    Args:
        benchmark_dir: root directory containing one subdirectory per method,
            each with a ``clip_scores.json`` file.
        n: maximum number of samples to return.
        methods: method names (result-directory names) to read scores from;
            defaults to METHODS_TO_COMPARE.

    Returns:
        List of dicts with keys "id", "mean_clip", "std_clip", "max_clip",
        "min_clip", "range", and "scores" (per-method CLIP scores), ordered
        by selection priority: one high-quality case, one low-quality case,
        then the highest-disagreement and mid-quality cases.
    """
    if methods is None:
        methods = METHODS_TO_COMPARE
    clips_by_sample = {}
    for method in methods:
        clip_file = Path(benchmark_dir) / method / "clip_scores.json"
        if not clip_file.exists():
            continue
        with open(clip_file) as f:
            data = json.load(f)
        per_sample = data.get("per_sample", {})
        for sid, val in per_sample.items():
            # Entries are either a bare number or a dict holding "clip_score".
            # Skip entries with no score rather than falling back to the dict
            # itself (the previous fallback broke np.mean below).
            raw = val.get("clip_score") if isinstance(val, dict) else val
            if raw is None:
                continue
            clips_by_sample.setdefault(sid, {})[method] = float(raw)
    candidates = []
    for sid, scores in clips_by_sample.items():
        if len(scores) < 4:  # require coverage from most methods
            continue
        vals = list(scores.values())
        # Cast numpy scalars to plain floats so the selection dicts stay
        # JSON-serializable when dumped downstream.
        candidates.append({
            "id": sid,
            "mean_clip": float(np.mean(vals)),
            "std_clip": float(np.std(vals)),
            "max_clip": max(vals),
            "min_clip": min(vals),
            "range": max(vals) - min(vals),
            "scores": scores,
        })
    candidates.sort(key=lambda c: -c["range"])
    selected = []
    # 1) One case where every method does well.
    high_quality = [c for c in candidates if c["mean_clip"] > 0.85]
    if high_quality:
        selected.append(high_quality[0])
    # 2) One case where methods struggle.
    low_quality = [c for c in candidates if c["mean_clip"] < 0.65 and c not in selected]
    if low_quality:
        selected.append(low_quality[0])
    # 3) Up to three cases where methods disagree the most.
    high_variance = [c for c in candidates if c not in selected]
    high_variance.sort(key=lambda c: -c["range"])
    for c in high_variance[:3]:
        if c not in selected:
            selected.append(c)
    # 4) Fill with mid-quality, high-disagreement cases.
    mid_range = [c for c in candidates if 0.70 < c["mean_clip"] < 0.80 and c not in selected]
    mid_range.sort(key=lambda c: -c["range"])
    for c in mid_range[:3]:
        if c not in selected:
            selected.append(c)
    return selected[:n]
def render_html_to_png(html_path, output_path, width=1280, height=1024):
    """Screenshot an HTML file into a PNG using headless Chromium.

    Args:
        html_path: absolute path to the HTML file to render.
        output_path: where the PNG screenshot is written.
        width, height: viewport size in pixels (not full-page capture).

    Returns:
        True on success; False if playwright is unavailable or rendering
        fails for any reason (the error is printed, not raised).
    """
    try:
        # Imported lazily so the rest of the script works without playwright.
        from playwright.sync_api import sync_playwright

        with sync_playwright() as pw:
            chromium = pw.chromium.launch(
                headless=True, args=['--no-sandbox', '--disable-gpu']
            )
            tab = chromium.new_page(viewport={"width": width, "height": height})
            tab.goto(f"file://{html_path}", wait_until="networkidle", timeout=15000)
            tab.wait_for_timeout(500)  # let late layout / fonts settle
            tab.screenshot(path=str(output_path), full_page=False)
            chromium.close()
            return True
    except Exception as e:
        print(f" Render failed: {e}")
        return False
def generate_case_study_html(selected, benchmark_dir, ref_dir, output_dir):
    """Generate an HTML page with side-by-side comparisons.

    Args:
        selected: sample dicts as produced by select_representative_samples
            (keys used here: "id", "mean_clip", "range", "scores").
        benchmark_dir: root containing <method>/html_predictions/<id>.html.
        ref_dir: directory with reference screenshots named <id>.png.
        output_dir: destination for case_study.html, images/, and the
            JSON summary.

    Returns:
        Path to the generated case_study.html.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    images_dir = output_dir / "images"
    images_dir.mkdir(exist_ok=True)
    # Prepare thumbnails: downscale the reference screenshot and render each
    # method's HTML prediction. Work already on disk is skipped, so reruns
    # are cheap.
    for sample in selected:
        sid = sample["id"]
        ref_src = Path(ref_dir) / f"{sid}.png"
        if ref_src.exists():
            ref_dst = images_dir / f"ref_{sid}.png"
            if not ref_dst.exists():
                img = Image.open(ref_src)
                img.thumbnail((640, 800))
                img.save(str(ref_dst))
        for method in METHODS_TO_COMPARE:
            html_path = Path(benchmark_dir) / method / "html_predictions" / f"{sid}.html"
            render_path = images_dir / f"{method}_{sid}.png"
            if html_path.exists() and not render_path.exists():
                print(f" Rendering {method}/{sid}...")
                ok = render_html_to_png(str(html_path.resolve()), str(render_path))
                if ok:
                    img = Image.open(render_path)
                    img.thumbnail((640, 800))
                    img.save(str(render_path))
    rows_html = []
    for i, sample in enumerate(selected):
        sid = sample["id"]
        cells = [f'<td><img src="images/ref_{sid}.png" alt="ref"><br><b>Original</b></td>']
        for method in METHODS_TO_COMPARE:
            label = METHOD_LABELS.get(method, method)
            clip = sample["scores"].get(method)
            # "is not None" so a legitimate score of 0.0 is not shown as N/A.
            clip_str = f"CLIP: {clip:.3f}" if clip is not None else "N/A"
            img_file = f"images/{method}_{sid}.png"
            cells.append(f'<td><img src="{img_file}" alt="{method}"><br><b>{label}</b><br>{clip_str}</td>')
        row = f"""
<tr class="case-header">
<td colspan="{len(METHODS_TO_COMPARE) + 1}">
<b>Case {i+1}</b> (Sample ID: {sid}) — Mean CLIP: {sample['mean_clip']:.3f}, Range: {sample['range']:.3f}
</td>
</tr>
<tr class="case-images">
{''.join(cells)}
</tr>
"""
        rows_html.append(row)
    html = f"""<!DOCTYPE html>
<html>
<head>
<title>UIPress Case Study</title>
<style>
body {{ font-family: 'Segoe UI', Arial, sans-serif; margin: 20px; background: #f5f5f5; }}
h1 {{ color: #333; }}
table {{ border-collapse: collapse; width: 100%; background: white; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
.case-header td {{ background: #2c3e50; color: white; padding: 10px 15px; font-size: 14px; }}
.case-images td {{ padding: 8px; text-align: center; vertical-align: top; border: 1px solid #ddd; font-size: 12px; }}
.case-images img {{ max-width: 200px; max-height: 300px; border: 1px solid #ccc; display: block; margin: 0 auto 5px; }}
b {{ display: block; margin-top: 3px; }}
</style>
</head>
<body>
<h1>UIPress: Visual Token Compression Case Study</h1>
<p>Side-by-side comparison of {len(selected)} representative examples across {len(METHODS_TO_COMPARE)} methods.</p>
<table>
{''.join(rows_html)}
</table>
</body>
</html>"""
    output_file = output_dir / "case_study.html"
    # Explicit UTF-8: the template contains non-ASCII (em dash), and the
    # locale default encoding is not guaranteed to handle it.
    output_file.write_text(html, encoding="utf-8")
    print(f"Case study saved to {output_file}")
    summary = {
        "n_cases": len(selected),
        "methods": METHODS_TO_COMPARE,
        "cases": [{
            "id": s["id"],
            # float() guards against numpy scalars, which json cannot dump.
            "mean_clip": round(float(s["mean_clip"]), 4),
            "clip_range": round(float(s["range"]), 4),
            "scores": {k: round(float(v), 4) for k, v in s["scores"].items()},
        } for s in selected],
    }
    with open(output_dir / "case_study_summary.json", "w", encoding="utf-8") as f:
        json.dump(summary, f, indent=2)
    return output_file
def main():
    """Entry point: pick representative samples, then build the case study."""
    results_root = PROJECT_ROOT / "results"
    benchmark_dir = results_root / "benchmark"
    output_dir = results_root / "case_study"
    ref_dir = PROJECT_ROOT / "data" / "ref_screenshots"

    print("Selecting representative samples...")
    selected = select_representative_samples(str(benchmark_dir), n=8)
    print(f"\nSelected {len(selected)} cases:")
    for s in selected:
        print(f" ID={s['id']}: mean_clip={s['mean_clip']:.3f}, range={s['range']:.3f}")

    print("\nGenerating case study...")
    output_file = generate_case_study_html(
        selected, str(benchmark_dir), str(ref_dir), str(output_dir)
    )
    print(f"\nDone! Open {output_file} in a browser to view.")


if __name__ == "__main__":
    main()