| """ |
| UIPress Step 1b: Compute evaluation metrics on generated HTML. |
| Uses Design2Code's official metrics + our custom token analysis. |
| |
| Usage: |
| # Evaluate a single model's predictions |
| python scripts/step1_eval_metrics.py --pred_dir results/qwen2_5_vl_7b/html_predictions |
| |
| # Evaluate all models in results/ |
| python scripts/step1_eval_metrics.py --all |
| |
| # Quick CLIP-only evaluation (much faster, no browser needed) |
| python scripts/step1_eval_metrics.py --all --clip_only |
| """ |
|
|
| |
import os
# Configure the Hugging Face endpoint and cache location BEFORE any HF
# library is imported; values already present in the environment win over
# these defaults.
os.environ["HF_ENDPOINT"] = os.environ.get("HF_ENDPOINT", "https://hf-mirror.com")
os.environ["HF_HOME"] = os.environ.get("HF_HOME", "/root/rivermind-data/huggingface")
|
|
| import argparse |
| import json |
| import sys |
| import subprocess |
| import tempfile |
| from pathlib import Path |
|
|
| import torch |
| from PIL import Image |
| from tqdm import tqdm |
|
|
# Repository root; this script is expected to live one directory below it
# (e.g. scripts/step1_eval_metrics.py).
PROJECT_ROOT = Path(__file__).parent.parent
|
|
|
|
| |
| |
| |
|
|
class CLIPScorer:
    """Measure visual similarity between two images via CLIP embeddings.

    Loads an OpenAI-pretrained ViT-B/32 model through ``open_clip`` and
    exposes a single cosine-similarity scoring method.
    """

    def __init__(self, device="cuda"):
        # Imported lazily so the module can be used without open_clip
        # installed as long as no scorer is constructed.
        import open_clip

        self.device = device
        model, _, preprocess = open_clip.create_model_and_transforms(
            "ViT-B-32", pretrained="openai"
        )
        self.model = model.to(device).eval()
        self.preprocess = preprocess

    @torch.no_grad()
    def score_images(self, img1: Image.Image, img2: Image.Image) -> float:
        """Return the cosine similarity of the two images' CLIP embeddings."""
        features = []
        for img in (img1, img2):
            batch = self.preprocess(img).unsqueeze(0).to(self.device)
            embedding = self.model.encode_image(batch)
            features.append(embedding / embedding.norm(dim=-1, keepdim=True))
        return float((features[0] * features[1]).sum())
|
|
|
|
| |
| |
| |
|
|
def render_html_to_image(html_path: str, output_path: str, width: int = 1280, height: int = 1024) -> bool:
    """Screenshot an HTML file, preferring Playwright over Selenium.

    Returns True as soon as either backend succeeds, False when both fail.
    """
    return (
        _render_playwright(html_path, output_path, width, height)
        or _render_selenium(html_path, output_path, width, height)
    )
|
|
|
|
def _render_playwright(html_path: str, output_path: str, width: int, height: int) -> bool:
    """Screenshot via headless Chromium driven by Playwright.

    This matches the Design2Code rendering pipeline. Returns False silently
    when Playwright is not installed, and with a warning on render errors.
    """
    try:
        from playwright.sync_api import sync_playwright

        target_url = f"file://{os.path.abspath(html_path)}"
        with sync_playwright() as pw:
            browser = pw.chromium.launch(headless=True)
            page = browser.new_page(viewport={"width": width, "height": height})
            # networkidle waits for pending resource loads before capture.
            page.goto(target_url, wait_until="networkidle")
            page.screenshot(path=output_path, full_page=False)
            browser.close()
        return True
    except ImportError:
        return False
    except Exception as exc:
        print(f" [WARN] Playwright render failed for {html_path}: {exc}")
        return False
|
|
|
|
def _render_selenium(html_path: str, output_path: str, width: int, height: int) -> bool:
    """Fallback renderer: headless Chrome driven by Selenium.

    Returns False (with a warning) when Selenium/Chrome is unavailable or
    the screenshot fails.
    """
    try:
        from selenium import webdriver
        from selenium.webdriver.chrome.options import Options

        chrome_opts = Options()
        for flag in (
            "--headless=new",
            "--no-sandbox",
            "--disable-dev-shm-usage",
            f"--window-size={width},{height}",
            "--disable-gpu",
            "--force-device-scale-factor=1",
        ):
            chrome_opts.add_argument(flag)

        driver = webdriver.Chrome(options=chrome_opts)
        driver.set_window_size(width, height)
        driver.get(f"file://{os.path.abspath(html_path)}")

        # Give the page a moment to finish layout before capturing.
        import time
        time.sleep(1)

        driver.save_screenshot(output_path)
        driver.quit()
        return True

    except Exception as exc:
        print(f" [WARN] Selenium render failed for {html_path}: {exc}")
        return False
|
|
|
|
| |
| |
| |
|
|
def evaluate_predictions(pred_dir: str, ref_dir: str, clip_only: bool = False):
    """
    Evaluate HTML predictions against reference screenshots via CLIP score.

    Args:
        pred_dir: Directory containing predicted .html files.
        ref_dir: Directory containing reference screenshots (and .html
            ground truth).
        clip_only: Accepted for interface compatibility; this function
            computes only CLIP scores either way (the Design2Code metrics
            are handled separately by ``run_design2code_official_eval``).

    Returns:
        A summary dict with aggregate and per-sample CLIP scores, or None
        when no predictions are found.
    """
    pred_path = Path(pred_dir)
    ref_path = Path(ref_dir)

    pred_files = sorted(pred_path.glob("*.html"))
    if not pred_files:
        print(f"No HTML files found in {pred_dir}")
        return None

    print(f"Found {len(pred_files)} predictions in {pred_dir}")

    device = "cuda" if torch.cuda.is_available() else "cpu"
    clip_scorer = CLIPScorer(device=device)

    results = {}
    clip_scores = []

    with tempfile.TemporaryDirectory() as tmp_dir:
        for pred_file in tqdm(pred_files, desc="Evaluating"):
            sample_id = pred_file.stem

            # Locate the reference screenshot, trying common image
            # extensions once each (the original code checked .png twice).
            ref_img_path = next(
                (
                    candidate
                    for candidate in (
                        ref_path / f"{sample_id}{ext}"
                        for ext in (".png", ".jpg", ".jpeg")
                    )
                    if candidate.exists()
                ),
                None,
            )
            if ref_img_path is None:
                # No reference available: skip rather than score against nothing.
                continue

            ref_img = Image.open(ref_img_path).convert("RGB")

            # Render the prediction; a failed render scores 0.0 so it still
            # counts against the model's average.
            pred_img_path = os.path.join(tmp_dir, f"{sample_id}.png")
            success = render_html_to_image(str(pred_file), pred_img_path)

            if success and os.path.exists(pred_img_path):
                pred_img = Image.open(pred_img_path).convert("RGB")
                clip_score = clip_scorer.score_images(ref_img, pred_img)
            else:
                clip_score = 0.0

            clip_scores.append(clip_score)
            results[sample_id] = {"clip_score": round(clip_score, 4)}

    avg_clip = sum(clip_scores) / len(clip_scores) if clip_scores else 0
    summary = {
        "n_evaluated": len(clip_scores),
        "avg_clip_score": round(avg_clip, 4),
        "min_clip_score": round(min(clip_scores), 4) if clip_scores else 0,
        "max_clip_score": round(max(clip_scores), 4) if clip_scores else 0,
        "per_sample": results,
    }

    print(f"\n{'='*50}")
    print(f"CLIP Score: {avg_clip:.4f} (n={len(clip_scores)})")
    print(f" Min: {summary['min_clip_score']:.4f}")
    print(f" Max: {summary['max_clip_score']:.4f}")
    print(f"{'='*50}")

    return summary
|
|
|
|
def run_design2code_official_eval(pred_dir: str):
    """
    Point the user at Design2Code's official evaluation script.

    The official script provides Block-Match, Text, Position and Color
    metrics. This helper only prints instructions (it never executes the
    script itself) and always returns None.
    """
    eval_script = PROJECT_ROOT / "repos" / "Design2Code" / "metrics" / "multi_processing_eval.py"

    if not eval_script.exists():
        for line in (
            "[INFO] Design2Code official eval not available.",
            " Clone the repo first: git clone https://github.com/NoviScl/Design2Code repos/Design2Code",
            " Only CLIP score was computed.",
        ):
            print(line)
        return None

    print("\nRunning Design2Code official evaluation...")
    print(f" Script: {eval_script}")
    print(f" Predictions: {pred_dir}")

    print("\n To run manually:")
    print(f" 1. Edit {eval_script} line ~54")
    print(f" 2. Set prediction directory to: {pred_dir}")
    print(f" 3. Run: python {eval_script}")
    return None
|
|
|
|
| |
| |
| |
|
|
def _find_existing_ref_dir():
    """Return the first known reference-screenshot directory that exists, else None."""
    candidates = [
        PROJECT_ROOT / "data" / "testset_final",
        PROJECT_ROOT / "repos" / "Design2Code" / "testset_final",
    ]
    for c in candidates:
        if c.exists():
            return str(c)
    return None


def _extract_ref_dir_from_hf():
    """Extract reference screenshots from a local HF dataset dump, if present.

    Returns the extraction directory as a string, or None when the dataset
    has not been downloaded. Extraction is skipped when the target directory
    is already non-empty.
    """
    hf_path = PROJECT_ROOT / "data" / "design2code"
    if not hf_path.exists():
        return None

    ref_tmp = PROJECT_ROOT / "data" / "ref_screenshots"
    ref_tmp.mkdir(parents=True, exist_ok=True)
    if not any(ref_tmp.iterdir()):
        print("Extracting reference screenshots from HF dataset...")
        from datasets import load_from_disk
        ds = load_from_disk(str(hf_path))
        if hasattr(ds, 'keys'):
            # DatasetDict rather than a single Dataset: take its first split.
            ds = ds[list(ds.keys())[0]]
        for i, item in enumerate(ds):
            # Column name varies between dataset versions.
            img = item.get("image") or item.get("screenshot")
            if img is not None:
                if not isinstance(img, Image.Image):
                    img = Image.open(img).convert("RGB")
                img.save(str(ref_tmp / f"{i}.png"))
        print(f" Extracted {len(list(ref_tmp.glob('*.png')))} screenshots")
    return str(ref_tmp)


def _evaluate_all_models(args):
    """Evaluate every model under results/ and save a comparison JSON."""
    results_dir = PROJECT_ROOT / "results"
    # Guard against FileNotFoundError: iterdir() raises when results/ is missing.
    if not results_dir.exists():
        print(f"No results directory found at {results_dir}")
        return

    all_results = {}
    for model_dir in sorted(results_dir.iterdir()):
        pred_html = model_dir / "html_predictions"
        if pred_html.exists():
            print(f"\n{'='*60}")
            print(f"Evaluating: {model_dir.name}")
            print(f"{'='*60}")
            summary = evaluate_predictions(
                str(pred_html), args.ref_dir, args.clip_only
            )
            if summary:
                all_results[model_dir.name] = summary

    if all_results:
        # Leaderboard sorted by average CLIP score, best first.
        print(f"\n{'='*70}")
        print("FINAL COMPARISON")
        print(f"{'='*70}")
        print(f"{'Model':<30} {'CLIP Score':>12} {'N Samples':>12}")
        print("-" * 55)
        for name, s in sorted(all_results.items(), key=lambda x: -x[1]["avg_clip_score"]):
            print(f"{name:<30} {s['avg_clip_score']:>12.4f} {s['n_evaluated']:>12}")

        out = results_dir / "step1_metrics_comparison.json"
        with open(out, "w") as f:
            json.dump(all_results, f, indent=2)
        print(f"\nSaved to: {out}")


def main():
    """CLI entry point: parse args, resolve reference data, run evaluation."""
    parser = argparse.ArgumentParser(description="UIPress: Evaluate generated HTML")
    parser.add_argument("--pred_dir", type=str, help="Directory with predicted .html files")
    parser.add_argument("--ref_dir", type=str, default=None,
                        help="Directory with reference .png screenshots")
    parser.add_argument("--all", action="store_true",
                        help="Evaluate all models in results/")
    parser.add_argument("--clip_only", action="store_true",
                        help="Only compute CLIP score (skip Design2Code metrics)")
    parser.add_argument("--output", type=str, default=None,
                        help="Output JSON path")
    args = parser.parse_args()

    # Resolve the reference directory in order of preference: explicit flag,
    # known local checkouts, then extraction from the downloaded HF dataset.
    if args.ref_dir is None:
        args.ref_dir = _find_existing_ref_dir()
    if args.ref_dir is None:
        args.ref_dir = _extract_ref_dir_from_hf()
    if args.ref_dir is None:
        print("Cannot find reference data. Specify --ref_dir or run download_data.py")
        sys.exit(1)

    if args.all:
        _evaluate_all_models(args)
    elif args.pred_dir:
        summary = evaluate_predictions(args.pred_dir, args.ref_dir, args.clip_only)
        if summary and args.output:
            with open(args.output, "w") as f:
                json.dump(summary, f, indent=2)

        if not args.clip_only:
            run_design2code_official_eval(args.pred_dir)
    else:
        parser.print_help()
|
|
|
|
# Standard script entry point: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
|