| """ |
| Per-Element HTML Structure Analysis (Cross-Method Comparison) |
| ============================================================== |
| Analyzes generated HTML across all methods without requiring ground truth. |
| Uses the best-performing method (qwen3_1k) as reference baseline. |
| |
| Metrics per method: |
| - DOM depth, node count, element type distribution |
| - CSS property count & diversity |
| - Text content F1 vs reference method |
| - Element recall/precision vs reference method |
| - Output token efficiency (quality per token) |
| |
| Usage: |
| python scripts/step_element_analysis.py |
| """ |
|
|
| import json |
| import re |
| import sys |
| from collections import Counter, defaultdict |
| from pathlib import Path |
|
|
| PROJECT_ROOT = Path(__file__).parent.parent |
| sys.path.insert(0, str(PROJECT_ROOT)) |
|
|
| from bs4 import BeautifulSoup |
|
|
|
|
# Semantic groupings of HTML tag names used for per-category element counting.
# Each key is a category label; the value is the set of tags counted toward
# that category by extract_elements() and scored by element_f1().
TAG_GROUPS = {
    "buttons": {"button"},
    "inputs": {"input", "textarea", "select"},
    "images": {"img", "svg", "picture"},
    "links": {"a"},
    "headings": {"h1", "h2", "h3", "h4", "h5", "h6"},
    "lists": {"ul", "ol", "li"},
    "tables": {"table", "tr", "td", "th"},
    "forms": {"form"},
    "nav": {"nav", "header", "footer", "aside"},
    "containers": {"div", "section", "article", "main"},
    "text_inline": {"p", "span", "label", "strong", "em", "b", "i"},
}
|
|
|
|
def extract_elements(html_str):
    """Count the tags in *html_str* per TAG_GROUPS category.

    Returns a dict mapping each category name to its tag count, plus a
    "total_tags" entry with the overall element count. An unparseable
    document yields an empty dict.
    """
    try:
        soup = BeautifulSoup(html_str, "html.parser")
    except Exception:
        return {}
    tallies = {
        group: sum(len(soup.find_all(tag)) for tag in tag_set)
        for group, tag_set in TAG_GROUPS.items()
    }
    tallies["total_tags"] = len(soup.find_all(True))
    return tallies
|
|
|
|
def extract_css_props(html_str):
    """Count CSS property occurrences in inline styles and <style> blocks.

    Scans style attributes (both double- and single-quoted; the previous
    version silently skipped single-quoted ones) and the body of every
    <style> element.

    Args:
        html_str: raw HTML source.

    Returns:
        {property_name_lowercased: occurrence_count}
    """
    props = Counter()
    # Inline style attributes: group 2 holds a double-quoted value,
    # group 3 a single-quoted one — exactly one of them is non-None.
    for match in re.finditer(r'style\s*=\s*("([^"]*)"|\'([^\']*)\')',
                             html_str, re.IGNORECASE):
        body = match.group(2) if match.group(2) is not None else match.group(3)
        for prop in body.split(";"):
            if ":" in prop:
                name = prop.split(":")[0].strip().lower()
                if name:
                    props[name] += 1
    # Embedded stylesheets: count every "name:" token inside <style>...</style>.
    for match in re.finditer(r'<style[^>]*>(.*?)</style>', html_str, re.DOTALL | re.IGNORECASE):
        for prop in re.findall(r'([\w-]+)\s*:', match.group(1)):
            props[prop.lower()] += 1
    return dict(props)
|
|
|
|
def extract_text(html_str):
    """Return the visible text content of *html_str*, whitespace-normalized.

    Non-content tags (script, style, meta, link) are stripped before text
    extraction. Any parsing failure yields the empty string.
    """
    try:
        soup = BeautifulSoup(html_str, "html.parser")
        for hidden in soup(["script", "style", "meta", "link"]):
            hidden.decompose()
        return soup.get_text(separator=" ", strip=True)
    except Exception:
        return ""
|
|
|
|
def dom_metrics(html_str):
    """Measure DOM size: maximum nesting depth and total element count.

    Performs an iterative DFS over element nodes only; traversal below
    depth 200 is cut off as a guard against pathological nesting (the
    depth itself is still recorded). Returns zeros on parse failure.
    """
    try:
        soup = BeautifulSoup(html_str, "html.parser")
    except Exception:
        return {"max_depth": 0, "total_nodes": 0}

    deepest = 0
    pending = [(soup, 0)]
    while pending:
        node, depth = pending.pop()
        deepest = max(deepest, depth)
        if depth <= 200:
            # Only real elements (with a tag name) count toward depth;
            # text nodes and comments are skipped.
            pending.extend(
                (child, depth + 1)
                for child in node.children
                if getattr(child, "name", None)
            )

    return {
        "max_depth": deepest,
        "total_nodes": len(soup.find_all(True)),
    }
|
|
|
|
def char_f1(pred, ref):
    """Bag-of-characters F1 between two strings, case-insensitive.

    Each string is treated as a multiset of lowercased characters; the
    score is the harmonic mean of precision and recall of the multiset
    overlap. Two empty strings score 1.0; exactly one empty scores 0.0.
    """
    if not pred:
        return 1.0 if not ref else 0.0
    if not ref:
        return 0.0
    pred_bag = Counter(pred.lower())
    ref_bag = Counter(ref.lower())
    overlap = sum((pred_bag & ref_bag).values())
    if not overlap:
        return 0.0
    precision = overlap / sum(pred_bag.values())
    recall = overlap / sum(ref_bag.values())
    return 2 * precision * recall / (precision + recall)
|
|
|
|
def element_f1(pred_counts, ref_counts, categories=None):
    """Per-category F1 between predicted and reference element counts.

    For each category the overlap is min(pred, ref); F1 is the harmonic
    mean of overlap/ref (recall) and overlap/pred (precision). Both
    counts zero scores 1.0 (agreement on absence); exactly one zero
    scores 0.0.

    Args:
        pred_counts: {category: count} for the predicted HTML.
        ref_counts: {category: count} for the reference HTML.
        categories: iterable of category names to score. Defaults to
            TAG_GROUPS, matching the original behavior.

    Returns:
        {category: f1} for every requested category.
    """
    if categories is None:
        categories = TAG_GROUPS
    results = {}
    for cat in categories:
        rn = ref_counts.get(cat, 0)
        pn = pred_counts.get(cat, 0)
        if rn == 0 and pn == 0:
            results[cat] = 1.0
        elif rn == 0 or pn == 0:
            results[cat] = 0.0
        else:
            matched = min(pn, rn)
            recall = matched / rn
            precision = matched / pn
            results[cat] = 2 * recall * precision / (recall + precision)
    return results
|
|
|
|
def analyze_all(benchmark_dir, ref_method="qwen3_1k"):
    """Score every method's generated HTML against a reference method.

    Walks *benchmark_dir* for subdirectories containing an
    "html_predictions" folder, takes *ref_method* as the baseline
    (falling back to the alphabetically first method if absent), and
    computes per-method averages of text F1, element F1, DOM depth/size,
    and CSS property usage over the samples shared with the reference.

    Args:
        benchmark_dir: directory whose subdirectories hold per-method results.
        ref_method: name of the method directory used as the baseline.

    Returns:
        {method_name: metrics_dict}; methods sharing no samples with the
        reference are omitted.

    Raises:
        ValueError: if no method directory with predictions exists.
    """
    bench = Path(benchmark_dir)
    methods = sorted(d.name for d in bench.iterdir()
                     if d.is_dir() and (d / "html_predictions").exists())

    # Previously this fell through to methods[0] and died with an opaque
    # IndexError when the benchmark dir had no usable method folders.
    if not methods:
        raise ValueError(
            f"No method directories with html_predictions found under {bench}")

    if ref_method not in methods:
        print(f"Reference method {ref_method} not found, using first: {methods[0]}")
        ref_method = methods[0]

    # Load every reference HTML keyed by sample id (file stem).
    ref_dir = bench / ref_method / "html_predictions"
    ref_htmls = {}
    for f in sorted(ref_dir.glob("*.html")):
        ref_htmls[f.stem] = f.read_text(encoding="utf-8", errors="ignore")

    print(f"Reference: {ref_method} ({len(ref_htmls)} samples)")

    # Precompute reference-side features once; they are reused for every method.
    ref_elements = {sid: extract_elements(h) for sid, h in ref_htmls.items()}
    ref_texts = {sid: extract_text(h) for sid, h in ref_htmls.items()}
    ref_css = {sid: extract_css_props(h) for sid, h in ref_htmls.items()}

    all_results = {}

    for method in methods:
        # Only score samples that also exist in the reference set.
        html_dir = bench / method / "html_predictions"
        pred_htmls = {}
        for f in sorted(html_dir.glob("*.html")):
            if f.stem in ref_htmls:
                pred_htmls[f.stem] = f.read_text(encoding="utf-8", errors="ignore")

        if not pred_htmls:
            continue

        text_f1s = []
        dom_depths = []
        dom_nodes = []
        css_counts = []
        css_unique = []
        elem_f1s = defaultdict(list)
        total_element_f1s = []

        for sid, pred_html in pred_htmls.items():
            pred_elem = extract_elements(pred_html)
            ref_elem = ref_elements.get(sid, {})
            pred_text = extract_text(pred_html)
            ref_text = ref_texts.get(sid, "")
            pred_css = extract_css_props(pred_html)
            dm = dom_metrics(pred_html)

            text_f1s.append(char_f1(pred_text, ref_text))
            dom_depths.append(dm["max_depth"])
            dom_nodes.append(dm["total_nodes"])
            css_counts.append(sum(pred_css.values()))
            css_unique.append(len(pred_css))

            # Per-category F1 plus its macro-average across categories.
            ef1 = element_f1(pred_elem, ref_elem)
            for cat, val in ef1.items():
                elem_f1s[cat].append(val)
            total_element_f1s.append(sum(ef1.values()) / len(ef1))

        # n >= 1 here (empty pred_htmls was skipped above), so the
        # divisions below are safe.
        n = len(pred_htmls)
        per_cat = {}
        for cat in TAG_GROUPS:
            vals = elem_f1s[cat]
            per_cat[cat] = round(sum(vals) / len(vals), 4) if vals else 0

        result = {
            "n_samples": n,
            "avg_text_f1": round(sum(text_f1s) / n, 4),
            "avg_element_f1": round(sum(total_element_f1s) / n, 4),
            "avg_dom_depth": round(sum(dom_depths) / n, 1),
            "avg_dom_nodes": round(sum(dom_nodes) / n, 1),
            "avg_css_properties": round(sum(css_counts) / n, 1),
            "avg_css_unique_props": round(sum(css_unique) / n, 1),
            "per_category_f1": per_cat,
        }
        all_results[method] = result

    return all_results
|
|
|
|
def main():
    """CLI entry point: run the cross-method analysis, save JSON, print tables."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--benchmark_dir", default=str(PROJECT_ROOT / "results" / "benchmark"))
    parser.add_argument("--ref_method", default="qwen3_1k")
    parser.add_argument("--output", default=str(PROJECT_ROOT / "results" / "element_analysis.json"))
    args = parser.parse_args()

    results = analyze_all(args.benchmark_dir, args.ref_method)

    Path(args.output).parent.mkdir(parents=True, exist_ok=True)
    with open(args.output, "w") as f:
        json.dump(results, f, indent=2)

    # Summary table, sorted best text-F1 first.
    print(f"\n{'='*90}")
    print(f"{'Method':<25} {'TextF1':>8} {'ElemF1':>8} {'Depth':>6} {'Nodes':>7} {'CSS':>6} {'N':>4}")
    # Was "-" * 70, which didn't line up with the 90-char rules around it.
    print("-" * 90)
    for k in sorted(results, key=lambda x: results[x]["avg_text_f1"], reverse=True):
        v = results[k]
        print(f"{k:<25} {v['avg_text_f1']:>8.4f} {v['avg_element_f1']:>8.4f} "
              f"{v['avg_dom_depth']:>6.1f} {v['avg_dom_nodes']:>7.0f} "
              f"{v['avg_css_properties']:>6.0f} {v['n_samples']:>4}")
    print(f"{'='*90}")

    # Per-category breakdown. The original hardcoded "qwen3_1k" here even
    # when --ref_method overrode it; report the requested reference instead.
    print(f"\nPer-category Element F1 (vs {args.ref_method}):")
    cats = list(TAG_GROUPS.keys())
    header = f"{'Method':<25}" + "".join(f"{c[:6]:>8}" for c in cats)
    print(header)
    print("-" * (25 + 8 * len(cats)))
    for method in sorted(results):
        row = f"{method:<25}"
        for cat in cats:
            val = results[method]["per_category_f1"].get(cat, 0)
            row += f"{val:>8.3f}"
        print(row)

    print(f"\nSaved to: {args.output}")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|