# UIPress / scripts / step1_baseline.py
"""
UIPress Step 1: Baseline Evaluation
====================================
Test multiple VLMs on Design2Code to establish baselines.
Measures: visual token count, generation quality, latency.
IMPORTANT: This script runs in the uipress-qwen conda environment.
For Qwen3-VL: needs transformers>=4.57 (recommended default path)
For Qwen2.5-VL: works well on transformers==4.49.0 (compat path)
Usage:
conda activate uipress-qwen
# Quick test (5 samples)
python scripts/step1_baseline.py --max_samples 5
# Full eval (all samples)
python scripts/step1_baseline.py --max_samples -1
# With 4-bit quantization (saves VRAM)
python scripts/step1_baseline.py --model qwen3_vl_8b --max_samples 20 --use_4bit
# Qwen3-VL (requires latest transformers)
python scripts/step1_baseline.py --model qwen3_vl_2b --max_samples 5
"""
# ---- HuggingFace mirror (must be set before any other imports) ----
# These env vars are read at import time by huggingface_hub/transformers,
# so they are set before anything else is imported.  Existing values win.
import os
os.environ["HF_ENDPOINT"] = os.environ.get("HF_ENDPOINT", "https://hf-mirror.com")
os.environ["HF_HOME"] = os.environ.get("HF_HOME", "/root/rivermind-data/huggingface")

import argparse
import json
import sys
import time
from pathlib import Path

import torch
from PIL import Image
from tqdm import tqdm

# Project root (scripts/ -> repo root), prepended to sys.path so that
# project-local modules are importable when this script runs standalone.
PROJECT_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(PROJECT_ROOT))
# ============================================================
# Model Loaders
# ============================================================
class Qwen25VLModel:
    """Wrapper for Qwen2.5-VL models (image -> HTML generation).

    Loads the checkpoint with ``device_map="auto"`` (optionally 4-bit NF4
    quantized) and exposes :meth:`generate`, which also reports visual/input/
    output token counts and wall-clock latency.
    """

    def __init__(self, model_id: str, use_4bit: bool = False):
        """Load model and processor.

        Args:
            model_id: HuggingFace model id, e.g. "Qwen/Qwen2.5-VL-7B-Instruct".
            use_4bit: If True, load weights with bitsandbytes 4-bit NF4
                quantization (saves VRAM at some quality/speed cost).
        """
        from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

        self.model_id = model_id
        print(f"Loading {model_id}...")
        load_kwargs = {
            "trust_remote_code": True,
            "torch_dtype": torch.bfloat16,
            "device_map": "auto",
        }
        if use_4bit:
            from transformers import BitsAndBytesConfig

            load_kwargs["quantization_config"] = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
            )
        self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
            model_id, **load_kwargs
        )
        self.processor = AutoProcessor.from_pretrained(
            model_id, trust_remote_code=True
        )
        self.model.eval()

    def generate(self, image: Image.Image, prompt: str, max_new_tokens: int = 4096):
        """Generate HTML code from a UI screenshot.

        Args:
            image: Screenshot as a PIL image.
            prompt: Instruction text appended after the image.
            max_new_tokens: Generation budget.

        Returns:
            dict with keys: output (decoded text), n_visual_tokens,
            n_input_tokens, n_output_tokens, latency_s.
        """
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image},
                    {"type": "text", "text": prompt},
                ],
            }
        ]
        text = self.processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        inputs = self.processor(
            text=[text],
            images=[image],
            padding=True,
            return_tensors="pt",
        ).to(self.model.device)

        # Visual token count = sum over images of t*h*w grid cells.
        n_visual_tokens = 0
        if "image_grid_thw" in inputs:
            grid = inputs["image_grid_thw"]
            n_visual_tokens = int(grid.prod(dim=-1).sum().item())

        # Greedy decoding for a reproducible baseline.
        # FIX: temperature=0.0 / repetition_penalty=1.0 were previously passed
        # alongside do_sample=False; transformers ignores them in greedy mode
        # and emits a UserWarning, so they are dropped (behavior unchanged).
        t_start = time.time()
        with torch.no_grad():
            output_ids = self.model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                do_sample=False,
            )
        t_end = time.time()

        # Decode only the newly generated tokens (skip the prompt).
        input_len = inputs["input_ids"].shape[1]
        generated_ids = output_ids[0][input_len:]
        output_text = self.processor.tokenizer.decode(
            generated_ids, skip_special_tokens=True
        )
        return {
            "output": output_text,
            "n_visual_tokens": n_visual_tokens,
            "n_input_tokens": int(input_len),
            "n_output_tokens": len(generated_ids),
            "latency_s": t_end - t_start,
        }
class Qwen3VLModel:
    """Wrapper for Qwen3-VL models. Requires transformers>=4.57.

    Mirrors :class:`Qwen25VLModel`'s interface but uses the Qwen3-VL
    processor path (tokenizing directly in ``apply_chat_template``).
    """

    def __init__(self, model_id: str, use_4bit: bool = False):
        """Load a Qwen3-VL checkpoint and its processor.

        Args:
            model_id: HuggingFace model id, e.g. "Qwen/Qwen3-VL-2B-Instruct".
            use_4bit: If True, load weights with bitsandbytes 4-bit NF4
                quantization.

        Exits the process with an actionable message when the installed
        transformers is too old to provide Qwen3VLForConditionalGeneration.
        """
        import transformers
        version = transformers.__version__
        print(f" transformers version: {version}")
        try:
            from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
        except ImportError:
            print(f"[ERROR] Qwen3VLForConditionalGeneration not found in transformers=={version}")
            print(f" Qwen3-VL requires transformers>=4.57 or install from source:")
            print(f" pip install git+https://github.com/huggingface/transformers")
            sys.exit(1)

        self.model_id = model_id
        print(f"Loading {model_id}...")
        load_kwargs = {
            "trust_remote_code": True,
            "torch_dtype": torch.bfloat16,
            "device_map": "auto",
        }
        if use_4bit:
            from transformers import BitsAndBytesConfig

            load_kwargs["quantization_config"] = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
            )
        self.model = Qwen3VLForConditionalGeneration.from_pretrained(
            model_id, **load_kwargs
        )
        self.processor = AutoProcessor.from_pretrained(
            model_id, trust_remote_code=True
        )
        self.model.eval()

    def generate(self, image: Image.Image, prompt: str, max_new_tokens: int = 4096):
        """Generate HTML code from a UI screenshot.

        Returns:
            dict with keys: output (decoded text), n_visual_tokens,
            n_input_tokens, n_output_tokens, latency_s.
        """
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image},
                    {"type": "text", "text": prompt},
                ],
            }
        ]
        # Qwen3-VL tokenizes directly via apply_chat_template
        # (return_dict=True yields a BatchFeature with input_ids etc.).
        inputs = self.processor.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
        ).to(self.model.device)

        # Visual token count = sum over images of t*h*w grid cells.
        n_visual_tokens = 0
        if "image_grid_thw" in inputs:
            grid = inputs["image_grid_thw"]
            n_visual_tokens = int(grid.prod(dim=-1).sum().item())

        t_start = time.time()
        with torch.no_grad():
            # FIX: do_sample=True is now passed explicitly so that the
            # sampling parameters below actually take effect regardless of
            # the checkpoint's generation_config default (previously, with
            # a greedy default, temperature/top_p/top_k were ignored and
            # transformers warned).
            output_ids = self.model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                do_sample=True,
                temperature=0.7,
                top_p=0.8,
                top_k=20,
            )
        t_end = time.time()

        # Strip the prompt tokens from each returned sequence before decoding.
        generated_ids = [
            out_ids[len(in_ids):]
            for in_ids, out_ids in zip(inputs["input_ids"], output_ids)
        ]
        output_text = self.processor.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]
        input_len = inputs["input_ids"].shape[1]
        return {
            "output": output_text,
            "n_visual_tokens": n_visual_tokens,
            "n_input_tokens": int(input_len),
            "n_output_tokens": len(generated_ids[0]),
            "latency_s": t_end - t_start,
        }
# ============================================================
# Prompt Templates
# ============================================================
# Full prompt: spells out the self-contained-HTML requirements explicitly.
UI2CODE_PROMPT = """You are an expert web developer. Given a screenshot of a webpage, generate the complete HTML code that would reproduce this webpage as closely as possible.
Requirements:
- Generate a single, self-contained HTML file
- Include inline CSS styles (no external stylesheets)
- Reproduce the layout, colors, text content, and visual structure
- Use semantic HTML elements where appropriate
Generate ONLY the HTML code, nothing else."""

# Compact variant (default, selected with --prompt short); uses fewer
# prompt tokens for the same task.
UI2CODE_PROMPT_SHORT = """Convert this webpage screenshot to HTML code. Generate a complete, self-contained HTML file with inline CSS. Output only the code."""
# ============================================================
# Data Loading
# ============================================================
def load_design2code(data_dir: str, max_samples: int = -1):
    """Load the Design2Code test set from *data_dir*.

    Two on-disk layouts are supported, tried in order:
      1. ``design2code/`` -- a HuggingFace dataset saved with save_to_disk;
      2. ``testset_final/`` -- raw paired ``*.png`` / ``*.html`` files.

    Args:
        data_dir: Directory that contains one of the layouts above.
        max_samples: Truncate to this many samples when positive; any
            non-positive value loads everything.

    Returns:
        List of ``{"id", "image", "html"}`` dicts (empty if no data found).
    """
    root = Path(data_dir)

    # Layout 1: HuggingFace datasets format.
    hf_dir = root / "design2code"
    if hf_dir.exists():
        from datasets import load_from_disk

        dataset = load_from_disk(str(hf_dir))
        if hasattr(dataset, 'keys'):
            # DatasetDict: fall back to its first split.
            split = list(dataset.keys())[0]
            print(f" Using split: '{split}' from DatasetDict")
            dataset = dataset[split]
        loaded = []
        for idx, record in enumerate(dataset):
            if 0 < max_samples <= idx:
                break
            image = record.get("image") or record.get("screenshot")
            if image is not None and not isinstance(image, Image.Image):
                image = Image.open(image).convert("RGB")
            loaded.append({
                "id": str(idx),
                "image": image,
                "html": record.get("text", record.get("code", record.get("html", ""))),
            })
        return loaded

    # Layout 2: raw screenshot/html pairs.
    raw_dir = root / "testset_final"
    if raw_dir.exists():
        loaded = []
        for idx, png_file in enumerate(sorted(raw_dir.glob("*.png"))):
            if 0 < max_samples <= idx:
                break
            html_file = png_file.with_suffix(".html")
            reference = html_file.read_text(encoding="utf-8") if html_file.exists() else ""
            loaded.append({
                "id": png_file.stem,
                "image": Image.open(png_file).convert("RGB"),
                "html": reference,
            })
        return loaded

    print(f"[WARNING] No Design2Code data found at {root}")
    print("Run: python scripts/download_data.py")
    return []
# ============================================================
# Main Evaluation
# ============================================================
def evaluate_model(model, samples, prompt, output_dir, model_name):
    """Run inference over *samples* and persist predictions plus a summary.

    Per-sample HTML goes to ``<output_dir>/html_predictions/<id>.html``; a
    JSON summary (aggregate token/latency stats + per-sample records) is
    written to ``<output_dir>/summary.json`` and also returned.

    Args:
        model: Wrapper exposing ``generate(image=..., prompt=..., max_new_tokens=...)``.
        samples: Iterable of ``{"id", "image", "html"}`` dicts.
        prompt: Instruction text handed to the model for every sample.
        output_dir: Directory to write results into (created if missing).
        model_name: Label used in progress bars and the summary.

    Returns:
        The summary dict.
    """
    out_dir = Path(output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    html_dir = out_dir / "html_predictions"
    html_dir.mkdir(exist_ok=True)

    records = []
    for sample in tqdm(samples, desc=f"Evaluating {model_name}"):
        try:
            gen = model.generate(
                image=sample["image"],
                prompt=prompt,
                max_new_tokens=4096,
            )
            html_output = extract_html(gen["output"])
            (html_dir / f"{sample['id']}.html").write_text(html_output, encoding="utf-8")
            records.append({
                "id": sample["id"],
                "n_visual_tokens": gen["n_visual_tokens"],
                "n_input_tokens": gen["n_input_tokens"],
                "n_output_tokens": gen["n_output_tokens"],
                "latency_s": round(gen["latency_s"], 2),
                "output_length": len(html_output),
            })
        except Exception as e:
            # Record the failure but keep evaluating remaining samples.
            import traceback
            print(f"[ERROR] Sample {sample['id']}: {e}")
            traceback.print_exc()
            records.append({"id": sample["id"], "error": str(e)})

    # Aggregate statistics over the successful samples only.
    ok = [r for r in records if "error" not in r]
    summary = {
        "model": model_name,
        "n_samples": len(records),
        "n_errors": sum(1 for r in records if "error" in r),
        "avg_visual_tokens": avg([r.get("n_visual_tokens", 0) for r in ok]),
        "avg_output_tokens": avg([r.get("n_output_tokens", 0) for r in ok]),
        "avg_latency_s": avg([r.get("latency_s", 0) for r in ok]),
        "results": records,
    }
    with open(out_dir / "summary.json", "w") as f:
        json.dump(summary, f, indent=2, ensure_ascii=False)

    print(f"\n{'='*60}")
    print(f"Model: {model_name}")
    print(f"Samples: {summary['n_samples']} (errors: {summary['n_errors']})")
    print(f"Avg visual tokens: {summary['avg_visual_tokens']:.0f}")
    print(f"Avg output tokens: {summary['avg_output_tokens']:.0f}")
    print(f"Avg latency: {summary['avg_latency_s']:.2f}s")
    print(f"Results saved to: {out_dir}")
    print(f"{'='*60}\n")
    return summary
def extract_html(text: str) -> str:
    """Extract HTML code from model output, handling markdown fences.

    Prefers a ```` ```html ```` fenced block, then any generic ```` ``` ````
    fence; when no complete fence pair is found (including an unterminated
    opener) the whole text is returned stripped.

    Args:
        text: Raw model output.

    Returns:
        The fenced code, or the stripped text when no closed fence exists.
    """
    # Try the specific fence first, then the generic one; an opener without
    # a closer (end == -1) falls through to the next candidate / raw text.
    for fence in ("```html", "```"):
        if fence in text:
            start = text.find(fence) + len(fence)
            end = text.find("```", start)
            if end > start:
                return text[start:end].strip()
    # FIX: the original trailing checks (<!DOCTYPE prefix, "<...>" presence,
    # final fallthrough) all returned the same stripped text -- dead code
    # collapsed into one return.
    return text.strip()
def avg(lst):
    """Mean of the positive, non-None entries of *lst*; 0 when there are none.

    Zeros and Nones are excluded so that placeholder values (e.g. missing
    token counts) do not drag the average down.
    """
    usable = [item for item in lst if item is not None and item > 0]
    if not usable:
        return 0
    return sum(usable) / len(usable)
# ============================================================
# CLI
# ============================================================
# Maps the CLI --model key to (loader family, HuggingFace model id).
# The loader family selects the wrapper class in main():
#   "qwen25" -> Qwen25VLModel, "qwen3" -> Qwen3VLModel.
MODEL_REGISTRY: dict[str, tuple[str, str]] = {
    # Qwen2.5-VL (works with transformers>=4.46)
    "qwen2_5_vl_7b": ("qwen25", "Qwen/Qwen2.5-VL-7B-Instruct"),
    # Qwen3-VL (needs transformers>=4.57)
    "qwen3_vl_2b": ("qwen3", "Qwen/Qwen3-VL-2B-Instruct"),
    "qwen3_vl_4b": ("qwen3", "Qwen/Qwen3-VL-4B-Instruct"),
    "qwen3_vl_8b": ("qwen3", "Qwen/Qwen3-VL-8B-Instruct"),
}
def main():
    """CLI entry point: load data, evaluate one model, update comparison JSON."""
    parser = argparse.ArgumentParser(description="UIPress Step 1: Baseline Evaluation")
    parser.add_argument("--model", type=str, default="qwen3_vl_2b",
                        choices=list(MODEL_REGISTRY.keys()),
                        help="Model to evaluate (default: qwen3_vl_2b)")
    parser.add_argument("--max_samples", type=int, default=5,
                        help="Max samples to evaluate (-1 for all)")
    parser.add_argument("--use_4bit", action="store_true",
                        help="Use 4-bit quantization (saves VRAM)")
    parser.add_argument("--data_dir", type=str,
                        default=str(PROJECT_ROOT / "data"),
                        help="Path to data directory")
    parser.add_argument("--output_dir", type=str,
                        default=str(PROJECT_ROOT / "results"),
                        help="Path to output directory")
    parser.add_argument("--prompt", type=str, default="short",
                        choices=["full", "short"],
                        help="Prompt style")
    args = parser.parse_args()
    prompt = UI2CODE_PROMPT if args.prompt == "full" else UI2CODE_PROMPT_SHORT

    # Load data (exit early if the dataset has not been downloaded yet).
    print("Loading Design2Code dataset...")
    samples = load_design2code(args.data_dir, args.max_samples)
    if not samples:
        print("No data loaded. Run: python scripts/download_data.py")
        sys.exit(1)
    print(f"Loaded {len(samples)} samples")

    # Instantiate the wrapper for the requested registry entry.
    model_type, model_id = MODEL_REGISTRY[args.model]
    if model_type == "qwen25":
        model = Qwen25VLModel(model_id, use_4bit=args.use_4bit)
    elif model_type == "qwen3":
        model = Qwen3VLModel(model_id, use_4bit=args.use_4bit)

    # Per-model output directory under --output_dir.
    model_output_dir = Path(args.output_dir) / args.model

    # Run evaluation
    summary = evaluate_model(
        model=model,
        samples=samples,
        prompt=prompt,
        output_dir=model_output_dir,
        model_name=args.model,
    )

    # Merge this run's summary into the cross-model comparison file so
    # successive runs of different models accumulate side by side.
    comp_file = Path(args.output_dir) / "step1_comparison.json"
    existing = {}
    if comp_file.exists():
        with open(comp_file) as f:
            existing = json.load(f)
    existing[args.model] = summary
    with open(comp_file, "w") as f:
        json.dump(existing, f, indent=2, default=str)
    print(f"Comparison updated: {comp_file}")


if __name__ == "__main__":
    main()