| """ |
| Unified evaluation script for all compression methods on Qwen3-VL-8B. |
| |
| Methods: |
| - baseline: Qwen3-VL full resolution (no compression) |
| - resolution: Qwen3-VL with min/max_pixels control |
| - visionzip: VisionZip dominant+contextual token selection |
| - efficientui: EfficientUICoder ELTC+RTR strategy |
| - uipress: UIPress optical compressor (trained) |
| |
| Usage: |
| # Baseline |
| CUDA_VISIBLE_DEVICES=0 python scripts/eval_all.py --method baseline --max_samples 50 |
| |
| # Resolution scaling |
| CUDA_VISIBLE_DEVICES=0 python scripts/eval_all.py --method resolution --max_pixels 230400 |
| |
| # VisionZip |
| CUDA_VISIBLE_DEVICES=0 python scripts/eval_all.py --method visionzip --keep_tokens 256 |
| |
| # EfficientUICoder strategy |
| CUDA_VISIBLE_DEVICES=0 python scripts/eval_all.py --method efficientui --prune_ratio 0.6 |
| |
| # UIPress |
| CUDA_VISIBLE_DEVICES=0 python scripts/eval_all.py --method uipress \ |
| --checkpoint checkpoints/optical/best.pt --target_tokens 256 |
| """ |
|
|
| import os |
| os.environ["HF_ENDPOINT"] = os.environ.get("HF_ENDPOINT", "https://hf-mirror.com") |
| os.environ["HF_HOME"] = os.environ.get("HF_HOME", "/root/rivermind-data/huggingface") |
|
|
| import argparse |
| import json |
| import sys |
| import time |
| from pathlib import Path |
|
|
| import torch |
| from PIL import Image |
| from tqdm import tqdm |
|
|
| PROJECT_ROOT = Path(__file__).parent.parent |
| sys.path.insert(0, str(PROJECT_ROOT)) |
|
|
| from models.qwen3_vl_compat import get_visual_module, set_visual_module |
|
|
|
|
| def _llm_hidden(model): |
| cfg = model.config |
| return cfg.text_config.hidden_size if hasattr(cfg, "text_config") else cfg.hidden_size |
|
|
# Fixed instruction used for every UI-to-code generation request.
UI2CODE_PROMPT = (
    "Convert this webpage screenshot to HTML code. "
    "Generate a complete, self-contained HTML file with inline CSS. "
    "Output only the code."
)
# Qwen-VL image placeholder token id.
# NOTE(review): not referenced anywhere in this script — the forward hooks
# read `self.config.image_token_id` from the model instead. Confirm before
# removing or relying on this constant.
IMAGE_TOKEN_ID = 151655
|
|
|
|
def extract_html(text: str) -> str:
    """Extract HTML source from a model response.

    Prefers a ```html fenced block, then any ``` fenced block. If a fence is
    opened but never closed, everything after the opener is returned, so the
    fence marker itself never leaks into the saved HTML (the previous version
    returned the raw text, fence included, in that case). With no fence at
    all, the stripped text is returned as-is.
    """
    for fence in ("```html", "```"):
        start = text.find(fence)
        if start == -1:
            continue
        start += len(fence)
        end = text.find("```", start)
        if end >= start:
            return text[start:end].strip()
        # Unclosed fence: take the remainder rather than the whole raw text.
        return text[start:].strip()
    # No fence: the response is (hopefully) bare HTML already.
    return text.strip()
|
|
|
|
def load_test_images(data_dir, max_samples=50):
    """Load up to *max_samples* RGB screenshots from the first existing
    candidate subdirectory under *data_dir*.

    ``max_samples <= 0`` means "no limit". Unreadable or corrupt PNGs are
    skipped silently. Returns a list of ``{"id", "image"}`` dicts, or an
    empty list when no candidate directory exists.
    """
    root = Path(data_dir)
    for name in ("testset_final", "ref_screenshots"):
        folder = root / name
        if not folder.exists():
            continue
        loaded = []
        for path in sorted(folder.glob("*.png")):
            try:
                image = Image.open(path).convert("RGB")
                image.load()  # force decode now so bad files fail here
            except Exception:
                # Best-effort: skip broken files instead of aborting the run.
                continue
            loaded.append({"id": path.stem, "image": image})
            if 0 < max_samples <= len(loaded):
                break
        return loaded
    return []
|
|
|
|
def peak_mem_gb():
    """Peak CUDA memory allocated so far, in GiB (0 when CUDA is unavailable)."""
    if not torch.cuda.is_available():
        return 0
    return torch.cuda.max_memory_allocated() / 1024**3
|
|
|
|
| |
| |
| |
class BaselineMethod:
    """Qwen3-VL-8B with no token compression.

    With ``min_pixels``/``max_pixels`` forwarded to the processor this class
    also implements the "resolution" method: the processor rescales images so
    the vision-patch count stays inside the pixel budget.
    """

    def __init__(self, min_pixels=None, max_pixels=None):
        from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
        model_id = "Qwen/Qwen3-VL-8B-Instruct"
        print(f"Loading {model_id} (min_px={min_pixels}, max_px={max_pixels})")
        self.model = Qwen3VLForConditionalGeneration.from_pretrained(
            model_id, trust_remote_code=True, torch_dtype=torch.bfloat16,
            device_map="auto",
        ).eval()
        # Only forward the pixel bounds that were actually provided so the
        # processor keeps its own defaults otherwise.
        proc_kw = {"trust_remote_code": True}
        if min_pixels is not None:
            proc_kw["min_pixels"] = min_pixels
        if max_pixels is not None:
            proc_kw["max_pixels"] = max_pixels
        self.processor = AutoProcessor.from_pretrained(model_id, **proc_kw)

    def generate(self, image):
        """Run UI-to-code generation on one PIL image.

        Returns a dict with the raw output text plus visual-token count,
        wall-clock latency and peak GPU memory.
        """
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": UI2CODE_PROMPT},
        ]}]
        inputs = self.processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True,
            return_dict=True, return_tensors="pt",
        ).to(self.model.device)

        # NOTE(review): grid_thw.prod() counts pre-merge vision patches; the
        # LLM actually sees prod / spatial_merge_size**2 tokens — confirm
        # which count downstream comparisons expect.
        n_vis = 0
        if "image_grid_thw" in inputs:
            n_vis = int(inputs["image_grid_thw"].prod(dim=-1).sum().item())

        # Guard the CUDA call so CPU-only runs don't crash (consistent with
        # UIPressMethod.generate, which guards the same call).
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
        t0 = time.time()
        with torch.no_grad():
            out = self.model.generate(
                **inputs, max_new_tokens=4096,
                temperature=0.1, do_sample=True, top_p=0.9,
            )
        latency = time.time() - t0

        # Decode only the newly generated tokens (skip the prompt prefix).
        gen_ids = out[0][inputs["input_ids"].shape[1]:]
        text = self.processor.tokenizer.decode(gen_ids, skip_special_tokens=True)
        return {
            "output": text, "n_visual_tokens": n_vis,
            "latency_s": latency, "peak_mem_gb": peak_mem_gb(),
        }
|
|
|
|
| |
| |
| |
class VisionZipMethod:
    """
    VisionZip: select dominant + contextual tokens from visual encoder output.
    Training-free, plug-and-play on Qwen3-VL.

    Requires: pip install visionzip OR clone dvlab-research/VisionZip
    Falls back to attention-score-based selection if visionzip not installed.

    NOTE(review): as written this class always uses the built-in L2-norm
    fallback (_select_tokens); the visionzip package is never imported here.
    Confirm the docstring's claim against the intended design.
    """

    def __init__(self, keep_tokens=256, dominant_ratio=0.8):
        from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
        model_id = "Qwen/Qwen3-VL-8B-Instruct"
        # Token budget split: `dominant_ratio` of the kept tokens are
        # high-salience ("dominant"); the rest are uniformly sampled
        # "contextual" tokens from the remainder.
        self.keep_tokens = keep_tokens
        self.n_dominant = int(keep_tokens * dominant_ratio)
        self.n_contextual = keep_tokens - self.n_dominant

        print(f"Loading {model_id} + VisionZip (keep={keep_tokens}, "
              f"dom={self.n_dominant}, ctx={self.n_contextual})")
        self.model = Qwen3VLForConditionalGeneration.from_pretrained(
            model_id, trust_remote_code=True, torch_dtype=torch.bfloat16,
            device_map="auto",
        ).eval()
        self.processor = AutoProcessor.from_pretrained(
            model_id, trust_remote_code=True,
        )

        # Scratch state written by the forward hook (kept for inspection).
        self._compressed_embeds = None
        self._new_grid_thw = None
        self._install_hook()

    def _install_hook(self):
        """Patch Qwen3VLModel.forward to inject compression and fix masked_scatter dimension."""
        # Capture the method wrapper; inside the hook `self` is the HF model.
        self_ = self

        def hooked_forward(self, input_ids=None, attention_mask=None, position_ids=None,
                           past_key_values=None, inputs_embeds=None,
                           pixel_values=None, pixel_values_videos=None,
                           image_grid_thw=None, video_grid_thw=None,
                           mm_token_type_ids=None, cache_position=None, **kwargs):
            # Only intercept the prefill call that carries image pixels;
            # decode steps and text-only calls fall through to the original.
            if pixel_values is not None and image_grid_thw is not None:
                vo = self.get_image_features(pixel_values, image_grid_thw, return_dict=True)
                pooler = vo.pooler_output
                # pooler_output may be a per-image list; flatten to [N, D].
                flat = torch.cat(pooler, dim=0) if isinstance(pooler, (list, tuple)) else pooler

                # grid_thw is in raw patch units; divide H/W by the spatial
                # merge size to get the post-merge token grid the LLM sees.
                sms = self_._get_spatial_merge_size()
                gl = image_grid_thw.clone()
                gl[:, 1] = gl[:, 1] // sms
                gl[:, 2] = gl[:, 2] // sms

                # Select tokens independently per image.
                parts, offset = [], 0
                for i in range(image_grid_thw.shape[0]):
                    t, h, w = gl[i].tolist()
                    n = int(t) * int(h) * int(w)
                    tok = flat[offset:offset + n]
                    offset += n
                    parts.append(self_._select_tokens(tok))

                comp = torch.cat(parts, dim=0)

                # Fabricate a near-square (h, w) factorization of keep_tokens
                # so 3D RoPE position computation still gets a valid grid.
                k = self_.keep_tokens
                sq = int(k ** 0.5)
                for hh in range(sq, 0, -1):
                    if k % hh == 0:
                        ww = k // hh
                        break
                else:
                    hh, ww = k, 1

                # Scale back to raw-patch units (compute_3d_position_ids
                # divides by the merge size again internally).
                new_grid = torch.tensor(
                    [[1, hh * sms, ww * sms]] * image_grid_thw.shape[0],
                    device=image_grid_thw.device, dtype=image_grid_thw.dtype,
                )
                self_._new_grid_thw = new_grid

                if inputs_embeds is None:
                    inputs_embeds = self.get_input_embeddings()(input_ids)

                # Scatter the compressed visual tokens into the FIRST n_comp
                # image-placeholder positions of the flattened sequence.
                # NOTE(review): the remaining placeholder tokens stay in the
                # sequence with their original embeddings — the sequence is
                # not shortened. Confirm this is intended vs. dropping them.
                B, S, D = inputs_embeds.shape
                flat_embeds = inputs_embeds.reshape(B * S, D).contiguous()
                image_mask_2d = (input_ids == self.config.image_token_id)
                image_mask_flat = image_mask_2d.reshape(B * S)
                flat_indices = image_mask_flat.nonzero(as_tuple=True)[0]
                n_comp = comp.shape[0]
                flat_indices = flat_indices[:n_comp]

                flat_embeds[flat_indices] = comp.to(inputs_embeds.dtype)
                inputs_embeds = flat_embeds.view(B, S, D)

                # Mark only the overwritten positions as "image" for RoPE.
                rope_mm_token_type_ids = torch.zeros_like(input_ids, dtype=torch.int)
                batch_idx = flat_indices // S
                seq_idx = flat_indices % S
                rope_mm_token_type_ids[batch_idx, seq_idx] = 1

                # Recompute 3D position ids against the fabricated grid.
                position_ids = self.compute_3d_position_ids(
                    input_ids=input_ids, image_grid_thw=new_grid,
                    video_grid_thw=video_grid_thw, inputs_embeds=inputs_embeds,
                    attention_mask=attention_mask, past_key_values=past_key_values,
                    mm_token_type_ids=rope_mm_token_type_ids,
                )

                # Bypass the stock visual-token merge path entirely and call
                # the language model with the pre-built embeddings.
                outputs = self.language_model(
                    input_ids=None, position_ids=position_ids,
                    attention_mask=attention_mask, past_key_values=past_key_values,
                    inputs_embeds=inputs_embeds, cache_position=cache_position,
                    visual_pos_masks=None, deepstack_visual_embeds=None, **kwargs,
                )
                from transformers.models.qwen3_vl.modeling_qwen3_vl import Qwen3VLModelOutputWithPast
                return Qwen3VLModelOutputWithPast(
                    **outputs, rope_deltas=getattr(self, 'rope_deltas', None)
                )
            else:
                # No image input: defer to the unpatched forward.
                return self_._orig_fwd(
                    input_ids=input_ids, attention_mask=attention_mask,
                    position_ids=position_ids, past_key_values=past_key_values,
                    inputs_embeds=inputs_embeds, pixel_values=pixel_values,
                    pixel_values_videos=pixel_values_videos,
                    image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw,
                    mm_token_type_ids=mm_token_type_ids, cache_position=cache_position, **kwargs
                )

        import types
        self._orig_fwd = self.model.model.forward
        self.model.model.forward = types.MethodType(hooked_forward, self.model.model)

    def _get_spatial_merge_size(self):
        # The visual tower merges sms x sms patches into one LLM token.
        return self.model.model.visual.spatial_merge_size

    def _select_tokens(self, tokens):
        """Select dominant + contextual tokens based on L2 norm (proxy for attention).

        tokens: [n, d] visual tokens for one image.
        Returns [k, d] with k = min(self.keep_tokens, n): the top-norm
        "dominant" tokens followed by uniformly-subsampled "contextual" ones.
        """
        n, d = tokens.shape
        k = min(self.keep_tokens, n)
        n_dom = min(self.n_dominant, k)
        n_ctx = k - n_dom

        # Dominant: highest-L2-norm tokens (norm used as a salience proxy).
        norms = tokens.norm(dim=-1)
        _, top_idx = norms.topk(n_dom)
        dominant = tokens[top_idx]

        if n_ctx > 0:
            # Contextual: uniform subsample of the non-dominant remainder.
            mask = torch.ones(n, dtype=torch.bool, device=tokens.device)
            mask[top_idx] = False
            remaining = tokens[mask]

            if remaining.shape[0] <= n_ctx:
                contextual = remaining
            else:
                indices = torch.linspace(
                    0, remaining.shape[0] - 1, n_ctx,
                ).long().to(tokens.device)
                contextual = remaining[indices]

            return torch.cat([dominant, contextual], dim=0)
        return dominant

    def generate(self, image):
        """Generate HTML for one PIL image; returns output text plus metrics."""
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": UI2CODE_PROMPT},
        ]}]
        inputs = self.processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True,
            return_dict=True, return_tensors="pt",
        ).to(self.model.device)

        # NOTE(review): unguarded CUDA call — crashes on CPU-only machines
        # (UIPressMethod.generate guards the same call). Confirm GPU-only use.
        torch.cuda.reset_peak_memory_stats()
        t0 = time.time()

        with torch.no_grad():
            out = self.model.generate(
                **inputs, max_new_tokens=4096,
                temperature=0.1, do_sample=True, top_p=0.9,
            )
        latency = time.time() - t0

        # Decode only the newly generated tokens (skip the prompt prefix).
        gen_ids = out[0][inputs["input_ids"].shape[1]:]
        text = self.processor.tokenizer.decode(gen_ids, skip_special_tokens=True)
        # n_visual_tokens is the nominal budget, not a measured count.
        return {
            "output": text, "n_visual_tokens": self.keep_tokens,
            "latency_s": latency, "peak_mem_gb": peak_mem_gb(),
        }
|
|
|
|
| |
| |
| |
class EfficientUIMethod:
    """
    Re-implement EfficientUICoder's input-side strategy on Qwen3-VL:
    1. ELTC: Detect UI elements → keep tokens in element regions
    2. RTR: Refine with CLS attention (keep high-attn bg, drop low-attn fg)

    Simplified version: use edge detection as proxy for element regions
    (avoids UIED dependency). Tokens in high-edge-density areas are kept.
    """

    def __init__(self, prune_ratio=0.6):
        from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
        model_id = "Qwen/Qwen3-VL-8B-Instruct"
        # Fraction of visual tokens to DROP (keep = 1 - prune_ratio).
        self.prune_ratio = prune_ratio

        print(f"Loading {model_id} + EfficientUI strategy (prune={prune_ratio})")
        self.model = Qwen3VLForConditionalGeneration.from_pretrained(
            model_id, trust_remote_code=True, torch_dtype=torch.bfloat16,
            device_map="auto",
        ).eval()
        self.processor = AutoProcessor.from_pretrained(
            model_id, trust_remote_code=True,
        )
        # NOTE(review): `_current_image` is first assigned in generate(); the
        # hook reads it, so the hook must only ever run via generate().
        self._install_hook()

    def _compute_element_mask(self, image, grid_h, grid_w):
        """
        Compute per-patch importance using edge density as UI element proxy.
        Returns importance scores [grid_h, grid_w] in [0, 1].
        """
        import numpy as np
        # Grayscale copy used only for its pixel dimensions below.
        img_np = np.array(image.convert("L"))
        ih, iw = img_np.shape

        # PIL FIND_EDGES: edge-magnitude image on the grayscale input.
        from PIL import ImageFilter
        edges = np.array(image.convert("L").filter(ImageFilter.FIND_EDGES))

        # Mean edge magnitude per vision-token cell (non-integer cell sizes
        # are handled by truncating the cell boundaries).
        patch_h = ih / grid_h
        patch_w = iw / grid_w
        importance = np.zeros((grid_h, grid_w))

        for i in range(grid_h):
            for j in range(grid_w):
                y0, y1 = int(i * patch_h), int((i + 1) * patch_h)
                x0, x1 = int(j * patch_w), int((j + 1) * patch_w)
                patch = edges[y0:y1, x0:x1]
                importance[i, j] = patch.mean() / 255.0

        # Max-normalize to [0, 1]; an all-zero map stays all-zero.
        if importance.max() > 0:
            importance = importance / importance.max()
        return importance

    def _install_hook(self):
        """Patch Qwen3VLModel.forward to prune tokens by element importance."""
        # Capture the method wrapper; inside the hook `self` is the HF model.
        self_ = self

        def hooked_forward(self, input_ids=None, attention_mask=None, position_ids=None,
                           past_key_values=None, inputs_embeds=None,
                           pixel_values=None, pixel_values_videos=None,
                           image_grid_thw=None, video_grid_thw=None,
                           mm_token_type_ids=None, cache_position=None, **kwargs):
            # Only intercept the prefill call that carries image pixels;
            # decode steps and text-only calls fall through to the original.
            if pixel_values is not None and image_grid_thw is not None:
                vo = self.get_image_features(pixel_values, image_grid_thw, return_dict=True)
                pooler = vo.pooler_output
                # pooler_output may be a per-image list; flatten to [N, D].
                flat = torch.cat(pooler, dim=0) if isinstance(pooler, (list, tuple)) else pooler

                # Convert raw patch grid to the post-merge token grid.
                sms = self_._get_spatial_merge_size()
                gl = image_grid_thw.clone()
                gl[:, 1] = gl[:, 1] // sms
                gl[:, 2] = gl[:, 2] // sms

                # Keep the top-importance tokens per image, preserving their
                # original raster order (sorted indices).
                parts, offset = [], 0
                for i in range(image_grid_thw.shape[0]):
                    t, h, w = gl[i].tolist()
                    n = int(t) * int(h) * int(w)
                    tok = flat[offset:offset + n]
                    offset += n
                    cur_img = self_._current_image
                    if cur_img is None:
                        # Fallback blank image if no source image is staged.
                        cur_img = Image.new("RGB", (224, 224))
                    imp = self_._compute_element_mask(cur_img, int(h), int(w))
                    imp_flat = torch.tensor(imp.flatten(), device=tok.device)
                    # Keep at least 16 tokens regardless of prune_ratio.
                    n_keep = max(int(n * (1 - self_.prune_ratio)), 16)
                    _, top_idx = imp_flat.topk(n_keep)
                    top_idx, _ = top_idx.sort()
                    parts.append(tok[top_idx])

                comp = torch.cat(parts, dim=0)
                self_._n_kept = int(comp.shape[0])
                # Fabricate a near-square grid for RoPE from the kept count.
                # NOTE(review): uses parts[0] only — with several images of
                # different sizes the grid reflects just the first; confirm
                # single-image use.
                nk = parts[0].shape[0]
                sq = int(nk ** 0.5)
                for hh in range(sq, 0, -1):
                    if nk % hh == 0:
                        ww = nk // hh
                        break
                else:
                    hh, ww = nk, 1

                # Scale back to raw-patch units for compute_3d_position_ids.
                new_grid = torch.tensor(
                    [[1, hh * sms, ww * sms]] * image_grid_thw.shape[0],
                    device=image_grid_thw.device, dtype=image_grid_thw.dtype,
                )
                self_._new_grid = new_grid

                if inputs_embeds is None:
                    inputs_embeds = self.get_input_embeddings()(input_ids)

                # Scatter kept tokens into the FIRST n_comp image-placeholder
                # slots; remaining placeholders keep their embeddings
                # (sequence length is unchanged).
                B, S, D = inputs_embeds.shape
                flat_embeds = inputs_embeds.reshape(B * S, D).contiguous()
                image_mask_2d = (input_ids == self.config.image_token_id)
                image_mask_flat = image_mask_2d.reshape(B * S)
                flat_indices = image_mask_flat.nonzero(as_tuple=True)[0]
                n_comp = comp.shape[0]
                flat_indices = flat_indices[:n_comp]

                flat_embeds[flat_indices] = comp.to(inputs_embeds.dtype)
                inputs_embeds = flat_embeds.view(B, S, D)

                # Mark only overwritten positions as "image" for RoPE.
                rope_mm_token_type_ids = torch.zeros_like(input_ids, dtype=torch.int)
                batch_idx = flat_indices // S
                seq_idx = flat_indices % S
                rope_mm_token_type_ids[batch_idx, seq_idx] = 1

                position_ids = self.compute_3d_position_ids(
                    input_ids=input_ids, image_grid_thw=new_grid,
                    video_grid_thw=video_grid_thw, inputs_embeds=inputs_embeds,
                    attention_mask=attention_mask, past_key_values=past_key_values,
                    mm_token_type_ids=rope_mm_token_type_ids,
                )

                # Call the language model directly with the patched embeds.
                outputs = self.language_model(
                    input_ids=None, position_ids=position_ids,
                    attention_mask=attention_mask, past_key_values=past_key_values,
                    inputs_embeds=inputs_embeds, cache_position=cache_position,
                    visual_pos_masks=None, deepstack_visual_embeds=None, **kwargs,
                )
                from transformers.models.qwen3_vl.modeling_qwen3_vl import Qwen3VLModelOutputWithPast
                return Qwen3VLModelOutputWithPast(
                    **outputs, rope_deltas=getattr(self, 'rope_deltas', None)
                )
            else:
                # No image input: defer to the unpatched forward.
                return self_._orig_fwd(
                    input_ids=input_ids, attention_mask=attention_mask,
                    position_ids=position_ids, past_key_values=past_key_values,
                    inputs_embeds=inputs_embeds, pixel_values=pixel_values,
                    pixel_values_videos=pixel_values_videos,
                    image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw,
                    mm_token_type_ids=mm_token_type_ids, cache_position=cache_position, **kwargs
                )

        import types
        self._orig_fwd = self.model.model.forward
        self.model.model.forward = types.MethodType(hooked_forward, self.model.model)

    def _get_spatial_merge_size(self):
        # The visual tower merges sms x sms patches into one LLM token.
        return self.model.model.visual.spatial_merge_size

    def generate(self, image):
        """Generate HTML for one PIL image; returns output text plus metrics.

        Stages the source image so the forward hook can compute per-patch
        edge-density importance against it.
        """
        self._current_image = image
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": UI2CODE_PROMPT},
        ]}]
        inputs = self.processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True,
            return_dict=True, return_tensors="pt",
        ).to(self.model.device)

        # NOTE(review): unguarded CUDA call — crashes on CPU-only machines
        # (UIPressMethod.generate guards the same call). Confirm GPU-only use.
        torch.cuda.reset_peak_memory_stats()
        t0 = time.time()

        with torch.no_grad():
            out = self.model.generate(
                **inputs, max_new_tokens=4096,
                temperature=0.1, do_sample=True, top_p=0.9,
            )
        latency = time.time() - t0

        # Decode only the newly generated tokens (skip the prompt prefix).
        gen_ids = out[0][inputs["input_ids"].shape[1]:]
        text = self.processor.tokenizer.decode(gen_ids, skip_special_tokens=True)
        # Actual kept-token count recorded by the hook (0 if hook never ran).
        n_vis = getattr(self, "_n_kept", 0)
        self._current_image = None
        return {
            "output": text, "n_visual_tokens": n_vis,
            "latency_s": latency, "peak_mem_gb": peak_mem_gb(),
        }
|
|
|
|
| |
| |
| |
class UIPressMethod:
    """UIPress with trained OpticalCompressor.

    Loads a frozen Qwen3-VL-8B plus a trained OpticalCompressor checkpoint;
    a forward hook runs the compressor over the encoder's visual tokens
    before they reach the language model.
    """

    def __init__(self, checkpoint, target_tokens=256, force_cpu=False):
        from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
        from models.optical_compressor import OpticalCompressor

        model_id = "Qwen/Qwen3-VL-8B-Instruct"
        print(f"Loading {model_id} + UIPress (ckpt={checkpoint}, tokens={target_tokens})")
        if force_cpu:
            print(" force_cpu=True: model on CPU (slow; use when all GPUs are full).")

        # bf16 on GPU; fp32 on CPU (CPU bf16 matmuls are slow/limited).
        dtype = torch.float32 if force_cpu else torch.bfloat16
        map_kw = dict(device_map="cpu") if force_cpu else dict(device_map="auto")

        self.model = Qwen3VLForConditionalGeneration.from_pretrained(
            model_id, trust_remote_code=True, torch_dtype=dtype,
            **map_kw,
        ).eval()
        self.processor = AutoProcessor.from_pretrained(
            model_id, trust_remote_code=True,
        )

        dev = next(self.model.parameters()).device
        # The compressor operates in the LLM hidden space.
        llm_hidden = _llm_hidden(self.model)
        self.compressor = OpticalCompressor(
            hidden_dim=llm_hidden, target_tokens=target_tokens,
        ).to(dev, dtype).eval()

        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # trusted checkpoints (consider weights_only=True).
        ckpt = torch.load(checkpoint, map_location=dev)
        comp_state = ckpt.get("compressor", ckpt)
        # Strip DataParallel/DDP "module." prefixes from state-dict keys.
        clean_state = {k.replace("module.", ""): v for k, v in comp_state.items()}
        missing, unexpected = self.compressor.load_state_dict(clean_state, strict=False)
        if missing:
            print(f" Warning: missing compressor keys: {missing}")
        if unexpected:
            print(f" Warning: unexpected compressor keys: {unexpected}")

        # NOTE(review): this branch only announces LoRA weights — nothing is
        # applied to the model here. Confirm whether loading was intended.
        if "lora" in ckpt:
            print(" Loading LoRA weights...")

        self.target_tokens = target_tokens
        self._install_hook()

    def _install_hook(self):
        """Patch Qwen3VLModel.forward to apply OpticalCompressor compression."""
        # Capture the method wrapper; inside the hook `self` is the HF model.
        self_ = self

        def hooked_forward(self, input_ids=None, attention_mask=None, position_ids=None,
                           past_key_values=None, inputs_embeds=None,
                           pixel_values=None, pixel_values_videos=None,
                           image_grid_thw=None, video_grid_thw=None,
                           mm_token_type_ids=None, cache_position=None, **kwargs):
            # Only intercept the prefill call that carries image pixels;
            # decode steps and text-only calls fall through to the original.
            if pixel_values is not None and image_grid_thw is not None:
                vo = self.get_image_features(pixel_values, image_grid_thw, return_dict=True)
                pooler = vo.pooler_output
                # pooler_output may be a per-image list; flatten to [N, D].
                flat = torch.cat(pooler, dim=0) if isinstance(pooler, (list, tuple)) else pooler

                # Convert raw patch grid to the post-merge token grid.
                sms = self_._get_spatial_merge_size()
                gl = image_grid_thw.clone()
                gl[:, 1] = gl[:, 1] // sms
                gl[:, 2] = gl[:, 2] // sms

                # Compress each image's tokens; the compressor also returns
                # the new (t, h, w) grid in LLM-token units.
                num_images = image_grid_thw.shape[0]
                parts, new_grids_llm, offset = [], [], 0
                for i in range(num_images):
                    t, h, w = gl[i].tolist()
                    n = int(t) * int(h) * int(w)
                    tok = flat[offset:offset + n]
                    offset += n
                    comp, new_grid_img = self_.compressor(tok.unsqueeze(0), gl[i:i+1])
                    parts.append(comp.squeeze(0))
                    new_grids_llm.append(new_grid_img.squeeze(0))

                comp = torch.cat(parts, dim=0)
                new_grid_llm = torch.stack(new_grids_llm, dim=0)
                # Scale H/W back to raw-patch units for compute_3d_position_ids
                # (which divides by the merge size internally).
                new_grid = new_grid_llm.clone()
                new_grid[:, 1] = new_grid[:, 1] * sms
                new_grid[:, 2] = new_grid[:, 2] * sms
                self_._new_grid = new_grid

                if inputs_embeds is None:
                    inputs_embeds = self.get_input_embeddings()(input_ids)

                # Scatter compressed tokens into the FIRST n_comp image
                # placeholder slots; remaining placeholders keep their
                # embeddings (sequence length is unchanged).
                B, S, D = inputs_embeds.shape
                flat_embeds = inputs_embeds.reshape(B * S, D).contiguous()
                image_mask_2d = (input_ids == self.config.image_token_id)
                image_mask_flat = image_mask_2d.reshape(B * S)
                flat_indices = image_mask_flat.nonzero(as_tuple=True)[0]
                n_comp = comp.shape[0]
                flat_indices = flat_indices[:n_comp]

                flat_embeds[flat_indices] = comp.to(inputs_embeds.dtype)
                inputs_embeds = flat_embeds.view(B, S, D)

                # Mark only overwritten positions as "image" for RoPE.
                rope_mm_token_type_ids = torch.zeros_like(input_ids, dtype=torch.int)
                batch_idx = flat_indices // S
                seq_idx = flat_indices % S
                rope_mm_token_type_ids[batch_idx, seq_idx] = 1

                position_ids = self.compute_3d_position_ids(
                    input_ids=input_ids, image_grid_thw=new_grid,
                    video_grid_thw=video_grid_thw, inputs_embeds=inputs_embeds,
                    attention_mask=attention_mask, past_key_values=past_key_values,
                    mm_token_type_ids=rope_mm_token_type_ids,
                )

                # Call the language model directly with the patched embeds.
                outputs = self.language_model(
                    input_ids=None, position_ids=position_ids,
                    attention_mask=attention_mask, past_key_values=past_key_values,
                    inputs_embeds=inputs_embeds, cache_position=cache_position,
                    visual_pos_masks=None, deepstack_visual_embeds=None, **kwargs,
                )
                from transformers.models.qwen3_vl.modeling_qwen3_vl import Qwen3VLModelOutputWithPast
                return Qwen3VLModelOutputWithPast(
                    **outputs, rope_deltas=getattr(self, 'rope_deltas', None)
                )
            else:
                # No image input: defer to the unpatched forward.
                return self_._orig_fwd(
                    input_ids=input_ids, attention_mask=attention_mask,
                    position_ids=position_ids, past_key_values=past_key_values,
                    inputs_embeds=inputs_embeds, pixel_values=pixel_values,
                    pixel_values_videos=pixel_values_videos,
                    image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw,
                    mm_token_type_ids=mm_token_type_ids, cache_position=cache_position, **kwargs
                )

        import types
        self._orig_fwd = self.model.model.forward
        self.model.model.forward = types.MethodType(hooked_forward, self.model.model)

    def _get_spatial_merge_size(self):
        # The visual tower merges sms x sms patches into one LLM token.
        return self.model.model.visual.spatial_merge_size

    def generate(self, image):
        """Generate HTML for one PIL image; returns output text plus metrics."""
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": UI2CODE_PROMPT},
        ]}]
        dev = next(self.model.parameters()).device
        inputs = self.processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True,
            return_dict=True, return_tensors="pt",
        ).to(dev)

        # CUDA-guarded so CPU (force_cpu) runs don't crash.
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
        t0 = time.time()

        with torch.no_grad():
            out = self.model.generate(
                **inputs, max_new_tokens=4096,
                temperature=0.1, do_sample=True, top_p=0.9,
            )
        latency = time.time() - t0

        # Decode only the newly generated tokens (skip the prompt prefix).
        gen_ids = out[0][inputs["input_ids"].shape[1]:]
        text = self.processor.tokenizer.decode(gen_ids, skip_special_tokens=True)
        # NOTE(review): reports the nominal target, not the measured count
        # the compressor actually produced — confirm that is acceptable.
        return {
            "output": text, "n_visual_tokens": self.target_tokens,
            "latency_s": latency, "peak_mem_gb": peak_mem_gb(),
        }
|
|
|
|
| |
| |
| |
def run_eval(args):
    """Build the requested compression method, evaluate it on the test set,
    and write per-sample metrics plus a summary JSON under the output dir.

    Returns the summary dict.
    """
    # --- method construction + run naming -------------------------------
    if args.method == "baseline":
        method, run_name = BaselineMethod(), "qwen3_full"
    elif args.method == "resolution":
        method = BaselineMethod(
            min_pixels=args.min_pixels, max_pixels=args.max_pixels,
        )
        mp = args.max_pixels or "full"
        run_name = f"qwen3_res_{mp}"
    elif args.method == "visionzip":
        method = VisionZipMethod(keep_tokens=args.keep_tokens)
        run_name = f"visionzip_{args.keep_tokens}"
    elif args.method == "efficientui":
        method = EfficientUIMethod(prune_ratio=args.prune_ratio)
        run_name = f"efficientui_prune{int(args.prune_ratio * 100)}"
    elif args.method == "uipress":
        method = UIPressMethod(
            checkpoint=args.checkpoint,
            target_tokens=args.target_tokens,
            force_cpu=args.force_cpu,
        )
        run_name = f"uipress_{args.target_tokens}"
    else:
        raise ValueError(f"Unknown method: {args.method}")

    samples = load_test_images(args.data_dir, args.max_samples)
    print(f"Evaluating {run_name} on {len(samples)} samples")

    # --- output layout ---------------------------------------------------
    out_dir = Path(args.output_dir) / run_name
    html_dir = out_dir / "html_predictions"
    html_dir.mkdir(parents=True, exist_ok=True)

    # --- per-sample loop (best-effort: failures are recorded, not fatal) --
    results = []
    for sample in tqdm(samples, desc=run_name):
        try:
            res = method.generate(sample["image"])
            html = extract_html(res["output"])
            (html_dir / f"{sample['id']}.html").write_text(html, encoding="utf-8")
            results.append({
                "id": sample["id"],
                "n_visual_tokens": res["n_visual_tokens"],
                "latency_s": round(res["latency_s"], 2),
                "peak_mem_gb": round(res["peak_mem_gb"], 2),
                "output_len": len(html),
            })
        except Exception as e:
            print(f" Error on {sample['id']}: {e}")
            results.append({"id": sample["id"], "error": str(e)})

    # --- aggregate over successful samples only ---------------------------
    valid = [r for r in results if "error" not in r]
    denom = max(len(valid), 1)  # avoid div-by-zero when everything failed
    summary = {
        "method": run_name,
        "n_samples": len(samples),
        "n_success": len(valid),
        "avg_visual_tokens": round(
            sum(r["n_visual_tokens"] for r in valid) / denom, 1,
        ),
        "avg_latency_s": round(
            sum(r["latency_s"] for r in valid) / denom, 2,
        ),
        "avg_peak_mem_gb": round(
            sum(r["peak_mem_gb"] for r in valid) / denom, 2,
        ),
    }
    print(f"\n=== {run_name} Summary ===")
    for key, value in summary.items():
        print(f" {key}: {value}")

    # --- persist ----------------------------------------------------------
    with open(out_dir / "summary.json", "w") as f:
        json.dump(summary, f, indent=2)
    with open(out_dir / "per_sample.json", "w") as f:
        json.dump(results, f, indent=2)

    print(f"Results saved to {out_dir}")
    return summary
|
|
|
|
def parse_args():
    """CLI arguments for the unified compression evaluation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--method", required=True,
        choices=["baseline", "resolution", "visionzip",
                 "efficientui", "uipress"],
    )
    parser.add_argument("--data_dir", default="data")
    parser.add_argument("--output_dir", default="results/comparison")
    parser.add_argument("--max_samples", type=int, default=50)

    # resolution method
    parser.add_argument("--min_pixels", type=int, default=None)
    parser.add_argument("--max_pixels", type=int, default=None)

    # visionzip method
    parser.add_argument("--keep_tokens", type=int, default=256)

    # efficientui method
    parser.add_argument("--prune_ratio", type=float, default=0.6)

    # uipress method
    parser.add_argument("--checkpoint", type=str, default=None)
    parser.add_argument("--target_tokens", type=int, default=256)
    parser.add_argument(
        "--force_cpu",
        action="store_true",
        help="Load UIPress model on CPU (very slow; when GPUs have no free VRAM).",
    )
    return parser.parse_args()
|
|
|
|
| if __name__ == "__main__": |
| run_eval(parse_args()) |
|
|