# models/qwen3_vl_compat.py
"""
Helpers for Qwen3-VL (transformers): vision lives on model.model.visual,
and ViT+merger returns BaseModelOutputWithDeepstackFeatures.pooler_output.
"""
from __future__ import annotations
import torch


def get_visual_module(model: torch.nn.Module):
    """Return the vision tower, whether it sits on ``model.model`` or directly on ``model``."""
    inner = getattr(model, "model", None)
    if inner is not None and hasattr(inner, "visual"):
        return inner.visual
    return model.visual


def set_visual_module(model: torch.nn.Module, value) -> None:
    """Install ``value`` as the vision tower at whichever location the model uses."""
    inner = getattr(model, "model", None)
    if inner is not None and hasattr(inner, "visual"):
        inner.visual = value
        return
    model.visual = value
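
# A minimal sketch of the intended swap pattern (``VisionTowerWrapper`` is a
# hypothetical wrapper, not an API defined in this repo):
#
#     vis = get_visual_module(model)
#     set_visual_module(model, VisionTowerWrapper(vis))
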

def merged_spatial_grid(image_grid_thw: torch.Tensor, spatial_merge_size: int) -> torch.Tensor:
    """Map the processor grid (T, H, W) to the LLM-side grid after spatial merging of H and W."""
    g = image_grid_thw.clone()
    # The merger fuses spatial_merge_size x spatial_merge_size patches into one
    # LLM token, so H and W shrink while the temporal axis T is unchanged.
    g[:, 1] = g[:, 1] // spatial_merge_size
    g[:, 2] = g[:, 2] // spatial_merge_size
    return g
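
# Worked example (made-up numbers): with spatial_merge_size=2, a processor grid
# of (T=1, H=32, W=32) merges to (1, 16, 16) on the LLM side, i.e.
# 1 * 16 * 16 = 256 image tokens instead of 1 * 32 * 32 = 1024 raw patches:
#
#     >>> merged_spatial_grid(torch.tensor([[1, 32, 32]]), 2)
#     tensor([[ 1, 16, 16]])
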

def pack_image_features(model, pixel_values: torch.Tensor, image_grid_thw: torch.Tensor):
    """
    Return flat merger embeddings [total_tokens, D] and the LLM-side grid
    [num_imgs, 3], consistent with OpticalCompressor token counts.
    """
    inner = model.model if hasattr(model, "model") else model
    out = inner.get_image_features(pixel_values, image_grid_thw)
    parts = out.pooler_output
    # pooler_output may come back as a per-image list/tuple; flatten it into a
    # single [total_tokens, D] tensor so downstream code sees one packed batch.
    packed = torch.cat(parts, dim=0) if isinstance(parts, (list, tuple)) else parts
    vis = get_visual_module(model)
    grid_llm = merged_spatial_grid(image_grid_thw, vis.spatial_merge_size)
    return packed, grid_llm
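
# A minimal end-to-end sketch (illustrative only: the checkpoint id is an
# assumption, and `pixel_values` / `image_grid_thw` are the keys Qwen-VL
# processors conventionally emit):
#
#     from transformers import AutoModelForImageTextToText, AutoProcessor
#
#     model = AutoModelForImageTextToText.from_pretrained("Qwen/Qwen3-VL-4B-Instruct")
#     processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-4B-Instruct")
#     inputs = processor(images=[image], text="describe", return_tensors="pt")
#     embeds, grid_llm = pack_image_features(
#         model, inputs["pixel_values"], inputs["image_grid_thw"]
#     )
#     # Total packed tokens must equal the product of each LLM grid, summed.
#     assert embeds.shape[0] == int(grid_llm.prod(dim=-1).sum())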