"""
OpticalCompressor: learned Conv2D compression for Qwen3-VL visual tokens.
Inserted AFTER model.visual() (ViT + Merger), BEFORE the LLM.
Reduces ~6700 merged visual tokens → target_tokens (e.g. 256).
The key idea from DeepSeek-OCR: compress at the encoding stage via
learned spatial downsampling, rather than pruning at inference time.
"""
import math
import torch
import torch.nn as nn
class OpticalCompressor(nn.Module):
def __init__(self, hidden_dim=3584, target_tokens=256, num_refine_layers=1):
"""
Args:
hidden_dim: LLM hidden dimension (3584 for Qwen3-VL-8B after merger)
target_tokens: number of output tokens (default 256 = 16x16 grid)
num_refine_layers: number of Transformer refinement layers
"""
super().__init__()
self.hidden_dim = hidden_dim
self.target_tokens = target_tokens
self.target_h = int(math.sqrt(target_tokens))
self.target_w = self.target_h
assert self.target_h * self.target_w == target_tokens
# Stage 1: Spatial Conv compression
# Input: [B, D, H, W] → progressive downsampling
# Using depthwise-separable conv to keep params manageable
# Note: GroupNorm(1, D) == LayerNorm over channels, works on [B,C,H,W]
num_groups = 32 if hidden_dim % 32 == 0 else 1
self.compress = nn.Sequential(
# Block 1: reduce spatial by 2x
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=2,
padding=1, groups=hidden_dim, bias=False),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, bias=False),
nn.GroupNorm(num_groups, hidden_dim),
nn.GELU(),
# Block 2: reduce spatial by 2x again
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=2,
padding=1, groups=hidden_dim, bias=False),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, bias=False),
nn.GroupNorm(num_groups, hidden_dim),
nn.GELU(),
)
# Stage 2: Adaptive pool to fixed grid
self.pool = nn.AdaptiveAvgPool2d((self.target_h, self.target_w))
# Stage 3: Transformer refinement (recover inter-token relationships)
self.refine = nn.ModuleList([
nn.TransformerEncoderLayer(
d_model=hidden_dim, nhead=8,
dim_feedforward=hidden_dim * 2,
dropout=0.1, activation="gelu",
batch_first=True, norm_first=True,
)
for _ in range(num_refine_layers)
])
# Learnable position embedding for output tokens
self.pos_embed = nn.Parameter(
torch.randn(1, target_tokens, hidden_dim) * 0.02
)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, visual_embeds, grid_thw):
"""
Args:
visual_embeds: [total_tokens, hidden_dim] — flat output from model.visual()
Contains tokens for ALL images in the batch concatenated.
grid_thw: [num_images, 3] — (temporal, height, width) for each image
after the merger's spatial merge.
Returns:
compressed: [total_compressed, hidden_dim] — flat compressed tokens
new_grid_thw: [num_images, 3] — new grid dimensions (1, target_h, target_w)
"""
device = visual_embeds.device
compressed_list = []
offset = 0
for i in range(grid_thw.shape[0]):
t, h, w = grid_thw[i].tolist()
t, h, w = int(t), int(h), int(w)
n_tokens = t * h * w
img_tokens = visual_embeds[offset:offset + n_tokens] # [n, D]
offset += n_tokens
# Reshape to spatial: take first temporal frame for 2D conv
# For images (not video), t=1 always
x = img_tokens.reshape(t * h, w, self.hidden_dim)
# Merge temporal into height for simplicity
x = x.reshape(1, t * h, w, self.hidden_dim) # [1, H, W, D]
x = x.permute(0, 3, 1, 2) # [1, D, H, W]
# Stage 1: Conv compression
x = self.compress(x) # [1, D, H', W']
# Stage 2: Pool to fixed size
x = self.pool(x) # [1, D, target_h, target_w]
x = x.flatten(2).transpose(1, 2) # [1, target_tokens, D]
# Stage 3: Add position embedding + refine
x = x + self.pos_embed.to(x.dtype)
for layer in self.refine:
x = layer(x)
compressed_list.append(x.squeeze(0)) # [target_tokens, D]
compressed = torch.cat(compressed_list, dim=0) # [total_compressed, D]
new_grid_thw = torch.tensor(
[[1, self.target_h, self.target_w]] * grid_thw.shape[0],
device=device, dtype=grid_thw.dtype,
)
return compressed, new_grid_thw
def count_parameters(self):
total = sum(p.numel() for p in self.parameters())
trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
return {"total": total, "trainable": trainable}
|