| """ |
| OpticalCompressor: learned Conv2D compression for Qwen3-VL visual tokens. |
| |
| Inserted AFTER model.visual() (ViT + Merger), BEFORE the LLM. |
| Reduces ~6700 merged visual tokens → target_tokens (e.g. 256). |
| |
| The key idea from DeepSeek-OCR: compress at the encoding stage via |
| learned spatial downsampling, rather than pruning at inference time. |
| """ |
|
|
| import math |
| import torch |
| import torch.nn as nn |
|
|
|
|
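
# Intended wiring, as a sketch only: the model.visual() call signature below
# is an assumption modeled on the Qwen2-VL API, not a verified Qwen3-VL
# interface.
#
#   compressor = OpticalCompressor(hidden_dim=3584, target_tokens=256)
#   visual_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
#   visual_embeds, image_grid_thw = compressor(visual_embeds, image_grid_thw)
#   # ...then scatter the compressed tokens into the LLM input embeddings.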


class OpticalCompressor(nn.Module):
    def __init__(self, hidden_dim=3584, target_tokens=256, num_refine_layers=1):
        """
        Args:
            hidden_dim: LLM hidden dimension (3584 for Qwen3-VL-8B after merger)
            target_tokens: number of output tokens (default 256 = 16x16 grid)
            num_refine_layers: number of Transformer refinement layers
        """
        super().__init__()
        self.hidden_dim = hidden_dim
        self.target_tokens = target_tokens
        self.target_h = int(math.sqrt(target_tokens))
        self.target_w = self.target_h
        assert self.target_h * self.target_w == target_tokens, \
            "target_tokens must be a perfect square"

        # Two depthwise-separable conv stages, each halving spatial
        # resolution (4x total downsampling before pooling).
        num_groups = 32 if hidden_dim % 32 == 0 else 1  # must divide channels
        self.compress = nn.Sequential(
            # Stage 1: depthwise 3x3 stride-2 conv, then pointwise 1x1 mixing.
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=2,
                      padding=1, groups=hidden_dim, bias=False),
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, bias=False),
            nn.GroupNorm(num_groups, hidden_dim),
            nn.GELU(),
            # Stage 2: same pattern, halving resolution again.
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=2,
                      padding=1, groups=hidden_dim, bias=False),
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, bias=False),
            nn.GroupNorm(num_groups, hidden_dim),
            nn.GELU(),
        )

        # Snap the conv output (whose size depends on the input grid) to
        # the exact target_h x target_w grid.
        self.pool = nn.AdaptiveAvgPool2d((self.target_h, self.target_w))

        # Light Transformer refinement over the compressed token sequence.
        self.refine = nn.ModuleList([
            nn.TransformerEncoderLayer(
                d_model=hidden_dim, nhead=8,
                dim_feedforward=hidden_dim * 2,
                dropout=0.1, activation="gelu",
                batch_first=True, norm_first=True,
            )
            for _ in range(num_refine_layers)
        ])

        # Learned positional embedding for the fixed compressed grid.
        self.pos_embed = nn.Parameter(
            torch.randn(1, target_tokens, hidden_dim) * 0.02
        )

        self._init_weights()

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.trunc_normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward(self, visual_embeds, grid_thw):
        """
        Args:
            visual_embeds: [total_tokens, hidden_dim] — flat output from
                model.visual(). Contains tokens for ALL images in the batch,
                concatenated.
            grid_thw: [num_images, 3] — (temporal, height, width) for each
                image after the merger's spatial merge.

        Returns:
            compressed: [total_compressed, hidden_dim] — flat compressed tokens
            new_grid_thw: [num_images, 3] — new grid dimensions
                (1, target_h, target_w)
        """
        device = visual_embeds.device
        compressed_list = []
        offset = 0

        for i in range(grid_thw.shape[0]):
            t, h, w = map(int, grid_thw[i].tolist())
            n_tokens = t * h * w
            # Slice this image's tokens out of the flat batch.
            img_tokens = visual_embeds[offset:offset + n_tokens]
            offset += n_tokens

            # Reassemble the flat token sequence into an NCHW feature map;
            # temporal frames (t > 1) are stacked along the height axis.
            x = img_tokens.reshape(1, t * h, w, self.hidden_dim)
            x = x.permute(0, 3, 1, 2)  # [1, hidden_dim, t*h, w]

            # Learned 4x spatial downsampling.
            x = self.compress(x)

            # Pool to the exact target grid.
            x = self.pool(x)

            # Back to a token sequence: [1, target_tokens, hidden_dim].
            x = x.flatten(2).transpose(1, 2)

            # Add positions, then refine with self-attention.
            x = x + self.pos_embed.to(x.dtype)
            for layer in self.refine:
                x = layer(x)

            compressed_list.append(x.squeeze(0))

        compressed = torch.cat(compressed_list, dim=0)
        new_grid_thw = torch.tensor(
            [[1, self.target_h, self.target_w]] * grid_thw.shape[0],
            device=device, dtype=grid_thw.dtype,
        )
        return compressed, new_grid_thw

    def count_parameters(self):
        total = sum(p.numel() for p in self.parameters())
        trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
        return {"total": total, "trainable": trainable}
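

if __name__ == "__main__":
    # Minimal smoke test on random data (a sketch: the grid sizes are made
    # up, and hidden_dim is shrunk from the real 3584 so this runs quickly
    # on CPU).
    torch.manual_seed(0)
    compressor = OpticalCompressor(hidden_dim=256, target_tokens=64)

    # Two fake images with different post-merger grids.
    grid_thw = torch.tensor([[1, 80, 84], [1, 60, 60]])
    total = int((grid_thw[:, 0] * grid_thw[:, 1] * grid_thw[:, 2]).sum())
    visual_embeds = torch.randn(total, 256)

    out, new_grid = compressor(visual_embeds, grid_thw)
    print(out.shape)   # torch.Size([128, 256]): 2 images x 64 tokens each
    print(new_grid)    # tensor([[1, 8, 8], [1, 8, 8]])
    print(compressor.count_parameters())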