"""Qwen2.5VL encoder with delayed normalization"""

import torch
from einops import rearrange
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import (
    Qwen2_5_VisionTransformerPretrainedModel,
)


def prepare_for_qwen_encoder(
    x: torch.Tensor | list[torch.Tensor], mean: torch.Tensor, std: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Preprocessing for Qwen encoder
    Image mean and std come from processor.image_processor.image_mean and image_std
    """
    grid_thw = torch.Tensor([[1, img.shape[0], img.shape[1]] for img in x]).to(x[0].device)
    hws_flatten_shape = torch.prod(grid_thw, dim=-1)
    x = torch.cat(
        [img.reshape((int(hws_flatten_shape[idx].item()), -1)) for idx, img in enumerate(x)],
        dim=0,
    )
    assert x.min() >= 0.0 and x.max() <= 1.0
    og_shape = x.shape
    x = rearrange(x, "L (c d) -> L c d", c=3)
    x = (x - mean) / std
    x = x.view(og_shape).to(torch.bfloat16)
    return x, grid_thw
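

# Illustrative usage of prepare_for_qwen_encoder. This sketch assumes the
# Qwen2.5-VL defaults patch_size=14 and temporal_patch_size=2 and the CLIP-style
# mean/std typically used by the Qwen image processor; other settings only change
# the patch dimension:
#
#   mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).view(1, 3, 1)
#   std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).view(1, 3, 1)
#   img = torch.rand(16, 16, 3 * 2 * 14 * 14)  # (grid_h, grid_w, C * T * P * P), values in [0, 1]
#   patches, grid_thw = prepare_for_qwen_encoder([img], mean=mean, std=std)
#   patches.shape  # torch.Size([256, 1176]), dtype torch.bfloat16
#   grid_thw       # tensor([[ 1., 16., 16.]])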


class Qwen25VLEncoder(torch.nn.Module):
    """Qwen2.5 VL encoder with pre/post processing to be compatible for
    our CASA attention implementation"""

    def __init__(
        self,
        visual: "Qwen2_5_VisionTransformerPretrainedModel",
    ):
        super().__init__()
        self.visual = visual
        self.image_mean = torch.tensor(self.visual.config.image_mean).view(1, 3, 1)
        self.image_std = torch.tensor(self.visual.config.image_std).view(1, 3, 1)

    def forward(
        self, x: torch.Tensor | list[torch.Tensor]
    ) -> dict[str, torch.Tensor | list[torch.Tensor]]:
        # Normalize the raw per-image patch grids and flatten them into one patch sequence.
        x, grid_thw = prepare_for_qwen_encoder(
            x, mean=self.image_mean.to(x[0].device), std=self.image_std.to(x[0].device)
        )

        grid_thw = grid_thw.type(torch.int)
        # Sanity check: the total patch count must match the grid bookkeeping.
        assert len(x) == grid_thw.prod(dim=1).sum()
        out = self.visual(x, grid_thw=grid_thw)

        # The vision tower merges spatial_merge_size**2 patches into each output token,
        # so split the flat output back into per-image embedding sequences.
        split_sizes = (grid_thw.prod(dim=-1) // self.visual.spatial_merge_size**2).tolist()
        embeds = list(torch.split(out, split_sizes, dim=0))  # Ni * (seq, C)
        return {"image_embeds": embeds, "grid_thw": grid_thw}