| | """ |
| | Author: Luigi Piccinelli |
| | Licensed under the CC-BY NC 4.0 license (http://creativecommons.org/licenses/by-nc/4.0/) |
| | """ |
| |
|
| | from math import pi |
| | from typing import Optional |
| |
|
| | import torch |
| | import torch.nn as nn |
| |
|
| | from einops import rearrange, repeat |
| |
|
| |
|
class PositionEmbeddingSine(nn.Module):
    """Standard 2D sinusoidal positional encoding over row/column indices,
    in the style popularized by DETR."""

    def __init__(
        self, num_pos_feats=64, temperature=10000, normalize=False, scale=None
    ):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * pi
        self.scale = scale

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        if mask is None:
            mask = torch.zeros(
                (x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool
            )
        not_mask = ~mask
        # Cumulative sums over unmasked entries give each (row, column) its
        # integer position.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (
            2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats
        )

        # Interleave sin/cos along the feature dimension for each axis.
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

    def __repr__(self, _repr_indent=4):
        head = "Positional encoding " + self.__class__.__name__
        body = [
            "num_pos_feats: {}".format(self.num_pos_feats),
            "temperature: {}".format(self.temperature),
            "normalize: {}".format(self.normalize),
            "scale: {}".format(self.scale),
        ]
        lines = [head] + [" " * _repr_indent + line for line in body]
        return "\n".join(lines)
class LearnedSinusoidalPosEmb(nn.Module):
    """Sinusoidal embedding of a scalar input with learned frequencies.
    Note the output has dim + 1 features, since the raw input is
    concatenated back onto the Fourier features."""

    def __init__(self, dim):
        super().__init__()
        assert (dim % 2) == 0
        half_dim = dim // 2
        self.weights = nn.Parameter(torch.randn(half_dim))

    def forward(self, x):
        x = rearrange(x, "b -> b 1")
        freqs = x * rearrange(self.weights, "d -> 1 d") * 2 * pi
        fouriered = torch.cat((freqs.sin(), freqs.cos()), dim=-1)
        fouriered = torch.cat((x, fouriered), dim=-1)
        return fouriered
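

# Illustrative usage (hedged): embeds one scalar per batch element, e.g. a
# diffusion-style timestep, into dim + 1 features.
#   emb = LearnedSinusoidalPosEmb(dim=16)
#   emb(torch.tensor([0.1, 0.5])).shape  # torch.Size([2, 17])

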
def broadcat(tensors, dim=-1):
    """Concatenate tensors along `dim`, broadcasting every other dimension
    to its common maximum size first."""
    num_tensors = len(tensors)
    shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
    assert len(shape_lens) == 1, "tensors must all have the same number of dimensions"
    shape_len = list(shape_lens)[0]
    dim = (dim + shape_len) if dim < 0 else dim
    dims = list(zip(*map(lambda t: list(t.shape), tensors)))
    expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    assert all(
        [*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]
    ), "invalid dimensions for broadcastable concatenation"
    max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
    expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
    expanded_dims.insert(dim, (dim, dims[dim]))
    expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
    tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
    return torch.cat(tensors, dim=dim)
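

# Example (hedged; values are illustrative): both operands are expanded to a
# common (3, 4, 2) shape before concatenating, so
#   broadcat((torch.ones(3, 1, 2), torch.ones(1, 4, 2)), dim=-1).shape
# is torch.Size([3, 4, 4]).

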
def rotate_half(x):
    # Rotate adjacent feature pairs (x1, x2) -> (-x2, x1), i.e. multiplication
    # by i on the complex plane, as used in rotary position embeddings.
    x = rearrange(x, "... (d r) -> ... d r", r=2)
    x1, x2 = x.unbind(dim=-1)
    x = torch.stack((-x2, x1), dim=-1)
    return rearrange(x, "... d r -> ... (d r)")
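

# Together with elementwise cos/sin tables, rotate_half implements the rotary
# update t' = t * cos(theta) + rotate_half(t) * sin(theta): a 2D rotation of
# each adjacent feature pair by its position-dependent angle theta.

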
class VisionRotaryEmbedding(nn.Module):
    """2D rotary position embedding (RoPE) for vision transformers, applied
    to a slice of the feature dimension starting at `start_index`."""

    def __init__(
        self,
        dim,
        pt_seq_len,
        ft_seq_len=None,
        custom_freqs=None,
        freqs_for="lang",
        theta=10000,
        max_freq=10,
        num_freqs=1,
    ):
        super().__init__()
        if custom_freqs is not None:
            freqs = custom_freqs
        elif freqs_for == "lang":
            freqs = 1.0 / (
                theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim)
            )
        elif freqs_for == "pixel":
            freqs = torch.linspace(1.0, max_freq / 2, dim // 2) * pi
        elif freqs_for == "constant":
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f"unknown modality {freqs_for}")

        # Rescale fine-tuning positions onto the pre-training sequence length.
        if ft_seq_len is None:
            ft_seq_len = pt_seq_len
        t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len

        # The same 1D frequency table serves both axes (square grids); each
        # frequency is repeated so it covers a (sin, cos) feature pair.
        freqs_h = torch.einsum("..., f -> ... f", t, freqs)
        freqs_h = repeat(freqs_h, "... n -> ... (n r)", r=2)

        freqs_w = torch.einsum("..., f -> ... f", t, freqs)
        freqs_w = repeat(freqs_w, "... n -> ... (n r)", r=2)

        freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim=-1)

        self.register_buffer("freqs_cos", freqs.cos())
        self.register_buffer("freqs_sin", freqs.sin())

        print("======== shape of rope freq", self.freqs_cos.shape, "========")

    def forward(self, t, start_index=0):
        rot_dim = self.freqs_cos.shape[-1]
        end_index = start_index + rot_dim
        assert (
            rot_dim <= t.shape[-1]
        ), f"feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}"
        # Rotate only the [start_index, end_index) slice; pass the rest through.
        t_left, t, t_right = (
            t[..., :start_index],
            t[..., start_index:end_index],
            t[..., end_index:],
        )
        t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin)
        return torch.cat((t_left, t, t_right), dim=-1)
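

# Illustrative numbers (hedged): with dim=32 per axis and a 14x14 grid,
# freqs_cos has shape (14, 14, 64), and forward rotates the first 64 features
# of token tensors laid out on that grid.

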
class VisionRotaryEmbeddingFast(nn.Module):
    """Variant of VisionRotaryEmbedding that precomputes flattened cos/sin
    tables and rotates the full feature dimension in a single step."""

    def __init__(
        self,
        dim,
        pt_seq_len,
        ft_seq_len=None,
        custom_freqs=None,
        freqs_for="lang",
        theta=10000,
        max_freq=10,
        num_freqs=1,
    ):
        super().__init__()
        if custom_freqs is not None:
            freqs = custom_freqs
        elif freqs_for == "lang":
            freqs = 1.0 / (
                theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim)
            )
        elif freqs_for == "pixel":
            freqs = torch.linspace(1.0, max_freq / 2, dim // 2) * pi
        elif freqs_for == "constant":
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f"unknown modality {freqs_for}")

        if ft_seq_len is None:
            ft_seq_len = pt_seq_len
        t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len

        freqs = torch.einsum("..., f -> ... f", t, freqs)
        freqs = repeat(freqs, "... n -> ... (n r)", r=2)
        freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim=-1)

        # Flatten the (H, W) grid so the tables index a 1D token sequence.
        freqs_cos = freqs.cos().view(-1, freqs.shape[-1])
        freqs_sin = freqs.sin().view(-1, freqs.shape[-1])

        self.register_buffer("freqs_cos", freqs_cos)
        self.register_buffer("freqs_sin", freqs_sin)

    def forward(self, t):
        return t * self.freqs_cos + rotate_half(t) * self.freqs_sin
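

# Illustrative numbers (hedged): for the same 14x14 grid with dim=32 per axis,
# the flattened tables have shape (196, 64) and broadcast elementwise against
# token tensors of shape (..., 196, 64).

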
def generate_fourier_features(
    x: torch.Tensor,
    dim: int = 512,
    max_freq: int = 64,
    use_cos: bool = False,
    use_log: bool = False,
    cat_orig: bool = False,
) -> torch.Tensor:
    """Map coordinates `x` (last dim = input_dim) to `dim` Fourier features,
    optionally with log-spaced frequencies and the raw input appended."""
    x_orig = x
    device, dtype, input_dim = x.device, x.dtype, x.shape[-1]
    # Each band contributes sin (and optionally cos) per input dimension.
    num_bands = dim // (2 * input_dim) if use_cos else dim // input_dim

    if use_log:
        scales = 2.0 ** torch.linspace(
            0.0, log2(max_freq), steps=num_bands, device=device, dtype=dtype
        )
    else:
        scales = torch.linspace(
            1.0, max_freq / 2, num_bands, device=device, dtype=dtype
        )

    x = x.unsqueeze(-1)
    scales = scales[(*((None,) * (len(x.shape) - 1)), Ellipsis)]

    x = x * scales * pi
    x = torch.cat([x.sin(), x.cos()] if use_cos else [x.sin()], dim=-1)
    x = x.flatten(-2)
    if cat_orig:
        return torch.cat((x, x_orig), dim=-1)
    return x
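

if __name__ == "__main__":
    # Minimal smoke test, not part of the original file; the values below are
    # illustrative assumptions, not canonical configurations.
    pe = PositionEmbeddingSine(num_pos_feats=64, normalize=True)
    print(pe(torch.randn(2, 256, 16, 16)).shape)  # torch.Size([2, 128, 16, 16])

    rope = VisionRotaryEmbeddingFast(dim=32, pt_seq_len=14)
    tokens = torch.randn(2, 14 * 14, 64)
    print(rope(tokens).shape)  # torch.Size([2, 196, 64])

    coords = torch.rand(2, 100, 2)
    feats = generate_fourier_features(coords, dim=64, max_freq=32, use_cos=True)
    print(feats.shape)  # torch.Size([2, 100, 64])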