import numpy as np

import torch
def get_2d_sincos_pos_embed(embed_dim, grid_size, n_cls_token=0):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [n_cls_token+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if n_cls_token > 0:
        # extra (e.g. class) tokens get all-zero position embeddings, prepended to the grid embeddings
        pos_embed = np.concatenate([np.zeros([n_cls_token, embed_dim]), pos_embed], axis=0)
    return pos_embed
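# Illustrative usage sketch (not part of the original file): build the fixed sin-cos
# positional embedding for a hypothetical ViT-style encoder with a 14x14 patch grid,
# 768-dim tokens and one class token, then copy it into its `pos_embed` parameter
# (the `model.pos_embed` name is an assumption, not defined in this module).
#
#   pos_embed = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, n_cls_token=1)
#   assert pos_embed.shape == (1 + 14 * 14, 768)
#   model.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))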
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of the dimensions to encode grid_h, the other half for grid_w
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=float)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
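# Worked example (illustrative, not part of the original file): with embed_dim=4 the
# frequencies are omega = 1 / 10000**[0.0, 0.5] = [1.0, 0.01], so a position p maps to
# [sin(p*1.0), sin(p*0.01), cos(p*1.0), cos(p*0.01)].
#
#   emb = get_1d_sincos_pos_embed_from_grid(4, np.arange(3, dtype=np.float32))
#   assert emb.shape == (3, 4)
#   assert np.allclose(emb[0], [0., 0., 1., 1.])  # position 0: all sines are 0, all cosines are 1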
def interpolate_pos_embed(model, checkpoint_model):
    if 'pos_embed' in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) of the checkpoint position embedding grid
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) of the new position embedding grid
        new_size = int(num_patches ** 0.5)
        # interpolate only if the grid sizes differ
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            # the extra tokens (e.g. class token) are kept unchanged
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model['pos_embed'] = new_pos_embed
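# Illustrative usage sketch (not part of the original file): resize a checkpoint's
# positional embedding before loading it into a model fine-tuned at a different
# resolution. `model` is assumed to expose `patch_embed.num_patches` and `pos_embed`
# (timm/MAE-style ViT); the checkpoint path below is hypothetical.
#
#   checkpoint = torch.load('checkpoint.pth', map_location='cpu')
#   state_dict = checkpoint['model']
#   interpolate_pos_embed(model, state_dict)  # rewrites state_dict['pos_embed'] in place
#   model.load_state_dict(state_dict, strict=False)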
try:
    from .curope import cuRoPE2D
    RoPE2D = cuRoPE2D
except ImportError:
    print('Warning, cannot find cuda-compiled version of RoPE2D, using a slow pytorch version instead')

    class RoPE2D(torch.nn.Module):

        def __init__(self, freq=100.0, F0=1.0):
            super().__init__()
            self.base = freq
            self.F0 = F0
            self.cache = {}

        def get_cos_sin(self, D, seq_len, device, dtype):
            if (D, seq_len, device, dtype) not in self.cache:
                inv_freq = 1.0 / (self.base ** (torch.arange(0, D, 2).float().to(device) / D))
                t = torch.arange(seq_len, device=device, dtype=inv_freq.dtype)
                freqs = torch.einsum("i,j->ij", t, inv_freq).to(dtype)
                freqs = torch.cat((freqs, freqs), dim=-1)
                cos = freqs.cos()
                sin = freqs.sin()
                self.cache[D, seq_len, device, dtype] = (cos, sin)
            return self.cache[D, seq_len, device, dtype]

        @staticmethod
        def rotate_half(x):
            x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
            return torch.cat((-x2, x1), dim=-1)

        def apply_rope1d(self, tokens, pos1d, cos, sin):
            assert pos1d.ndim == 2
            # gather the cos/sin rows corresponding to each token's 1D position
            cos = torch.nn.functional.embedding(pos1d, cos)[:, None, :, :]
            sin = torch.nn.functional.embedding(pos1d, sin)[:, None, :, :]
            return (tokens * cos) + (self.rotate_half(tokens) * sin)

        def forward(self, tokens, positions):
            """
            input:
                * tokens: batch_size x nheads x ntokens x dim
                * positions: batch_size x ntokens x 2 (y and x position of each token)
            output:
                * tokens after applying RoPE2D (batch_size x nheads x ntokens x dim)
            """
            assert tokens.size(3) % 2 == 0, "number of dimensions should be a multiple of two"
            D = tokens.size(3) // 2
            assert positions.ndim == 3 and positions.shape[-1] == 2
            cos, sin = self.get_cos_sin(D, int(positions.max()) + 1, tokens.device, tokens.dtype)
            # first half of the channels is rotated with the y position, second half with x
            y, x = tokens.chunk(2, dim=-1)
            y = self.apply_rope1d(y, positions[:, :, 0], cos, sin)
            x = self.apply_rope1d(x, positions[:, :, 1], cos, sin)
            tokens = torch.cat((y, x), dim=-1)
            return tokens
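# Illustrative usage sketch (not part of the original file): apply 2D RoPE to the
# per-head tokens of an attention layer over a 4x6 patch grid (all shapes below are assumptions).
#
#   B, nheads, H, W, dim = 2, 8, 4, 6, 64
#   rope = RoPE2D(freq=100.0)
#   tokens = torch.randn(B, nheads, H * W, dim)
#   y, x = torch.meshgrid(torch.arange(H), torch.arange(W), indexing='ij')
#   positions = torch.stack((y, x), dim=-1).reshape(1, H * W, 2).expand(B, -1, -1)
#   out = rope(tokens, positions)  # same shape as tokens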