| |
| |
| |
| |
| |
|
|
| import torch |
|
|
|
|
def compute_rotary_embeddings(
    embed_dim: int,
    positions: torch.Tensor,
    base_freq: float = 10000.0,
    freq_scale: float = 1.0,
    dtype: torch.dtype = torch.float32,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Compute rotary positional embeddings (RoPE) for transformer attention.

    Generates sinusoidal frequency tensors used to inject positional information
    into attention queries and keys via rotation in the complex plane.

    Shape legend:
        S: sequence length (number of positions)
        D: embedding dimension (must be even)

    Args:
        embed_dim (int): Dimension of the rotary embedding. Must be even.
        positions (S,): Position values for each element in the sequence (e.g., video timesteps).
        base_freq: Base frequency for the sinusoidal encoding.
        freq_scale: Scaling factor for frequency computation (for context length extrapolation).
        dtype: Data type for intermediate frequency computation (float32 or float64).

    Returns:
        tuple[torch.Tensor, torch.Tensor]: (cos_embed, sin_embed)
            - cos_embed (S, D): Cosine component of rotary embeddings.
            - sin_embed (S, D): Sine component of rotary embeddings.

    Raises:
        ValueError: If ``embed_dim`` is odd (frequencies come in cos/sin pairs).
    """
    # Raise (not assert) so validation survives `python -O` and callers get
    # a conventional exception type.
    if embed_dim % 2 != 0:
        raise ValueError(f"embed_dim must be even, got {embed_dim}")

    # Geometric frequency ladder: inv_freq[j] = 1 / (base_freq^(2j/D) * freq_scale),
    # one frequency per cos/sin pair (D/2 entries total).
    inv_freq = (
        1.0
        / (
            base_freq
            ** (
                torch.arange(0, embed_dim, 2, dtype=dtype, device=positions.device)
                / embed_dim
            )
        )
        / freq_scale
    )

    # phases[s, j] = positions[s] * inv_freq[j], shape (S, D/2).
    phases = torch.outer(positions, inv_freq)

    # Duplicate each phase column so adjacent embedding dims (2j, 2j+1) share a
    # frequency — matches the interleaved pairing used by apply-side rotation.
    # output_size lets repeat_interleave skip a device sync when tracing.
    cos_embed = (
        phases.cos()
        .repeat_interleave(2, dim=1, output_size=phases.shape[1] * 2)
        .float()
    )
    sin_embed = (
        phases.sin()
        .repeat_interleave(2, dim=1, output_size=phases.shape[1] * 2)
        .float()
    )

    return cos_embed, sin_embed
|
|
|
|
def apply_rotary_embedding(
    x: torch.Tensor,
    cos_embed: torch.Tensor,
    sin_embed: torch.Tensor,
) -> torch.Tensor:
    """
    Apply rotary positional embeddings (RoPE) to query or key tensors.

    Rotates pairs of adjacent dimensions in the input tensor using precomputed
    cos/sin embeddings, injecting positional information into attention.

    Shape legend:
        B: batch size
        H: number of attention heads
        S: sequence length
        D: head dimension (must be even)

    Args:
        x (B, H, S, D): Query or key tensor to apply rotary embeddings to.
            Extra leading batch dimensions (..., H, S, D) are also supported
            via broadcasting.
        cos_embed (S, D) or (B, S, D): Cosine component of rotary embeddings.
        sin_embed (S, D) or (B, S, D): Sine component of rotary embeddings.

    Returns:
        torch.Tensor (B, H, S, D): Input tensor with rotary embeddings applied,
            in ``x``'s original dtype.

    Raises:
        ValueError: If cos/sin ranks disagree, or are neither 2D nor 3D.
    """
    cos, sin = cos_embed, sin_embed
    # Raise (not assert) so the check survives `python -O`.
    if cos.ndim != sin.ndim:
        raise ValueError(
            f"cos_embed and sin_embed must have the same rank, got {cos.ndim}D and {sin.ndim}D."
        )

    # Insert singleton batch/head axes so cos/sin broadcast against (..., H, S, D).
    if cos.ndim == 2:
        # (S, D) -> (1, 1, S, D)
        cos = cos[None, None]
        sin = sin[None, None]
    elif cos.ndim == 3:
        # (B, S, D) -> (B, 1, S, D)
        cos = cos[:, None]
        sin = sin[:, None]
    else:
        raise ValueError(f"cos_embed/sin_embed should be 2D or 3D, got {cos.ndim}D.")

    cos, sin = cos.to(x.device), sin.to(x.device)

    # Treat each adjacent pair (x_{2j}, x_{2j+1}) as a complex number
    # x_real + i*x_imag.
    x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1)

    # 90-degree rotation of each pair: (a, b) -> (-b, a), re-interleaved to D.
    # flatten(-2) (rather than a hard-coded dim index) keeps this rank-agnostic.
    x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(-2)

    # Rotation by the phase angle: x*cos + rotate90(x)*sin, computed in float32
    # for precision, then cast back to the input dtype.
    out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype)

    return out
|
|