Update rope embeddings for rope_type='default'

#3
by sirorezka - opened
Files changed (1)
  1. modeling_ouro.py +28 -2
modeling_ouro.py CHANGED
@@ -456,12 +456,38 @@ class OuroRotaryEmbedding(nn.Module):
456
  self.original_max_seq_len = config.max_position_embeddings
457
 
458
  self.config = config
459
- self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
460
 
461
- inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
 
 
 
 
462
  self.register_buffer("inv_freq", inv_freq, persistent=False)
463
  self.original_inv_freq = self.inv_freq
464
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
465
  @torch.no_grad()
466
  @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
467
  def forward(self, x, position_ids):
 
456
  self.original_max_seq_len = config.max_position_embeddings
457
 
458
  self.config = config
 
459
 
460
+ rope_init_fn: Callable = self.compute_default_rope_parameters
461
+ if self.rope_type != "default":
462
+ rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
463
+
464
+ inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
465
  self.register_buffer("inv_freq", inv_freq, persistent=False)
466
  self.original_inv_freq = self.inv_freq
467
 
468
+
469
+ @staticmethod
470
+ def compute_default_rope_parameters(
471
+ config: OuroConfig,
472
+ device: Optional["torch.device"] = None,
473
+ seq_len: Optional[int] = None,
474
+ ) -> tuple["torch.Tensor", float]:
475
+ """
476
+ Computes the inverse frequencies according to the original RoPE implementation
477
+ """
478
+
479
+ base = config.rope_theta
480
+ partial_rotary_factor = getattr(config, "partial_rotary_factor", 1.0)
481
+ head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
482
+ dim = int(head_dim * partial_rotary_factor)
483
+
484
+ attention_factor = 1.0 # Unused in this type of RoPE
485
+
486
+ # Compute the inverse frequencies
487
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim))
488
+ return inv_freq, attention_factor
489
+
490
+
491
  @torch.no_grad()
492
  @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
493
  def forward(self, x, position_ids):