andersonbcdefg committed on
Commit
422979b
·
1 Parent(s): a9ab032

Upload modeling_flash_llama.py

Browse files
Files changed (1) hide show
  1. modeling_flash_llama.py +4 -3
modeling_flash_llama.py CHANGED
@@ -290,9 +290,10 @@ class LlamaAttention(nn.Module):
290
  scaling_type = self.config.rope_scaling["type"]
291
  scaling_factor = self.config.rope_scaling["factor"]
292
  assert scaling_type == 'linear'
293
-
 
294
  self.rotary_emb = FlashRotaryEmbedding(
295
- self.head_dim, base=10000, interleaved=False, scaling_factor=scaling_factor,
296
  )
297
 
298
  def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
@@ -362,7 +363,7 @@ class LlamaAttention(nn.Module):
362
  past_key_value = (past_kv, past_len+q.size(1)) if use_cache else None
363
 
364
  # no padding tokens, more efficient
365
- attn_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
366
  attn_outputs = flash_attn_kvpacked_func(
367
  q.type(attn_dtype), kv.type(attn_dtype), dropout_p=0.0, softmax_scale=1.0/self.norm_factor, causal=(not has_layer_past), return_attn_probs=output_attentions)
368
 
 
290
  scaling_type = self.config.rope_scaling["type"]
291
  scaling_factor = self.config.rope_scaling["factor"]
292
  assert scaling_type == 'linear'
293
+ rotary_base = self.config.__dict__.get("rope_theta", 10000.0)
294
+
295
  self.rotary_emb = FlashRotaryEmbedding(
296
+ self.head_dim, base=rotary_base, interleaved=False, scaling_factor=scaling_factor,
297
  )
298
 
299
  def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
 
363
  past_key_value = (past_kv, past_len+q.size(1)) if use_cache else None
364
 
365
  # no padding tokens, more efficient
366
+ attn_dtype = self.o_proj.weight.dtype
367
  attn_outputs = flash_attn_kvpacked_func(
368
  q.type(attn_dtype), kv.type(attn_dtype), dropout_p=0.0, softmax_scale=1.0/self.norm_factor, causal=(not has_layer_past), return_attn_probs=output_attentions)
369