andersonbcdefg committed
Commit c5cda5b · 1 Parent(s): 422979b

Upload modeling_flash_llama.py

Files changed (1)
  1. modeling_flash_llama.py +1 -1
modeling_flash_llama.py CHANGED
@@ -363,7 +363,7 @@ class LlamaAttention(nn.Module):
         past_key_value = (past_kv, past_len+q.size(1)) if use_cache else None

         # no padding tokens, more efficient
-        attn_dtype = self.o_proj.weight.dtype
+        attn_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
         attn_outputs = flash_attn_kvpacked_func(
             q.type(attn_dtype), kv.type(attn_dtype), dropout_p=0.0, softmax_scale=1.0/self.norm_factor, causal=(not has_layer_past), return_attn_probs=output_attentions)
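For context, a minimal standalone sketch of the dtype-selection logic this commit introduces. The tensor shapes and device handling below are illustrative assumptions, not taken from the file: flash-attn kernels accept only fp16/bf16 inputs, so the new line picks a supported half-precision dtype at runtime instead of inheriting `o_proj`'s weight dtype, which may be fp32 for models loaded in full precision.

    import torch

    # Choose the compute dtype for the flash-attention kernel:
    # prefer bf16 on GPUs that support it (Ampere and newer),
    # fall back to fp16 otherwise. Inheriting self.o_proj.weight.dtype,
    # as the old line did, fails when the weights are fp32.
    attn_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16

    # Illustrative shapes (assumption): q as (batch, seqlen, n_heads, head_dim),
    # kv packed as (batch, seqlen, 2, n_heads, head_dim), matching
    # what flash_attn_kvpacked_func expects.
    q = torch.randn(2, 128, 32, 64, device="cuda")
    kv = torch.randn(2, 128, 2, 32, 64, device="cuda")

    # Cast activations before the kernel call, exactly as the diff does
    # with q.type(attn_dtype) / kv.type(attn_dtype).
    q, kv = q.type(attn_dtype), kv.type(attn_dtype)
    assert q.dtype == attn_dtype and kv.dtype == attn_dtype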