andersonbcdefg committed
Commit cc046e1 · 1 Parent(s): c5cda5b

Upload modeling_flash_llama.py
Files changed (1)
  1. modeling_flash_llama.py +6 -1
modeling_flash_llama.py CHANGED

@@ -363,12 +363,17 @@ class LlamaAttention(nn.Module):
         past_key_value = (past_kv, past_len+q.size(1)) if use_cache else None

         # no padding tokens, more efficient
+        # the basic problem here is that for QLoRA, tensors are stored in float32, but flash attention needs float16 or bfloat16.
+        # if we cast based only on torch.cuda.is_bf16_supported(), training works, but it breaks in evals
+        # that load the model in fp16 on a GPU where bf16 is supported. so, we cast based on the GPU support, but then
+        # cast back to whatever dtype q originally had. hopefully that works!
+        orig_dtype = q.dtype
         attn_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
         attn_outputs = flash_attn_kvpacked_func(
             q.type(attn_dtype), kv.type(attn_dtype), dropout_p=0.0, softmax_scale=1.0/self.norm_factor, causal=(not has_layer_past), return_attn_probs=output_attentions)

         attn_output = attn_outputs[0] if output_attentions else attn_outputs
-        attn_output = attn_output.reshape(bsz, q_len, h_size)
+        attn_output = attn_output.reshape(bsz, q_len, h_size).type(orig_dtype)
         attn_weights = attn_outputs[2] if output_attentions else None

         if self.config.pretraining_tp > 1:
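
The dtype round-trip this commit introduces can be shown in isolation. Below is a minimal sketch of the pattern, not the repo's code: half_precision_attention and the attn_fn parameter (a stand-in for flash_attn_kvpacked_func) are hypothetical names for illustration.

import torch

def half_precision_attention(q: torch.Tensor, kv: torch.Tensor, attn_fn) -> torch.Tensor:
    # q/kv may arrive in float32 (e.g. under QLoRA) or float16 (e.g. fp16 evals);
    # flash attention kernels only accept half-precision inputs.
    orig_dtype = q.dtype
    # Cast to whichever half precision the GPU supports...
    attn_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
    out = attn_fn(q.type(attn_dtype), kv.type(attn_dtype))
    # ...then cast back so downstream layers see the dtype they were given,
    # which keeps fp16 evals consistent on bf16-capable GPUs.
    return out.type(orig_dtype)

Casting back at the output, rather than picking attn_dtype from the input dtype, is the key design choice: the kernel always runs in the GPU's preferred half precision, while callers never observe a dtype change.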