Taykhoom committed
Commit 2f21a1b · verified · 1 parent: 6faf7ed

delete flash attention


remove flash attention so the model can be integrated into mRNABench (mRNABench includes Triton, and that Triton can't be used with this model's flash-attn)

Files changed (1)
1. bert_layers.py +1 -4
bert_layers.py CHANGED
@@ -24,10 +24,7 @@ from .bert_padding import (index_first_axis,
                           index_put_first_axis, pad_input,
                           unpad_input, unpad_input_only)
 
-try:
-    from .flash_attn_triton import flash_attn_qkvpacked_func
-except ImportError as e:
-    flash_attn_qkvpacked_func = None
+flash_attn_qkvpacked_func = None
 
 logger = logging.getLogger(__name__)
 
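For context, a minimal sketch of how this sentinel is typically consumed later in bert_layers.py: attention code checks whether flash_attn_qkvpacked_func is None and, if so, falls back to plain PyTorch attention. The self_attention helper, tensor shapes, and fallback math below are illustrative assumptions, not the repo's exact code; only the flash_attn_qkvpacked_func name comes from the diff.

# Illustrative sketch only (assumed, not the repo's exact code): after this
# commit, flash_attn_qkvpacked_func is always None, so attention always
# takes the pure-PyTorch branch.
import math
import torch

flash_attn_qkvpacked_func = None  # set unconditionally by this commit

def self_attention(qkv: torch.Tensor) -> torch.Tensor:
    # qkv: packed projections of shape (batch, seq, 3, n_heads, head_dim).
    if flash_attn_qkvpacked_func is not None:
        # Triton flash-attention path; unreachable once the import is removed.
        return flash_attn_qkvpacked_func(qkv)
    # Fallback: standard scaled dot-product attention.
    q, k, v = qkv.unbind(dim=2)                       # each (b, s, h, d)
    q, k, v = (t.transpose(1, 2) for t in (q, k, v))  # (b, h, s, d)
    scores = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
    probs = torch.softmax(scores, dim=-1)
    return (probs @ v).transpose(1, 2)                # back to (b, s, h, d)

Keeping the name bound to None, rather than deleting it outright, means guard checks like the one above keep working without any changes at the call sites.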