ccclemenfff committed
Commit 8e5db6d · 1 Parent(s): 357c846

block flash_attn

.idea/.name ADDED
@@ -0,0 +1 @@
+inference.py
requirements.txt CHANGED
@@ -1,7 +1,7 @@
 torch==2.1.2+cu121
 torchvision==0.17.0
 torchaudio==2.0.2
-transformers==4.34.1
+transformers==4.40.0
 decord==0.6.0
 peft==0.4.0
 huggingface_hub==0.23.4
@@ -10,4 +10,4 @@ einops
 scipy
 numpy==1.24.4
 tqdm
-flash-attn
+
robohusky/model/modeling_husky_embody2.py CHANGED
@@ -39,15 +39,15 @@ from transformers.utils import (
     add_start_docstrings_to_model_forward,
     logging,
     replace_return_docstrings,
-    is_flash_attn_available
+    # is_flash_attn_available
 )
 from transformers import AutoModelForCausalLM, GenerationConfig

 from .configuration_husky import HuskyConfig, HuskyQFormerConfig, HuskyVisionConfig

-if is_flash_attn_available():
-    from flash_attn import flash_attn_func
-    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+#if is_flash_attn_available():
+#    from flash_attn import flash_attn_func
+#    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

 try:
     from apex.normalization import FusedLayerNorm as LayerNorm
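
For reference, a minimal sketch (not part of this commit) of how the flash_attn imports could be guarded rather than commented out, assuming the installed transformers build exports is_flash_attn_2_available (the helper that superseded is_flash_attn_available) and that the flash-attn package may or may not be present:

# Sketch only; the guard below is an assumption, not the file's actual code.
try:
    # Recent transformers releases export this helper from transformers.utils.
    from transformers.utils import is_flash_attn_2_available
except ImportError:
    # Older transformers without the helper: treat flash attention as unavailable.
    def is_flash_attn_2_available() -> bool:
        return False

if is_flash_attn_2_available():
    from flash_attn import flash_attn_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
else:
    # Callers should check for None before taking the flash-attention code path.
    flash_attn_func = None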