ccclemenfff committed on
Commit
dc4090a
·
1 Parent(s): 718b339
requirements.txt CHANGED
@@ -3,12 +3,11 @@
3
  torch==2.2.1
4
  torchvision==0.17.1
5
  torchaudio==2.2.1
6
- transformers==4.34.1
7
  sentence-transformers==2.7.0
8
  diffusers==0.26.3
9
  decord==0.6.0
10
- huggingface_hub==0.22.1
11
- tokenizers==0.21.0
12
  peft==0.11.1
13
  Pillow
14
  einops
 
3
  torch==2.2.1
4
  torchvision==0.17.1
5
  torchaudio==2.2.1
6
+ transformers==4.51.3
7
  sentence-transformers==2.7.0
8
  diffusers==0.26.3
9
  decord==0.6.0
10
+ huggingface_hub==0.30.2
 
11
  peft==0.11.1
12
  Pillow
13
  einops
robohusky/model/modeling_husky_embody2.py CHANGED
@@ -39,13 +39,13 @@ from transformers.utils import (
39
  add_start_docstrings_to_model_forward,
40
  logging,
41
  replace_return_docstrings,
42
- is_flash_attn_available
43
  )
44
  from transformers import AutoModelForCausalLM, GenerationConfig
45
 
46
  from .configuration_husky import HuskyConfig, HuskyQFormerConfig, HuskyVisionConfig
47
 
48
- if is_flash_attn_available():
49
  from flash_attn import flash_attn_func
50
  from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
 
 
39
  add_start_docstrings_to_model_forward,
40
  logging,
41
  replace_return_docstrings,
42
+ is_flash_attn_2_available
43
  )
44
  from transformers import AutoModelForCausalLM, GenerationConfig
45
 
46
  from .configuration_husky import HuskyConfig, HuskyQFormerConfig, HuskyVisionConfig
47
 
48
+ if is_flash_attn_2_available():
49
  from flash_attn import flash_attn_func
50
  from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51