Maxtimer97 committed on
Commit
0bda180
·
verified ·
1 Parent(s): 8a2bc5d

Put relative import first for Huggingface

Browse files
Files changed (1) hide show
  1. modeling_chatglm.py +7 -5
modeling_chatglm.py CHANGED
@@ -24,15 +24,17 @@ from transformers.generation.utils import GenerationMixin
24
 
25
 
26
  try:
27
- from configuration_chatglm import ChatGLMConfig
28
- from ops.pooling import mean_pooling
29
- from ops.compressed_attention import compressed_attention
30
- from ops.topk_sparse_attention import topk_sparse_attention
31
- except ImportError:
32
  from .configuration_chatglm import ChatGLMConfig
33
  from .ops.pooling import mean_pooling
34
  from .ops.compressed_attention import compressed_attention
35
  from .ops.topk_sparse_attention import topk_sparse_attention
 
 
 
 
 
 
 
36
 
37
  try:
38
  from transformers.utils import is_flash_attn_greater_or_equal_2_10, is_flash_attn_2_available
 
24
 
25
 
26
  try:
 
 
 
 
 
27
  from .configuration_chatglm import ChatGLMConfig
28
  from .ops.pooling import mean_pooling
29
  from .ops.compressed_attention import compressed_attention
30
  from .ops.topk_sparse_attention import topk_sparse_attention
31
+ except ImportError:
32
+ from configuration_chatglm import ChatGLMConfig
33
+ from ops.pooling import mean_pooling
34
+ from ops.compressed_attention import compressed_attention
35
+ from ops.topk_sparse_attention import topk_sparse_attention
36
+
37
+
38
 
39
  try:
40
  from transformers.utils import is_flash_attn_greater_or_equal_2_10, is_flash_attn_2_available