aadex committed on
Commit
acc1499
·
verified ·
1 Parent(s): 6028ebb

Upload EarthMind-4B GRPO fine-tuned model

Browse files
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:97f3792a0d86308d529a858ac40fb0d704ffa3a4da4a042a6acb77b184e5eb97
3
  size 4993044040
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95c37865bba2047023551374f7f52de2865c291a5aab8bae66ff4a4da06bf8ef
3
  size 4993044040
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8a5ada102da6c3ed05981f12a81dc425a3aa173c9e18778530ff3fab08ee9313
3
  size 2890805372
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63a362812e67807d16402e375cc8e8a9c6a72a6af22582af52a8de3ad6d6ed79
3
  size 2890805372
modeling_earthmind_chat.py CHANGED
@@ -37,14 +37,9 @@ from torchvision.transforms.functional import resize, to_pil_image
37
  from types import MethodType
38
  import torch.nn.functional as F
39
 
40
- try:
41
- # flash_attention import removed for inference without flash_attn
42
- # from .flash_attention import FlashAttention
43
  FlashAttention = None
44
- has_flash_attn = True
45
- except:
46
- print('FlashAttention is not installed.')
47
- has_flash_attn = False
48
 
49
  logger = logging.get_logger(__name__)
50
 
 
37
  from types import MethodType
38
  import torch.nn.functional as F
39
 
40
+ # flash_attention removed - not needed for inference
 
 
41
  FlashAttention = None
42
+ has_flash_attn = False
 
 
 
43
 
44
  logger = logging.get_logger(__name__)
45
 
modeling_intern_vit.py CHANGED
@@ -20,14 +20,9 @@ from transformers.utils import logging
20
 
21
  from .configuration_intern_vit import InternVisionConfig
22
 
23
- try:
24
- # flash_attention import removed for inference without flash_attn
25
- # from .flash_attention import FlashAttention
26
  FlashAttention = None
27
- has_flash_attn = True
28
- except:
29
- print('FlashAttention is not installed.')
30
- has_flash_attn = False
31
 
32
  logger = logging.get_logger(__name__)
33
 
 
20
 
21
  from .configuration_intern_vit import InternVisionConfig
22
 
23
+ # flash_attention removed - not needed for inference
 
 
24
  FlashAttention = None
25
+ has_flash_attn = False
 
 
 
26
 
27
  logger = logging.get_logger(__name__)
28