Correct pipeline tag and add library name
#1
by
nielsr HF Staff - opened
README.md
CHANGED
|
@@ -1,4 +1,124 @@
|
|
| 1 |
---
|
| 2 |
license: apache-2.0
|
| 3 |
-
pipeline_tag:
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
license: apache-2.0
|
| 3 |
+
pipeline_tag: image-text-to-text
|
| 4 |
+
library_name: transformers
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
# EarthMind: Towards Multi-Granular and Multi-Sensor Earth Observation with Large Multimodal Models
|
| 8 |
+
|
| 9 |
+
The model was presented in the paper [EarthMind: Towards Multi-Granular and Multi-Sensor Earth Observation with Large Multimodal Models](https://huggingface.co/papers/2506.01667).
|
| 10 |
+
|
| 11 |
+
## Paper abstract
|
| 12 |
+
|
| 13 |
+
Large Multimodal Models (LMMs) have demonstrated strong performance in various vision-language tasks. However, they often struggle to comprehensively understand Earth Observation (EO) data, which is critical for monitoring the environment and the effects of human activity on it. In this work, we present EarthMind, a novel vision-language framework for multi-granular and multi-sensor EO data understanding. EarthMind features two core components: (1) Spatial Attention Prompting (SAP), which reallocates attention within the LLM to enhance pixel-level understanding; and (2) Cross-modal Fusion, which aligns heterogeneous modalities into a shared space and adaptively reweighs tokens based on their information density for effective fusion. To facilitate multi-sensor fusion evaluation, we propose EarthMind-Bench, a comprehensive benchmark with over 2,000 human-annotated multi-sensor image-question pairs, covering a wide range of perception and reasoning tasks. Extensive experiments demonstrate the effectiveness of EarthMind. It achieves state-of-the-art performance on EarthMind-Bench, surpassing GPT-4o despite being only 4B in scale. Moreover, EarthMind outperforms existing methods on multiple public EO benchmarks, showcasing its potential to handle both multi-granular and multi-sensor challenges in a unified framework.
|
| 14 |
+
|
| 15 |
+
# Quick Start
|
| 16 |
+
|
| 17 |
+
```python
|
| 18 |
+
import argparse
|
| 19 |
+
import os
|
| 20 |
+
|
| 21 |
+
from PIL import Image
|
| 22 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 23 |
+
|
| 24 |
+
import cv2
|
| 25 |
+
try:
|
| 26 |
+
from mmengine.visualization import Visualizer
|
| 27 |
+
except ImportError:
|
| 28 |
+
Visualizer = None
|
| 29 |
+
print("Warning: mmengine is not installed, visualization is disabled.")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def parse_args():
    """Build and parse the command-line options for the segmentation demo."""
    ap = argparse.ArgumentParser(description='Video Reasoning Segmentation')
    ap.add_argument('--image_folder', default="demo_images", help='Path to image file')
    ap.add_argument('--model_path', default="/scqian/EarthMind-4B")
    ap.add_argument('--work-dir', default="result", help='The dir to save results.')
    ap.add_argument('--text', type=str, default="<image>Please segment the left chimney.")
    ap.add_argument('--select', type=int, default=-1)
    return ap.parse_args()
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def visualize(pred_mask, image_path, work_dir):
    """Overlay *pred_mask* (binary mask) in green on the image at *image_path*
    and write the blended result into *work_dir* under the same file name."""
    canvas = Visualizer()
    canvas.set_image(cv2.imread(image_path))
    canvas.draw_binary_masks(pred_mask, colors='g', alphas=0.4)
    rendered = canvas.get_image()
    cv2.imwrite(os.path.join(work_dir, os.path.basename(image_path)), rendered)
| 52 |
+
|
| 53 |
+
if __name__ == "__main__":
    cfg = parse_args()

    # EarthMind ships custom modeling code with the checkpoint, so
    # trust_remote_code is required for both model and tokenizer.
    model = AutoModelForCausalLM.from_pretrained(
        cfg.model_path,
        torch_dtype="auto",
        device_map="cuda:0",
        trust_remote_code=True,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        cfg.model_path,
        trust_remote_code=True,
    )

    # Collect image files from the folder (sorted for a deterministic frame order).
    image_extensions = {".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"}
    image_paths = [
        os.path.join(cfg.image_folder, filename)
        for filename in sorted(os.listdir(cfg.image_folder))
        if os.path.splitext(filename)[1].lower() in image_extensions
    ]

    vid_frames = [Image.open(p).convert('RGB') for p in image_paths]

    if cfg.select > 0:
        # Single-image mode: --select is 1-based.
        img_frame = vid_frames[cfg.select - 1]
        print(f"Selected frame {cfg.select}")
        # BUG FIX: the original f-string was split across two physical lines,
        # which is a SyntaxError when the snippet is copy-pasted; use \n instead.
        print(f"The input is:\n{cfg.text}")
        result = model.predict_forward(
            image=img_frame,
            text=cfg.text,
            tokenizer=tokenizer,
        )
    else:
        # Video mode: feed every frame at once.
        print("##########", vid_frames[0])
        print(f"The input is:\n{cfg.text}")
        result = model.predict_forward(
            video=vid_frames,
            text=cfg.text,
            tokenizer=tokenizer,
        )

    prediction = result['prediction']
    print(f"The output is:\n{prediction}")

    # Render predicted masks only when segmentation output is present and
    # mmengine is installed (Visualizer is None otherwise).
    if '[SEG]' in prediction and Visualizer is not None:
        _seg_idx = 0
        pred_masks = result['prediction_masks'][_seg_idx]
        # Both branches of the original differed only in the output directory.
        out_dir = cfg.work_dir if cfg.work_dir else './temp_visualize_results'
        os.makedirs(out_dir, exist_ok=True)
        for frame_idx in range(len(vid_frames)):
            visualize(pred_masks[frame_idx], image_paths[frame_idx], out_dir)
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
Code: [https://github.com/sy1998/EarthMind](https://github.com/sy1998/EarthMind)
|
| 123 |
+
|
| 124 |
+
Data: [https://huggingface.co/datasets/sy1998/EarthMind-data](https://huggingface.co/datasets/sy1998/EarthMind-data)
|