Update all files for SegEarth-OV
pipeline.py (ADDED, +476 -0)
"""
Unified SegEarth pipeline: OV, OV-2 (CLIP-based), OV-3 (SAM3-based).
Training-free open-vocabulary segmentation for remote sensing.
"""
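# Roadmap (illustrative): OV and OV-2 resolve to SegEarthPipelineCLIP, OV-3 to
# SegEarthPipelineSAM3; the SegEarthPipeline() factory at the bottom of this
# file dispatches between them by variant name.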
import contextlib
from pathlib import Path
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

try:
    from .upsamplers import get_upsampler, FEATUP_CHECKPOINTS
except ImportError:
    from upsamplers import get_upsampler, FEATUP_CHECKPOINTS

try:
    from .prompts.imagenet_template import openai_imagenet_template, sub_imagenet_template
except ImportError:
    # Minimal fallback templates when the prompts package is unavailable.
    openai_imagenet_template = [
        lambda c: f"a photo of a {c}.",
        lambda c: f"a bad photo of a {c}.",
        lambda c: f"a photo of many {c}.",
        lambda c: f"a photo of the large {c}.",
        lambda c: f"a photo of the small {c}.",
    ]
    # [:7] keeps all five fallback templates; it only trims the full OpenAI set.
    sub_imagenet_template = openai_imagenet_template[:7]


def get_cls_idx(path: Union[str, Path]) -> Tuple[List[str], List[int]]:
    """Parse class list file (one line per class, comma-separated synonyms)."""
    path = Path(path)
    with open(path) as f:
        lines = f.readlines()
    class_names, class_indices = [], []
    for idx, line in enumerate(lines):
        names_i = [n.strip() for n in line.strip().split(",")]
        class_names.extend(names_i)
        class_indices.extend([idx] * len(names_i))
    return class_names, class_indices

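# Example class file (illustrative contents): one class per line, with commas
# adding synonyms that share the same class index.
#
#   background
#   building,house
#   road,street
#
# get_cls_idx would return (["background", "building", "house", "road", "street"],
# [0, 1, 1, 2, 2]).
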
class SegEarthPipelineCLIP:
    """
    CLIP-based SegEarth pipeline (OV, OV-2).
    Uses transformers.CLIPModel + SimFeatUp for dense prediction.
    """

    def __init__(
        self,
        model_id: str = "openai/clip-vit-base-patch16",
        featup_model: str = "jbu_one",
        featup_weights_path: Optional[Union[str, Path]] = None,
        class_names_path: Optional[Union[str, Path]] = None,
        device: str = "cuda",
        dtype: torch.dtype = torch.float16,
        cls_token_lambda: float = -0.3,
        logit_scale: float = 50.0,
        prob_thd: float = 0.0,
        bg_idx: int = 0,
        slide_crop: int = 0,
        slide_stride: int = 112,
        template_set: str = "openai",
    ):
        from transformers import CLIPModel, CLIPProcessor

        self.device = device
        self.dtype = dtype
        self.cls_token_lambda = cls_token_lambda
        self.logit_scale = logit_scale
        self.prob_thd = prob_thd
        self.bg_idx = bg_idx
        self.slide_crop = slide_crop
        self.slide_stride = slide_stride
        self.output_cls_token = cls_token_lambda != 0

        self.templates = sub_imagenet_template if template_set == "sub" else openai_imagenet_template

        self.clip = CLIPModel.from_pretrained(model_id).to(device).to(dtype).eval()
        try:
            self.processor = CLIPProcessor.from_pretrained(model_id)
        except Exception:
            # Fallback: use the tokenizer only (CLIPProcessor can trigger
            # mistral_common compatibility issues in some environments).
            from transformers import CLIPTokenizer
            self.processor = None
            self._tokenizer = CLIPTokenizer.from_pretrained(model_id)
        self.patch_size = 16
        self.feat_dim = 512

        # Resolve the FeatUp checkpoint path from the self-contained repo
        # (OV/weights/featup, OV-2/weights/featup, or top-level weights/featup).
        ckpt_name = FEATUP_CHECKPOINTS.get(featup_model, "").split("/")[-1]
        repo_dir = Path(__file__).parent
        _candidates = [
            Path(featup_weights_path) if featup_weights_path else None,
            repo_dir / "OV" / "weights" / "featup" / ckpt_name,
            repo_dir / "OV-2" / "weights" / "featup" / ckpt_name,
            repo_dir / "weights" / "featup" / ckpt_name,
        ]
        featup_path = next((p for p in _candidates if p and p.exists()), None)

        # Fall back to plain bilinear upsampling when no FeatUp checkpoint is found.
        self.use_featup = featup_path is not None and featup_path.exists()
        upsampler_name = "bilinear" if not self.use_featup else featup_model.replace("_maskclip", "")
        self.upsampler = get_upsampler(upsampler_name, self.feat_dim).to(device).to(dtype).eval()

        if self.use_featup:
            ckpt = torch.load(featup_path, map_location="cpu")
            sd = ckpt.get("state_dict", ckpt)
            # Keep only the upsampler weights, stripping the "upsampler." prefix (10 chars).
            weights = {k[10:]: v for k, v in sd.items() if k.startswith("upsampler.")}
            self.upsampler.load_state_dict(weights, strict=True)

        cls_path = class_names_path or (repo_dir / "configs" / "cls_openearthmap_sar.txt")
        cls_path = Path(cls_path)
        if cls_path.exists():
            self.class_names, self.class_indices = get_cls_idx(cls_path)
        else:
            self.class_names = ["building", "road", "water", "vegetation", "bare soil"]
            self.class_indices = list(range(len(self.class_names)))

        self.num_classes = max(self.class_indices) + 1
        self.num_queries = len(self.class_indices)
        self.query_idx = torch.tensor(self.class_indices, dtype=torch.int64, device=device)
        self._build_query_features()

    def _build_query_features(self):
        """Encode each class name with all prompt templates and average the embeddings."""
        query_features = []
        with torch.no_grad():
            tokenizer = getattr(self, "_tokenizer", None) or (self.processor.tokenizer if self.processor else None)
            for name in self.class_names:
                texts = [t(name) for t in self.templates]
                inputs = tokenizer(text=texts, return_tensors="pt", padding=True, truncation=True)
                inputs = {k: v.to(self.device) for k, v in inputs.items()}
                out = self.clip.get_text_features(**inputs)
                # get_text_features returns a tensor; keep defensive fallbacks for
                # model-output objects from other transformers versions.
                if hasattr(out, "shape"):
                    feat_t = out
                elif hasattr(out, "pooler_output") and out.pooler_output is not None:
                    feat_t = out.pooler_output
                else:
                    feat_t = out.last_hidden_state.mean(1)
                feat = feat_t.mean(0) / feat_t.mean(0).norm()
                query_features.append(feat.unsqueeze(0))
        self.query_features = torch.cat(query_features, dim=0).to(self.dtype)

    def _encode_image_patches(self, pixel_values: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        out = self.clip.vision_model(pixel_values)
        hidden = out.last_hidden_state
        proj = self.clip.visual_projection.weight
        patch_tokens = hidden[:, 1:, :]
        patch_feats = patch_tokens @ proj.T
        cls_token = None
        if self.output_cls_token:
            cls_tok = hidden[:, 0:1, :]
            cls_token = (cls_tok @ proj.T).squeeze(1)
            cls_token = F.normalize(cls_token, dim=-1)
        return patch_feats, cls_token

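    # Note: the patch tokens above are mapped through the same visual_projection
    # as the CLS token, so the dense features live in CLIP's shared image-text
    # embedding space and can be scored against self.query_features by dot product.
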
    def _preprocess_image(self, image: Image.Image, size: Optional[int] = 224, keep_size: bool = False) -> torch.Tensor:
        t = transforms.Compose([
            transforms.ToTensor(),
            # Standard CLIP normalization statistics.
            transforms.Normalize(
                [0.48145466, 0.4578275, 0.40821073],
                [0.26862954, 0.26130258, 0.27577711],
            ),
        ])
        x = t(image.convert("RGB"))
        if not keep_size and size:
            x = transforms.functional.resize(x, (size, size))
        return x.unsqueeze(0).to(self.device).to(self.dtype)

    def _compute_padsize(self, H: int, W: int) -> Tuple[int, int, int, int]:
        l, r, t, b = 0, 0, 0, 0
        if W % self.patch_size:
            lr = self.patch_size - (W % self.patch_size)
            l = lr // 2
            r = lr - l
        if H % self.patch_size:
            tb = self.patch_size - (H % self.patch_size)
            t = tb // 2
            b = tb - t
        return l, r, t, b

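    # Worked example: H = W = 250 with patch_size 16 gives 250 % 16 == 10, so each
    # axis gets 16 - 10 == 6 pixels of padding, split as (l, r, t, b) = (3, 3, 3, 3)
    # and yielding a 256x256 crop.
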
    def _forward_single_crop(self, img_tensor: torch.Tensor) -> torch.Tensor:
        B, C, H, W = img_tensor.shape
        patch_h, patch_w = H // self.patch_size, W // self.patch_size
        patch_feats, cls_token = self._encode_image_patches(img_tensor)
        patch_feats = patch_feats.permute(0, 2, 1).view(B, self.feat_dim, patch_h, patch_w)
        patch_feats = patch_feats.to(self.dtype)
        img_tensor = img_tensor.to(self.dtype)
        # SimFeatUp (or the bilinear fallback) upsamples patch features to pixel resolution.
        patch_feats = self.upsampler(patch_feats, img_tensor)
        out_h, out_w = H, W
        patch_feats = patch_feats.view(B, self.feat_dim, -1).permute(0, 2, 1)
        patch_feats = F.normalize(patch_feats, dim=-1)
        logits = patch_feats @ self.query_features.T
        if self.output_cls_token and cls_token is not None:
            # Global (CLS) similarity correction; cls_token_lambda is negative by
            # default, damping globally dominant classes.
            cls_logits = cls_token @ self.query_features.T
            logits = logits + cls_logits.unsqueeze(1) * self.cls_token_lambda
        logits = logits.permute(0, 2, 1).view(B, self.num_queries, out_h, out_w)
        return logits[0]

    def _forward_slide(self, img_tensor: torch.Tensor, ori_shape: Tuple[int, int]) -> torch.Tensor:
        B, _, h_img, w_img = img_tensor.shape
        stride = (self.slide_stride, self.slide_stride)
        crop = (self.slide_crop, self.slide_crop)
        h_stride, w_stride = stride
        h_crop, w_crop = crop
        h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
        w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
        preds = img_tensor.new_zeros((B, self.num_queries, h_img, w_img))
        count_mat = img_tensor.new_zeros((B, 1, h_img, w_img))
        for h_idx in range(h_grids):
            for w_idx in range(w_grids):
                y1 = h_idx * h_stride
                x1 = w_idx * w_stride
                y2 = min(y1 + h_crop, h_img)
                x2 = min(x1 + w_crop, w_img)
                y1 = max(y2 - h_crop, 0)
                x1 = max(x2 - w_crop, 0)
                crop_img = img_tensor[:, :, y1:y2, x1:x2]
                H, W = crop_img.shape[2:]
                l, r, t, b = self._compute_padsize(H, W)
                if any([l, r, t, b]):
                    crop_img = F.pad(crop_img, (l, r, t, b))
                crop_logits = self._forward_single_crop(crop_img)
                if any([l, r, t, b]):
                    crop_logits = crop_logits[:, t : t + H, l : l + W]
                # Paste crop logits back at their window position; overlaps are
                # averaged below via count_mat.
                pad_crop = F.pad(
                    crop_logits.unsqueeze(0),
                    (int(x1), int(preds.shape[3] - x2), int(y1), int(preds.shape[2] - y2)),
                )
                preds += pad_crop
                count_mat[:, :, y1:y2, x1:x2] += 1
        preds = preds / count_mat.clamp(min=1)
        logits = F.interpolate(preds, size=ori_shape, mode="bilinear")
        return logits[0]

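    # Grid-count example: a 1024x1024 input with slide_crop=448 and slide_stride=224
    # gives max(1024 - 448 + 223, 0) // 224 + 1 = 4 window positions per axis
    # (16 crops in total); pixels covered by several windows are averaged.
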
    def _postprocess(self, logits: torch.Tensor) -> torch.Tensor:
        logits = logits * self.logit_scale
        probs = logits.softmax(0)
        if self.num_classes != self.num_queries:
            cls_idx = F.one_hot(self.query_idx, self.num_classes)
            cls_idx = cls_idx.T.view(self.num_classes, self.num_queries, 1, 1)
            probs = (probs.unsqueeze(0) * cls_idx).max(1)[0]
        seg_pred = probs.argmax(0, keepdim=True)
        if self.prob_thd > 0:
            max_prob = probs.max(0, keepdim=True)[0]
            seg_pred[max_prob < self.prob_thd] = self.bg_idx
        return seg_pred.squeeze(0)

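    # Synonym-merging example: if the class file maps "building" and "house" to the
    # same class index, their two query channels are collapsed by the one-hot max
    # above into a single class probability map before the argmax.
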
    @torch.no_grad()
    def __call__(self, image: Union[Image.Image, torch.Tensor], return_logits: bool = False) -> torch.Tensor:
        if isinstance(image, Image.Image):
            use_slide = self.slide_crop > 0
            keep_size = use_slide
            img_tensor = self._preprocess_image(image, size=224, keep_size=keep_size)
        else:
            img_tensor = image.to(self.device).to(self.dtype)
            if img_tensor.dim() == 3:
                img_tensor = img_tensor.unsqueeze(0)
        B, C, H, W = img_tensor.shape
        ori_shape = (H, W)
        use_slide = self.slide_crop > 0 and (H > self.slide_crop or W > self.slide_crop)
        if use_slide:
            logits = self._forward_slide(img_tensor, ori_shape)
        else:
            l, r, t, b = self._compute_padsize(H, W)
            if any([l, r, t, b]):
                img_tensor = F.pad(img_tensor, (l, r, t, b))
                out_h, out_w = img_tensor.shape[2], img_tensor.shape[3]
            else:
                out_h, out_w = H, W
            logits = self._forward_single_crop(img_tensor)
            if any([l, r, t, b]):
                # Crop the padding back off so logits align with the unpadded input.
                logits = logits[:, t : t + H, l : l + W]
            if (out_h, out_w) != ori_shape:
                logits = F.interpolate(logits.unsqueeze(0), size=ori_shape, mode="bilinear").squeeze(0)
        if return_logits:
            if self.num_classes != self.num_queries:
                cls_idx = F.one_hot(self.query_idx, self.num_classes)
                cls_idx = cls_idx.T.view(self.num_classes, self.num_queries, 1, 1)
                logits = (logits.unsqueeze(0) * cls_idx).max(1)[0]
            return logits
        return self._postprocess(logits)


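# Minimal usage sketch for the CLIP pipeline (illustrative file names; assumes a
# CUDA device and downloaded weights):
#
#   pipe = SegEarthPipelineCLIP(slide_crop=448, slide_stride=224)
#   pred = pipe(Image.open("tile.png"))                        # (H, W) class indices
#   logits = pipe(Image.open("tile.png"), return_logits=True)  # (num_classes, H, W)

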
class SegEarthPipelineSAM3:
    """
    SAM3-based SegEarth pipeline (OV-3).
    Uses the sam3 package for open-vocabulary segmentation.
    Requires: pip install sam3 (or transformers>=4.45 for Sam3Model)
    """

    def __init__(
        self,
        model_id: str = "facebook/sam3",
        local_checkpoint: Optional[Union[str, Path]] = None,
        class_names_path: Optional[Union[str, Path]] = None,
        device: str = "cuda",
        prob_thd: float = 0.0,
        bg_idx: int = 0,
        slide_crop: int = 0,
        slide_stride: int = 112,
        confidence_threshold: float = 0.5,
        use_sem_seg: bool = True,
        use_presence_score: bool = True,
        use_transformer_decoder: bool = True,
    ):
        self.device = device
        self.prob_thd = prob_thd
        self.bg_idx = bg_idx
        self.slide_crop = slide_crop
        self.slide_stride = slide_stride
        self.confidence_threshold = confidence_threshold
        self.use_sem_seg = use_sem_seg
        self.use_presence_score = use_presence_score
        self.use_transformer_decoder = use_transformer_decoder

        # Workaround for cuDNN "No execution plans support the graph" with SDPA:
        # force the math kernel, disable flash/mem-efficient paths.
        if device == "cuda":
            if hasattr(torch.backends.cuda, "enable_flash_sdp"):
                torch.backends.cuda.enable_flash_sdp(False)
                torch.backends.cuda.enable_mem_efficient_sdp(False)
            if hasattr(torch.backends.cuda, "enable_math_sdp"):
                torch.backends.cuda.enable_math_sdp(True)

        try:
            from sam3 import build_sam3_image_model
            from sam3.model.sam3_image_processor import Sam3Processor
        except ImportError:
            raise ImportError(
                "SegEarth OV-3 requires the sam3 package. Install from: "
                "https://github.com/facebookresearch/sam3 or use transformers.Sam3Model.from_pretrained('facebook/sam3')"
            )

        ckpt_path = Path(local_checkpoint) if local_checkpoint else None
        if ckpt_path and not ckpt_path.is_absolute():
            ckpt_path = Path(__file__).parent / "OV-3" / ckpt_path
        use_safetensors = ckpt_path and str(ckpt_path).endswith(".safetensors") and ckpt_path.exists()
        use_pt = ckpt_path and (str(ckpt_path).endswith(".pt") or str(ckpt_path).endswith(".bin")) and ckpt_path.exists()

        if use_safetensors:
            self.model = build_sam3_image_model(checkpoint_path=None, load_from_HF=False, device=device)
            from safetensors.torch import load_file
            state_dict = load_file(str(ckpt_path))
            # The HF model.safetensors keys carry a "detector_model." prefix that the
            # sam3 model does not use; strip it, and load with strict=False to
            # tolerate any remaining key mismatches.
            state_dict = {k.replace("detector_model.", ""): v for k, v in state_dict.items()}
            self.model.load_state_dict(state_dict, strict=False)
        elif use_pt:
            self.model = build_sam3_image_model(checkpoint_path=str(ckpt_path), load_from_HF=False, device=device)
        else:
            self.model = build_sam3_image_model(checkpoint_path=None, load_from_HF=True, device=device)
        self.processor = Sam3Processor(self.model, confidence_threshold=confidence_threshold, device=device)

        repo_dir = Path(__file__).parent
        cls_path = class_names_path or (repo_dir / "configs" / "cls_openearthmap_sar.txt")
        cls_path = Path(cls_path)
        if cls_path.exists():
            self.class_names, self.class_indices = get_cls_idx(cls_path)
        else:
            self.class_names = ["building", "road", "water", "vegetation", "bare soil"]
            self.class_indices = list(range(len(self.class_names)))
        self.num_classes = max(self.class_indices) + 1
        self.num_queries = len(self.class_indices)
        self.query_idx = torch.tensor(self.class_indices, dtype=torch.int64, device=device)

    def _inference_single_view(self, image: Image.Image) -> torch.Tensor:
        w, h = image.size
        seg_logits = torch.zeros((self.num_queries, h, w), device=self.device)
        sdp_ctx = (
            torch.backends.cuda.sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False, enable_cudnn=False)
            if self.device == "cuda" and hasattr(torch.backends.cuda, "sdp_kernel")
            else contextlib.nullcontext()
        )
        # Autocast only applies on CUDA; disable it when running elsewhere.
        autocast_ctx = torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=self.device == "cuda")
        with torch.no_grad(), autocast_ctx, sdp_ctx:
            inference_state = self.processor.set_image(image)
            for query_idx, query_word in enumerate(self.class_names):
                self.processor.reset_all_prompts(inference_state)
                inference_state = self.processor.set_text_prompt(state=inference_state, prompt=query_word)
                # Instance masks from the transformer decoder: keep the per-pixel max,
                # weighted by each instance's objectness score.
                if self.use_transformer_decoder and inference_state.get("masks_logits") is not None:
                    inst_len = inference_state["masks_logits"].shape[0]
                    for inst_id in range(inst_len):
                        instance_logits = inference_state["masks_logits"][inst_id].squeeze()
                        instance_score = inference_state["object_score"][inst_id]
                        if instance_logits.shape != (h, w):
                            instance_logits = F.interpolate(
                                instance_logits.view(1, 1, *instance_logits.shape),
                                size=(h, w), mode="bilinear", align_corners=False,
                            ).squeeze()
                        seg_logits[query_idx] = torch.max(seg_logits[query_idx], instance_logits * instance_score)
                # Optionally fuse the dense semantic head the same way.
                if self.use_sem_seg and inference_state.get("semantic_mask_logits") is not None:
                    semantic_logits = inference_state["semantic_mask_logits"]
                    if semantic_logits.shape != (h, w):
                        semantic_logits = F.interpolate(
                            semantic_logits.view(1, 1, *semantic_logits.shape) if semantic_logits.dim() == 2 else semantic_logits.unsqueeze(0),
                            size=(h, w), mode="bilinear", align_corners=False,
                        ).squeeze()
                    seg_logits[query_idx] = torch.max(seg_logits[query_idx], semantic_logits)
                # Scale by the prompt-level presence score when available.
                if self.use_presence_score and inference_state.get("presence_score") is not None:
                    seg_logits[query_idx] = seg_logits[query_idx] * inference_state["presence_score"]
        return seg_logits

    def slide_inference(self, image: Image.Image) -> torch.Tensor:
        w_img, h_img = image.size
        stride = (self.slide_stride, self.slide_stride)
        crop = (self.slide_crop, self.slide_crop)
        h_stride, w_stride = stride
        h_crop, w_crop = crop
        h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
        w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
        preds = torch.zeros((self.num_queries, h_img, w_img), device=self.device)
        count_mat = torch.zeros((1, h_img, w_img), device=self.device)
        for h_idx in range(h_grids):
            for w_idx in range(w_grids):
                y1 = h_idx * h_stride
                x1 = w_idx * w_stride
                y2 = min(y1 + h_crop, h_img)
                x2 = min(x1 + w_crop, w_img)
                y1 = max(y2 - h_crop, 0)
                x1 = max(x2 - w_crop, 0)
                crop_img = image.crop((x1, y1, x2, y2))
                crop_seg = self._inference_single_view(crop_img)
                preds[:, y1:y2, x1:x2] += crop_seg
                count_mat[:, y1:y2, x1:x2] += 1
        return preds / count_mat.clamp(min=1)

    @torch.no_grad()
    def __call__(self, image: Union[Image.Image, torch.Tensor]) -> torch.Tensor:
        if isinstance(image, torch.Tensor):
            image = transforms.functional.to_pil_image(image)
        image = image.convert("RGB")
        if self.slide_crop > 0 and (image.size[0] > self.slide_crop or image.size[1] > self.slide_crop):
            seg_logits = self.slide_inference(image)
        else:
            seg_logits = self._inference_single_view(image)
        if self.num_classes != self.num_queries:
            cls_idx = F.one_hot(self.query_idx, self.num_classes)
            cls_idx = cls_idx.T.view(self.num_classes, self.num_queries, 1, 1)
            seg_logits = (seg_logits.unsqueeze(0) * cls_idx).max(1)[0]
        seg_pred = seg_logits.argmax(0, keepdim=True)
        if self.prob_thd > 0:
            max_prob = seg_logits.max(0, keepdim=True)[0]
            seg_pred[max_prob < self.prob_thd] = self.bg_idx
        return seg_pred.squeeze(0)


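# Minimal usage sketch for the SAM3 pipeline (illustrative; assumes the sam3
# package and a GPU; each class name is prompted in turn and fused per pixel):
#
#   pipe = SegEarthPipelineSAM3(confidence_threshold=0.5)
#   pred = pipe(Image.open("tile.png"))   # (H, W) tensor of class indices

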
def SegEarthPipeline(
    variant: str = "OV-2",
    model_id: Optional[str] = None,
    **kwargs,
):
    """
    Factory for SegEarth pipelines. Loads from the self-contained subfolders OV/, OV-2/, OV-3/.
    Args:
        variant: One of OV, OV-2, OV-3 (or legacy: ov_clip_openai_vitb16, ov2_alignearth_sar, ov3_sam3)
        model_id: Override the HF model ID
        **kwargs: Passed to the pipeline constructor
    """
    repo_dir = Path(__file__).parent
    variant_map = {"ov_clip_openai_vitb16": "OV", "ov2_alignearth_sar": "OV-2", "ov3_sam3": "OV-3"}
    subfolder = variant_map.get(variant, variant)
    sub_path = repo_dir / subfolder / "pipeline.py"
    if sub_path.exists():
        # Prefer the variant's own loader when its subfolder pipeline is present.
        import importlib.util
        spec = importlib.util.spec_from_file_location(f"segearth_{subfolder}", sub_path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod.load(**kwargs) if model_id is None else mod.load(model_id=model_id, **kwargs)
    # Fallback: legacy flat config. Dispatch on variant *before* defaulting
    # model_id, so the SAM3 pipeline is not handed the CLIP checkpoint ID.
    if variant in ("ov3_sam3", "OV-3"):
        return SegEarthPipelineSAM3(model_id=model_id or "facebook/sam3", **kwargs)
    if model_id is None:
        model_id = "BiliSakura/AlignEarth-SAR-ViT-B-16"
    return SegEarthPipelineCLIP(model_id=model_id, **kwargs)
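
# Factory usage sketch (illustrative):
#
#   pipe = SegEarthPipeline("OV-2")                  # CLIP-based default
#   pipe = SegEarthPipeline("OV-3", slide_crop=512)  # SAM3 backend
#   mask = pipe(Image.open("tile.png"))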