|
|
import os |
|
|
import cv2 |
|
|
import numpy as np |
|
|
import torch |
|
|
from PIL import Image |
|
|
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection |
|
|
from sam2.sam2_image_predictor import SAM2ImagePredictor |
|
|
from sam2.build_sam import build_sam2 |
|
|
|
|
|
def segment_single_image(image_path, text_prompt,
                         sam2_checkpoint="/mnt/prev_nas/qhy/MagicMotion/trajectory_construction/Grounded_SAM2/checkpoints/sam2_hiera_large.pt",
                         model_cfg="sam2_hiera_l.yaml"):
    """Text-guided segmentation of a single image; returns a binary mask.

    Pipeline: Grounding DINO detects boxes for the text prompt, then SAM2
    refines each box into a pixel mask; all instance masks are unioned.

    Args:
        image_path (str): Path to the input image.
        text_prompt (str): Text prompt (e.g. "car").
        sam2_checkpoint (str): Path to the SAM2 model checkpoint.
        model_cfg (str): SAM2 config file name.

    Returns:
        mask (np.ndarray | None): Binary mask (H, W), dtype=bool, or None
            when nothing matching the prompt was detected.
        success (bool): Whether at least one object was detected.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Grounding DINO: zero-shot, text-conditioned box detection.
    processor = AutoProcessor.from_pretrained(
        "/mnt/prev_nas/qhy/MagicMotion/trajectory_construction/Grounded_SAM2/checkpoints/grounding-dino-tiny"
    )
    grounding_model = AutoModelForZeroShotObjectDetection.from_pretrained(
        "/mnt/prev_nas/qhy/MagicMotion/trajectory_construction/Grounded_SAM2/checkpoints/grounding-dino-tiny"
    ).to(device)

    # BUG FIX: build_sam2 defaults to device="cuda"; forward the detected
    # device so the function also runs on CPU-only machines, consistent
    # with how the Grounding DINO model is placed above.
    sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device)
    predictor = SAM2ImagePredictor(sam2_model)

    image_pil = Image.open(image_path).convert("RGB")
    image_np = np.array(image_pil)

    # Grounding DINO expects lower-case queries terminated by a period.
    text = text_prompt.strip().lower()
    if not text.endswith("."):
        text += "."

    inputs = processor(images=image_pil, text=text, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = grounding_model(**inputs)

    # PIL .size is (W, H); target_sizes expects (H, W), hence the reversal.
    results = processor.post_process_grounded_object_detection(
        outputs,
        inputs.input_ids,
        box_threshold=0.25,
        text_threshold=0.3,
        target_sizes=[image_pil.size[::-1]]
    )

    boxes = results[0]["boxes"].cpu().numpy()
    if len(boxes) == 0:
        print(f"❌ 未检测到 '{text_prompt}'")
        return None, False

    print(f"✅ 检测到 {len(boxes)} 个 '{text_prompt}'")

    # SAM2: refine every detected box into a pixel-accurate mask.
    predictor.set_image(image_np)
    masks, _, _ = predictor.predict(
        point_coords=None,
        point_labels=None,
        box=boxes,
        multimask_output=False
    )

    # Multi-box output is (N, 1, H, W); drop the singleton channel, then
    # union all instance masks into one foreground mask.
    if masks.ndim == 4:
        masks = masks.squeeze(1)
    final_mask = np.any(masks, axis=0)

    return final_mask, True
|
|
|
|
|
|
|
|
def save_mask(mask, output_path):
    """Save a binary mask as a PNG (white = foreground, black = background).

    Args:
        mask (np.ndarray): Binary mask (H, W); bool or 0/1 values.
        output_path (str): Destination file path.

    Raises:
        IOError: If OpenCV fails to write the file.
    """
    # Create the destination directory if needed; cv2.imwrite will not.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    # bool / 0-1 values -> 0/255 grayscale for a viewable PNG.
    mask_uint8 = (mask * 255).astype(np.uint8)
    # cv2.imwrite signals failure only via its return value — surface it.
    if not cv2.imwrite(output_path, mask_uint8):
        raise IOError(f"Failed to write mask to {output_path}")
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    import argparse
    import sys

    # CLI: segment one image with a text prompt and save the mask.
    parser = argparse.ArgumentParser(description="单图文本引导分割")
    parser.add_argument("--image", required=True, help="输入图片路径")
    parser.add_argument("--text", required=True, help="文本提示(如 'car')")
    parser.add_argument("--output", default="mask.png", help="输出掩码路径")
    args = parser.parse_args()

    mask, success = segment_single_image(args.image, args.text)

    if success:
        save_mask(mask, args.output)
        print(f"✅ 掩码已保存至: {args.output}")
    else:
        print("❌ 分割失败")
        # BUG FIX: exit non-zero on failure so shell pipelines and batch
        # drivers can detect that no mask was produced.
        sys.exit(1)
|
|
|