# File size: 3,779 Bytes
# ad44ad4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# (removed: stray run of bare line numbers — copy/paste residue, not code)
import functools
import os

import cv2
import numpy as np
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

@functools.lru_cache(maxsize=1)
def _load_models(sam2_checkpoint, model_cfg):
    """Load and cache the Grounding DINO + SAM2 models.

    Cached so that repeated calls to ``segment_single_image`` do not reload
    multi-GB weights from disk on every invocation.

    Returns:
        tuple: (processor, grounding_model, predictor, device)
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Grounding DINO (zero-shot text-conditioned detector)
    dino_path = "/mnt/prev_nas/qhy/MagicMotion/trajectory_construction/Grounded_SAM2/checkpoints/grounding-dino-tiny"
    processor = AutoProcessor.from_pretrained(dino_path)
    grounding_model = AutoModelForZeroShotObjectDetection.from_pretrained(
        dino_path
    ).to(device)

    # SAM2 image predictor.
    # BUG FIX: build_sam2 defaults to device="cuda"; pass the detected device
    # explicitly so CPU-only machines work and both models share one device.
    sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device)
    predictor = SAM2ImagePredictor(sam2_model)

    return processor, grounding_model, predictor, device


def segment_single_image(image_path, text_prompt, 
                        sam2_checkpoint="/mnt/prev_nas/qhy/MagicMotion/trajectory_construction/Grounded_SAM2/checkpoints/sam2_hiera_large.pt",
                        model_cfg="sam2_hiera_l.yaml"):
    """
    Text-guided segmentation of a single image; returns a binary mask.

    Detects objects matching ``text_prompt`` with Grounding DINO, then
    prompts SAM2 with the detected boxes and ORs all instance masks together.

    Args:
        image_path (str): path to the input image
        text_prompt (str): text prompt (e.g. "car")
        sam2_checkpoint (str): path to the SAM2 checkpoint
        model_cfg (str): SAM2 config file name

    Returns:
        mask (np.ndarray | None): binary mask of shape (H, W), dtype=bool;
            None when nothing was detected
        success (bool): True iff at least one object was detected
    """
    # === 1. Load models (cached — see _load_models) ===
    processor, grounding_model, predictor, device = _load_models(
        sam2_checkpoint, model_cfg
    )
    
    # === 2. Read the image ===
    image_pil = Image.open(image_path).convert("RGB")
    image_np = np.array(image_pil)
    
    # === 3. Normalize the prompt (Grounding DINO expects lowercase + trailing period) ===
    text = text_prompt.strip().lower()
    if not text.endswith("."):
        text += "."
    
    # === 4. Grounding DINO: detect bounding boxes for the prompt ===
    inputs = processor(images=image_pil, text=text, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = grounding_model(**inputs)
    
    results = processor.post_process_grounded_object_detection(
        outputs,
        inputs.input_ids,
        box_threshold=0.25,
        text_threshold=0.3,
        target_sizes=[image_pil.size[::-1]]  # PIL size is (W, H) -> reversed to (H, W)
    )
    
    boxes = results[0]["boxes"].cpu().numpy()
    if len(boxes) == 0:
        print(f"❌ 未检测到 '{text_prompt}'")
        return None, False
    
    print(f"✅ 检测到 {len(boxes)} 个 '{text_prompt}'")
    
    # === 5. SAM2: generate masks from the detected boxes ===
    predictor.set_image(image_np)
    masks, _, _ = predictor.predict(
        point_coords=None,
        point_labels=None,
        box=boxes,          # SAM2 accepts a batch of boxes at once
        multimask_output=False
    )
    
    # Merge all detected instances into one mask (logical OR).
    if masks.ndim == 4:
        masks = masks.squeeze(1)  # (N, 1, H, W) -> (N, H, W)
    final_mask = np.any(masks, axis=0)  # (H, W), bool
    
    return final_mask, True


def save_mask(mask, output_path):
    """Write a binary mask to *output_path* as a PNG (foreground white, background black)."""
    pixels = np.uint8(mask * 255)
    cv2.imwrite(output_path, pixels)


if __name__ == "__main__":
    import argparse

    # Command-line entry point: image path + text prompt in, mask PNG out.
    cli = argparse.ArgumentParser(description="单图文本引导分割")
    cli.add_argument("--image", required=True, help="输入图片路径")
    cli.add_argument("--text", required=True, help="文本提示(如 'car')")
    cli.add_argument("--output", default="mask.png", help="输出掩码路径")
    opts = cli.parse_args()

    result_mask, ok = segment_single_image(opts.image, opts.text)

    if not ok:
        print("❌ 分割失败")
    else:
        save_mask(result_mask, opts.output)
        print(f"✅ 掩码已保存至: {opts.output}")