|
|
import os |
|
|
import cv2 |
|
|
import torch |
|
|
import numpy as np |
|
|
import supervision as sv |
|
|
from PIL import Image |
|
|
from sam2.build_sam import build_sam2_video_predictor, build_sam2 |
|
|
from sam2.sam2_image_predictor import SAM2ImagePredictor |
|
|
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection |
|
|
from utils.track_utils import sample_points_from_masks |
|
|
from utils.video_utils import create_video_from_images |
|
|
|
|
|
|
|
|
"""
Step 1: Environment settings and model initialization
"""

# Guard all CUDA-only setup on availability so the script still starts on a
# CPU-only machine (the rest of the script falls back via the `device`
# variable chosen below). The original unconditional calls crash without a GPU.
if torch.cuda.is_available():
    # bfloat16 autocast for the whole script. Entering the context manager
    # manually (and never exiting) is deliberate in this flat demo script;
    # in library code prefer a `with` block.
    torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()

    if torch.cuda.get_device_properties(0).major >= 8:
        # Ampere (compute capability >= 8.0) supports TF32; enabling it trades
        # a little matmul/conv precision for a large speedup.
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
|
|
|
|
|
|
|
|
# SAM 2 weights and the matching model configuration name.
sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt"
model_cfg = "sam2_hiera_l.yaml"

# Two predictors built from the same checkpoint:
#  - `video_predictor` propagates object masks across frames (Steps 3-4),
#  - `image_predictor` prompts SAM 2 on the single annotation frame (Step 2).
video_predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint)
sam2_image_model = build_sam2(model_cfg, sam2_checkpoint)
image_predictor = SAM2ImagePredictor(sam2_image_model)
|
|
|
|
|
|
|
|
|
|
|
# Grounding DINO (tiny variant) supplies text-conditioned detection boxes
# for the annotation frame; SAM 2 turns those boxes into masks.
model_id = "IDEA-Research/grounding-dino-tiny"

# Prefer the GPU when present, otherwise run everything on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

processor = AutoProcessor.from_pretrained(model_id)
grounding_model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id)
grounding_model = grounding_model.to(device)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Text prompt for Grounding DINO. Per Grounding DINO's prompt format the
# phrase should be lowercase and end with a dot.
text = "head."

# Directory containing the video already decoded into JPEG frames; file names
# are expected to be "<frame_index>.jpg" (see the numeric sort below).
video_dir = "demo/tiger"
|
|
|
|
|
|
|
|
# Scan the frame directory for JPEG images. The extension check is
# case-insensitive, generalizing the original explicit whitelist
# [".jpg", ".jpeg", ".JPG", ".JPEG"] to any capitalization (e.g. ".Jpg").
frame_names = [
    p
    for p in os.listdir(video_dir)
    if os.path.splitext(p)[-1].lower() in (".jpg", ".jpeg")
]
# Frames are named "<index>.jpg"; sort numerically so "10.jpg" follows "9.jpg".
frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))

# Build SAM 2's per-video inference state from the frame directory.
inference_state = video_predictor.init_state(video_path=video_dir)

# Frame index that receives the initial prompts.
ann_frame_idx = 0
# Object id for the (first) annotated object; later branches assign ids 1..N.
ann_obj_id = 1
|
|
|
|
|
|
|
|
"""
Step 2: Prompt Grounding DINO and SAM image predictor to get the box and mask for specific frame
"""

# Open the annotation frame as a PIL image.
img_path = os.path.join(video_dir, frame_names[ann_frame_idx])
image = Image.open(img_path)

# Run Grounding DINO on (frame, text prompt) without tracking gradients.
dino_inputs = processor(images=image, text=text, return_tensors="pt").to(device)
with torch.no_grad():
    dino_outputs = grounding_model(**dino_inputs)

# Decode logits into thresholded pixel-space boxes and their label strings.
# PIL's `image.size` is (W, H); `target_sizes` expects (H, W), hence [::-1].
results = processor.post_process_grounded_object_detection(
    dino_outputs,
    dino_inputs.input_ids,
    box_threshold=0.25,
    text_threshold=0.3,
    target_sizes=[image.size[::-1]],
)
|
|
|
|
|
|
|
|
# Hand the annotation frame to the SAM 2 image predictor.
image_predictor.set_image(np.array(image.convert("RGB")))

# Boxes in xyxy pixel coordinates and the matched phrase per box.
input_boxes = results[0]["boxes"].cpu().numpy()
OBJECTS = results[0]["labels"]

# Fail early with an actionable message when nothing was detected; otherwise
# `predict` below would raise an opaque shape error.
if len(input_boxes) == 0:
    raise RuntimeError(
        f"Grounding DINO detected no objects for prompt {text!r}; "
        "try lowering box_threshold/text_threshold."
    )

# One mask per box (multimask_output=False keeps only the best candidate).
masks, scores, logits = image_predictor.predict(
    point_coords=None,
    point_labels=None,
    box=input_boxes,
    multimask_output=False,
)

# Normalize mask batch dimensions for the downstream code.
# NOTE(review): upstream logic kept verbatim — for a 3-D result `masks[None]`
# adds a leading axis, for 4-D the per-object candidate axis is squeezed.
# Confirm against SAM2ImagePredictor.predict's return contract before changing.
if masks.ndim == 3:
    masks = masks[None]
    scores = scores[None]
    logits = logits[None]
elif masks.ndim == 4:
    masks = masks.squeeze(1)
|
|
|
|
|
"""
Step 3: Register each object's positive points to video predictor with separate add_new_points call
"""

# How the first-frame objects are handed to the video predictor:
# "point" (sampled clicks), "box" (detector boxes), or "mask" (SAM masks).
PROMPT_TYPE_FOR_VIDEO = "box"

# Validate with a real exception instead of `assert`, which is silently
# stripped when Python runs with -O.
if PROMPT_TYPE_FOR_VIDEO not in ("point", "box", "mask"):
    raise ValueError("SAM 2 video predictor only support point/box/mask prompt")
|
|
|
|
|
|
|
|
# Register every detected object on the annotation frame, one predictor call
# per object so each receives its own tracking id (1..N).
if PROMPT_TYPE_FOR_VIDEO == "point":
    # Sample positive click points from inside each object's mask.
    all_sample_points = sample_points_from_masks(masks=masks, num_points=10)

    for object_id, (label, points) in enumerate(
        zip(OBJECTS, all_sample_points), start=1
    ):
        # Every sampled point is a positive click (label 1).
        labels = np.ones((points.shape[0]), dtype=np.int32)
        _, out_obj_ids, out_mask_logits = video_predictor.add_new_points_or_box(
            inference_state=inference_state,
            frame_idx=ann_frame_idx,
            obj_id=object_id,
            points=points,
            labels=labels,
        )

elif PROMPT_TYPE_FOR_VIDEO == "box":
    # Feed each Grounding DINO box straight through as a box prompt.
    for object_id, (label, box) in enumerate(zip(OBJECTS, input_boxes), start=1):
        _, out_obj_ids, out_mask_logits = video_predictor.add_new_points_or_box(
            inference_state=inference_state,
            frame_idx=ann_frame_idx,
            obj_id=object_id,
            box=box,
        )

elif PROMPT_TYPE_FOR_VIDEO == "mask":
    # Hand the SAM image masks to the video predictor directly.
    # (Removed a dead `labels = np.ones((1), dtype=np.int32)` local here:
    # `add_new_mask` takes no labels argument and the value was never used.)
    for object_id, (label, mask) in enumerate(zip(OBJECTS, masks), start=1):
        _, out_obj_ids, out_mask_logits = video_predictor.add_new_mask(
            inference_state=inference_state,
            frame_idx=ann_frame_idx,
            obj_id=object_id,
            mask=mask,
        )
else:
    raise NotImplementedError(
        "SAM 2 video predictor only support point/box/mask prompts"
    )
|
|
|
|
|
|
|
|
"""
Step 4: Propagate the video predictor to get the segmentation results for each frame
"""

# Collected results: frame_idx -> {obj_id: boolean mask array}.
video_segments = {}
for out_frame_idx, out_obj_ids, out_mask_logits in video_predictor.propagate_in_video(
    inference_state
):
    per_object = {}
    for i, out_obj_id in enumerate(out_obj_ids):
        # Threshold the logits at 0 to obtain a hard boolean mask on CPU.
        per_object[out_obj_id] = (out_mask_logits[i] > 0.0).cpu().numpy()
    video_segments[out_frame_idx] = per_object
|
|
|
|
|
"""
Step 5: Visualize the segment results across the video and save them
"""

save_dir = "demo/masks"

# Race-free replacement for the exists()/makedirs() pair.
os.makedirs(save_dir, exist_ok=True)

# Map tracking id -> label phrase (presumably consumed by a later labeling
# step; unused in the loop below).
ID_TO_OBJECTS = {i: obj for i, obj in enumerate(OBJECTS, start=1)}

# Construct the annotator once and reuse it; building it inside the per-frame
# loop (as originally written) allocated a new annotator every iteration.
mask_annotator = sv.MaskAnnotator()

for frame_idx, segments in video_segments.items():
    img = cv2.imread(os.path.join(video_dir, frame_names[frame_idx]))

    # Stack this frame's per-object masks into one (n, H, W) array.
    object_ids = list(segments.keys())
    masks = list(segments.values())
    masks = np.concatenate(masks, axis=0)

    detections = sv.Detections(
        xyxy=sv.mask_to_xyxy(masks),
        mask=masks,
        class_id=np.array(object_ids, dtype=np.int32),
    )

    # Draw the masks on a black canvas the size of the frame, so the saved
    # PNGs contain the masks only, not the original image pixels.
    annotated_frame = mask_annotator.annotate(
        scene=np.zeros_like(img), detections=detections
    )
    cv2.imwrite(
        os.path.join(save_dir, f"annotated_frame_{frame_idx:05d}.png"), annotated_frame
    )
|
|
|