# Uploaded via huggingface_hub by Qnancy (commit ad44ad4, verified).
import gc
import json
import os
import shutil
import sys
from pathlib import Path
# Kill a potentially conflicting process first (optional, best-effort).
os.system("pkill -f monitor_3.py")
# Resolve the directory of this script and put its Grounded_SAM2 checkout on
# sys.path so the `sam2` / `utils` packages imported below can be found.
current_dir = os.path.dirname(os.path.abspath(__file__))
grounded_sam2_path = os.path.join(current_dir, "Grounded_SAM2")
if grounded_sam2_path not in sys.path:
    sys.path.insert(0, grounded_sam2_path)
import cv2
import numpy as np
import supervision as sv
import torch
from PIL import Image
from transformers import AutoModelForZeroShotObjectDetection, AutoProcessor
from Grounded_SAM2.sam2.build_sam import build_sam2, build_sam2_video_predictor
from Grounded_SAM2.sam2.sam2_image_predictor import SAM2ImagePredictor
from Grounded_SAM2.utils.track_utils import sample_points_from_masks
import argparse
# ========================
# Configuration
# ========================
JSON_PATH = "/mnt/prev_nas/qhy/datasets/miradata9k_trackable_objects.json"  # task list: video_path + trackable_objects per entry
VIDEO_ROOT_DIR = "/mnt/prev_nas/qhy/datasets/MiraData9K_download/zip/MiraData9K"  # root holding the source videos
OUTPUT_DIR = "/ossfs/workspace"  # annotated trajectory videos are written here
PROGRESS_FILE = os.path.join(OUTPUT_DIR, "progress.json")  # resume/progress file path
MODEL_CFG = "sam2_hiera_l.yaml"  # SAM2 model config name
CHECKPOINT = "/mnt/prev_nas/qhy/MagicMotion/trajectory_construction/Grounded_SAM2/checkpoints/sam2_hiera_large.pt"
GROUNDING_MODEL_ID = "/mnt/prev_nas/qhy/MagicMotion/trajectory_construction/Grounded_SAM2/checkpoints/grounding-dino-tiny"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
FPS = 16  # frame rate of the saved output videos
def load_progress():
    """Load the progress file; return {} when it is missing or unparseable."""
    if not os.path.exists(PROGRESS_FILE):
        return {}
    with open(PROGRESS_FILE, 'r', encoding='utf-8') as fh:
        try:
            return json.load(fh)
        except json.JSONDecodeError:
            # Corrupt file (e.g. interrupted write): start over from scratch.
            print(f"⚠️ {PROGRESS_FILE} 格式错误,将重建")
            return {}
def save_progress(progress):
    """Persist *progress* to PROGRESS_FILE using a write-then-rename swap."""
    tmp_path = PROGRESS_FILE + ".tmp"
    with open(tmp_path, 'w', encoding='utf-8') as fh:
        json.dump(progress, fh, indent=2, ensure_ascii=False)
    # Rename is atomic on the same filesystem, so a crash mid-write can never
    # leave a half-written progress file behind.
    shutil.move(tmp_path, PROGRESS_FILE)
def segment_core(
    text,
    video_dir,
    inference_state,
    video_predictor,
    image_predictor,
    grounding_model,
    processor,
    device="cuda"
):
    """Detect the objects named in *text* on the first frame with Grounding
    DINO, prompt the SAM2 video predictor with the detected boxes, propagate
    the masks through every frame, and render each frame's masks onto a black
    canvas.

    Args:
        text: Grounding DINO text prompt (period-separated object phrases).
        video_dir: Directory of numbered JPEG frames for one video.
        inference_state: Fresh SAM2 video state from ``init_state`` (reset by
            the caller afterwards).
        video_predictor / image_predictor: Pre-built SAM2 predictors.
        grounding_model / processor: Pre-loaded Grounding DINO model pair.
        device: Torch device string for the detection inputs.

    Returns:
        List of BGR uint8 frames (masks drawn on black), or [] when
        Grounding DINO detects nothing.
    """
    # Collect JPEG frame names and sort numerically by integer stem
    # (frames are written as zero-padded counters by extract_all_frames).
    frame_names = [
        p for p in os.listdir(video_dir)
        if os.path.splitext(p)[-1].lower() in [".jpg", ".jpeg"]
    ]
    frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
    ann_frame_idx = 0  # the first frame is used for the detection prompt
    img_path = os.path.join(video_dir, frame_names[ann_frame_idx])
    image = Image.open(img_path).convert("RGB")
    # Grounding DINO inference on the prompt frame.
    inputs = processor(images=image, text=text, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = grounding_model(**inputs)
    results = processor.post_process_grounded_object_detection(
        outputs,
        inputs.input_ids,
        box_threshold=0.25,
        text_threshold=0.3,
        target_sizes=[image.size[::-1]],  # PIL size is (W, H); reversed to (H, W)
    )
    input_boxes = results[0]["boxes"].cpu().numpy()
    OBJECTS = results[0]["labels"]
    if len(input_boxes) == 0:
        print(f"⚠️ Grounding DINO 未检测到任何对象: '{text}'")
        return []
    # Single-image SAM2 prediction seeded with the detected boxes.
    image_predictor.set_image(np.array(image))
    masks, _, _ = image_predictor.predict(
        point_coords=None,
        point_labels=None,
        box=input_boxes,
        multimask_output=False,
    )
    # Normalize mask shape to (n, H, W).
    if masks.ndim == 4:
        masks = masks.squeeze(1)  # (n, 1, H, W) -> (n, H, W)
    elif masks.ndim == 3 and masks.shape[0] != len(OBJECTS):
        # NOTE(review): adds a leading axis when the object count mismatches;
        # the original comment claimed (H, W) -> (1, H, W), but a 3-D input
        # would become 4-D here — confirm the intended shapes upstream.
        masks = masks[None]
    # Register every detected object with the video predictor. Only the
    # "box" branch is active; the point/mask branches are kept for reference.
    PROMPT_TYPE_FOR_VIDEO = "box"
    if PROMPT_TYPE_FOR_VIDEO == "point":
        all_sample_points = sample_points_from_masks(masks=masks, num_points=10)
        for obj_id, (label, points) in enumerate(zip(OBJECTS, all_sample_points), start=1):
            labels = np.ones((points.shape[0]), dtype=np.int32)
            video_predictor.add_new_points_or_box(
                inference_state=inference_state,
                frame_idx=ann_frame_idx,
                obj_id=obj_id,
                points=points,
                labels=labels,
            )
    elif PROMPT_TYPE_FOR_VIDEO == "box":
        for obj_id, (label, box) in enumerate(zip(OBJECTS, input_boxes), start=1):
            video_predictor.add_new_points_or_box(
                inference_state=inference_state,
                frame_idx=ann_frame_idx,
                obj_id=obj_id,
                box=box,
            )
    elif PROMPT_TYPE_FOR_VIDEO == "mask":
        for obj_id, (label, mask) in enumerate(zip(OBJECTS, masks), start=1):
            video_predictor.add_new_mask(
                inference_state=inference_state,
                frame_idx=ann_frame_idx,
                obj_id=obj_id,
                mask=mask,
            )
    else:
        raise NotImplementedError("Only support 'point', 'box', or 'mask'.")
    # Propagate across all frames:
    # video_segments maps frame_idx -> {obj_id: boolean mask}.
    video_segments = {}
    for out_frame_idx, out_obj_ids, out_mask_logits in video_predictor.propagate_in_video(inference_state):
        video_segments[out_frame_idx] = {
            out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy()[0]
            for i, out_obj_id in enumerate(out_obj_ids)
        }
    # Render each frame's masks onto a black canvas the size of the frame.
    annotated_frames = []
    for frame_idx in range(len(frame_names)):
        frame_path = os.path.join(video_dir, frame_names[frame_idx])
        if not os.path.exists(frame_path):
            continue
        img = cv2.imread(frame_path)
        if img is None:
            continue
        # No masks for this frame: emit an all-black frame to keep the
        # output video length equal to the input.
        if frame_idx not in video_segments or len(video_segments[frame_idx]) == 0:
            annotated_frames.append(np.zeros_like(img))
            continue
        segments = video_segments[frame_idx]
        object_ids = list(segments.keys())
        masks_list = list(segments.values())
        masks_array = np.stack(masks_list, axis=0)  # (n, h, w)
        detections = sv.Detections(
            xyxy=sv.mask_to_xyxy(masks_array),
            mask=masks_array,
            class_id=np.array(object_ids, dtype=int),
        )
        mask_annotator = sv.MaskAnnotator()
        annotated_frame = mask_annotator.annotate(
            scene=np.zeros_like(img), detections=detections
        )
        annotated_frames.append(annotated_frame)
    return annotated_frames
def save_video(frames, output_path, fps=16):
    """Encode a list of BGR frames to *output_path* as an mp4v MP4 file."""
    if not frames:
        print(f"⚠️ 无帧可保存: {output_path}")
        return
    height, width, _ = frames[0].shape
    # Make sure the destination directory exists before opening the writer.
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    writer = cv2.VideoWriter(
        output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)
    )
    for frame in frames:
        writer.write(frame)
    writer.release()
def extract_all_frames(video_path, output_dir=None, image_format="jpg"):
    """Extract every frame of *video_path* into *output_dir* as numbered images.

    Args:
        video_path: Path to the source video (str or Path).
        output_dir: Destination directory; defaults to "<stem>_frames" next to
            the video. An existing directory is wiped before extraction.
        image_format: File extension handed to ``cv2.imwrite`` (default "jpg").

    Returns:
        The directory path the frames were written to.

    Raises:
        FileNotFoundError: If *video_path* does not exist.
        IOError: If OpenCV cannot open the video.
    """
    video_path = str(video_path)
    if not os.path.exists(video_path):
        raise FileNotFoundError(f"视频不存在: {video_path}")
    if output_dir is None:
        video_stem = Path(video_path).stem
        parent_dir = Path(video_path).parent
        output_dir = str(parent_dir / f"{video_stem}_frames")
    # Start from a clean directory so stale frames from a previous run can
    # never leak into the current extraction.
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir, exist_ok=False)
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise IOError(f"无法打开视频: {video_path}")
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print(f"📽️ 处理视频: {video_path} ({total_frames} 帧)")
    saved_count = 0
    frame_idx = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            save_path = os.path.join(output_dir, f"{frame_idx:06d}.{image_format}")
            if cv2.imwrite(save_path, frame):
                saved_count += 1
            frame_idx += 1
    finally:
        # Release the capture even if read/imwrite raises, so the handle is
        # never leaked across the long batch loop in main().
        cap.release()
    print(f"✅ 已提取 {saved_count} 帧到: {output_dir}")
    return output_dir
def main():
    """Run the full batch: for every video in JSON_PATH, extract frames,
    segment/track its annotated objects, and save a mask-trajectory video,
    recording per-video status in PROGRESS_FILE for resumability."""
    # Resume support: progress maps video_path -> {"status": ...}.
    progress = load_progress()
    print(f"📁 已加载 {len(progress)} 条处理记录")
    # ========================
    # Initialize models (loaded once, reused for every video)
    # ========================
    print("🚀 正在加载模型...")
    # Long-lived autocast context for the whole run, entered manually as in
    # the upstream Grounded-SAM2 demos; disabled when running on CPU.
    torch.autocast(device_type="cuda", enabled=(DEVICE=="cuda"), dtype=torch.bfloat16).__enter__()
    if torch.cuda.is_available():
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
    # Video and image predictors are built from the same SAM2 checkpoint.
    video_predictor = build_sam2_video_predictor(MODEL_CFG, CHECKPOINT).to(DEVICE)
    sam2_image_model = build_sam2(MODEL_CFG, CHECKPOINT).to(DEVICE)
    image_predictor = SAM2ImagePredictor(sam2_image_model)
    # Grounding DINO (detector + its processor).
    processor = AutoProcessor.from_pretrained(GROUNDING_MODEL_ID)
    grounding_model = AutoModelForZeroShotObjectDetection.from_pretrained(GROUNDING_MODEL_ID).to(DEVICE)
    print("✅ 模型加载完成")
    # ========================
    # Load the task list (once — the original read the file twice)
    # ========================
    with open(JSON_PATH, 'r', encoding='utf-8') as f:
        data = json.load(f)
    print(f"📋 共 {len(data)} 个视频待处理")
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    print(f"Loaded {len(data)} videos")
    for item in data:
        video_path = item["video_path"]  # relative path, e.g. MiraData9K/xxx/yyy.mp4
        trackable_objects = item["trackable_objects"]
        # Mirror the relative layout under OUTPUT_DIR, dropping the leading
        # "MiraData9K/" component. (The original also did a no-op
        # ".mp4" -> ".mp4" replace, removed here.)
        output_video_path = os.path.join(OUTPUT_DIR, video_path).replace("MiraData9K/", "")
        local_video_path = os.path.join(VIDEO_ROOT_DIR, video_path)
        # Output already on disk but missing from progress → back-fill as done.
        if os.path.exists(output_video_path) and video_path not in progress:
            progress[video_path] = {"status": "done"}
            print(f"🔍 发现已存在结果,标记为完成: {video_path}")
            continue
        # Already finished in a previous run?
        if progress.get(video_path, {}).get("status") == "done":
            print(f"⏭️ 已完成,跳过: {video_path}")
            continue
        # Nothing to track for this video.
        if not trackable_objects:
            progress[video_path] = {"status": "skipped"}
            print(f"⏭️ 无跟踪对象,跳过: {video_path}")
            continue
        print(f"🎬 Processing: {video_path}")
        print(f" Objects: {trackable_objects}")
        try:
            # 1. Decode the video into individual frames.
            frame_dir = extract_all_frames(local_video_path)
            # 2. Fresh per-video tracking state (must not be reused).
            inference_state = video_predictor.init_state(video_path=frame_dir)
            # 3. Run segmentation; prompt is period-separated object phrases.
            text_prompt = ".".join(trackable_objects) + "."
            annotated_frames = segment_core(
                text=text_prompt,
                video_dir=frame_dir,
                inference_state=inference_state,
                video_predictor=video_predictor,
                image_predictor=image_predictor,
                grounding_model=grounding_model,
                processor=processor,
                device=DEVICE
            )
            if not annotated_frames:
                print(f"❌ 分割结果为空: {video_path}")
                continue
            # 4. Save the mask-trajectory video and mark the item done.
            save_video(annotated_frames, output_video_path, fps=FPS)
            progress[video_path] = {"status": "done"}
            print(f"✅ 成功保存: {output_video_path}")
        except Exception as e:
            # Record the failure so the run can continue with the next video.
            print(f"❌ 处理失败 {video_path}: {str(e)}")
            progress[video_path] = {"status": "failed", "error": str(e)}
        finally:
            # Release the per-video state and temp frames; the models stay
            # resident for the next video. Progress is flushed every item.
            if 'inference_state' in locals():
                video_predictor.reset_state(inference_state)
                del inference_state
            if 'frame_dir' in locals() and os.path.exists(frame_dir):
                shutil.rmtree(frame_dir)
            torch.cuda.empty_cache()
            gc.collect()
            save_progress(progress)
    print("🎉 所有视频处理完成!")
if __name__ == "__main__":
    # `json` is imported at the top of the file with the other stdlib
    # modules, so load_progress/save_progress also work when this module is
    # imported rather than executed as a script.
    main()