# CI-VID / quality_check.py
import os
import glob
import subprocess
import gradio as gr
import cv2
import numpy as np
import json
# --- Pipeline input/output locations -------------------------------------
STAGE3_DIR = "test_removal_0_39/stage3_prop_masks"  # per-frame object masks (PNG) from stage 3
STAGE4_DIR = "test_removal_0_39/stage4_inpainted"   # inpainted result clips from stage 4
CI_VID_DIR = "ci_vid_SC_test"                       # original source clips
CACHE_DIR = "outputs/merged_previews"               # cached overlay / merged preview videos
ANNOTATION_FILE = "removal_annotations.json"        # hierarchical rating store (chunk/vid/cam/shot/obj)
FFMPEG_PATH = r"D:\ffmpeg-8.0.1-essentials_build\bin\ffmpeg.exe"  # NOTE(review): machine-specific Windows path
os.makedirs(CACHE_DIR, exist_ok=True)
# ======================= Hierarchical JSON annotation I/O =======================
def load_annotations():
    """Load the hierarchical rating store from ANNOTATION_FILE.

    Returns:
        dict: the parsed JSON tree, or {} when the file is missing,
        unreadable, or contains invalid JSON.
    """
    if not os.path.exists(ANNOTATION_FILE):
        return {}
    try:
        with open(ANNOTATION_FILE, mode='r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError):
        # ValueError covers json.JSONDecodeError; the original broad
        # `except Exception` hid unrelated bugs as well.
        return {}
def save_annotation(chunk, vid, cam, shot, obj, rating):
    """Persist one rating into the nested JSON annotation store.

    Returns a Markdown status string for the UI; refuses to save when no
    rating is chosen or any selector is still the "-" placeholder.
    """
    if not rating:
        return "⚠️ Please select a rating first!"
    if "-" in [chunk, vid, cam, shot, obj]:
        return "⚠️ Please select a valid clip!"
    anns = load_annotations()
    # Walk (creating as needed) down to the shot level, then set the rating.
    node = anns
    for key in (chunk, vid, cam, shot):
        node = node.setdefault(key, {})
    node[obj] = rating
    with open(ANNOTATION_FILE, mode='w', encoding='utf-8') as f:
        json.dump(anns, f, ensure_ascii=False, indent=4)
    return f"✅ Saved: `{obj}` marked as **{rating}**"
def sync_annotation_ui(chunk, vid, cam, shot, obj):
    """Fetch the saved rating for the current selection and push it into the
    rating Radio, clearing the status message alongside it."""
    node = load_annotations()
    # Safe descent through the nested dict; missing levels yield {}.
    for key in (chunk, vid, cam, shot):
        node = node.get(key, {})
    saved = node.get(obj, None)
    return gr.update(value=saved), ""
# =====================================================================
def scan_data():
    """Walk STAGE4_DIR and build the nested chunk/video/camera/shot map.

    Expected layout: STAGE4_DIR/chunk_*/<vid>/<cam>/<shot>/<obj>.mp4, where
    <shot> must be a purely numeric directory name.

    Returns:
        dict: {chunk: {vid: {cam: {shot: [obj, ...]}}}}; empty when
        STAGE4_DIR does not exist.
    """
    data_map = {}
    if not os.path.exists(STAGE4_DIR):
        return data_map
    for chunk_path in sorted(glob.glob(os.path.join(STAGE4_DIR, "chunk_*"))):
        chunk = os.path.basename(chunk_path)
        data_map[chunk] = {}
        for vid_path in sorted(glob.glob(os.path.join(chunk_path, "*"))):
            if not os.path.isdir(vid_path):
                continue
            vid = os.path.basename(vid_path)
            data_map[chunk][vid] = {}
            # Renamed from `cmp`, which shadowed a builtin name.
            for cam_path in sorted(glob.glob(os.path.join(vid_path, "*"))):
                if not os.path.isdir(cam_path):
                    continue
                cam = os.path.basename(cam_path)
                data_map[chunk][vid][cam] = {}
                for shot_path in sorted(glob.glob(os.path.join(cam_path, "*"))):
                    if not os.path.isdir(shot_path):
                        continue
                    shot = os.path.basename(shot_path)
                    if not shot.isdigit():
                        continue
                    # splitext strips only the trailing extension; the old
                    # str.replace removed every ".mp4" substring in the name.
                    objs = [
                        os.path.splitext(os.path.basename(p))[0]
                        for p in glob.glob(os.path.join(shot_path, "*.mp4"))
                    ]
                    if objs:
                        data_map[chunk][vid][cam][shot] = sorted(objs)
    return data_map
ORIGINAL_DATA_MAP = scan_data()
def get_init(data_map):
    """Select the first entry at every level of the nested data map.

    Returns a 10-tuple:
    (chunks, chunk, vids, vid, cams, cam, shots, shot, objs, obj), where each
    selected value falls back to "-" when its level is empty.
    """
    def _first_key(d):
        return next(iter(d)) if d else "-"

    chunks = list(data_map.keys())
    c = _first_key(data_map)
    vids_d = data_map.get(c, {})
    v = _first_key(vids_d)
    cams_d = vids_d.get(v, {})
    cam = _first_key(cams_d)
    shots_d = cams_d.get(cam, {})
    s = _first_key(shots_d)
    objs = shots_d.get(s, [])
    o = objs[0] if objs else "-"
    return (chunks, c, list(vids_d.keys()), v, list(cams_d.keys()), cam,
            list(shots_d.keys()), s, objs, o)
# ======================= Dynamic label counts & refresh logic =======================
def get_filter_choices_and_val(base_filter="All"):
    """Count clips per rating label and build Radio choices like "High (3)".

    Returns (choices, selected): `choices` is the five labelled options with
    live counts, `selected` is the first choice whose text starts with
    `base_filter` (falling back to the "All (...)" entry).
    """
    anns = load_annotations()
    labels = ("All", "Unlabeled", "High", "Medium", "Low")
    counts = dict.fromkeys(labels, 0)
    for c, v_dict in ORIGINAL_DATA_MAP.items():
        for v, cam_dict in v_dict.items():
            for cam, s_dict in cam_dict.items():
                for s, o_list in s_dict.items():
                    shot_anns = anns.get(c, {}).get(v, {}).get(cam, {}).get(s, {})
                    for o in o_list:
                        counts["All"] += 1
                        rating = shot_anns.get(o, "")
                        if not rating:
                            counts["Unlabeled"] += 1
                        elif rating in counts:
                            counts[rating] += 1
    choices = [f"{label} ({counts[label]})" for label in labels]
    # Prefix-match the bare label against the freshly numbered choices.
    selected = next((ch for ch in choices if ch.startswith(base_filter)), choices[0])
    return choices, selected
def apply_filter(base_filter):
    """Filter ORIGINAL_DATA_MAP down to objects matching `base_filter`.

    "All" returns the original map untouched; "Unlabeled" keeps objects with
    no saved rating; any other label keeps objects rated exactly that label.
    Branches that end up empty are pruned from the returned tree.
    """
    if base_filter == "All":
        return ORIGINAL_DATA_MAP
    anns = load_annotations()
    filtered = {}
    for c, v_dict in ORIGINAL_DATA_MAP.items():
        for v, cam_dict in v_dict.items():
            for cam, s_dict in cam_dict.items():
                for s, o_list in s_dict.items():
                    shot_anns = anns.get(c, {}).get(v, {}).get(cam, {}).get(s, {})
                    keep = [
                        o for o in o_list
                        if (base_filter == "Unlabeled" and not shot_anns.get(o, ""))
                        or shot_anns.get(o, "") == base_filter
                    ]
                    if keep:
                        filtered.setdefault(c, {}).setdefault(v, {}).setdefault(cam, {})[s] = keep
    return filtered
def refresh_ui(current_filter_label):
    """Single refresh entry point: recount labels -> re-filter the data tree -> update every widget.

    `current_filter_label` is the Radio value such as "Unlabeled (42)".
    The returned tuple must match the `refresh_outputs` binding order exactly:
    (state_map, filter_radio, chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd, replace).
    """
    # Extract the bare label ("Unlabeled (42)" -> "Unlabeled").
    base_filter = "All" if not current_filter_label else current_filter_label.split(" ")[0]
    choices, new_radio_val = get_filter_choices_and_val(base_filter)
    filtered_map = apply_filter(base_filter)
    _, c, _, v, _, cam, _, s, _, o = get_init(filtered_map)
    c_list = list(filtered_map.keys())
    v_list = list(filtered_map.get(c, {}).keys()) if c != "-" else []
    cam_list = list(filtered_map.get(c, {}).get(v, {}).keys()) if v != "-" else []
    s_list = list(filtered_map.get(c, {}).get(v, {}).get(cam, {}).keys()) if cam != "-" else []
    o_list = filtered_map.get(c, {}).get(v, {}).get(cam, {}).get(s, []) if s != "-" else []
    vr = get_valid_replace_shots(filtered_map, c, v, cam, o)
    return (
        filtered_map,
        gr.update(choices=choices, value=new_radio_val),  # radio options with fresh counts
        get_choices_and_val(c_list),
        get_choices_and_val(v_list),
        get_choices_and_val(cam_list),
        get_choices_and_val(s_list),
        get_choices_and_val(o_list),
        gr.update(choices=vr, value=vr if vr else "-", label=f"Use replacement clips for [{o if o!='-' else 'None'}]")
    )
# =====================================================================
def get_choices_and_val(lst):
    """Build a gr.update swapping in `lst` as the choices and selecting the
    first entry, or the "-" placeholder when the list is empty."""
    selected = lst[0] if lst else "-"
    return gr.update(choices=lst, value=selected)
def get_valid_replace_shots(data_map, chunk, vid, cam, obj):
    """List every shot id under chunk/vid/cam whose object list contains `obj`.

    Returns [] while any selector is still the "-" placeholder.
    """
    if "-" in (chunk, vid, cam, obj):
        return []
    shots = data_map.get(chunk, {}).get(vid, {}).get(cam, {})
    return [shot for shot, objs in shots.items() if obj in objs]
def chunk_update(state_map, chunk):
    """Cascade refresh after the Chunk dropdown changes: repopulate the video,
    camera, shot and object dropdowns plus the replacement-shot checkboxes."""
    def _first(seq):
        return seq[0] if seq else "-"

    chunk_d = state_map.get(chunk, {})
    vids = list(chunk_d.keys())
    vid_d = chunk_d.get(_first(vids), {})
    cams = list(vid_d.keys())
    cam_d = vid_d.get(_first(cams), {})
    shots = list(cam_d.keys())
    objs = cam_d.get(_first(shots), [])
    vr = get_valid_replace_shots(state_map, chunk, _first(vids), _first(cams), _first(objs))
    return (
        get_choices_and_val(vids),
        get_choices_and_val(cams),
        get_choices_and_val(shots),
        get_choices_and_val(objs),
        gr.update(choices=vr, value=vr, label=f"Use replacement clips for [{_first(objs)}]"),
    )
def vid_update(state_map, chunk, vid):
    """Cascade refresh after the Video dropdown changes."""
    def _first(seq):
        return seq[0] if seq else "-"

    vid_d = state_map.get(chunk, {}).get(vid, {})
    cams = list(vid_d.keys())
    cam_d = vid_d.get(_first(cams), {})
    shots = list(cam_d.keys())
    objs = cam_d.get(_first(shots), [])
    vr = get_valid_replace_shots(state_map, chunk, vid, _first(cams), _first(objs))
    return (
        get_choices_and_val(cams),
        get_choices_and_val(shots),
        get_choices_and_val(objs),
        gr.update(choices=vr, value=vr, label=f"Use replacement clips for [{_first(objs)}]"),
    )
def cam_update(state_map, chunk, vid, cam):
    """Cascade refresh after the Camera dropdown changes."""
    def _first(seq):
        return seq[0] if seq else "-"

    cam_d = state_map.get(chunk, {}).get(vid, {}).get(cam, {})
    shots = list(cam_d.keys())
    objs = cam_d.get(_first(shots), [])
    vr = get_valid_replace_shots(state_map, chunk, vid, cam, _first(objs))
    return (
        get_choices_and_val(shots),
        get_choices_and_val(objs),
        gr.update(choices=vr, value=vr, label=f"Use replacement clips for [{_first(objs)}]"),
    )
def shot_update(state_map, chunk, vid, cam, shot):
    """Cascade refresh after the Shot dropdown changes."""
    objs = state_map.get(chunk, {}).get(vid, {}).get(cam, {}).get(shot, [])
    first_obj = objs[0] if objs else "-"
    vr = get_valid_replace_shots(state_map, chunk, vid, cam, first_obj)
    return (
        get_choices_and_val(objs),
        gr.update(choices=vr, value=vr, label=f"Use replacement clips for [{first_obj}]"),
    )
def obj_update(state_map, chunk, vid, cam, obj):
    """Refresh only the replacement-shot checkbox group for the chosen object."""
    valid_shots = get_valid_replace_shots(state_map, chunk, vid, cam, obj)
    return gr.update(
        choices=valid_shots,
        value=valid_shots,
        label=f"Use replacement clips for [{obj}]",
    )
def create_overlay_video(orig_path, mask_dir, out_path):
    """Render a half-resolution copy of `orig_path` with the per-frame PNG
    masks from `mask_dir` blended in as a red overlay, then re-encode to H.264.

    The result is cached: if `out_path` already exists it is returned as-is.
    Mask files are matched to video frames by sorted filename order; frames
    beyond the last mask are written unmodified.
    """
    if os.path.exists(out_path): return out_path
    cap = cv2.VideoCapture(orig_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24  # fall back to 24 when the container reports 0 fps
    orig_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    orig_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    scale = 0.5
    # Halve the resolution, rounded down to even values (libx264 yuv420p needs even dims).
    w = int((orig_w * scale) // 2 * 2)
    h = int((orig_h * scale) // 2 * 2)
    mask_files = sorted(glob.glob(os.path.join(mask_dir, "*.png")))
    temp_path = out_path.replace('.mp4', '_temp.mp4')
    out = cv2.VideoWriter(temp_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    idx = 0
    while True:
        ret, frame = cap.read()
        if not ret: break
        frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)
        if idx < len(mask_files):
            mask = cv2.imread(mask_files[idx], cv2.IMREAD_GRAYSCALE)
            if mask is not None:
                mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_NEAREST)
                # 50/50 blend of the frame with pure red (BGR) wherever the mask is set.
                frame[mask > 128] = frame[mask > 128] * 0.5 + np.array([0, 0, 255], dtype=np.uint8) * 0.5
        out.write(frame)
        idx += 1
    cap.release()
    out.release()
    # Re-encode the mp4v temp file to H.264 so browsers can play it inline.
    subprocess.run([FFMPEG_PATH, '-y', '-i', temp_path, '-c:v', 'libx264', '-preset', 'ultrafast', '-crf', '32', out_path], capture_output=True)
    if os.path.exists(temp_path): os.remove(temp_path)
    return out_path
def load_preview(chunk, vid, cam, shot, obj):
    """Resolve the (original-with-mask-overlay, inpainted) video pair for the
    current selection. Returns (None, None) while any selector is unset."""
    selection = [chunk, vid, cam, shot, obj]
    if not all(selection) or "-" in selection:
        return None, None
    orig = os.path.join(CI_VID_DIR, chunk, vid, cam, f"{shot}.mp4")
    edit = os.path.join(STAGE4_DIR, chunk, vid, cam, str(shot), f"{obj}.mp4")
    mask_dir = os.path.join(STAGE3_DIR, chunk, vid, cam, str(shot), str(obj))
    if os.path.exists(orig) and os.path.exists(mask_dir):
        cache_name = f"ov_{chunk}_{vid}_{cam}_{shot}_{obj}.mp4"
        orig_to_show = create_overlay_video(orig, mask_dir, os.path.join(CACHE_DIR, cache_name))
    else:
        orig_to_show = orig if os.path.exists(orig) else None
    edit_to_show = edit if os.path.exists(edit) else None
    return orig_to_show, edit_to_show
def merge_videos(chunk, vid, cam, obj, replace_shots):
    """Concatenate the camera's numeric shot sequence into one preview video,
    substituting inpainted clips for shot ids listed in `replace_shots`.

    Returns the merged mp4 path, or None when the selection is incomplete or
    no numeric shot clips exist for this camera.
    """
    selection = [chunk, vid, cam, obj]
    if not all(selection) or "-" in selection:
        return None
    shot_paths = [
        p for p in glob.glob(os.path.join(CI_VID_DIR, chunk, vid, cam, "*.mp4"))
        if os.path.basename(p).replace('.mp4', '').isdigit()
    ]
    shot_paths.sort(key=lambda p: int(os.path.basename(p).split('.')[0]))
    out_path = os.path.join(CACHE_DIR, f"merged_{chunk}_{vid}_{cam}_{obj}.mp4")
    inputs = []
    for path in shot_paths:
        sid = os.path.basename(path).replace('.mp4', '')
        edited = os.path.join(STAGE4_DIR, chunk, vid, cam, sid, f"{obj}.mp4")
        use_edited = sid in replace_shots and os.path.exists(edited)
        inputs.append(edited if use_edited else path)
    if not inputs:
        return None
    # Normalize every clip to 1280x720 @ 24fps before concatenation.
    norm = ";".join(
        f"[{i}:v]scale=1280:720:force_original_aspect_ratio=decrease,"
        f"pad=1280:720:(ow-iw)/2:(oh-ih)/2,setsar=1,fps=24[v{i}]"
        for i in range(len(inputs))
    )
    concat = "".join(f"[v{i}]" for i in range(len(inputs))) + f"concat=n={len(inputs)}:v=1:a=0[outv]"
    cmd = [FFMPEG_PATH, "-y"]
    for inp in inputs:
        cmd += ["-i", inp]
    cmd += ["-filter_complex", norm + ";" + concat, "-map", "[outv]",
            "-c:v", "libx264", "-preset", "ultrafast", out_path]
    subprocess.run(cmd, check=True, capture_output=True)
    return out_path
# Compute the initial filter labels (with live counts) and the initial
# selection at every level of the unfiltered data tree.
INIT_CHOICES, INIT_VAL = get_filter_choices_and_val("All")
CHUNKS, C, VIDS, V, CAMS, CAM, SHOTS, S, OBJS, O = get_init(ORIGINAL_DATA_MAP)
# UI layout and event wiring. Indentation reconstructed from the Gradio
# context-manager nesting (the pasted source had its indentation stripped).
with gr.Blocks() as app:
    state_map = gr.State(ORIGINAL_DATA_MAP)  # currently filtered data tree
    gr.Markdown("# 🎬 Inconsistency Quality Check Panel")
    with gr.Row(variant="panel"):
        filter_radio = gr.Radio(
            choices=INIT_CHOICES,
            value=INIT_VAL,
            label="🔎 Filter Status"
        )
    with gr.Row():
        chunk_dd = gr.Dropdown(choices=CHUNKS, label="1. Chunk", value=C)
        vid_dd = gr.Dropdown(choices=VIDS, label="2. Video ID", value=V)
        cam_dd = gr.Dropdown(choices=CAMS, label="3. Camera ID", value=CAM)
    gr.Markdown("---")
    with gr.Row():
        with gr.Column():
            with gr.Row():
                shot_dd = gr.Dropdown(choices=SHOTS, label="4. Shot", value=S)
                obj_dd = gr.Dropdown(choices=OBJS, label="5. Object", value=O)
            gr.Markdown("### ✍️ Evaluation")
            with gr.Row(variant="panel"):
                rating_radio = gr.Radio(choices=["High", "Medium", "Low"], label="6. Rate Removal Quality")
                with gr.Column():
                    save_btn = gr.Button("💾 Save Rating", variant="primary")
                    refresh_btn = gr.Button("🔄 Refresh List & Stats", variant="secondary")  # refresh button
            save_status = gr.Markdown("")
            v_orig = gr.Video(label="Original w/ Mask")
            v_edit = gr.Video(label="Inpainted Result")
        with gr.Column():
            replace = gr.CheckboxGroup(choices=get_valid_replace_shots(ORIGINAL_DATA_MAP, C, V, CAM, O), label=f"Use replacement clips for [{O}]", value=get_valid_replace_shots(ORIGINAL_DATA_MAP, C, V, CAM, O))
            btn = gr.Button("🚀 Generate Sequence Preview", variant="secondary")
            v_res = gr.Video(label="Sequence Preview")
    # ================= Event bindings =================
    # 0. The refresh button and the filter radio share one update pipeline.
    refresh_outputs = [state_map, filter_radio, chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd, replace]
    filter_radio.change(refresh_ui, [filter_radio], refresh_outputs)
    refresh_btn.click(refresh_ui, [filter_radio], refresh_outputs)
    # 1. Cascading dropdown updates (fast, no I/O).
    chunk_dd.change(chunk_update, [state_map, chunk_dd], [vid_dd, cam_dd, shot_dd, obj_dd, replace])
    vid_dd.change(vid_update, [state_map, chunk_dd, vid_dd], [cam_dd, shot_dd, obj_dd, replace])
    cam_dd.change(cam_update, [state_map, chunk_dd, vid_dd, cam_dd], [shot_dd, obj_dd, replace])
    shot_dd.change(shot_update, [state_map, chunk_dd, vid_dd, cam_dd, shot_dd], [obj_dd, replace])
    obj_dd.change(obj_update, [state_map, chunk_dd, vid_dd, cam_dd, obj_dd], [replace])
    # 2. Sync the saved rating into the UI first (fast).
    shot_dd.change(sync_annotation_ui, [chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd], [rating_radio, save_status])
    obj_dd.change(sync_annotation_ui, [chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd], [rating_radio, save_status])
    app.load(sync_annotation_ui, [chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd], [rating_radio, save_status])
    save_btn.click(save_annotation, [chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd, rating_radio], [save_status])
    # 3. Load the preview videos last (slow).
    shot_dd.change(load_preview, [chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd], [v_orig, v_edit])
    obj_dd.change(load_preview, [chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd], [v_orig, v_edit])
    app.load(load_preview, [chunk_dd, vid_dd, cam_dd, shot_dd, obj_dd], [v_orig, v_edit])
    # Render the full merged shot-sequence preview.
    btn.click(merge_videos, [chunk_dd, vid_dd, cam_dd, obj_dd, replace], [v_res])
if __name__ == "__main__":
    # Bind on all interfaces; advertise the loopback URL for local use.
    print("Link: http://127.0.0.1:7860/")
    app.launch(server_name="0.0.0.0", server_port=7860)