Upload 3 files
Browse files- CI-VID_test_samples_SC.jsonl +0 -0
- filter_ci_vid_samples.py +250 -0
- move_ci_vid_samples.py +109 -0
CI-VID_test_samples_SC.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
filter_ci_vid_samples.py
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import json
|
| 4 |
+
import math
|
| 5 |
+
import time
|
| 6 |
+
import subprocess
|
| 7 |
+
import itertools
|
| 8 |
+
import concurrent.futures
|
| 9 |
+
from collections import OrderedDict
|
| 10 |
+
from typing import Optional, List, Dict, Any, Tuple
|
| 11 |
+
|
| 12 |
+
# --- Configuration -------------------------------------------------------
split = "train"  # which split to process: "train" or "test"
DATA_ROOT = "/workspace/ci_vid_chunk0"  # adjust to your actual data root
IN_JSONL = os.path.join(DATA_ROOT, f"CI-VID_{split}_samples.jsonl")
OUT_JSONL = os.path.join(DATA_ROOT, f"CI-VID_{split}_samples_SC.jsonl")

MAX_SHOTS = 4  # keep samples with at most this many shots
MIN_AVG_DUR = 0.0  # seconds; minimum average shot duration
MIN_SHOT_DUR = 3.0  # seconds; minimum per-shot duration
MAX_SHOT_DUR = 7.0  # seconds; maximum per-shot duration
PRINT_EVERY = 500  # progress-print frequency (lines)
MAX_WORKERS = 64  # worker threads; 2-4x CPU core count is recommended
CHUNK_SIZE = 5000  # lines read per batch, to avoid OOM on huge files
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def ffprobe_duration_sec(video_path: str) -> Optional[float]:
    """Return the duration of *video_path* in seconds using ffprobe.

    Returns None when ffprobe cannot be spawned, exits non-zero, prints
    nothing, or reports a non-finite or non-positive duration.
    """
    try:
        p = subprocess.run(
            [
                "ffprobe",
                "-v", "error",
                "-show_entries", "format=duration",
                "-of", "default=noprint_wrappers=1:nokey=1",
                video_path,
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=False,
        )
    except (OSError, subprocess.SubprocessError):
        # ffprobe binary missing or the process could not be spawned.
        # (Was a bare `except Exception` around the whole body, which
        # would also have hidden programming errors.)
        return None
    if p.returncode != 0:
        return None
    s = p.stdout.strip()
    if not s:
        return None
    try:
        dur = float(s)
    except ValueError:
        # ffprobe printed something that is not a number.
        return None
    # Reject NaN *and* inf as well as non-positive values (the original
    # only checked isnan, so an "inf" duration slipped through).
    if not math.isfinite(dur) or dur <= 0:
        return None
    return dur
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def list_mp4s_sorted(video_dir: str) -> List[str]:
    """Return the full paths of the .mp4 files inside *video_dir*.

    Numerically named files (0.mp4, 1.mp4, ...) come first, in numeric
    order; any remaining mp4s follow in lexicographic order.  Returns an
    empty list when the directory does not exist.
    """
    if not os.path.isdir(video_dir):
        return []
    files = [f for f in os.listdir(video_dir) if f.lower().endswith(".mp4")]

    def key_fn(name: str):
        # Match the extension case-insensitively so that e.g. "3.MP4"
        # sorts numerically too — file *selection* above was already
        # case-insensitive, but the original regex was not, pushing
        # upper-case extensions into the lexicographic bucket.
        m = re.fullmatch(r"(\d+)\.mp4", name, flags=re.IGNORECASE)
        return (0, int(m.group(1))) if m else (1, name)

    files.sort(key=key_fn)
    return [os.path.join(video_dir, f) for f in files]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def count_nonempty_lines(path: str) -> int:
    """Count the lines of *path* that contain non-whitespace text."""
    with open(path, "r", encoding="utf-8") as fh:
        return sum(1 for raw in fh if raw.strip())
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def reorder_for_output(
    row: Dict[str, Any],
    shot_count: int,
    durs: List[float],
    avg_dur: float,
    mp4_count_found: int,
    video_dir_abs: str,
) -> OrderedDict:
    """Rebuild *row* with a fixed key layout for the output JSONL.

    Layout: ``video_path`` first, then the newly computed shot
    statistics, then every other original key in its original order,
    and finally ``individual_captions`` / ``joint_captions`` at the end.
    """
    caption_keys = ("individual_captions", "joint_captions")

    # video_path followed immediately by the new statistics keys.
    result: OrderedDict = OrderedDict(
        video_path=row.get("video_path"),
        shot_count=shot_count,
        avg_shot_duration_sec=avg_dur,
        shot_durations_sec=durs,
        mp4_count_found=mp4_count_found,
        video_dir_abs=video_dir_abs,
    )

    # Remaining original keys, preserving their order of appearance.
    for key, value in row.items():
        if key != "video_path" and key not in caption_keys:
            result[key] = value

    # Captions go last, only when present in the source row.
    for key in caption_keys:
        if key in row:
            result[key] = row[key]

    return result
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def process_line(line: str) -> Tuple[Optional[OrderedDict], bool, str]:
    """Filter a single JSONL record.

    Returns ``(output_row_or_None, passed_MAX_SHOTS_check, status)``.
    Possible statuses: "empty", "json_fail", "format_fail", "len_gt_4",
    "missing_dir", "missing_mp4", "duration_fail", "dur_too_short",
    "dur_too_long", "kept".
    """
    line = line.strip()
    if not line:
        return None, False, "empty"

    try:
        row: Dict[str, Any] = json.loads(line)
    except json.JSONDecodeError:
        return None, False, "json_fail"

    indiv = row.get("individual_captions", [])
    if not isinstance(indiv, list):
        return None, False, "format_fail"

    shot_count = len(indiv)
    if shot_count > MAX_SHOTS:
        return None, False, "len_gt_4"

    # Any sample with shot_count <= MAX_SHOTS counts toward len_le_4.
    is_len_le_4 = True

    video_path = row.get("video_path")
    if not isinstance(video_path, str):
        return None, is_len_le_4, "format_fail"

    video_dir = os.path.join(DATA_ROOT, video_path)
    if not os.path.isdir(video_dir):
        return None, is_len_le_4, "missing_dir"

    mp4s = list_mp4s_sorted(video_dir)
    if not mp4s:
        return None, is_len_le_4, "missing_mp4"

    # The mp4 count should normally equal shot_count; take min to be safe.
    n = min(shot_count, len(mp4s))
    durs: List[float] = []

    for p in mp4s[:n]:
        dur = ffprobe_duration_sec(p)
        if dur is None:
            return None, is_len_le_4, "duration_fail"
        durs.append(dur)

    if not durs:
        return None, is_len_le_4, "duration_fail"

    if any(d <= MIN_SHOT_DUR for d in durs):
        return None, is_len_le_4, "dur_too_short"
    # Bug fix: MAX_SHOT_DUR was configured but never enforced anywhere;
    # reject samples containing an over-long shot, symmetric with the
    # minimum-duration check above.
    if any(d >= MAX_SHOT_DUR for d in durs):
        return None, is_len_le_4, "dur_too_long"

    avg_dur = sum(durs) / len(durs)
    if avg_dur <= MIN_AVG_DUR:
        return None, is_len_le_4, "dur_too_short"

    out_row = reorder_for_output(
        row=row,
        shot_count=shot_count,
        durs=durs,
        avg_dur=avg_dur,
        mp4_count_found=len(mp4s),
        video_dir_abs=video_dir,
    )
    return out_row, is_len_le_4, "kept"
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def main():
    """Stream IN_JSONL through a thread pool, filter each record with
    process_line, and write kept records to OUT_JSONL while printing
    periodic progress statistics."""
    # Total non-empty lines, used only for progress percentages.
    total_lines = count_nonempty_lines(IN_JSONL)

    # Counters. NOTE: statuses "format_fail", "len_gt_4" and
    # "dur_too_short" have no dedicated counter below; they contribute
    # only to `total` (and `len_le_4` where applicable).
    total = 0
    len_le_4 = 0
    kept = 0
    missing_dir = 0
    missing_mp4 = 0
    duration_fail = 0
    json_fail = 0

    t_start = time.time()

    with open(IN_JSONL, "r", encoding="utf-8") as fin, \
            open(OUT_JSONL, "w", encoding="utf-8") as fout, \
            concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:

        while True:
            # Read in batches to avoid loading a huge file into memory.
            lines = list(itertools.islice(fin, CHUNK_SIZE))
            if not lines:
                break

            # executor.map preserves the input order of the results.
            results = executor.map(process_line, lines)

            for out_row, is_len_le_4, status in results:
                if status == "empty":
                    continue

                total += 1

                # Tally counters following the original filtering logic.
                if is_len_le_4:
                    len_le_4 += 1

                if status == "json_fail":
                    json_fail += 1
                elif status == "missing_dir":
                    missing_dir += 1
                elif status == "missing_mp4":
                    missing_mp4 += 1
                elif status == "duration_fail":
                    duration_fail += 1
                elif status == "kept":
                    fout.write(json.dumps(out_row, ensure_ascii=False) + "\n")
                    kept += 1

                # Progress report: first line, every PRINT_EVERY lines,
                # and on the (expected) final line.
                if total == 1 or total % PRINT_EVERY == 0 or total == total_lines:
                    elapsed = time.time() - t_start
                    speed = total / elapsed if elapsed > 0 else 0.0
                    pct = (total / total_lines * 100.0) if total_lines > 0 else 0.0
                    print(
                        f"[{total}/{total_lines} | {pct:.1f}%] kept={kept} "
                        f"len<=4={len_le_4} missing_dir={missing_dir} missing_mp4={missing_mp4} "
                        f"dur_fail={duration_fail} json_fail={json_fail} speed={speed:.1f} lines/s"
                    )

    print("Done.")
    print(f"Output: {OUT_JSONL}")
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
move_ci_vid_samples.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import shutil
|
| 4 |
+
import time
|
| 5 |
+
import concurrent.futures
|
| 6 |
+
|
| 7 |
+
split = "train"  # which split to process: "train" or "test"
# Path configuration.
OLD_ROOT = "/workspace/ci_vid_chunk0"
NEW_ROOT = f"/workspace/ci_vid_SC_{split}"
JSONL_FILE = os.path.join(OLD_ROOT, f"CI-VID_{split}_samples_SC.jsonl")

# Thread count (only path/metadata operations; doesn't need to be large).
MAX_WORKERS = 16
PRINT_EVERY = 100  # progress-print frequency (records)
|
| 16 |
+
|
| 17 |
+
def move_video_dir(line: str) -> str:
    """Parse one JSONL line and copy its video directory into NEW_ROOT.

    Despite the name, the directory is *copied* (shutil.copytree); the
    original shutil.move call is kept commented out below.  Returns a
    status label used for the statistics in main().
    """
    line = line.strip()
    if not line:
        return "empty"

    try:
        row = json.loads(line)
    except json.JSONDecodeError:
        return "json_fail"

    video_rel_path = row.get("video_path")
    if not isinstance(video_rel_path, str):
        return "format_fail"

    old_dir = os.path.join(OLD_ROOT, video_rel_path)
    new_dir = os.path.join(NEW_ROOT, video_rel_path)

    # If the source directory is gone, it may already have been moved,
    # or the record is stale.
    if not os.path.isdir(old_dir):
        # Check whether it already exists at the destination.
        if os.path.isdir(new_dir):
            return "already_moved"
        return "missing_old"

    # Make sure the destination's parent hierarchy exists
    # (e.g. create chunk_33/00d2d19dd524c7f7ccaadba657c49263).
    parent_new_dir = os.path.dirname(new_dir)
    os.makedirs(parent_new_dir, exist_ok=True)

    try:
        # Copy the whole directory tree.
        # shutil.move(old_dir, new_dir)
        shutil.copytree(old_dir, new_dir, dirs_exist_ok=True)
        return "success"
    except Exception:
        # Permission problems and other copy failures land here.
        # (Fix: the old code bound the exception to an unused name and
        # returned a placeholder-free f-string, `return f"error"`.)
        return "error"
|
| 57 |
+
|
| 58 |
+
def main():
    """Copy every video directory referenced by JSONL_FILE into NEW_ROOT,
    reporting per-status statistics and throughput."""
    if not os.path.exists(JSONL_FILE):
        print(f"找不到 JSONL 文件: {JSONL_FILE}")
        return

    # Count records up front so progress percentages can be shown.
    # Bug fix: the original counted via a bare open() inside sum(...)
    # and never closed the handle.
    with open(JSONL_FILE, "r", encoding="utf-8") as f:
        total_lines = sum(1 for line in f if line.strip())
    print(f"共发现 {total_lines} 条待处理记录。开始移动...\n")

    stats = {
        "success": 0,
        "already_moved": 0,
        "missing_old": 0,
        "error": 0,
        "json_fail": 0,
        "format_fail": 0,
        "empty": 0,
    }

    t_start = time.time()
    processed = 0

    with open(JSONL_FILE, "r", encoding="utf-8") as fin:
        with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            # Submit one task per line.  Blank lines are submitted too
            # (they come back as "empty"), so `processed` can exceed
            # `total_lines`, which only counts non-empty lines.
            futures = [executor.submit(move_video_dir, line) for line in fin]

            # Collect results as they complete (order is irrelevant here).
            for future in concurrent.futures.as_completed(futures):
                status = future.result()
                stats[status] += 1
                processed += 1

                # Progress report.
                if processed % PRINT_EVERY == 0 or processed == total_lines:
                    elapsed = time.time() - t_start
                    speed = processed / elapsed if elapsed > 0 else 0.0
                    # Bug fix: guard the percentage against division by
                    # zero when the JSONL contains only blank lines.
                    pct = (processed / total_lines) * 100 if total_lines > 0 else 0.0
                    print(
                        f"[{processed}/{total_lines} | {pct:.1f}%] "
                        f"成功: {stats['success']} | 已移动: {stats['already_moved']} | "
                        f"缺失: {stats['missing_old']} | 报错: {stats['error']} | "
                        f"速度: {speed:.1f} 个/秒"
                    )

    print("\n移动完成!统计结果:")
    for k, v in stats.items():
        if v > 0:
            print(f"- {k}: {v}")
|
| 107 |
+
|
| 108 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|