Update cdda_video_extractor.py
Browse files- cdda_video_extractor.py +59 -39
cdda_video_extractor.py
CHANGED
|
@@ -9,32 +9,36 @@ from PIL import Image
|
|
| 9 |
|
| 10 |
# ================= CONFIGURATION =================
|
| 11 |
MAX_RES = 1024 # Max pixels for longest side
|
| 12 |
-
|
| 13 |
-
#
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
# =================================================
|
| 16 |
|
| 17 |
def parse_args():
|
| 18 |
-
parser = argparse.ArgumentParser(description="
|
| 19 |
parser.add_argument("input_video", help="Path to the video file.")
|
| 20 |
-
parser.add_argument("--threshold", type=float, default=
|
| 21 |
-
help="
|
| 22 |
parser.add_argument("--step", type=int, default=1,
|
| 23 |
-
help="Check every Nth frame
|
| 24 |
parser.add_argument("--start_frame", type=int, default=0,
|
| 25 |
-
help="Start processing from this
|
| 26 |
return parser.parse_args()
|
| 27 |
|
| 28 |
def save_frame(cv2_frame, output_path):
|
| 29 |
-
"""
|
| 30 |
-
Heavy processing: Only runs when a frame is confirmed to be saved.
|
| 31 |
-
Converts BGR->RGB, Resizes (Lanczos), and Saves.
|
| 32 |
-
"""
|
| 33 |
-
# Convert BGR (OpenCV) to RGB (PIL)
|
| 34 |
rgb_frame = cv2.cvtColor(cv2_frame, cv2.COLOR_BGR2RGB)
|
| 35 |
img = Image.fromarray(rgb_frame)
|
| 36 |
|
| 37 |
-
# Resize logic
|
| 38 |
w, h = img.size
|
| 39 |
longest = max(w, h)
|
| 40 |
if longest > MAX_RES:
|
|
@@ -44,6 +48,24 @@ def save_frame(cv2_frame, output_path):
|
|
| 44 |
|
| 45 |
img.save(output_path, "WEBP")
|
| 46 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
def main():
|
| 48 |
args = parse_args()
|
| 49 |
|
|
@@ -62,59 +84,59 @@ def main():
|
|
| 62 |
|
| 63 |
print(f"[*] Processing: {args.input_video}")
|
| 64 |
print(f"[*] Output: {base_dir}")
|
|
|
|
| 65 |
|
| 66 |
cap = cv2.VideoCapture(args.input_video)
|
| 67 |
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 68 |
|
| 69 |
-
# Handle Start Frame Seek
|
| 70 |
if args.start_frame > 0:
|
| 71 |
-
if args.start_frame >= total_frames:
|
| 72 |
-
print(f"[!] Error: Start frame {args.start_frame} is larger than total frames {total_frames}.")
|
| 73 |
-
return
|
| 74 |
-
|
| 75 |
-
print(f"[*] Jumping to frame {args.start_frame}...")
|
| 76 |
cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_frame)
|
| 77 |
|
| 78 |
-
|
| 79 |
-
frame_count = args.start_frame
|
| 80 |
saved_count = 0
|
| 81 |
|
| 82 |
try:
|
| 83 |
while True:
|
| 84 |
-
# Read frame
|
| 85 |
ret, frame = cap.read()
|
| 86 |
if not ret:
|
| 87 |
break
|
| 88 |
|
| 89 |
-
# Skip frames if step > 1
|
| 90 |
-
# We use modulo on the relative progress to keep step consistent
|
| 91 |
if (frame_count - args.start_frame) % args.step != 0:
|
| 92 |
frame_count += 1
|
| 93 |
continue
|
| 94 |
|
| 95 |
-
# ---
|
| 96 |
-
#
|
| 97 |
-
|
| 98 |
|
| 99 |
has_changed = False
|
| 100 |
|
| 101 |
-
if
|
| 102 |
-
# Always save the first processed frame (whether it's frame 0 or frame 5000)
|
| 103 |
has_changed = True
|
| 104 |
else:
|
| 105 |
-
#
|
| 106 |
-
|
| 107 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
|
| 109 |
-
|
|
|
|
| 110 |
has_changed = True
|
| 111 |
|
| 112 |
-
# ---
|
| 113 |
if has_changed:
|
| 114 |
filename = f"frame_{frame_count}.webp"
|
| 115 |
filepath = os.path.join(img_dir, filename)
|
| 116 |
|
| 117 |
-
# Perform the heavy resize/save only now
|
| 118 |
save_frame(frame, filepath)
|
| 119 |
|
| 120 |
entry = {
|
|
@@ -125,13 +147,11 @@ def main():
|
|
| 125 |
with open(jsonl_path, 'a') as f:
|
| 126 |
f.write(json.dumps(entry) + "\n")
|
| 127 |
|
| 128 |
-
|
| 129 |
-
prev_small_frame = curr_small_frame
|
| 130 |
saved_count += 1
|
| 131 |
|
| 132 |
frame_count += 1
|
| 133 |
|
| 134 |
-
# Update terminal
|
| 135 |
if frame_count % 100 == 0:
|
| 136 |
sys.stdout.write(f"\r[Processing] Frame: {frame_count}/{total_frames} | Saved: {saved_count} ")
|
| 137 |
sys.stdout.flush()
|
|
|
|
| 9 |
|
| 10 |
# ================= CONFIGURATION =================
# Longest image side, in pixels, that save_frame will allow before downscaling.
MAX_RES = 1024

# Area threshold: the percentage of the comparison image that must differ
# (after noise filtering) before a frame is considered "changed".
#   0.1 -> 0.1% of pixels changed (very sensitive, still ignores noise)
#   1.0 -> 1% of the screen changed
CHANGE_PERCENTAGE_THRESHOLD = 0.05

# Noise floor: minimum per-pixel intensity delta (0-255) that counts as a
# real change. Video compression typically jitters by 5-15, so 25 skips
# compression artifacts while still catching genuine screen updates.
PIXEL_DIFF_THRESHOLD = 25
# =================================================
| 25 |
|
| 26 |
def parse_args(argv=None):
    """Parse command-line arguments for the frame extractor.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] exactly as before; accepting an
            explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace with attributes: input_video (str),
        threshold (float), step (int), start_frame (int).
    """
    parser = argparse.ArgumentParser(description="Robust extract changed frames from video.")
    parser.add_argument("input_video", help="Path to the video file.")
    # Default comes from the module-level config so CLI and config stay in sync.
    parser.add_argument("--threshold", type=float, default=CHANGE_PERCENTAGE_THRESHOLD,
                        help="Percentage of screen area that must change (0.1 - 1.0).")
    parser.add_argument("--step", type=int, default=1,
                        help="Check every Nth frame.")
    parser.add_argument("--start_frame", type=int, default=0,
                        help="Start processing from this frame.")
    return parser.parse_args(argv)
|
| 36 |
|
| 37 |
def save_frame(cv2_frame, output_path):
|
| 38 |
+
"""Heavy processing: Converts BGR->RGB, Resizes (Lanczos), and Saves."""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
rgb_frame = cv2.cvtColor(cv2_frame, cv2.COLOR_BGR2RGB)
|
| 40 |
img = Image.fromarray(rgb_frame)
|
| 41 |
|
|
|
|
| 42 |
w, h = img.size
|
| 43 |
longest = max(w, h)
|
| 44 |
if longest > MAX_RES:
|
|
|
|
| 48 |
|
| 49 |
img.save(output_path, "WEBP")
|
| 50 |
|
| 51 |
+
def preprocess_for_comparison(frame, target_width=320):
    """Prepare a BGR frame for fast, robust change detection.

    Pipeline:
        1. Downscale to ``target_width`` pixels wide (speed; downscaling
           also naturally averages out noise).
        2. Convert to grayscale (ignore minor color shifts).
        3. Gaussian blur (suppress video-compression grain).

    Args:
        frame: BGR image as a numpy array of shape (h, w[, channels]);
            only the first two shape dimensions are read here.
        target_width: Width in pixels of the comparison image. Defaults to
            320, the previously hard-coded value, so existing callers are
            unaffected.

    Returns:
        A single-channel blurred grayscale numpy array.
    """
    h, w = frame.shape[:2]
    scale = target_width / w
    # max(1, ...) guards against a zero-height resize request for
    # extremely wide frames, which cv2.resize would reject.
    small = cv2.resize(frame, (target_width, max(1, int(h * scale))))
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
    # Gaussian blur is crucial for ignoring compression grain between frames.
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    return blurred
|
| 68 |
+
|
| 69 |
def main():
|
| 70 |
args = parse_args()
|
| 71 |
|
|
|
|
| 84 |
|
| 85 |
print(f"[*] Processing: {args.input_video}")
|
| 86 |
print(f"[*] Output: {base_dir}")
|
| 87 |
+
print(f"[*] Config: Pixel Diff > {PIXEL_DIFF_THRESHOLD}, Area > {args.threshold}%")
|
| 88 |
|
| 89 |
cap = cv2.VideoCapture(args.input_video)
|
| 90 |
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 91 |
|
|
|
|
| 92 |
if args.start_frame > 0:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_frame)
|
| 94 |
|
| 95 |
+
prev_processed_frame = None
|
| 96 |
+
frame_count = args.start_frame
|
| 97 |
saved_count = 0
|
| 98 |
|
| 99 |
try:
|
| 100 |
while True:
|
|
|
|
| 101 |
ret, frame = cap.read()
|
| 102 |
if not ret:
|
| 103 |
break
|
| 104 |
|
|
|
|
|
|
|
| 105 |
if (frame_count - args.start_frame) % args.step != 0:
|
| 106 |
frame_count += 1
|
| 107 |
continue
|
| 108 |
|
| 109 |
+
# --- ROBUST PATH: Change Detection ---
|
| 110 |
+
# Preprocess current frame (Gray -> Blur)
|
| 111 |
+
curr_processed = preprocess_for_comparison(frame)
|
| 112 |
|
| 113 |
has_changed = False
|
| 114 |
|
| 115 |
+
if prev_processed_frame is None:
|
|
|
|
| 116 |
has_changed = True
|
| 117 |
else:
|
| 118 |
+
# 1. Calculate Absolute Difference
|
| 119 |
+
diff = cv2.absdiff(curr_processed, prev_processed_frame)
|
| 120 |
+
|
| 121 |
+
# 2. Apply Noise Floor Threshold
|
| 122 |
+
# Any pixel difference less than PIXEL_DIFF_THRESHOLD (25) becomes 0.
|
| 123 |
+
# This removes the "shimmering" of black backgrounds in videos.
|
| 124 |
+
_, thresh = cv2.threshold(diff, PIXEL_DIFF_THRESHOLD, 255, cv2.THRESH_BINARY)
|
| 125 |
+
|
| 126 |
+
# 3. Count Non-Zero Pixels (The pixels that actually changed significantly)
|
| 127 |
+
changed_pixels = np.count_nonzero(thresh)
|
| 128 |
+
total_pixels = thresh.size
|
| 129 |
+
change_percentage = (changed_pixels / total_pixels) * 100
|
| 130 |
|
| 131 |
+
# 4. Check against Area Threshold
|
| 132 |
+
if change_percentage > args.threshold:
|
| 133 |
has_changed = True
|
| 134 |
|
| 135 |
+
# --- Saving ---
|
| 136 |
if has_changed:
|
| 137 |
filename = f"frame_{frame_count}.webp"
|
| 138 |
filepath = os.path.join(img_dir, filename)
|
| 139 |
|
|
|
|
| 140 |
save_frame(frame, filepath)
|
| 141 |
|
| 142 |
entry = {
|
|
|
|
| 147 |
with open(jsonl_path, 'a') as f:
|
| 148 |
f.write(json.dumps(entry) + "\n")
|
| 149 |
|
| 150 |
+
prev_processed_frame = curr_processed
|
|
|
|
| 151 |
saved_count += 1
|
| 152 |
|
| 153 |
frame_count += 1
|
| 154 |
|
|
|
|
| 155 |
if frame_count % 100 == 0:
|
| 156 |
sys.stdout.write(f"\r[Processing] Frame: {frame_count}/{total_frames} | Saved: {saved_count} ")
|
| 157 |
sys.stdout.flush()
|