Upload 2 files
Browse files- cdda_video_extractor.py +61 -51
- cleanup_dataset.py +82 -0
cdda_video_extractor.py
CHANGED
|
@@ -9,22 +9,28 @@ from PIL import Image
|
|
| 9 |
|
| 10 |
# ================= CONFIGURATION =================
|
| 11 |
MAX_RES = 1024 # Max pixels for longest side
|
| 12 |
-
# Threshold for detecting change.
|
| 13 |
-
# 0
|
| 14 |
-
|
| 15 |
-
DEFAULT_THRESHOLD = 5.0
|
| 16 |
# =================================================
|
| 17 |
|
| 18 |
def parse_args():
|
| 19 |
-
parser = argparse.ArgumentParser(description="
|
| 20 |
-
parser.add_argument("input_video", help="Path to the video file
|
| 21 |
parser.add_argument("--threshold", type=float, default=DEFAULT_THRESHOLD,
|
| 22 |
-
help="Sensitivity
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
return parser.parse_args()
|
| 24 |
|
| 25 |
-
def
|
| 26 |
-
"""
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
| 28 |
rgb_frame = cv2.cvtColor(cv2_frame, cv2.COLOR_BGR2RGB)
|
| 29 |
img = Image.fromarray(rgb_frame)
|
| 30 |
|
|
@@ -36,17 +42,7 @@ def process_frame(cv2_frame):
|
|
| 36 |
new_size = (int(w * scale), int(h * scale))
|
| 37 |
img = img.resize(new_size, Image.Resampling.LANCZOS)
|
| 38 |
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
def calculate_diff(img1_array, img2_array):
|
| 42 |
-
"""Calculates Mean Squared Error between two image arrays."""
|
| 43 |
-
# Ensure shapes match (resize might cause off-by-one pixel issues rarely, but PIL handles consistent aspect)
|
| 44 |
-
if img1_array.shape != img2_array.shape:
|
| 45 |
-
return float('inf')
|
| 46 |
-
|
| 47 |
-
err = np.sum((img1_array.astype("float") - img2_array.astype("float")) ** 2)
|
| 48 |
-
err /= float(img1_array.shape[0] * img1_array.shape[1])
|
| 49 |
-
return err
|
| 50 |
|
| 51 |
def main():
|
| 52 |
args = parse_args()
|
|
@@ -55,11 +51,9 @@ def main():
|
|
| 55 |
print(f"[!] Error: File '{args.input_video}' not found.")
|
| 56 |
return
|
| 57 |
|
| 58 |
-
#
|
| 59 |
-
# extracted_images / video_filename_timestamp / images
|
| 60 |
video_name = os.path.splitext(os.path.basename(args.input_video))[0]
|
| 61 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 62 |
-
|
| 63 |
base_dir = os.path.join(os.getcwd(), "extracted_images", f"{video_name}_{timestamp}")
|
| 64 |
img_dir = os.path.join(base_dir, "images")
|
| 65 |
jsonl_path = os.path.join(base_dir, "data.jsonl")
|
|
@@ -70,44 +64,59 @@ def main():
|
|
| 70 |
print(f"[*] Output: {base_dir}")
|
| 71 |
|
| 72 |
cap = cv2.VideoCapture(args.input_video)
|
|
|
|
| 73 |
|
| 74 |
-
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
saved_count = 0
|
| 77 |
-
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 78 |
|
| 79 |
try:
|
| 80 |
while True:
|
|
|
|
| 81 |
ret, frame = cap.read()
|
| 82 |
if not ret:
|
| 83 |
break
|
| 84 |
-
|
| 85 |
-
#
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
has_changed = False
|
| 90 |
|
| 91 |
-
if
|
| 92 |
-
# Always save the first frame
|
| 93 |
has_changed = True
|
| 94 |
else:
|
| 95 |
-
#
|
| 96 |
-
|
| 97 |
-
|
|
|
|
|
|
|
| 98 |
has_changed = True
|
| 99 |
-
|
|
|
|
| 100 |
if has_changed:
|
| 101 |
filename = f"frame_{frame_count}.webp"
|
| 102 |
filepath = os.path.join(img_dir, filename)
|
| 103 |
|
| 104 |
-
#
|
| 105 |
-
|
| 106 |
|
| 107 |
-
# Log JSONL
|
| 108 |
-
# Note: Since this is video, we don't have keypresses.
|
| 109 |
-
# We log "None" or skip the keypress field.
|
| 110 |
-
# The merger script handles "None" automatically.
|
| 111 |
entry = {
|
| 112 |
"image": filename,
|
| 113 |
"original_frame_id": frame_count
|
|
@@ -116,21 +125,22 @@ def main():
|
|
| 116 |
with open(jsonl_path, 'a') as f:
|
| 117 |
f.write(json.dumps(entry) + "\n")
|
| 118 |
|
| 119 |
-
|
|
|
|
| 120 |
saved_count += 1
|
| 121 |
-
|
| 122 |
frame_count += 1
|
| 123 |
|
| 124 |
-
#
|
| 125 |
-
if frame_count %
|
| 126 |
-
sys.stdout.write(f"\r[Processing] Frame: {frame_count}/{total_frames} |
|
| 127 |
sys.stdout.flush()
|
| 128 |
|
| 129 |
except KeyboardInterrupt:
|
| 130 |
-
print("\n[!] Interrupted
|
| 131 |
finally:
|
| 132 |
cap.release()
|
| 133 |
-
print(f"\n[*] Done. Extracted {saved_count} frames
|
| 134 |
|
| 135 |
if __name__ == "__main__":
|
| 136 |
main()
|
|
|
|
| 9 |
|
| 10 |
# ================= CONFIGURATION =================
MAX_RES = 1024 # Max pixels for longest side
# Threshold for detecting change.
# 2.0 - 5.0 is usually good for gameplay.
# (Compared against the mean absolute pixel difference of the
# subsampled frames; can be overridden with --threshold.)
DEFAULT_THRESHOLD = 3.0
# =================================================
|
| 16 |
|
| 17 |
def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Options: positional input video path, --threshold (change
    sensitivity), --step (frame sampling stride), --start_frame
    (seek offset before processing begins).
    """
    p = argparse.ArgumentParser(description="Fast extract changed frames from video.")
    p.add_argument("input_video", help="Path to the video file.")
    p.add_argument(
        "--threshold",
        type=float,
        default=DEFAULT_THRESHOLD,
        help="Sensitivity (higher = ignores more noise).",
    )
    p.add_argument(
        "--step",
        type=int,
        default=1,
        help="Check every Nth frame (e.g. 2 for 2x speed).",
    )
    p.add_argument(
        "--start_frame",
        type=int,
        default=0,
        help="Start processing from this specific frame number.",
    )
    return p.parse_args()
|
| 27 |
|
| 28 |
+
def save_frame(cv2_frame, output_path):
|
| 29 |
+
"""
|
| 30 |
+
Heavy processing: Only runs when a frame is confirmed to be saved.
|
| 31 |
+
Converts BGR->RGB, Resizes (Lanczos), and Saves.
|
| 32 |
+
"""
|
| 33 |
+
# Convert BGR (OpenCV) to RGB (PIL)
|
| 34 |
rgb_frame = cv2.cvtColor(cv2_frame, cv2.COLOR_BGR2RGB)
|
| 35 |
img = Image.fromarray(rgb_frame)
|
| 36 |
|
|
|
|
| 42 |
new_size = (int(w * scale), int(h * scale))
|
| 43 |
img = img.resize(new_size, Image.Resampling.LANCZOS)
|
| 44 |
|
| 45 |
+
img.save(output_path, "WEBP")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
def main():
|
| 48 |
args = parse_args()
|
|
|
|
| 51 |
print(f"[!] Error: File '{args.input_video}' not found.")
|
| 52 |
return
|
| 53 |
|
| 54 |
+
# Setup Paths
|
|
|
|
| 55 |
video_name = os.path.splitext(os.path.basename(args.input_video))[0]
|
| 56 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
|
|
| 57 |
base_dir = os.path.join(os.getcwd(), "extracted_images", f"{video_name}_{timestamp}")
|
| 58 |
img_dir = os.path.join(base_dir, "images")
|
| 59 |
jsonl_path = os.path.join(base_dir, "data.jsonl")
|
|
|
|
| 64 |
print(f"[*] Output: {base_dir}")
|
| 65 |
|
| 66 |
cap = cv2.VideoCapture(args.input_video)
|
| 67 |
+
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 68 |
|
| 69 |
+
# Handle Start Frame Seek
|
| 70 |
+
if args.start_frame > 0:
|
| 71 |
+
if args.start_frame >= total_frames:
|
| 72 |
+
print(f"[!] Error: Start frame {args.start_frame} is larger than total frames {total_frames}.")
|
| 73 |
+
return
|
| 74 |
+
|
| 75 |
+
print(f"[*] Jumping to frame {args.start_frame}...")
|
| 76 |
+
cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_frame)
|
| 77 |
+
|
| 78 |
+
prev_small_frame = None
|
| 79 |
+
frame_count = args.start_frame # Initialize counter to start frame
|
| 80 |
saved_count = 0
|
|
|
|
| 81 |
|
| 82 |
try:
|
| 83 |
while True:
|
| 84 |
+
# Read frame
|
| 85 |
ret, frame = cap.read()
|
| 86 |
if not ret:
|
| 87 |
break
|
| 88 |
+
|
| 89 |
+
# Skip frames if step > 1
|
| 90 |
+
# We use modulo on the relative progress to keep step consistent
|
| 91 |
+
if (frame_count - args.start_frame) % args.step != 0:
|
| 92 |
+
frame_count += 1
|
| 93 |
+
continue
|
| 94 |
+
|
| 95 |
+
# --- FAST PATH: Change Detection ---
|
| 96 |
+
# 1. Subsample: Take every 4th pixel.
|
| 97 |
+
curr_small_frame = frame[::4, ::4]
|
| 98 |
+
|
| 99 |
has_changed = False
|
| 100 |
|
| 101 |
+
if prev_small_frame is None:
|
| 102 |
+
# Always save the first processed frame (whether it's frame 0 or frame 5000)
|
| 103 |
has_changed = True
|
| 104 |
else:
|
| 105 |
+
# 2. Fast Difference Calculation
|
| 106 |
+
diff_frame = cv2.absdiff(curr_small_frame, prev_small_frame)
|
| 107 |
+
score = np.mean(diff_frame)
|
| 108 |
+
|
| 109 |
+
if score > args.threshold:
|
| 110 |
has_changed = True
|
| 111 |
+
|
| 112 |
+
# --- SLOW PATH: Saving ---
|
| 113 |
if has_changed:
|
| 114 |
filename = f"frame_{frame_count}.webp"
|
| 115 |
filepath = os.path.join(img_dir, filename)
|
| 116 |
|
| 117 |
+
# Perform the heavy resize/save only now
|
| 118 |
+
save_frame(frame, filepath)
|
| 119 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
entry = {
|
| 121 |
"image": filename,
|
| 122 |
"original_frame_id": frame_count
|
|
|
|
| 125 |
with open(jsonl_path, 'a') as f:
|
| 126 |
f.write(json.dumps(entry) + "\n")
|
| 127 |
|
| 128 |
+
# Update previous comparison frame
|
| 129 |
+
prev_small_frame = curr_small_frame
|
| 130 |
saved_count += 1
|
| 131 |
+
|
| 132 |
frame_count += 1
|
| 133 |
|
| 134 |
+
# Update terminal
|
| 135 |
+
if frame_count % 100 == 0:
|
| 136 |
+
sys.stdout.write(f"\r[Processing] Frame: {frame_count}/{total_frames} | Saved: {saved_count} ")
|
| 137 |
sys.stdout.flush()
|
| 138 |
|
| 139 |
except KeyboardInterrupt:
|
| 140 |
+
print("\n[!] Interrupted.")
|
| 141 |
finally:
|
| 142 |
cap.release()
|
| 143 |
+
print(f"\n[*] Done. Extracted {saved_count} frames.")
|
| 144 |
|
| 145 |
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
|
cleanup_dataset.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import argparse
|
| 4 |
+
|
| 5 |
+
def clean_jsonl(jsonl_path):
    """Drop JSONL entries whose referenced image no longer exists.

    Reads every line of *jsonl_path*, keeps only well-formed entries
    whose "image" file is present in the sibling 'images' directory,
    and rewrites the file in place when anything was dropped.
    """
    dataset_dir = os.path.dirname(jsonl_path)
    images_dir = os.path.join(dataset_dir, "images")

    if not os.path.exists(images_dir):
        print(f"[!] Skipping {dataset_dir}: 'images' folder not found.")
        return

    # Slurp the file up front so we can rewrite it safely afterwards.
    with open(jsonl_path, 'r', encoding='utf-8') as handle:
        raw_lines = handle.readlines()

    kept = []
    dropped = 0

    for raw in raw_lines:
        if not raw.strip():
            # Blank lines are skipped: neither kept nor counted as removed.
            continue
        try:
            record = json.loads(raw)
        except json.JSONDecodeError:
            # Unparseable line: discard it.
            dropped += 1
            continue
        name = record.get("image")
        if name and os.path.exists(os.path.join(images_dir, name)):
            kept.append(raw)
        else:
            # Image file missing, or entry has no "image" field: drop it.
            dropped += 1

    # Only rewrite the file when something was actually removed.
    if dropped > 0:
        with open(jsonl_path, 'w', encoding='utf-8') as handle:
            handle.writelines(kept)
        print(f"[✓] Cleaned {jsonl_path}")
        print(f"    - Total Lines: {len(raw_lines)}")
        print(f"    - Removed: {dropped}")
        print(f"    - Remaining: {len(kept)}")
    else:
        print(f"[.] No changes needed for {jsonl_path}")
|
| 59 |
+
|
| 60 |
+
def main():
    """Scan a directory tree for 'data.jsonl' files and clean each one.

    Walks the given root directory recursively; every directory that
    contains a 'data.jsonl' is handed to clean_jsonl().
    """
    parser = argparse.ArgumentParser(description="Remove JSONL lines pointing to missing images.")
    # BUGFIX: nargs="?" makes the positional optional so the documented
    # default of "." actually applies. A bare positional with default=
    # but no nargs is still required by argparse, so the old code
    # errored out when the directory was omitted.
    parser.add_argument("dir", nargs="?", default=".", help="Root directory to scan (default: current dir).")
    args = parser.parse_args()

    print(f"[*] Scanning for data.jsonl files in '{args.dir}'...")

    found_files = 0

    # Walk through all directories recursively
    for root, dirs, files in os.walk(args.dir):
        if "data.jsonl" in files:
            found_files += 1
            full_path = os.path.join(root, "data.jsonl")
            clean_jsonl(full_path)

    if found_files == 0:
        print("[!] No 'data.jsonl' files found.")
    else:
        print("\n[*] Cleanup complete.")
|
| 80 |
+
|
| 81 |
+
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
|