| import cv2 |
| import numpy as np |
| import os |
| from PIL import Image, ExifTags |
| import random |
| import argparse |
| import time |
|
|
| |
# Raise OpenCV's FFmpeg read-attempt limit so long or high-bitrate videos
# don't abort early with a "read attempts exceeded" error. Must be set
# before the first cv2.VideoCapture call touches FFmpeg.
os.environ["OPENCV_FFMPEG_READ_ATTEMPTS"] = "50000"


# Default configuration; every value below can be overridden via the
# CLI flags defined in the __main__ block.
video_path = "path/to/video.mp4"   # input video (overridden by --video)
output_dir = "outputs/preprocessed"  # where crops are written (--output)


frame_interval = 60    # keep one frame out of every N (--interval)
zoom_level = 5.0       # crop window = frame size / zoom_level (--zoom)
crops_per_frame = 10   # random crops per kept frame (--crops)
manual_crop = False    # True = single fixed-position crop (--manual)
crop_x_center = 0.5    # manual crop center X in [0, 1] (--crop_x)
crop_y_center = 0.5    # manual crop center Y in [0, 1] (--crop_y)
|
|
|
|
def extract_and_preprocess_frames(
    video_path,
    output_dir,
    frame_interval=10,
    zoom_level=5.0,
    crops_per_frame=5,
    manual_crop=False,
    crop_x=0.5,
    crop_y=0.5,
    start_frame=0,
    end_frame=None,
    segment_id=0,
):
    """
    Extract frames from a video segment and generate zoomed crops.

    Workflow:
        1) Seek to start_frame and read frames in [start_frame, end_frame)
        2) Keep every nth frame (frame_interval)
        3) Crop a zoom window (manual position, or crops_per_frame random ones)
        4) Resize each crop to 640x640, apply CLAHE, and save as JPG
           (see process_crop)

    Args:
        video_path: Path to the input video file.
        output_dir: Root output directory; crops go into a per-segment
            subdirectory ("segment_<segment_id + 1>").
        frame_interval: Keep one frame out of every `frame_interval`.
        zoom_level: Zoom factor; crop window is frame size / zoom_level.
        crops_per_frame: Number of random crops per kept frame
            (ignored when manual_crop is True).
        manual_crop: If True, take one crop centered at (crop_x, crop_y).
        crop_x, crop_y: Relative crop center in [0, 1] for manual mode.
        start_frame, end_frame: Frame range [start_frame, end_frame);
            end_frame of None (or past the end) means the whole video.
        segment_id: Zero-based segment index, used for directory naming
            and log messages.

    Returns:
        tuple (saved_count, total_saved):
            saved_count: number of extracted frames processed
            total_saved: total number of crops saved

    Raises:
        ValueError: If the video file cannot be opened.
    """
    os.makedirs(output_dir, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video file: {video_path}")

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Clamp the requested range to the actual length of the video.
    if end_frame is None or end_frame > total_frames:
        end_frame = total_frames

    print(f"Video loaded: {video_path}")
    print(f"Total frames: {total_frames}")
    print(f"FPS: {fps}")
    print(f"Processing segment {segment_id + 1}: frames {start_frame} to {end_frame}")
    print(f"Extracting every {frame_interval} frames")
    print(f"Zooming {zoom_level * 100}% and cropping to 640x640")
    print(f"Crops per frame: {crops_per_frame}")
    print(f"Manual crop: {manual_crop}, Position: ({crop_x}, {crop_y})")

    # One CLAHE object reused for every crop in this segment.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    count = start_frame
    saved_count = 0  # extracted frames processed
    total_saved = 0  # crops written (crops_per_frame per frame in random mode)

    segment_dir = os.path.join(output_dir, f"segment_{segment_id + 1}")
    os.makedirs(segment_dir, exist_ok=True)

    while count < end_frame:
        ret, frame = cap.read()
        if not ret:
            print(f"Error reading frame at position {count}. Breaking out of loop.")
            break

        if (count - start_frame) % frame_interval == 0:
            # NOTE: the original code round-tripped the frame through PIL to
            # apply EXIF orientation, but Image.fromarray never carries EXIF
            # data, so the rotation branch could never fire. The dead
            # conversion has been removed; decoded video frames are used
            # directly.
            height, width = frame.shape[:2]

            crop_width = int(width / zoom_level)
            crop_height = int(height / zoom_level)

            # Guard against degenerate windows (e.g. extreme zoom on a tiny
            # frame), which would produce empty crops.
            if crop_width <= 0 or crop_height <= 0:
                print(f"Skipping frame {count}: invalid crop size ({crop_width}, {crop_height})")
                count += 1
                continue

            if manual_crop:
                process_crop(
                    frame, count, 0, crop_x, crop_y,
                    crop_width, crop_height, width, height,
                    segment_dir, clahe
                )
                total_saved += 1
            else:
                for i in range(crops_per_frame):
                    # Keep random centers away from the borders so the crop
                    # window rarely needs clamping.
                    random_x = random.uniform(0.1, 0.9)
                    random_y = random.uniform(0.1, 0.9)

                    process_crop(
                        frame, count, i, random_x, random_y,
                        crop_width, crop_height, width, height,
                        segment_dir, clahe
                    )
                    total_saved += 1

            # BUG FIX: count each extracted frame exactly once. The original
            # incremented saved_count once per *crop* in random mode and then
            # multiplied by crops_per_frame again at the end, over-counting
            # total crops by a factor of crops_per_frame.
            saved_count += 1

            # Progress report every 10 extracted frames. (The original check
            # ran on every frame, so saved_count == 0 printed continuously
            # until the first save.)
            if saved_count % 10 == 0:
                print(f"Segment {segment_id + 1}: Processed {saved_count} extracted frames (current frame={count})")

        count += 1

    cap.release()

    print(
        f"Segment {segment_id + 1} completed! "
        f"Processed {saved_count} extracted frames and saved {total_saved} crops."
    )
    return saved_count, total_saved
|
|
|
|
def process_crop(
    frame,
    frame_count,
    crop_index,
    rel_x,
    rel_y,
    crop_width,
    crop_height,
    width,
    height,
    output_dir,
    clahe,
):
    """
    Create one crop from a frame, resize to 640x640, enhance contrast, and save.

    Args:
        frame: Source image (BGR numpy array as produced by cv2).
        frame_count: Absolute frame index, encoded in the output filename.
        crop_index: Index of this crop within the frame (0 = first/manual).
        rel_x, rel_y: Relative crop center coordinates in [0, 1].
        crop_width, crop_height: Crop window size in pixels.
        width, height: Full frame dimensions in pixels.
        output_dir: Directory the JPG is written into.
        clahe: cv2 CLAHE object used for contrast enhancement.
    """
    # Convert the relative center to absolute pixel coordinates.
    center_x = int(rel_x * width)
    center_y = int(rel_y * height)

    start_x = center_x - (crop_width // 2)
    start_y = center_y - (crop_height // 2)

    # Clamp the window so it stays inside the frame.
    start_x = max(0, min(start_x, width - crop_width))
    start_y = max(0, min(start_y, height - crop_height))

    end_x = min(start_x + crop_width, width)
    end_y = min(start_y + crop_height, height)

    # If the frame is smaller than the crop window, anchor at the edge so we
    # still take the largest crop available.
    if end_x - start_x < crop_width:
        start_x = max(0, end_x - crop_width)
    if end_y - start_y < crop_height:
        start_y = max(0, end_y - crop_height)

    try:
        cropped = frame[start_y:end_y, start_x:end_x]

        if cropped.size == 0:
            print(f"Empty crop at frame {frame_count}, crop {crop_index}")
            return

        zoomed = cv2.resize(cropped, (640, 640), interpolation=cv2.INTER_LINEAR)

        # CLAHE operates on single channels; apply it per channel for color.
        if len(zoomed.shape) == 3:
            enhanced = cv2.merge([
                clahe.apply(zoomed[:, :, 0]),
                clahe.apply(zoomed[:, :, 1]),
                clahe.apply(zoomed[:, :, 2]),
            ])
        else:
            enhanced = clahe.apply(zoomed)

        # Encode frame index (plus crop index for extra crops) in the name.
        if crop_index == 0:
            filename = f"frame5_{frame_count:06d}"
        else:
            filename = f"frame5_{frame_count:06d}_crop{crop_index}"

        # Append the crop center as percentages so outputs are traceable.
        pos_info = f"_x{int(rel_x * 100):03d}_y{int(rel_y * 100):03d}"
        # BUG FIX: the original wrote a literal "(unknown)" prefix instead of
        # the computed `filename`, so crops from different frames at the same
        # position collided and overwrote each other.
        frame_filename = os.path.join(output_dir, f"{filename}{pos_info}.jpg")

        cv2.imwrite(frame_filename, enhanced)

    except Exception as e:
        # Best-effort: one bad crop must not abort the whole extraction run.
        print(f"Error processing crop at frame {frame_count}, crop {crop_index}: {e}")
|
|
|
def process_video_in_segments(
    video_path,
    output_dir,
    frame_interval,
    zoom_level,
    crops_per_frame,
    manual_crop,
    crop_x,
    crop_y,
    segment_size=5000,
    overlap=100,
):
    """
    Process a video in fixed-size segments to avoid memory/decoder
    instability on long videos.

    Each segment covers [start, start + segment_size) and consecutive
    segments share `overlap` frames.

    Note:
        - Overlap can help avoid missing frames near segment boundaries.
        - But overlap can also create duplicate outputs if the same frame
          is processed in two segments.

    Raises:
        ValueError: If the video cannot be opened, or segment_size <= overlap.
    """
    # Probe the video once just to learn its length.
    probe = cv2.VideoCapture(video_path)
    if not probe.isOpened():
        raise ValueError(f"Could not open video file: {video_path}")
    total_frames = int(probe.get(cv2.CAP_PROP_FRAME_COUNT))
    probe.release()

    print(f"Total frames in video: {total_frames}")
    print(f"Processing in segments of {segment_size} frames with {overlap} frame overlap")

    if segment_size <= overlap:
        raise ValueError("segment_size must be greater than overlap")

    # Segment starts advance by (segment_size - overlap) so neighbors share
    # `overlap` frames.
    step = segment_size - overlap
    segment_starts = list(range(0, total_frames, step))

    frames_total = 0
    crops_total = 0

    for idx, seg_start in enumerate(segment_starts):
        seg_end = min(seg_start + segment_size, total_frames)

        print(f"\n{'=' * 80}")
        print(f"Processing segment {idx + 1}/{len(segment_starts)}: frames {seg_start} to {seg_end}")
        print(f"{'=' * 80}\n")

        # Brief pause between segments to let the decoder settle.
        if idx > 0:
            time.sleep(2)

        try:
            n_frames, n_crops = extract_and_preprocess_frames(
                video_path=video_path,
                output_dir=output_dir,
                frame_interval=frame_interval,
                zoom_level=zoom_level,
                crops_per_frame=crops_per_frame,
                manual_crop=manual_crop,
                crop_x=crop_x,
                crop_y=crop_y,
                start_frame=seg_start,
                end_frame=seg_end,
                segment_id=idx,
            )
        except Exception as e:
            # A failed segment is logged but does not stop the run.
            print(f"Error processing segment {idx + 1}: {e}")
            print("Continuing with next segment...")
        else:
            frames_total += n_frames
            crops_total += n_crops

    print(f"\n{'=' * 80}")
    print(
        f"Processing complete! "
        f"Processed {frames_total} extracted frames and saved {crops_total} crops."
    )
    print(f"{'=' * 80}")
|
|
|
|
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Extract frames from a video and generate zoomed crops with optional CLAHE enhancement"
    )

    # Input / output paths.
    parser.add_argument("--video", type=str, default=video_path, help="Path to input video")
    parser.add_argument("--output", type=str, default=output_dir, help="Directory to save processed crops")

    # Extraction and cropping behavior.
    parser.add_argument("--interval", type=int, default=frame_interval, help="Extract every nth frame")
    parser.add_argument("--zoom", type=float, default=zoom_level, help="Zoom factor (e.g., 5.0 = 500%%)")
    parser.add_argument("--crops", type=int, default=crops_per_frame, help="Random crops per extracted frame")
    parser.add_argument("--manual", action="store_true", help="Use one manual crop position instead of random crops")
    parser.add_argument("--crop_x", type=float, default=crop_x_center, help="Manual crop center X in [0,1]")
    parser.add_argument("--crop_y", type=float, default=crop_y_center, help="Manual crop center Y in [0,1]")

    # Segmentation tuning.
    parser.add_argument("--segment_size", type=int, default=5000, help="Frames per segment")
    parser.add_argument("--overlap", type=int, default=100, help="Segment overlap in frames")

    args = parser.parse_args()

    def _fail(message):
        # Print the validation error and exit with status 1.
        print(message)
        raise SystemExit(1)

    # Validate arguments before touching the video.
    if not args.video or not os.path.isfile(args.video):
        _fail(f"Error: Video file '{args.video}' does not exist.")

    if args.interval <= 0:
        _fail("Error: --interval must be > 0")

    if args.zoom <= 0:
        _fail("Error: --zoom must be > 0")

    if args.crops <= 0 and not args.manual:
        _fail("Error: --crops must be > 0 when not using --manual")

    if not (0.0 <= args.crop_x <= 1.0 and 0.0 <= args.crop_y <= 1.0):
        _fail("Error: --crop_x and --crop_y must be in [0,1]")

    try:
        process_video_in_segments(
            video_path=args.video,
            output_dir=args.output,
            frame_interval=args.interval,
            zoom_level=args.zoom,
            crops_per_frame=args.crops,
            manual_crop=args.manual,
            crop_x=args.crop_x,
            crop_y=args.crop_y,
            segment_size=args.segment_size,
            overlap=args.overlap,
        )
    except Exception as e:
        print(f"An error occurred: {e}")