# WIT-base-eyeglasses / script / wit_filter.py
# Uploaded by ynkuai ("Upload 33 files", commit e28b6bb, verified)
import torch
import math
import os
import argparse
import logging
from datasets import load_dataset, Features, Sequence, Value, Image
from huggingface_hub import hf_hub_download
from ultralytics import YOLO, YOLOWorld
def parse_args() -> argparse.Namespace:
    """
    Parse command-line arguments for the WIT Data Filtering System.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    ap = argparse.ArgumentParser(description="WIT Data Filtering System")
    ap.add_argument(
        '--device', type=str, default="cuda:0",
        help='Device to use for inference',
    )
    ap.add_argument(
        '--batch_size', type=int, default=32,
        help='Batch size for processing',
    )
    ap.add_argument(
        '--output_filtered_data_file_path', type=str, default="filtered_data_file.parquet",
        help='Path to save filtered data file',
    )
    ap.add_argument(
        '--eval_mode', action='store_true',
        help='Enable evaluation mode',
    )
    ap.add_argument(
        '--filtered_image_dir', type=str, default="image_filter_result_dir",
        help='Directory to save filtered images',
    )
    return ap.parse_args()
# Evaluation data index in original wit dataset.
# Hand-picked row indices grouped by ground-truth category; presumably curated
# by manual inspection of the first two wit_base shards — TODO confirm.
eval_data_no_face = [1496, 1750, 1818, 1952, 2303, 3088, 3365, 3878, 3923]
eval_data_have_face_no_glasses = [541, 960, 1096, 1763, 2518, 2687, 3200, 5393, 5702]
eval_data_have_face_with_eyeglasses = [990, 2246, 3298, 4596, 5401, 5578, 5754, 7397, 8879]
eval_data_have_face_with_sunglasses = [1116, 3239, 6754]
# Union of all four groups; used with Dataset.select() in eval mode.
eval_data_idx = eval_data_no_face + eval_data_have_face_no_glasses + eval_data_have_face_with_eyeglasses + eval_data_have_face_with_sunglasses
# YOLOv8-face-detection Model: detect face
def load_yolo_face_model(device: str) -> YOLO:
    """
    Download and load the YOLOv8 face-detection checkpoint from the Hub.

    Args:
        device (str): Device to load the model on (e.g., 'cuda:0' or 'cpu').

    Returns:
        YOLO: Face-detection model moved onto ``device``.
    """
    checkpoint = hf_hub_download(
        repo_id="arnabdhar/YOLOv8-Face-Detection",
        filename="model.pt",
    )
    model = YOLO(checkpoint)
    return model.to(device)
# YOLO-World Model: detect eyeglasses, sunglasses
def load_yolo_world_model(device: str) -> YOLOWorld:
    """
    Load the open-vocabulary YOLO-World model and restrict it to the two
    glasses classes used by the filtering pipeline.

    Args:
        device (str): Device to load the model on (e.g., 'cuda:0' or 'cpu').

    Returns:
        YOLOWorld: Model configured to detect "eyeglasses" and "sunglasses".
    """
    model = YOLOWorld("yolov8s-world.pt")
    model = model.to(device)
    model.set_classes(["eyeglasses", "sunglasses"])
    return model
def main() -> None:
    """
    Main function to run the WIT Data Filtering System. Handles argument parsing, model loading,
    dataset loading, detection, filtering, and saving results.

    Pipeline: parse CLI args -> load the two YOLO models -> stream two shards of
    wikimedia/wit_base -> batch-detect faces and glasses via Dataset.map ->
    keep only rows with a non-empty eyeglasses score -> write a parquet file.
    """
    args = parse_args()
    device = args.device
    batch_size = args.batch_size
    # Normalize user-supplied paths (expand ~, make absolute).
    output_filtered_data_file_path = os.path.abspath(os.path.expanduser(args.output_filtered_data_file_path))
    eval_mode = args.eval_mode
    filtered_image_dir = os.path.abspath(os.path.expanduser(args.filtered_image_dir))
    # Path for saving the filtered images in evaluation.
    img_dir_no_face = os.path.join(filtered_image_dir, "no_face")
    img_dir_valid_face_no_glasses = os.path.join(filtered_image_dir, "valid_face_no_glasses")
    img_dir_valid_face_with_eyeglasses = os.path.join(filtered_image_dir, "valid_face_with_eyeglasses")
    img_dir_valid_face_with_sunglasses = os.path.join(filtered_image_dir, "valid_face_with_sunglasses")
    save_filtered_image = eval_mode
    # If the dataset is big, force the save_filtered_image to be `False` (will be set after loading dataset).
    if save_filtered_image:
        os.makedirs(img_dir_no_face, exist_ok=True)
        os.makedirs(img_dir_valid_face_no_glasses, exist_ok=True)
        os.makedirs(img_dir_valid_face_with_eyeglasses, exist_ok=True)
        os.makedirs(img_dir_valid_face_with_sunglasses, exist_ok=True)
    # Load models
    yolo_face_model = load_yolo_face_model(device)
    yolo_world_model = load_yolo_world_model(device)
    # Confidence thresholds for face and glasses detection respectively.
    face_yolo_threshold = 0.7
    eyeglasses_yolo_threshold = 0.25
    # Class indices follow the order passed to YOLOWorld.set_classes
    # (["eyeglasses", "sunglasses"]) in load_yolo_world_model.
    cls_idx_map = {"eyeglasses": 0, "sunglasses": 1}

    def detect_face_and_eyeglasses(examples, idx):
        """
        Detect faces, eyeglasses, and sunglasses in a batch of images.

        Args:
            examples (Dict[str, Any]): Batch of examples from the dataset, containing images.
            idx (List[int]): Indices of the images in the dataset.

        Returns:
            Dict[str, Any]: Detection results including image, glasses_score, glasses_box, face_score, face_box.
            Per row: no valid face -> all four detection features are None;
            valid face, no eyeglasses -> only face_* set; valid face with
            eyeglasses -> all four set.
        """
        images = []
        for i, image in zip(idx, examples["image"]):
            try:
                image = image.convert("RGB")
                images.append(image)
            except Exception as e:
                logging.warning(f"Failed to load image at index {i}: {e}")
                # Append a placeholder so `images` stays index-aligned with `idx`.
                images.append(None)
                continue
        # Detect faces for the image batch
        # NOTE(review): if any entry of `images` is None (failed load above),
        # the predict call presumably raises and the whole batch falls into the
        # except branch below — confirm this is the intended behavior.
        try:
            results_face = yolo_face_model.predict(images, conf=face_yolo_threshold, device=device, verbose=False)
        except Exception as e:
            logging.error(f"Face model inference failed for batch: {e}")
            # Return None for all images in this batch
            return {
                "image": images,
                "glasses_score": [None]*len(images),
                "glasses_box": [None]*len(images),
                "face_score": [None]*len(images),
                "face_box": [None]*len(images),
            }
        glasses_scores = []
        glasses_boxes = []
        face_scores = []
        face_boxes = []
        for i, image, result_face in zip(idx, images, results_face):
            # Iterate across the face detection result for each image.
            if image is None:
                logging.warning(f"Skip unvalid image at index {i}")
                glasses_scores.append(None)
                glasses_boxes.append(None)
                face_scores.append(None)
                face_boxes.append(None)
                continue
            # 1. No face detected.
            if len(result_face.boxes.cls) == 0:
                glasses_scores.append(None)
                glasses_boxes.append(None)
                face_scores.append(None)
                face_boxes.append(None)
                if save_filtered_image:
                    image.save(f"{img_dir_no_face}/{i}.jpg")
                continue
            # 2. Face detected.
            face_score = []
            face_box = []
            has_valid_face = False
            # Filter the face detection results based on the bbox size.
            for j in range(len(result_face.boxes.conf)):
                # Iterate across the detected face bboxes in current image.
                # xywh columns 2 and 3 are box width and height in pixels.
                w, h = math.ceil(result_face.boxes.xywh[j, 2]), math.ceil(result_face.boxes.xywh[j, 3])
                if w >= 100 and h >= 100:
                    # Only faces at least 100x100 px count as valid.
                    has_valid_face = True
                    score = result_face.boxes.conf[j]
                    box_xyxy = [int(x) for x in result_face.boxes.xyxy[j].tolist()]  # [x0, y0, x1, y1]
                    face_score.append(score)
                    face_box.append(box_xyxy)
                else:
                    continue
            # 3. Detected faces are all smaller than 100-px.
            if not has_valid_face:
                glasses_scores.append(None)
                glasses_boxes.append(None)
                face_scores.append(None)
                face_boxes.append(None)
                continue
            else:
                face_scores.append(torch.tensor(face_score))
                face_boxes.append(torch.tensor(face_box))
            # 4. Have at least one valid face.
            # Detect eyeglasses and sunglasses for the single image with valid face.
            try:
                result_eyeglasses = yolo_world_model.predict(image, conf=eyeglasses_yolo_threshold, device=device, verbose=False)[0]
            except Exception as e:
                logging.error(f"Eyeglasses model inference failed at index {i}: {e}")
                glasses_scores.append(None)
                glasses_boxes.append(None)
                continue
            # 5. No eyeglasses detected.
            if len(result_eyeglasses.boxes.cls) == 0:
                glasses_scores.append(None)
                glasses_boxes.append(None)
                if save_filtered_image:
                    image.save(f"{img_dir_valid_face_no_glasses}/{i}.jpg")
                continue
            glasses_score = []
            glasses_box = []
            is_eyeglasses = True
            for j in range(len(result_eyeglasses.boxes.conf)):
                # Iterate across the detected glasses bboxes in current image.
                category = result_eyeglasses.boxes.cls[j]
                if category == cls_idx_map["eyeglasses"]:
                    score = result_eyeglasses.boxes.conf[j]
                    box_xyxy = [int(x) for x in result_eyeglasses.boxes.xyxy[j].tolist()]  # [x0, y0, x1, y1]
                    glasses_score.append(score)
                    glasses_box.append(box_xyxy)
                elif category == cls_idx_map["sunglasses"]:
                    # Any sunglasses detection disqualifies the whole image.
                    is_eyeglasses = False
                    break
            if not is_eyeglasses:
                # 6. Sunglasses detected, drop the eyeglasses bbox.
                glasses_scores.append(None)
                glasses_boxes.append(None)
                if save_filtered_image:
                    image.save(f"{img_dir_valid_face_with_sunglasses}/{i}.jpg")
            else:
                # 7. Sunglasses not detected, keep the eyeglasses bbox.
                glasses_scores.append(torch.tensor(glasses_score))  # [n]
                glasses_boxes.append(torch.tensor(glasses_box))  # [n, 4]
                if save_filtered_image:
                    image.save(f"{img_dir_valid_face_with_eyeglasses}/{i}.jpg")
        # No valid face: All of the four features are None.
        # Valid face without eyeglasses: "face_score" and "face_box" has value. "glasses_score" and "glasses_box" are None.
        # Valid face with eyeglasses: All of the four features are not None.
        return {
            "image": images,
            "glasses_score": glasses_scores,
            "glasses_box": glasses_boxes,
            "face_score": face_scores,
            "face_box": face_boxes,
        }

    # Load the first two shards of the wit-base dataset.
    base_url = "https://huggingface.co/datasets/wikimedia/wit_base/resolve/main/data/"
    data_files = {"train": [base_url + "train-00000-of-00330.parquet", base_url + "train-00001-of-00330.parquet"]}
    wit = load_dataset("parquet", data_files=data_files, split="train", trust_remote_code=True).cast_column('image', Image())
    # Select the curated subset for evaluation.
    if eval_mode:
        wit = wit.select(eval_data_idx)
        save_filtered_image = True
    # If the dataset is big, force the save_filtered_image to be `False`.
    if len(wit) > 1000:
        save_filtered_image = False
    # Define new columns to store detection results.
    features = {
        "image": Image(),
        "glasses_score": Sequence(feature=Value(dtype='float16', id=None), length=-1, id=None),
        "glasses_box": Sequence(feature=Sequence(feature=Value(dtype='int16', id=None), length=-1, id=None), length=-1, id=None),
        "face_score": Sequence(feature=Value(dtype='float16', id=None), length=-1, id=None),
        "face_box": Sequence(feature=Sequence(feature=Value(dtype='int16', id=None), length=-1, id=None), length=-1, id=None)
    }
    # Delete unrelated columns.
    remove_columns = wit.column_names
    remove_columns.remove("image")
    # Run the detection.
    wit = wit.map(
        detect_face_and_eyeglasses,
        with_indices=True,
        batched=True,
        batch_size=batch_size,
        features=Features(features),
        remove_columns=remove_columns
    )
    # Filter the dataset based on detection result.
    # A row is kept when "glasses_score" is truthy, i.e. a non-empty score list.
    wit_filter = wit.filter(lambda example: example["glasses_score"])
    # Save the filtered dataset as parquet file.
    wit_filter.to_parquet(output_filtered_data_file_path)
# Script entry point.
if __name__ == "__main__":
    main()