|
|
import torch |
|
|
import math |
|
|
import os |
|
|
import argparse |
|
|
import logging |
|
|
|
|
|
from datasets import load_dataset, Features, Sequence, Value, Image |
|
|
from huggingface_hub import hf_hub_download |
|
|
from ultralytics import YOLO, YOLOWorld |
|
|
|
|
|
def parse_args(argv=None) -> argparse.Namespace:
    """
    Parse command-line arguments for the WIT Data Filtering System.

    Args:
        argv (list[str] | None): Optional list of argument strings to parse
            instead of ``sys.argv[1:]``. The default ``None`` preserves the
            original command-line behavior; passing an explicit list makes
            the function usable from tests and other callers.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = argparse.ArgumentParser(description="WIT Data Filtering System")
    parser.add_argument('--device', type=str, default="cuda:0", help='Device to use for inference')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size for processing')
    parser.add_argument('--output_filtered_data_file_path', type=str, default="filtered_data_file.parquet", help='Path to save filtered data file')
    parser.add_argument('--eval_mode', action='store_true', help='Enable evaluation mode')
    parser.add_argument('--filtered_image_dir', type=str, default="image_filter_result_dir", help='Directory to save filtered images')
    # argparse falls back to sys.argv[1:] when argv is None, so existing
    # callers (main()) see identical behavior.
    return parser.parse_args(argv)
|
|
|
|
|
|
|
|
# Hand-picked sample indices from the first two WIT parquet shards, used when
# --eval_mode is on. Each list names the filtering outcome the sample is
# expected to produce, so the saved per-category images can be eyeballed.
eval_data_no_face = [1496, 1750, 1818, 1952, 2303, 3088, 3365, 3878, 3923]
eval_data_have_face_no_glasses = [541, 960, 1096, 1763, 2518, 2687, 3200, 5393, 5702]
eval_data_have_face_with_eyeglasses = [990, 2246, 3298, 4596, 5401, 5578, 5754, 7397, 8879]
eval_data_have_face_with_sunglasses = [1116, 3239, 6754]
# Combined index list (30 samples) used to subset the dataset in eval mode.
eval_data_idx = eval_data_no_face + eval_data_have_face_no_glasses + eval_data_have_face_with_eyeglasses + eval_data_have_face_with_sunglasses
|
|
|
|
|
|
|
|
def load_yolo_face_model(device: str) -> YOLO:
    """
    Download and load the YOLOv8 face-detection model.

    The weights are fetched from the Hugging Face Hub (cached locally by
    ``hf_hub_download``) and the model is moved onto the requested device.

    Args:
        device (str): Device to load the model on (e.g., 'cuda:0' or 'cpu').

    Returns:
        YOLO: Loaded YOLO face detection model on ``device``.
    """
    weights_path = hf_hub_download(
        repo_id="arnabdhar/YOLOv8-Face-Detection",
        filename="model.pt",
    )
    model = YOLO(weights_path)
    return model.to(device)
|
|
|
|
|
|
|
|
def load_yolo_world_model(device: str) -> YOLOWorld:
    """
    Load the YOLO-World open-vocabulary detector for glasses detection.

    Args:
        device (str): Device to load the model on (e.g., 'cuda:0' or 'cpu').

    Returns:
        YOLOWorld: YOLO-World model on ``device``, restricted to the
        "eyeglasses" and "sunglasses" classes.
    """
    model = YOLOWorld("yolov8s-world.pt")
    model = model.to(device)
    # Restrict the open-vocabulary detector to the two classes this
    # pipeline distinguishes between.
    model.set_classes(["eyeglasses", "sunglasses"])
    return model
|
|
|
|
|
|
|
|
def main() -> None:
    """
    Main function to run the WIT Data Filtering System. Handles argument parsing, model loading,
    dataset loading, detection, filtering, and saving results.

    Pipeline: parse CLI args -> load YOLO face + YOLO-World glasses models ->
    stream two WIT parquet shards -> batched map() that records face/glasses
    detections per image -> keep only rows with an eyeglasses detection ->
    write the filtered rows to a parquet file.
    """
    args = parse_args()
    device = args.device
    batch_size = args.batch_size
    output_filtered_data_file_path = os.path.abspath(os.path.expanduser(args.output_filtered_data_file_path))
    eval_mode = args.eval_mode
    filtered_image_dir = os.path.abspath(os.path.expanduser(args.filtered_image_dir))

    # One output directory per filtering outcome; only used when filtered
    # images are being saved (eval mode).
    img_dir_no_face = os.path.join(filtered_image_dir, "no_face")
    img_dir_valid_face_no_glasses = os.path.join(filtered_image_dir, "valid_face_no_glasses")
    img_dir_valid_face_with_eyeglasses = os.path.join(filtered_image_dir, "valid_face_with_eyeglasses")
    img_dir_valid_face_with_sunglasses = os.path.join(filtered_image_dir, "valid_face_with_sunglasses")

    # Image dumping is tied to eval mode (and later disabled again for
    # datasets larger than 1000 rows, see below).
    save_filtered_image = eval_mode

    if save_filtered_image:
        os.makedirs(img_dir_no_face, exist_ok=True)
        os.makedirs(img_dir_valid_face_no_glasses, exist_ok=True)
        os.makedirs(img_dir_valid_face_with_eyeglasses, exist_ok=True)
        os.makedirs(img_dir_valid_face_with_sunglasses, exist_ok=True)

    yolo_face_model = load_yolo_face_model(device)
    yolo_world_model = load_yolo_world_model(device)
    # Confidence thresholds: faces must be high-confidence (0.7), glasses use
    # the detector's looser default-style threshold (0.25).
    face_yolo_threshold = 0.7
    eyeglasses_yolo_threshold = 0.25
    # YOLO-World assigns class ids in the order passed to set_classes().
    cls_idx_map = {"eyeglasses": 0, "sunglasses": 1}

    def detect_face_and_eyeglasses(examples, idx):
        """
        Detect faces, eyeglasses, and sunglasses in a batch of images.

        Closure over the models, thresholds, output dirs and the (mutable)
        ``save_filtered_image`` flag from the enclosing scope — the flag's
        value at map() time is the one that takes effect.

        Args:
            examples (Dict[str, Any]): Batch of examples from the dataset, containing images.
            idx (List[int]): Indices of the images in the dataset.

        Returns:
            Dict[str, Any]: Detection results including image, glasses_score,
            glasses_box, face_score, face_box. Score/box entries are None for
            rows that were filtered out at any stage, torch tensors otherwise.
        """
        images = []
        for i, image in zip(idx, examples["image"]):
            try:
                # Normalize to RGB so downstream models see 3 channels.
                image = image.convert("RGB")
                images.append(image)
            except Exception as e:
                logging.warning(f"Failed to load image at index {i}: {e}")
                # Keep a None placeholder so output columns stay aligned
                # with the input batch.
                images.append(None)
                continue

        try:
            # NOTE(review): if any convert() failed above, `images` contains
            # None entries and is passed to predict() as-is — confirm that
            # ultralytics tolerates None inputs; otherwise the whole batch
            # falls into the except branch below.
            results_face = yolo_face_model.predict(images, conf=face_yolo_threshold, device=device, verbose=False)
        except Exception as e:
            logging.error(f"Face model inference failed for batch: {e}")
            # On batch-level failure, emit all-None detections so map() can
            # continue with the remaining batches.
            return {
                "image": images,
                "glasses_score": [None]*len(images),
                "glasses_box": [None]*len(images),
                "face_score": [None]*len(images),
                "face_box": [None]*len(images),
            }

        glasses_scores = []
        glasses_boxes = []
        face_scores = []
        face_boxes = []
        for i, image, result_face in zip(idx, images, results_face):

            # Placeholder row for images that failed to load.
            if image is None:
                logging.warning(f"Skip unvalid image at index {i}")
                glasses_scores.append(None)
                glasses_boxes.append(None)
                face_scores.append(None)
                face_boxes.append(None)
                continue

            # No face detected at all.
            if len(result_face.boxes.cls) == 0:
                glasses_scores.append(None)
                glasses_boxes.append(None)
                face_scores.append(None)
                face_boxes.append(None)
                if save_filtered_image:
                    image.save(f"{img_dir_no_face}/{i}.jpg")
                continue

            face_score = []
            face_box = []
            has_valid_face = False

            # Keep only faces at least 100x100 px; smaller detections are
            # ignored entirely.
            for j in range(len(result_face.boxes.conf)):

                w, h = math.ceil(result_face.boxes.xywh[j, 2]), math.ceil(result_face.boxes.xywh[j, 3])
                if w >= 100 and h >= 100:
                    has_valid_face = True

                    score = result_face.boxes.conf[j]
                    box_xyxy = [int(x) for x in result_face.boxes.xyxy[j].tolist()]
                    face_score.append(score)
                    face_box.append(box_xyxy)
                else:
                    continue

            if not has_valid_face:
                glasses_scores.append(None)
                glasses_boxes.append(None)
                face_scores.append(None)
                face_boxes.append(None)
                continue
            else:
                # Faces are recorded here; the glasses entries for this row
                # are appended by whichever branch below runs.
                face_scores.append(torch.tensor(face_score))
                face_boxes.append(torch.tensor(face_box))

            try:
                # Glasses detection runs per image (not batched), only for
                # rows that passed the valid-face gate.
                result_eyeglasses = yolo_world_model.predict(image, conf=eyeglasses_yolo_threshold, device=device, verbose=False)[0]
            except Exception as e:
                logging.error(f"Eyeglasses model inference failed at index {i}: {e}")
                glasses_scores.append(None)
                glasses_boxes.append(None)
                continue

            # Valid face but neither eyeglasses nor sunglasses found.
            if len(result_eyeglasses.boxes.cls) == 0:
                glasses_scores.append(None)
                glasses_boxes.append(None)
                if save_filtered_image:
                    image.save(f"{img_dir_valid_face_no_glasses}/{i}.jpg")
                continue

            glasses_score = []
            glasses_box = []
            is_eyeglasses = True
            for j in range(len(result_eyeglasses.boxes.conf)):

                category = result_eyeglasses.boxes.cls[j]
                if category == cls_idx_map["eyeglasses"]:
                    score = result_eyeglasses.boxes.conf[j]
                    box_xyxy = [int(x) for x in result_eyeglasses.boxes.xyxy[j].tolist()]
                    glasses_score.append(score)
                    glasses_box.append(box_xyxy)
                elif category == cls_idx_map["sunglasses"]:
                    # Any sunglasses detection disqualifies the image, even
                    # if eyeglasses were also detected.
                    is_eyeglasses = False
                    break

            if not is_eyeglasses:

                glasses_scores.append(None)
                glasses_boxes.append(None)
                if save_filtered_image:
                    image.save(f"{img_dir_valid_face_with_sunglasses}/{i}.jpg")
            else:

                glasses_scores.append(torch.tensor(glasses_score))
                glasses_boxes.append(torch.tensor(glasses_box))
                if save_filtered_image:
                    image.save(f"{img_dir_valid_face_with_eyeglasses}/{i}.jpg")

        return {
            "image": images,
            "glasses_score": glasses_scores,
            "glasses_box": glasses_boxes,
            "face_score": face_scores,
            "face_box": face_boxes,
        }

    # Stream only the first two of 330 WIT training shards.
    base_url = "https://huggingface.co/datasets/wikimedia/wit_base/resolve/main/data/"
    data_files = {"train": [base_url + "train-00000-of-00330.parquet", base_url + "train-00001-of-00330.parquet"]}
    wit = load_dataset("parquet", data_files=data_files, split="train", trust_remote_code=True).cast_column('image', Image())

    if eval_mode:
        # Restrict to the 30 hand-picked samples and force image dumping.
        wit = wit.select(eval_data_idx)
        save_filtered_image = True

    # Safety valve: never dump per-image files for large runs.
    if len(wit) > 1000:
        save_filtered_image = False

    # Explicit output schema for map(); half-precision scores and int16 boxes
    # keep the parquet output compact.
    features = {
        "image": Image(),
        "glasses_score": Sequence(feature=Value(dtype='float16', id=None), length=-1, id=None),
        "glasses_box": Sequence(feature=Sequence(feature=Value(dtype='int16', id=None), length=-1, id=None), length=-1, id=None),
        "face_score": Sequence(feature=Value(dtype='float16', id=None), length=-1, id=None),
        "face_box": Sequence(feature=Sequence(feature=Value(dtype='int16', id=None), length=-1, id=None), length=-1, id=None)
    }

    # Drop every original column except the image itself.
    remove_columns = wit.column_names
    remove_columns.remove("image")

    wit = wit.map(
        detect_face_and_eyeglasses,
        with_indices=True,
        batched=True,
        batch_size=batch_size,
        features=Features(features),
        remove_columns=remove_columns
    )

    # Keep only rows whose glasses_score is non-empty/non-None, i.e. images
    # with a valid face wearing eyeglasses (sunglasses rows were set to None).
    wit_filter = wit.filter(lambda example: example["glasses_score"])

    wit_filter.to_parquet(output_filtered_data_file_path)
|
|
|
|
|
# Script entry point: run the full filtering pipeline when executed directly.
if __name__ == "__main__":
    main()
|
|
|