import json
from pathlib import Path
from argparse import ArgumentParser
import os

import torch
from transformers import CLIPProcessor, CLIPModel
from datasets import load_from_disk, load_dataset
from PIL import Image
import numpy as np
import math
from tqdm.auto import tqdm
import random
import matplotlib.pyplot as plt

if os.environ.get('METRIC_PATH'):
    METRIC_PATH = os.environ.get("METRIC_PATH") + "/"
else:
    METRIC_PATH = ""

H, W = 224, 224
MODEL_PATH = "/bohr/clip-vit-large-patch14-aft9/v1/clip-vit-large-patch14"
DATASET_PATH = METRIC_PATH + "reference_dataset"
MASK_PATH = "masks.jsonl"
SPLIT = "test"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
RETAIN_RATIO = 0.0625
SCORE_OUTPUT_FILE = "score.json"
MEAN_COLOR = (0, 0, 0)
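# RETAIN_RATIO = 0.0625 allows contestants to keep at most 1/16 of the
# 224x224 image, i.e. 0.0625 * 224 * 224 = 3136 pixels (a 56x56 square at
# the largest). Everything outside the submitted crop is filled with
# MEAN_COLOR (black) before the image is passed to CLIP.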

parser = ArgumentParser()
parser.add_argument("--mask-file", default='masks.npy', type=str)
parser.add_argument("--debug", default=False, action='store_true')
args = parser.parse_args()
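# Note: the flags above are parsed but not referenced elsewhere in this script;
# the scoring below always reads the mask file from MASK_PATH ("masks.jsonl").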


def write_error_score(error_message):
    """Write error score to JSON file"""
    error_json = {
        "status": False,
        "score": {
            "public_a": 0.0,
            "public_detail": {
                "Score": 0.0,
                "Accuracy": 0.0,
            },
            "private_b": 0.0,
            "private_detail": {
                "Score": 0.0,
                "Accuracy": 0.0,
            },
        },
        "msg": f"Error: {error_message}",
    }

    with open(SCORE_OUTPUT_FILE, 'w') as f:
        json.dump(error_json, f, indent=2)
    print(f"Error written to {SCORE_OUTPUT_FILE}: {error_message}")


def safe_load_masks(mask_file_path, expected_dataset_size):
    """
    Safely load and validate the masks file from contestants.

    Parameters:
        mask_file_path: Path to the masks file
        expected_dataset_size: Expected number of test cases

    Returns:
        dict: Validated masks dictionary or None if invalid
    """
    try:
        if not os.path.exists(mask_file_path):
            write_error_score("Mask file not found.")
            return None

        file_size = os.path.getsize(mask_file_path)
        max_file_size = 50 * 1024 * 1024
        if file_size > max_file_size:
            write_error_score("Mask file too large.")
            return None

        masks = {}

        if mask_file_path.endswith('.jsonl'):
            try:
                with open(mask_file_path, 'r') as f:
                    for line_num, line in enumerate(f, 1):
                        if line.strip():
                            try:
                                data = json.loads(line.strip())
                                idx = data.get('idx')
                                coordinates = data.get('coordinates')

                                if idx is None or coordinates is None:
                                    write_error_score("Invalid JSONL format.")
                                    return None

                                masks[idx] = coordinates
                            except json.JSONDecodeError:
                                write_error_score("Invalid JSON in mask file.")
                                return None
            except Exception:
                write_error_score("Unable to load JSONL mask file.")
                return None

        if not isinstance(masks, dict):
            write_error_score("Mask data must be a dictionary.")
            return None

        if len(masks) != expected_dataset_size:
            write_error_score("Incorrect number of mask entries.")
            return None

        for idx, coordinates in masks.items():
            if not isinstance(idx, (int, np.integer, str)):
                write_error_score("Invalid mask index format.")
                return None

            if not isinstance(coordinates, (tuple, list)) or len(coordinates) != 2:
                write_error_score("Invalid mask coordinate structure.")
                return None

            try:
                (top, left), (bottom, right) = coordinates
            except (ValueError, TypeError):
                write_error_score("Invalid mask coordinate format.")
                return None

            coords = [top, left, bottom, right]
            for coord in coords:
                if not isinstance(coord, (int, np.integer)):
                    write_error_score("Mask coordinates must be integers.")
                    return None

                if not (0 <= coord <= 224):
                    write_error_score("Mask coordinates out of valid range.")
                    return None

            if not (top < bottom and left < right):
                write_error_score("Invalid mask coordinate ordering.")
                return None

            crop_area = (bottom - top) * (right - left)
            max_area = RETAIN_RATIO * 224 * 224
            if crop_area > max_area:
                write_error_score("Mask area exceeds allowed limit.")
                return None

            if crop_area <= 0:
                write_error_score("Invalid mask area.")
                return None

        return masks

    except Exception:
        write_error_score("Unexpected error loading mask file.")
        return None
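
# Each line of masks.jsonl is expected to look like (values illustrative):
#   {"idx": 0, "coordinates": [[top, left], [bottom, right]]}
# i.e. one JSON object per test item, keyed by the item's `idx`, with the
# retained crop given in 224x224 pixel coordinates.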


def check_validity(coordinates):
    """
    Check if coordinates are valid according to the requirements.
    Returns True if valid, False otherwise.
    """
    try:
        if not hasattr(coordinates, '__iter__') or len(coordinates) != 2:
            return False

        (top, left), (bottom, right) = coordinates

        if not all(isinstance(coord, (int, np.integer)) for coord in [top, left, bottom, right]):
            return False

        if not (0 <= top < 224 and 0 <= left < 224 and 1 <= bottom <= 224 and 1 <= right <= 224):
            return False

        if not (top < bottom and left < right):
            return False

        crop_area = (bottom - top) * (right - left)
        max_area = RETAIN_RATIO * 224 * 224
        if crop_area > max_area:
            return False

        return True
    except Exception:
        return False
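
# Example (illustrative): ((0, 0), (56, 56)) passes (area 3136 equals the limit
# of 0.0625 * 224 * 224), ((0, 0), (57, 56)) fails (area 3192 > 3136), and
# ((10, 10), (10, 50)) fails because top must be strictly less than bottom.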


def generate_mask_from_coordinates(image, coordinates):
    """
    Generate a binary mask from crop coordinates.

    Parameters:
        image: PIL Image
        coordinates: tuple of ((top, left), (bottom, right))

    Returns:
        numpy array: Binary mask with 1s in the crop area
    """
    H, W = 224, 224
    mask = np.zeros((H, W), dtype=np.int8)

    (top, left), (bottom, right) = coordinates
    mask[top:bottom, left:right] = 1

    return mask
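
# For coordinates ((50, 60), (100, 110)), for example, the mask is 1 on rows
# 50-99 and columns 60-109 (half-open numpy slicing) and 0 everywhere else.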


def apply_mask_with_mean(image, mask, mean_rgb=MEAN_COLOR):
    """
    Apply a binary mask to an image, replacing the dropped areas with the mean color.

    Parameters:
    - image: PIL Image (224x224)
    - mask: Binary numpy array or PIL Image (224x224) where 0 marks the area to drop and 1 the area to keep
    - mean_rgb: RGB mean values to use (default: from config)

    Returns: Modified PIL Image
    """
    img_array = np.array(image).copy()

    if isinstance(mask, Image.Image):
        mask_array = np.array(mask.convert('L')) > 127
    else:
        mask_array = mask > 0

    mask_3d = np.stack([mask_array] * 3, axis=2)

    mean_values = np.array([int(m * 255) for m in mean_rgb])

    img_array = np.where(mask_3d, img_array, mean_values.reshape(1, 1, 3))

    return Image.fromarray(img_array.astype(np.uint8))
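
# Typical use in this script (see predict_with_coordinates below):
#   mask = generate_mask_from_coordinates(image, coordinates)
#   image = apply_mask_with_mean(image, mask)  # keep the crop, black out the rest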


if __name__ == '__main__':
    try:
        try:
            dataset = load_from_disk(DATASET_PATH)
            dataset = dataset[SPLIT]
        except Exception:
            write_error_score("Unable to load reference dataset.")
            exit(1)

        masks = safe_load_masks(MASK_PATH, len(dataset))
        if masks is None:
            exit(1)

        invalid_coordinates = []
        valid_coordinates = 0
        for idx, coordinates in masks.items():
            if not check_validity(coordinates):
                invalid_coordinates.append(idx)
            else:
                valid_coordinates += 1

        if invalid_coordinates:
            print(f"Warning: Found {len(invalid_coordinates)} invalid coordinates (indices: {invalid_coordinates[:10]}{'...' if len(invalid_coordinates) > 10 else ''})")
            print("Invalid coordinates will be treated as incorrect predictions")
            print(f"Valid coordinates: {valid_coordinates}/{len(masks)}")

        print(f"Loading CLIP model and processor: {MODEL_PATH}...")
        try:
            model = CLIPModel.from_pretrained(MODEL_PATH).to(DEVICE)
            processor = CLIPProcessor.from_pretrained(MODEL_PATH)
            model.eval()
            print("Model and processor loaded successfully.")
        except Exception:
            write_error_score("Unable to load model.")
            exit(1)

        try:
            labels = sorted(list(set(dataset['name']))) + ['other']
            text_inputs = processor(text=labels, return_tensors="pt", padding=True).to(DEVICE)
        except Exception:
            write_error_score("Unable to process labels.")
            exit(1)

        label_to_index = {label: i for i, label in enumerate(labels)}
        index_to_label = {i: label for label, i in label_to_index.items()}
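
        # The text prompts are the raw class names plus 'other'; they are
        # tokenized once here and reused for every image below.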

        def predict_with_coordinates(image, coordinates):
            try:
                mask = generate_mask_from_coordinates(image, coordinates)
                assert len(mask.shape) == 2

                if image.mode != "RGB":
                    image = image.convert("RGB")
                image = apply_mask_with_mean(image, mask)
                image_processed = processor(images=image, return_tensors="pt").to(DEVICE)
                pixel_values = image_processed['pixel_values']
                outputs_full = model(pixel_values=pixel_values, **text_inputs)
                logits_full = outputs_full.logits_per_image
                predicted_index_full = logits_full.argmax(dim=-1).item()
                return predicted_index_full
            except Exception:
                return len(labels) - 1
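
        # On any failure the prediction falls back to the last label index,
        # i.e. 'other', which is scored as incorrect (no ground-truth name in
        # the dataset is expected to be 'other').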

        def get_accuracy(masks):
            try:
                with torch.no_grad():
                    correct = 0
                    for item in tqdm(dataset):
                        idx = item['idx']
                        if idx not in masks:
                            continue
                        coordinates = masks[idx]

                        if not check_validity(coordinates):
                            print(f"Invalid coordinates for item {idx}")
                            continue

                        image = item['image']
                        true_label_label = item['name']

                        true_label_idx = label_to_index[true_label_label]
                        if predict_with_coordinates(image, coordinates) == true_label_idx:
                            correct += 1
                    return correct / len(masks)
            except Exception:
                return 0.0
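
        # get_accuracy computes a single overall accuracy with len(masks) as
        # the denominator, so skipped or invalid entries count as wrong. It is
        # kept for reference but not called in the scoring flow below, which
        # uses get_accuracy_by_sets instead.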

        def get_accuracy_by_sets(masks):
            """Calculate accuracy for A set (smaller) and B set (larger) with 30:70 split"""
            try:
                random.seed(42)

                with torch.no_grad():
                    correct_a = 0
                    correct_b = 0
                    total_a = 0
                    total_b = 0

                    valid_items = []
                    for item in dataset:
                        idx = item['idx']
                        if idx in masks:
                            valid_items.append(item)

                    items_by_class = {}
                    for item in valid_items:
                        class_name = item['name']
                        if class_name not in items_by_class:
                            items_by_class[class_name] = []
                        items_by_class[class_name].append(item)

                    set_a_items = []
                    set_b_items = []

                    for class_name, class_items in items_by_class.items():
                        random.shuffle(class_items)

                        split_point = int(len(class_items) * 0.3)

                        if len(class_items) >= 2:
                            if split_point == 0:
                                split_point = 1
                            elif split_point == len(class_items):
                                split_point = len(class_items) - 1

                        class_a_items = class_items[:split_point]
                        class_b_items = class_items[split_point:]

                        set_a_items.extend(class_a_items)
                        set_b_items.extend(class_b_items)

                        print(f"Class '{class_name}': {len(class_items)} total, {len(class_a_items)} to A, {len(class_b_items)} to B")

                    print(f"Stratified split: Set A has {len(set_a_items)} items, Set B has {len(set_b_items)} items")

                    a_class_counts = {}
                    b_class_counts = {}
                    for item in set_a_items:
                        class_name = item['name']
                        a_class_counts[class_name] = a_class_counts.get(class_name, 0) + 1
                    for item in set_b_items:
                        class_name = item['name']
                        b_class_counts[class_name] = b_class_counts.get(class_name, 0) + 1

                    print("Class distribution verification:")
                    for class_name in sorted(labels):
                        a_count = a_class_counts.get(class_name, 0)
                        b_count = b_class_counts.get(class_name, 0)
                        total_count = a_count + b_count
                        if total_count > 0:
                            a_ratio = a_count / total_count
                            b_ratio = b_count / total_count
                            print(f" {class_name}: A={a_count} ({a_ratio:.1%}), B={b_count} ({b_ratio:.1%})")

                    for item in tqdm(set_a_items, desc="Processing Set A"):
                        idx = item['idx']
                        coordinates = masks[idx]

                        if not check_validity(coordinates):
                            total_a += 1
                            continue

                        image = item['image']
                        true_label_label = item['name']
                        true_label_idx = label_to_index[true_label_label]

                        masked_pred_idx = predict_with_coordinates(image, coordinates)

                        is_correct = masked_pred_idx == true_label_idx

                        total_a += 1
                        if is_correct:
                            correct_a += 1

                    for item in tqdm(set_b_items, desc="Processing Set B"):
                        idx = item['idx']
                        coordinates = masks[idx]

                        if not check_validity(coordinates):
                            total_b += 1
                            continue

                        image = item['image']
                        true_label_label = item['name']
                        true_label_idx = label_to_index[true_label_label]

                        masked_pred_idx = predict_with_coordinates(image, coordinates)

                        is_correct = masked_pred_idx == true_label_idx

                        total_b += 1
                        if is_correct:
                            correct_b += 1

                    accuracy_a = correct_a / total_a if total_a > 0 else 0
                    accuracy_b = correct_b / total_b if total_b > 0 else 0

                    print(f"Set A (30%): {total_a} samples, accuracy: {accuracy_a:.4f}")
                    print(f"Set B (70%): {total_b} samples, accuracy: {accuracy_b:.4f}")

                    return accuracy_a, accuracy_b
            except Exception:
                return 0.0, 0.0
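
        # Set A / Set B are a per-class 30/70 split of the test set, seeded
        # with random.seed(42) so the partition is reproducible. Items whose
        # coordinates fail check_validity still count toward the denominator
        # of their set but can never be counted as correct.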

        def predict_without_mask(image):
            """Predict on original image without mask"""
            try:
                if image.mode != "RGB":
                    image = image.convert("RGB")
                image_processed = processor(images=image, return_tensors="pt").to(DEVICE)
                pixel_values = image_processed['pixel_values']
                outputs_full = model(pixel_values=pixel_values, **text_inputs)
                logits_full = outputs_full.logits_per_image
                predicted_index_full = logits_full.argmax(dim=-1).item()
                return predicted_index_full
            except Exception:
                return len(labels) - 1
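
        # predict_without_mask gives an unmasked baseline prediction; it is
        # defined here (presumably for debugging/comparison) but is not called
        # in the scoring flow below.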

        accuracy_a, accuracy_b = get_accuracy_by_sets(masks)

        score_a = accuracy_a
        score_b = accuracy_b

        if not (0.0 <= score_a <= 1.0) or not isinstance(score_a, (int, float)) or math.isnan(score_a) or math.isinf(score_a):
            score_a = 0.0
        if not (0.0 <= score_b <= 1.0) or not isinstance(score_b, (int, float)) or math.isnan(score_b) or math.isinf(score_b):
            score_b = 0.0

        print(f"Score A: {score_a}, Score B: {score_b}")

        score = {
            "public_a": score_a,
            "public_detail": {
                "Score": score_a,
                "Accuracy": accuracy_a,
            },
            "private_b": score_b,
            "private_detail": {
                "Score": score_b,
                "Accuracy": accuracy_b,
            },
        }

        ret_json = {
            "status": True,
            "score": score,
            "msg": "Success!",
        }

        with open(SCORE_OUTPUT_FILE, 'w') as f:
            json.dump(ret_json, f, indent=2)

        print(f"Score saved to {SCORE_OUTPUT_FILE}")

    except Exception:
        write_error_score("Unexpected error during evaluation.")
        exit(1)
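
# On success, score.json has the shape:
#   {"status": true,
#    "score": {"public_a": <float>, "public_detail": {"Score": ..., "Accuracy": ...},
#              "private_b": <float>, "private_detail": {"Score": ..., "Accuracy": ...}},
#    "msg": "Success!"}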