Datasets:
Added Pixel
- Individual-Contest/.DS_Store +0 -0
- Individual-Contest/Pixel/Pixel.ipynb +643 -0
- Individual-Contest/Pixel/Solution/Pixel_Solution.ipynb +519 -0
- Individual-Contest/Pixel/Solution/Scoring/metrics.py +549 -0
- Individual-Contest/Pixel/Solution/Scoring/reference_dataset/dataset_dict.json +1 -0
- Individual-Contest/Pixel/Solution/Scoring/reference_dataset/test/data-00000-of-00001.arrow +3 -0
- Individual-Contest/Pixel/Solution/Scoring/reference_dataset/test/dataset_info.json +49 -0
- Individual-Contest/Pixel/Solution/Scoring/reference_dataset/test/state.json +13 -0
- Individual-Contest/Pixel/Solution/figs/IOAI-Logo.png +3 -0
- Individual-Contest/Pixel/Solution/test_set/dataset_dict.json +1 -0
- Individual-Contest/Pixel/Solution/test_set/test/data-00000-of-00001.arrow +3 -0
- Individual-Contest/Pixel/Solution/test_set/test/dataset_info.json +41 -0
- Individual-Contest/Pixel/Solution/test_set/test/state.json +13 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/config.json +46 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/merges.txt +0 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/model.safetensors +3 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/preprocessor_config.json +28 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/special_tokens_map.json +30 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/tokenizer.json +0 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/tokenizer_config.json +32 -0
- Individual-Contest/Pixel/clip-vit-large-patch14/vocab.json +0 -0
- Individual-Contest/Pixel/figs/IOAI-Logo.png +3 -0
- Individual-Contest/Pixel/figs/Pixel Fig 1.png +3 -0
- Individual-Contest/Pixel/figs/Pixel Fig 2.png +3 -0
- Individual-Contest/Pixel/training_set/dataset_dict.json +1 -0
- Individual-Contest/Pixel/training_set/train/data-00000-of-00001.arrow +3 -0
- Individual-Contest/Pixel/training_set/train/dataset_info.json +45 -0
- Individual-Contest/Pixel/training_set/train/state.json +13 -0
Individual-Contest/.DS_Store
CHANGED
|
Binary files a/Individual-Contest/.DS_Store and b/Individual-Contest/.DS_Store differ
|
|
|
Individual-Contest/Pixel/Pixel.ipynb
ADDED
|
@@ -0,0 +1,643 @@
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "362e9f36-2300-4851-8f49-b952e62a2c78",
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"source": [
|
| 8 |
+
"<img src=\"./figs/IOAI-Logo.png\" alt=\"IOAI Logo\" width=\"200\" height=\"auto\">\n",
|
| 9 |
+
"\n",
|
| 10 |
+
"[IOAI 2025 (Beijing, China), Individual Contest](https://ioai-official.org/china-2025)\n",
|
| 11 |
+
"\n",
|
| 12 |
+
"[](https://colab.research.google.com/github/IOAI-official/IOAI-2025/blob/main/Individual-Contest/Pixel/Pixel.ipynb)"
|
| 13 |
+
]
|
| 14 |
+
},
|
| 15 |
+
{
|
| 16 |
+
"cell_type": "markdown",
|
| 17 |
+
"id": "4509a190",
|
| 18 |
+
"metadata": {},
|
| 19 |
+
"source": [
|
| 20 |
+
"# Pixel Efficiency\n",
|
| 21 |
+
"\n",
|
| 22 |
+
"## 1. Problem Description\n",
|
| 23 |
+
"\n",
|
| 24 |
+
"You are a student in wildlife biology, working on a groundbreaking research project at the Starr Park Research Center. Your team has deployed thousands of camera traps across remote wilderness areas to monitor endangered species populations. However, the satellite internet connections in these remote locations have extremely limited bandwidth. Your job is to write code that identifies the most critical pixels in each wildlife photograph so that only the essential visual information needs to be transmitted back to headquarters.\n",
|
| 25 |
+
"\n",
|
| 26 |
+
"<img src=\"./figs/Pixel Fig 1.png\" width=\"400\">\n",
|
| 27 |
+
"\n",
|
| 28 |
+
"## 2. Dataset\n",
|
| 29 |
+
"\n",
|
| 30 |
+
"The dataset consists of a training set and a test set. Datasets are loaded using `load_from_disk`, and are in the format of `datasets`. Test set is not visible to the contestants.\n",
|
| 31 |
+
"\n",
|
| 32 |
+
"In the dataset there are the following fields:\n",
|
| 33 |
+
"\n",
|
| 34 |
+
"- `image`: the image are RGB full color images in PIL format, the size of each image is (224, 224)\n",
|
| 35 |
+
"- `name`: the animal species label\n",
|
| 36 |
+
"- `idx`: unique identifiers used to track the records.\n",
|
| 37 |
+
"\n",
|
| 38 |
+
"1. **Training Set (`train_dataset` folder)**:\n",
|
| 39 |
+
" - The training set is used for training your models/ doing experimentations on and can be accessed and downloaded directly during the competition.\n",
|
| 40 |
+
" - There are 700 images in the training set.\n",
|
| 41 |
+
"\n",
|
| 42 |
+
"2. **Test Set (`test_dataset` folder)**: \n",
|
| 43 |
+
" - These follow the same format as the training set but do not contain the `name` field.\n",
|
| 44 |
+
" - There are 698 images in test set, which had been separated into 2 testing sets within the ratio of 3:7, i.e. 30% of the data would be used to calculate the Leaderboard A score, another 70% data would be used to calculate the Leaderboard B score.\n",
|
| 45 |
+
" - The testing set is used to calculate the Leaderboard A score and the Leaderboard B score and is not directly accessible during the competition. Contestants can access the result on Leaderboard A , but cannot access the result on Leaderboard B. The final score would be counted using Leaderboard B only. The subsets for Leaderboard A and Leaderboard B are completely distinct.\n",
|
| 46 |
+
" \n",
|
| 47 |
+
"\n",
|
| 48 |
+
"## 3. Task\n",
|
| 49 |
+
"You are given a dataset of animal photographs and a CLIP model that can do a zero-shot classification of animal species. To conserve bandwidth, you need to retain at most **6.25%** of the pixels of each image, while keeping classification accuracy as high as possible.\n",
|
| 50 |
+
"\n",
|
| 51 |
+
"More specifically, your task is to return **one rectangle mask** for each image, which contain a single rectangular area indicating the area to keep. Each mask is defined by two coordinate tuples: one for the top-left corner and one for the bottom-right corner of the rectangle. Below is a visualization of what the image would look like after applying a rectangular mask using the process from the baseline:\n",
|
| 52 |
+
"\n",
|
| 53 |
+
"<img src=\"./figs/Pixel Fig 2.png\" width=\"400\">\n",
|
| 54 |
+
"\n",
|
| 55 |
+
"**Coordinate Convention:**\n",
|
| 56 |
+
"- Top-left corner coordinates are **inclusive** (the pixel at this position is included in the mask) \n",
|
| 57 |
+
"- Bottom-right corner coordinates are **exclusive** (the pixel at this position is NOT included in the mask)\n",
|
| 58 |
+
"\n",
|
| 59 |
+
"For example, if you specify coordinates `((10, 20), (15, 25))`, the mask will cover pixels from row 10 to 14 (inclusive) and column 20 to 24 (inclusive), for a total area of 25 pixels.\n",
|
| 60 |
+
"\n",
|
| 61 |
+
"As an illustration, if an image size is 3x3 and we wanted to keep only the top-right pixel using coordinates `((0, 2), (1, 3))`, the resulting binary mask would be:\n",
|
| 62 |
+
"\n",
|
| 63 |
+
"```\n",
|
| 64 |
+
"[[0, 0, 1],\n",
|
| 65 |
+
" [0, 0, 0],\n",
|
| 66 |
+
" [0, 0, 0]]\n",
|
| 67 |
+
"```\n",
|
| 68 |
+
"\n",
|
| 69 |
+
"Below is a summary of the requirements for your masks:\n",
|
| 70 |
+
"\n",
|
| 71 |
+
"- Return one rectangle mask defined by coordinate tuples: `((top, left), (bottom, right))`\n",
|
| 72 |
+
"- Top-left corner coordinates are inclusive, bottom-right corner coordinates are exclusive\n",
|
| 73 |
+
"- The rectangle mask should cover at most *6.25%* of the original pixels (minimum 93.75% reduction of the original pixels)\n",
|
| 74 |
+
"- All images are of size (224, 224), so coordinate values should be within the range [0, 224]\n",
|
| 75 |
+
"\n",
|
| 76 |
+
"\n",
|
| 77 |
+
"Images would be masked using the mask you created, outside the masked rectangle, all pixels outside the masked rectangle will be replaced with RGB(0, 0, 0) (black) values. The masked image will be then passed through the CLIP model during evaluation, and your task is to keep the classification accuracy of the CLIP model on these masked images as high as possible. **An additional `other` class would be added into the classes for classification** to ensure that your masked image retains actual useful information for the researchers back at Starr Park Headquarters. So for example, if your image doesn't contain any animal information, the model will predict the `others` class instead of predicting a random animal and having a chance of getting it correct.\n",
|
| 78 |
+
"\n",
|
| 79 |
+
"You need to work only with the provided CLIP model and dataset. As a reminder, CLIP generates representations for both text and image, and it can compute a similarity score between them. So if you have ten animal classes, CLIP can look at the provided image and decide which text (class) is closest to the image. \n",
|
| 80 |
+
"\n",
|
| 81 |
+
"To ensure that your solution would handle the traffic of images for the research center, your code should run in **UNDER 8 MINUTES for the 698 images in the test dataset**. It is recommended that you test your solution on the training set first, which contain 700 images, to understand how much time your solution takes (testing set would take slightly longer due to dataset loading).\n",
|
| 82 |
+
"\n",
|
| 83 |
+
"## 4. Submission\n",
|
| 84 |
+
"\n",
|
| 85 |
+
"Contestants need to submit a notebook file named `submission.ipynb`. The file should output a `.jsonl` file titled `submission.jsonl`, which contains all the generated masks for the dataset split. Each mask in the `submission.jsonl` file should be stored as a tuple of two coordinate tuples: `((top, left), (bottom, right))`, where the top-left corner is inclusive and the bottom-right corner is exclusive.\n",
|
| 86 |
+
"\n",
|
| 87 |
+
"Contestants don't need to separate test sets into Leaderboard A and Leaderboard B, the evaluation machine will read `submission.jsonl` and automatically calculate the scores for Leaderboard A and Leaderboard B based on the prediction results and true labels. \n",
|
| 88 |
+
"\n",
|
| 89 |
+
"The submission files must strictly follow the above format and naming; otherwise, the system will not be able to read them correctly. \n",
|
| 90 |
+
"\n",
|
| 91 |
+
"## 5. Score\n",
|
| 92 |
+
"\n",
|
| 93 |
+
"The evaluation metric will be **classification accuracy**, defined as the proportion of correctly predicted samples over the total number of evaluated samples.\n",
|
| 94 |
+
"\n",
|
| 95 |
+
"Your score is the zero-shot classification accuracy of CLIP on the masked test images. **If a submitted mask for an image is invalid (wrong shape, more than 6.25% pixels retained, etc.), that image is counted as incorrect. A sample script is provided to compute the training split score.**\n",
|
| 96 |
+
"\n",
|
| 97 |
+
"\n",
|
| 98 |
+
"## 6. Baseline and Training Set\n",
|
| 99 |
+
"\n",
|
| 100 |
+
"- Below you can find the baseline solution.\n",
|
| 101 |
+
"- The dataset is in `training_set` folder.\n",
|
| 102 |
+
"- The highest score by the Scientific Committee for this task is 0.83 in Leader Board B, this score is used for score unification.\n",
|
| 103 |
+
"- The baseline score by the Scientific Committee for this task is 0.19 in Leader Board B, this score is used for score unification."
|
| 104 |
+
]
|
| 105 |
+
},
|
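The markdown cell above fixes the mask convention: coordinates are given as ((top, left), (bottom, right)), the top-left corner is inclusive, the bottom-right corner is exclusive, and at most 6.25% of a 224x224 image may be kept. Below is a minimal standalone sketch of that convention (editorial illustration only, not part of the committed notebook; the helper name `rect_to_mask` is chosen here for illustration):

```python
import numpy as np

HEIGHT, WIDTH = 224, 224
RETAIN_RATIO = 0.0625  # at most 6.25% of the pixels may be kept

def rect_to_mask(coordinates, height=HEIGHT, width=WIDTH):
    """Turn ((top, left), (bottom, right)) into a binary mask.

    The top-left corner is inclusive and the bottom-right corner is
    exclusive, matching the convention in the task statement.
    """
    (top, left), (bottom, right) = coordinates
    mask = np.zeros((height, width), dtype=np.uint8)
    mask[top:bottom, left:right] = 1
    return mask

# Example from the statement: ((10, 20), (15, 25)) keeps rows 10-14 and
# columns 20-24, i.e. a 5x5 block of 25 pixels.
mask = rect_to_mask(((10, 20), (15, 25)))
assert mask.sum() == 25

# The retained area must stay within the 6.25% budget (3136 pixels at 224x224).
assert mask.sum() <= RETAIN_RATIO * HEIGHT * WIDTH
```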
| 106 |
+
{
|
| 107 |
+
"cell_type": "code",
|
| 108 |
+
"execution_count": null,
|
| 109 |
+
"id": "9d1ad03b-ba1e-4c24-b866-fe6a138b58c9",
|
| 110 |
+
"metadata": {},
|
| 111 |
+
"outputs": [],
|
| 112 |
+
"source": [
|
| 113 |
+
"import random\n",
|
| 114 |
+
"import numpy as np\n",
|
| 115 |
+
"import torch\n",
|
| 116 |
+
"\n",
|
| 117 |
+
"seed = 42\n",
|
| 118 |
+
"\n",
|
| 119 |
+
"random.seed(seed) # Python built-in random\n",
|
| 120 |
+
"np.random.seed(seed) # NumPy\n",
|
| 121 |
+
"torch.manual_seed(seed) # PyTorch (CPU)\n",
|
| 122 |
+
"torch.cuda.manual_seed(seed) # PyTorch (single GPU)\n",
|
| 123 |
+
"torch.cuda.manual_seed_all(seed) # PyTorch (all GPUs)\n",
|
| 124 |
+
"\n",
|
| 125 |
+
"# Ensures deterministic behavior\n",
|
| 126 |
+
"torch.backends.cudnn.deterministic = True\n",
|
| 127 |
+
"torch.backends.cudnn.benchmark = False"
|
| 128 |
+
]
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"cell_type": "markdown",
|
| 132 |
+
"id": "af37a8ed",
|
| 133 |
+
"metadata": {},
|
| 134 |
+
"source": [
|
| 135 |
+
"### Dependencies and Config Variables"
|
| 136 |
+
]
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"cell_type": "code",
|
| 140 |
+
"execution_count": null,
|
| 141 |
+
"id": "23b68a41",
|
| 142 |
+
"metadata": {},
|
| 143 |
+
"outputs": [],
|
| 144 |
+
"source": [
|
| 145 |
+
"import os\n",
|
| 146 |
+
"import matplotlib.pyplot as plt\n",
|
| 147 |
+
"import numpy as np\n",
|
| 148 |
+
"from collections import Counter\n",
|
| 149 |
+
"from PIL import Image\n",
|
| 150 |
+
"from tqdm import tqdm\n",
|
| 151 |
+
"import glob\n",
|
| 152 |
+
"import json\n",
|
| 153 |
+
"import math\n",
|
| 154 |
+
"import torch\n",
|
| 155 |
+
"import matplotlib.pyplot as plt\n",
|
| 156 |
+
"from datasets import load_dataset, load_from_disk\n",
|
| 157 |
+
"from transformers import CLIPProcessor, CLIPModel\n",
|
| 158 |
+
"from PIL import Image\n",
|
| 159 |
+
"from tqdm.auto import tqdm \n",
|
| 160 |
+
"\n",
|
| 161 |
+
"TRAIN_PATH = \"./training_set/\"\n",
|
| 162 |
+
"# The training set is deployed automatically in the testing machine. \n",
|
| 163 |
+
"# You notebook can access the TRAIN_PATH even if you do not mount it along with notebook.\n",
|
| 164 |
+
"\n",
|
| 165 |
+
"MODEL_PATH = \"./clip-vit-large-patch14\"\n",
|
| 166 |
+
"# The clip model is deployed automatically in the testing machine. \n",
|
| 167 |
+
"# You notebook can access the MODEL_PATH even if you do not mount it along with notebook.\n",
|
| 168 |
+
"\n",
|
| 169 |
+
"DATASET_PATH = TRAIN_PATH + \"train_dataset\"\n",
|
| 170 |
+
"SPLIT = \"train\"\n",
|
| 171 |
+
"DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
|
| 172 |
+
"BACKGROUND_CLASS = \"other\" # Class used to catch masked images that have no useful information, preventing completely off masks from \"guessing\" the answer from the 10 classes\n",
|
| 173 |
+
"\n",
|
| 174 |
+
"# Image and Masking Configuration\n",
|
| 175 |
+
"HEIGHT = 224\n",
|
| 176 |
+
"WIDTH = 224\n",
|
| 177 |
+
"RETAIN_RATIO = 0.0625 # Retain 6.25% of pixels\n",
|
| 178 |
+
"MEAN_COLOR = (0, 0, 0) # RGB mean values for masked out areas\n"
|
| 179 |
+
]
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"cell_type": "markdown",
|
| 183 |
+
"id": "b888c040",
|
| 184 |
+
"metadata": {},
|
| 185 |
+
"source": [
|
| 186 |
+
"### Dataset loading\n",
|
| 187 |
+
"\n",
|
| 188 |
+
"Let's first load the dataset in and see what's in it:\n"
|
| 189 |
+
]
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"cell_type": "code",
|
| 193 |
+
"execution_count": null,
|
| 194 |
+
"id": "857ab7ff",
|
| 195 |
+
"metadata": {},
|
| 196 |
+
"outputs": [],
|
| 197 |
+
"source": [
|
| 198 |
+
"# Load the dataset\n",
|
| 199 |
+
"print(\"Loading dataset...\")\n",
|
| 200 |
+
"dataset_whole = load_from_disk(DATASET_PATH)\n",
|
| 201 |
+
"dataset = dataset_whole[SPLIT]\n",
|
| 202 |
+
"\n",
|
| 203 |
+
"# Print first item to check available fields\n",
|
| 204 |
+
"print(\"\\nFirst item keys:\")\n",
|
| 205 |
+
"print(dataset_whole[SPLIT][0].keys())\n",
|
| 206 |
+
"\n",
|
| 207 |
+
"# Show basic dataset statistics without converting fields yet\n",
|
| 208 |
+
"print(f\"\\nDataset loaded successfully!\")\n",
|
| 209 |
+
"print(f\"Total samples: {len(dataset)}\")\n",
|
| 210 |
+
"\n",
|
| 211 |
+
"print(f\"\\nSample item structure:\")\n",
|
| 212 |
+
"sample_item = dataset[0]\n",
|
| 213 |
+
"print(f\" Keys: {list(sample_item.keys())}\")\n",
|
| 214 |
+
"print(f\" Image type: {type(sample_item['image'])}\")\n",
|
| 215 |
+
"print(f\" Image size: {sample_item['image'].size}\")\n",
|
| 216 |
+
"print(f\" Index: {sample_item['idx']}\")\n",
|
| 217 |
+
"\n"
|
| 218 |
+
]
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"cell_type": "code",
|
| 222 |
+
"execution_count": null,
|
| 223 |
+
"id": "37c2dbfa",
|
| 224 |
+
"metadata": {},
|
| 225 |
+
"outputs": [],
|
| 226 |
+
"source": [
|
| 227 |
+
"# Visualize first 10 samples\n",
|
| 228 |
+
"fig, axes = plt.subplots(2, 5, figsize=(15, 8))\n",
|
| 229 |
+
"axes = axes.flatten()\n",
|
| 230 |
+
"\n",
|
| 231 |
+
"print(\"Visualizing first 10 samples...\")\n",
|
| 232 |
+
"\n",
|
| 233 |
+
"for i in range(10):\n",
|
| 234 |
+
" sample = dataset[i]\n",
|
| 235 |
+
" image = sample['image']\n",
|
| 236 |
+
" label = sample['name']\n",
|
| 237 |
+
" \n",
|
| 238 |
+
" axes[i].imshow(image)\n",
|
| 239 |
+
" axes[i].set_title(f\"{label}\\n\", fontsize=12)\n",
|
| 240 |
+
" axes[i].axis('off')\n",
|
| 241 |
+
"\n",
|
| 242 |
+
"plt.tight_layout()\n",
|
| 243 |
+
"plt.suptitle('First 10 Samples from Dataset', fontsize=16, y=1.02)\n",
|
| 244 |
+
"plt.show()\n"
|
| 245 |
+
]
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"cell_type": "markdown",
|
| 249 |
+
"id": "24cee96a",
|
| 250 |
+
"metadata": {},
|
| 251 |
+
"source": [
|
| 252 |
+
"### Model\n",
|
| 253 |
+
"\n",
|
| 254 |
+
"Now let's load the model and see some predictions:"
|
| 255 |
+
]
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"cell_type": "code",
|
| 259 |
+
"execution_count": null,
|
| 260 |
+
"id": "fd92b376",
|
| 261 |
+
"metadata": {},
|
| 262 |
+
"outputs": [],
|
| 263 |
+
"source": [
|
| 264 |
+
"print(f\"Loading CLIP model and processor: {MODEL_PATH}...\")\n",
|
| 265 |
+
"model = CLIPModel.from_pretrained(MODEL_PATH).to(DEVICE)\n",
|
| 266 |
+
"processor = CLIPProcessor.from_pretrained(MODEL_PATH)\n",
|
| 267 |
+
"print(\"Model and processor loaded successfully.\")"
|
| 268 |
+
]
|
| 269 |
+
},
|
| 270 |
+
{
|
| 271 |
+
"cell_type": "code",
|
| 272 |
+
"execution_count": null,
|
| 273 |
+
"id": "a2e25924",
|
| 274 |
+
"metadata": {},
|
| 275 |
+
"outputs": [],
|
| 276 |
+
"source": [
|
| 277 |
+
"image = dataset[0]['image']\n",
|
| 278 |
+
"# Visualize the image with its true label\n",
|
| 279 |
+
"plt.figure(figsize=(8, 6))\n",
|
| 280 |
+
"plt.imshow(image)\n",
|
| 281 |
+
"plt.title(f\"Sample Image\\nTrue Label: {dataset[0]['name']}\", fontsize=14)\n",
|
| 282 |
+
"plt.axis('off')\n",
|
| 283 |
+
"plt.show()\n",
|
| 284 |
+
"\n",
|
| 285 |
+
"\n",
|
| 286 |
+
"labels = sorted(list(set(dataset['name']))) + [BACKGROUND_CLASS]\n",
|
| 287 |
+
"text_inputs = processor(text=labels, return_tensors=\"pt\", padding=True).to(DEVICE)\n",
|
| 288 |
+
"image_processed = processor(images=image, return_tensors=\"pt\").to(DEVICE)\n",
|
| 289 |
+
"pixel_values = image_processed['pixel_values']\n",
|
| 290 |
+
"outputs_full = model(pixel_values=pixel_values, **text_inputs)\n",
|
| 291 |
+
"logits_full = outputs_full.logits_per_image # Shape: (1, num_styles)\n",
|
| 292 |
+
"predicted_index_full = logits_full.argmax(dim=-1).item()\n",
|
| 293 |
+
"\n",
|
| 294 |
+
"print(f\"Predicted label: {labels[predicted_index_full]}\")\n"
|
| 295 |
+
]
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"cell_type": "markdown",
|
| 299 |
+
"id": "54467f0c",
|
| 300 |
+
"metadata": {},
|
| 301 |
+
"source": [
|
| 302 |
+
"### Baseline: A trivial masking method\n",
|
| 303 |
+
"\n",
|
| 304 |
+
"We will now be implementing a trivial masking solution, one that randomly masks out 90% of the pixels."
|
| 305 |
+
]
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"cell_type": "code",
|
| 309 |
+
"execution_count": null,
|
| 310 |
+
"id": "0ee24b90",
|
| 311 |
+
"metadata": {},
|
| 312 |
+
"outputs": [],
|
| 313 |
+
"source": [
|
| 314 |
+
"def generate_center_crop_coordinates(image):\n",
|
| 315 |
+
" \"\"\"\n",
|
| 316 |
+
" Generate coordinates for a center crop mask.\n",
|
| 317 |
+
" \n",
|
| 318 |
+
" Returns:\n",
|
| 319 |
+
" tuple: ((top, left), (bottom, right)) coordinates for the crop\n",
|
| 320 |
+
" \"\"\"\n",
|
| 321 |
+
" H, W = image.size\n",
|
| 322 |
+
" total_px = H * W\n",
|
| 323 |
+
" k = int(total_px * RETAIN_RATIO)\n",
|
| 324 |
+
" \n",
|
| 325 |
+
" # Calculate side length of the square crop\n",
|
| 326 |
+
" side_length = int(np.sqrt(k))\n",
|
| 327 |
+
" \n",
|
| 328 |
+
" # Calculate center coordinates\n",
|
| 329 |
+
" center_h, center_w = H // 2, W // 2\n",
|
| 330 |
+
" \n",
|
| 331 |
+
" # Calculate crop boundaries\n",
|
| 332 |
+
" half_side = side_length // 2\n",
|
| 333 |
+
" top = max(0, center_h - half_side)\n",
|
| 334 |
+
" left = max(0, center_w - half_side)\n",
|
| 335 |
+
" bottom = min(H, top + side_length)\n",
|
| 336 |
+
" right = min(W, left + side_length)\n",
|
| 337 |
+
" \n",
|
| 338 |
+
" return ((top, left), (bottom, right))\n",
|
| 339 |
+
"\n",
|
| 340 |
+
"def generate_mask_from_coordinates(image, coordinates):\n",
|
| 341 |
+
" \"\"\"\n",
|
| 342 |
+
" Generate a binary mask from crop coordinates.\n",
|
| 343 |
+
" \n",
|
| 344 |
+
" Parameters:\n",
|
| 345 |
+
" image: PIL Image\n",
|
| 346 |
+
" coordinates: tuple of ((top, left), (bottom, right))\n",
|
| 347 |
+
" \n",
|
| 348 |
+
" Returns:\n",
|
| 349 |
+
" numpy array: Binary mask with 1s in the crop area\n",
|
| 350 |
+
" \"\"\"\n",
|
| 351 |
+
" H, W = image.size\n",
|
| 352 |
+
" mask = np.zeros((H, W), dtype=np.int8)\n",
|
| 353 |
+
" \n",
|
| 354 |
+
" (top, left), (bottom, right) = coordinates\n",
|
| 355 |
+
" mask[top:bottom, left:right] = 1\n",
|
| 356 |
+
" \n",
|
| 357 |
+
" return mask"
|
| 358 |
+
]
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"cell_type": "code",
|
| 362 |
+
"execution_count": null,
|
| 363 |
+
"id": "e87f8f85",
|
| 364 |
+
"metadata": {},
|
| 365 |
+
"outputs": [],
|
| 366 |
+
"source": [
|
| 367 |
+
"def apply_mask_with_mean(image, mask, mean_rgb=MEAN_COLOR):\n",
|
| 368 |
+
" \"\"\"\n",
|
| 369 |
+
" Apply arbitrary binary mask to image, replacing masked areas with mean values\n",
|
| 370 |
+
"\n",
|
| 371 |
+
" Parameters:\n",
|
| 372 |
+
" - image: PIL Image (224x224)\n",
|
| 373 |
+
" - mask: Binary numpy array or PIL Image (224x224) where 0 is the area to drop and 1 is the area to keep\n",
|
| 374 |
+
" - mean_rgb: RGB mean values to use (default: from config)\n",
|
| 375 |
+
"\n",
|
| 376 |
+
" Returns: Modified PIL Image\n",
|
| 377 |
+
" \"\"\"\n",
|
| 378 |
+
" # Convert images to numpy arrays\n",
|
| 379 |
+
" img_array = np.array(image).copy()\n",
|
| 380 |
+
"\n",
|
| 381 |
+
" # Ensure mask is numpy array\n",
|
| 382 |
+
" if isinstance(mask, Image.Image):\n",
|
| 383 |
+
" mask_array = np.array(mask.convert('L')) > 127 # Convert to binary\n",
|
| 384 |
+
" else:\n",
|
| 385 |
+
" mask_array = mask > 0\n",
|
| 386 |
+
"\n",
|
| 387 |
+
" # Reshape mask for broadcasting with RGB\n",
|
| 388 |
+
" mask_3d = np.stack([mask_array] * 3, axis=2)\n",
|
| 389 |
+
"\n",
|
| 390 |
+
" # Convert mean values to 0-255 range\n",
|
| 391 |
+
" mean_values = np.array([int(m * 255) for m in mean_rgb])\n",
|
| 392 |
+
" # Apply mask - replace areas where mask is 0 (drop) with mean values, keep areas where mask is 1\n",
|
| 393 |
+
" img_array = np.where(mask_3d, img_array, mean_values.reshape(1, 1, 3))\n",
|
| 394 |
+
"\n",
|
| 395 |
+
" return Image.fromarray(img_array.astype(np.uint8))"
|
| 396 |
+
]
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"cell_type": "code",
|
| 400 |
+
"execution_count": null,
|
| 401 |
+
"id": "a84ed28b",
|
| 402 |
+
"metadata": {},
|
| 403 |
+
"outputs": [],
|
| 404 |
+
"source": [
|
| 405 |
+
"image = dataset[0]['image']\n",
|
| 406 |
+
"# Visualize the image with its true label\n",
|
| 407 |
+
"plt.figure(figsize=(8, 6))\n",
|
| 408 |
+
"plt.imshow(image)\n",
|
| 409 |
+
"plt.title(f\"Sample Image\\nTrue Label: {dataset[0]['name']}\", fontsize=14)\n",
|
| 410 |
+
"plt.axis('off')\n",
|
| 411 |
+
"plt.show()\n",
|
| 412 |
+
"\n",
|
| 413 |
+
"\n",
|
| 414 |
+
"labels = sorted(list(set(dataset['name']))) + [BACKGROUND_CLASS]\n",
|
| 415 |
+
"text_inputs = processor(text=labels, return_tensors=\"pt\", padding=True).to(DEVICE)\n",
|
| 416 |
+
"\n",
|
| 417 |
+
"mask = generate_mask_from_coordinates(image, generate_center_crop_coordinates(image))\n",
|
| 418 |
+
"image_masked = apply_mask_with_mean(image, mask)\n",
|
| 419 |
+
"\n",
|
| 420 |
+
"plt.figure(figsize=(8, 6))\n",
|
| 421 |
+
"plt.imshow(image_masked)\n",
|
| 422 |
+
"plt.title(f\"Masked Image\\nTrue Label: {dataset[0]['name']}\", fontsize=14)\n",
|
| 423 |
+
"plt.axis('off')\n",
|
| 424 |
+
"plt.show()\n",
|
| 425 |
+
"\n",
|
| 426 |
+
"image_processed = processor(images=image_masked, return_tensors=\"pt\").to(DEVICE)\n",
|
| 427 |
+
"pixel_values = image_processed['pixel_values']\n",
|
| 428 |
+
"outputs_full = model(pixel_values=pixel_values, **text_inputs)\n",
|
| 429 |
+
"logits_full = outputs_full.logits_per_image # Shape: (1, num_styles)\n",
|
| 430 |
+
"predicted_index_full = logits_full.argmax(dim=-1).item()\n",
|
| 431 |
+
"\n",
|
| 432 |
+
"print(f\"Predicted label: {labels[predicted_index_full]}\")"
|
| 433 |
+
]
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"cell_type": "markdown",
|
| 437 |
+
"id": "d6987536",
|
| 438 |
+
"metadata": {},
|
| 439 |
+
"source": [
|
| 440 |
+
"### Exporting the masks\n"
|
| 441 |
+
]
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"cell_type": "code",
|
| 445 |
+
"execution_count": null,
|
| 446 |
+
"id": "8aa65a80",
|
| 447 |
+
"metadata": {},
|
| 448 |
+
"outputs": [],
|
| 449 |
+
"source": [
|
| 450 |
+
"#DATA_PATH is the secret environment variable to point the address of the validation set and test set on the testing machine. \n",
|
| 451 |
+
"#Contestants cannot access this address locally.\n",
|
| 452 |
+
"import os\n",
|
| 453 |
+
"if os.environ.get('DATA_PATH'):\n",
|
| 454 |
+
" TEST_PATH = os.environ.get(\"DATA_PATH\") + \"/\" \n",
|
| 455 |
+
"else:\n",
|
| 456 |
+
" TEST_PATH = \"\" # Fallback for local testing\n",
|
| 457 |
+
"\n",
|
| 458 |
+
"dataset = load_from_disk(TEST_PATH + \"test_dataset\")\n",
|
| 459 |
+
"split = \"test\"\n",
|
| 460 |
+
"dataset = dataset[split]"
|
| 461 |
+
]
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"cell_type": "code",
|
| 465 |
+
"execution_count": null,
|
| 466 |
+
"id": "5cae01f0",
|
| 467 |
+
"metadata": {},
|
| 468 |
+
"outputs": [],
|
| 469 |
+
"source": [
|
| 470 |
+
"## Exporting results and validating on full dataset\n",
|
| 471 |
+
"RETAIN_RATIO = 0.0625\n",
|
| 472 |
+
"\n",
|
| 473 |
+
"masks = {}\n",
|
| 474 |
+
"for item in tqdm(dataset):\n",
|
| 475 |
+
" image = item['image']\n",
|
| 476 |
+
"\n",
|
| 477 |
+
" ## you should replace mask generation with your function\n",
|
| 478 |
+
" coordinates = generate_center_crop_coordinates(image)\n",
|
| 479 |
+
" \n",
|
| 480 |
+
" # don't need to change below, it's just saving to file\n",
|
| 481 |
+
" idx = item['idx']\n",
|
| 482 |
+
" # For validation, we still need to generate the full mask\n",
|
| 483 |
+
" mask = generate_mask_from_coordinates(image, coordinates)\n",
|
| 484 |
+
" assert mask.shape == (224, 224), \"Mask should be 224x224\"\n",
|
| 485 |
+
" assert mask.sum() <= RETAIN_RATIO * 224 * 224, \"You should leave only 6.25% of pixels\"\n",
|
| 486 |
+
" \n",
|
| 487 |
+
" # Save only the coordinates (topleft, bottomright) instead of the full mask\n",
|
| 488 |
+
" masks[idx] = coordinates\n",
|
| 489 |
+
"\n",
|
| 490 |
+
"# Save as JSONL (one JSON object per line) - much safer than pickle\n",
|
| 491 |
+
"with open('submission.jsonl', 'w') as f:\n",
|
| 492 |
+
" for idx, coordinates in masks.items():\n",
|
| 493 |
+
" json.dump({\"idx\": idx, \"coordinates\": coordinates}, f)\n",
|
| 494 |
+
" f.write('\\n')\n",
|
| 495 |
+
"\n",
|
| 496 |
+
"print(\"Masks saved to masks.jsonl\")"
|
| 497 |
+
]
|
| 498 |
+
},
|
| 499 |
+
{
|
| 500 |
+
"cell_type": "markdown",
|
| 501 |
+
"id": "ed60e044",
|
| 502 |
+
"metadata": {},
|
| 503 |
+
"source": [
|
| 504 |
+
"### Validation"
|
| 505 |
+
]
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"cell_type": "code",
|
| 509 |
+
"execution_count": null,
|
| 510 |
+
"id": "3d1ef680",
|
| 511 |
+
"metadata": {},
|
| 512 |
+
"outputs": [],
|
| 513 |
+
"source": [
|
| 514 |
+
"# # Validation code for generated masks\n",
|
| 515 |
+
"\n",
|
| 516 |
+
"# def check_validity(coordinates):\n",
|
| 517 |
+
"# \"\"\"\n",
|
| 518 |
+
"# Check if coordinates are valid according to the requirements.\n",
|
| 519 |
+
"# Returns True if valid, False otherwise.\n",
|
| 520 |
+
"# \"\"\"\n",
|
| 521 |
+
"# try:\n",
|
| 522 |
+
"# # Check if coordinates is a tuple of two tuples\n",
|
| 523 |
+
"# if not isinstance(coordinates, tuple) or len(coordinates) != 2:\n",
|
| 524 |
+
"# print(f\"Coordinates is not a tuple of two tuples\")\n",
|
| 525 |
+
"# return False\n",
|
| 526 |
+
" \n",
|
| 527 |
+
"# (top, left), (bottom, right) = coordinates\n",
|
| 528 |
+
" \n",
|
| 529 |
+
"# # Check if all coordinates are integers\n",
|
| 530 |
+
"# if not all(isinstance(coord, (int, np.integer)) for coord in [top, left, bottom, right]):\n",
|
| 531 |
+
"# print(f\"Coordinates are not integers\")\n",
|
| 532 |
+
"# return False\n",
|
| 533 |
+
" \n",
|
| 534 |
+
"# # Check if coordinates are within image bounds\n",
|
| 535 |
+
"# # For slicing mask[top:bottom, left:right], valid ranges are:\n",
|
| 536 |
+
"# # top, left: [0, 223] (inclusive)\n",
|
| 537 |
+
"# # bottom, right: [1, 224] (inclusive) since we need top < bottom and left < right\n",
|
| 538 |
+
"# if not (0 <= top < 224 and 0 <= left < 224 and 1 <= bottom <= 224 and 1 <= right <= 224):\n",
|
| 539 |
+
"# print(f\"Coordinates are not within image bounds\")\n",
|
| 540 |
+
"# return False\n",
|
| 541 |
+
" \n",
|
| 542 |
+
"# # Check if top-left is actually top-left of bottom-right (proper ordering)\n",
|
| 543 |
+
"# if not (top < bottom and left < right):\n",
|
| 544 |
+
"# print(f\"Top-left is not actually top-left of bottom-right\")\n",
|
| 545 |
+
"# return False\n",
|
| 546 |
+
" \n",
|
| 547 |
+
"# # Check that the crop area doesn't exceed RETAIN_RATIO\n",
|
| 548 |
+
"# crop_area = (bottom - top) * (right - left)\n",
|
| 549 |
+
"# max_area = RETAIN_RATIO * 224 * 224\n",
|
| 550 |
+
"# if crop_area > max_area:\n",
|
| 551 |
+
"# print(f\"Crop area {crop_area} exceeds max area {max_area}\")\n",
|
| 552 |
+
"# return False\n",
|
| 553 |
+
" \n",
|
| 554 |
+
"# return True\n",
|
| 555 |
+
"# except Exception:\n",
|
| 556 |
+
"# return False\n",
|
| 557 |
+
"\n",
|
| 558 |
+
"\n",
|
| 559 |
+
"\n",
|
| 560 |
+
"\n",
|
| 561 |
+
"# def validate_masks(masks):\n",
|
| 562 |
+
"# \"\"\"Simple validation of generated masks on the dataset\"\"\"\n",
|
| 563 |
+
"# correct = 0\n",
|
| 564 |
+
"# total = 0\n",
|
| 565 |
+
"\n",
|
| 566 |
+
"# labels = sorted(list(set(dataset['name']))) + ['other']\n",
|
| 567 |
+
"# text_inputs = processor(text=labels, return_tensors=\"pt\", padding=True).to(DEVICE)\n",
|
| 568 |
+
"\n",
|
| 569 |
+
"# with torch.no_grad():\n",
|
| 570 |
+
"# for item in tqdm(dataset, desc=\"Validating masks\"):\n",
|
| 571 |
+
"# idx = item['idx']\n",
|
| 572 |
+
"# if idx not in masks:\n",
|
| 573 |
+
"# continue\n",
|
| 574 |
+
"\n",
|
| 575 |
+
"# if not check_validity(masks[idx]):\n",
|
| 576 |
+
"# continue\n",
|
| 577 |
+
" \n",
|
| 578 |
+
"# mask_coordinates = masks[idx]\n",
|
| 579 |
+
"# image = item['image']\n",
|
| 580 |
+
"# true_label = item['name']\n",
|
| 581 |
+
" \n",
|
| 582 |
+
"# # Apply mask to image\n",
|
| 583 |
+
"# if image.mode != \"RGB\":\n",
|
| 584 |
+
"# image = image.convert(\"RGB\")\n",
|
| 585 |
+
" \n",
|
| 586 |
+
"# mask = generate_mask_from_coordinates(image, mask_coordinates)\n",
|
| 587 |
+
"\n",
|
| 588 |
+
"# # Apply mask with mean color replacement\n",
|
| 589 |
+
"# img_array = np.array(image).copy()\n",
|
| 590 |
+
"# mask_array = mask > 0\n",
|
| 591 |
+
"# mask_3d = np.stack([mask_array] * 3, axis=2)\n",
|
| 592 |
+
"# mean_values = np.array([0, 0, 0]) # Black mean color\n",
|
| 593 |
+
"# img_array = np.where(mask_3d, img_array, mean_values.reshape(1, 1, 3))\n",
|
| 594 |
+
"# masked_image = Image.fromarray(img_array.astype(np.uint8))\n",
|
| 595 |
+
" \n",
|
| 596 |
+
"# # Get prediction on masked image\n",
|
| 597 |
+
"# image_processed = processor(images=masked_image, return_tensors=\"pt\").to(DEVICE)\n",
|
| 598 |
+
"# pixel_values = image_processed['pixel_values']\n",
|
| 599 |
+
"# outputs = model(pixel_values=pixel_values, **text_inputs)\n",
|
| 600 |
+
"# logits = outputs.logits_per_image\n",
|
| 601 |
+
"# predicted_idx = logits.argmax(dim=-1).item()\n",
|
| 602 |
+
"# predicted_label = labels[predicted_idx]\n",
|
| 603 |
+
" \n",
|
| 604 |
+
"# # Check if prediction is correct\n",
|
| 605 |
+
"# if predicted_label == true_label:\n",
|
| 606 |
+
"# correct += 1\n",
|
| 607 |
+
"# total += 1\n",
|
| 608 |
+
" \n",
|
| 609 |
+
"# accuracy = correct / total if total > 0 else 0\n",
|
| 610 |
+
"# print(f\"Validation Results:\")\n",
|
| 611 |
+
"# print(f\"Total samples: {total}\")\n",
|
| 612 |
+
"# print(f\"Correct predictions: {correct}\")\n",
|
| 613 |
+
"# print(f\"Accuracy: {accuracy:.4f} ({accuracy*100:.2f}%)\")\n",
|
| 614 |
+
" \n",
|
| 615 |
+
"# return accuracy\n",
|
| 616 |
+
"\n",
|
| 617 |
+
"# # Run validation\n",
|
| 618 |
+
"# accuracy = validate_masks(masks)\n"
|
| 619 |
+
]
|
| 620 |
+
}
|
| 621 |
+
],
|
| 622 |
+
"metadata": {
|
| 623 |
+
"kernelspec": {
|
| 624 |
+
"display_name": "Python 3 (ipykernel)",
|
| 625 |
+
"language": "python",
|
| 626 |
+
"name": "python3"
|
| 627 |
+
},
|
| 628 |
+
"language_info": {
|
| 629 |
+
"codemirror_mode": {
|
| 630 |
+
"name": "ipython",
|
| 631 |
+
"version": 3
|
| 632 |
+
},
|
| 633 |
+
"file_extension": ".py",
|
| 634 |
+
"mimetype": "text/x-python",
|
| 635 |
+
"name": "python",
|
| 636 |
+
"nbconvert_exporter": "python",
|
| 637 |
+
"pygments_lexer": "ipython3",
|
| 638 |
+
"version": "3.12.9"
|
| 639 |
+
}
|
| 640 |
+
},
|
| 641 |
+
"nbformat": 4,
|
| 642 |
+
"nbformat_minor": 5
|
| 643 |
+
}
|
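Before the reference solution below, a hedged sketch (separate from either committed notebook, assuming a `submission.jsonl` already exists in the working directory) of how the exported file could be re-read and sanity-checked against the stated rules; the helper name `is_valid_rect` is illustrative:

```python
import json

HEIGHT = WIDTH = 224
RETAIN_RATIO = 0.0625

def is_valid_rect(coordinates):
    """Check one ((top, left), (bottom, right)) entry against the stated constraints."""
    (top, left), (bottom, right) = coordinates
    if not (0 <= top < bottom <= HEIGHT and 0 <= left < right <= WIDTH):
        return False
    # The rectangle may keep at most 6.25% of the 224x224 pixels.
    return (bottom - top) * (right - left) <= RETAIN_RATIO * HEIGHT * WIDTH

# Pixel.ipynb writes one JSON object per line: {"idx": ..., "coordinates": ...}.
with open("submission.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # JSON round-tripping turns the coordinate tuples into nested lists.
        (top, left), (bottom, right) = record["coordinates"]
        if not is_valid_rect(((top, left), (bottom, right))):
            print(f"Invalid mask for idx {record['idx']}")
```

Per the scoring rules above, any invalid mask simply counts the corresponding image as incorrect.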
Individual-Contest/Pixel/Solution/Pixel_Solution.ipynb
ADDED
|
@@ -0,0 +1,519 @@
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "2051e891-2bcf-42a6-9a1e-f773baff8808",
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"source": [
|
| 8 |
+
"<img src=\"./figs/IOAI-Logo.png\" alt=\"IOAI Logo\" width=\"200\" height=\"auto\">\n",
|
| 9 |
+
"\n",
|
| 10 |
+
"[IOAI 2025 (Beijing, China), Individual Contest](https://ioai-official.org/china-2025)\n",
|
| 11 |
+
"\n",
|
| 12 |
+
"[](https://colab.research.google.com/github/IOAI-official/IOAI-2025/blob/main/Individual-Contest/Pixel/Solution/Pixel_Solution.ipynb)"
|
| 13 |
+
]
|
| 14 |
+
},
|
| 15 |
+
{
|
| 16 |
+
"cell_type": "markdown",
|
| 17 |
+
"id": "441c46a0-b20a-4dc1-9227-f659833a7d2f",
|
| 18 |
+
"metadata": {},
|
| 19 |
+
"source": [
|
| 20 |
+
"# Pixel Efficiency: Reference Solution"
|
| 21 |
+
]
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"cell_type": "code",
|
| 25 |
+
"execution_count": null,
|
| 26 |
+
"id": "2df5ae63",
|
| 27 |
+
"metadata": {},
|
| 28 |
+
"outputs": [],
|
| 29 |
+
"source": [
|
| 30 |
+
"import os\n",
|
| 31 |
+
"import numpy as np\n",
|
| 32 |
+
"from PIL import Image\n",
|
| 33 |
+
"from tqdm import tqdm\n",
|
| 34 |
+
"import json\n",
|
| 35 |
+
"import torch\n",
|
| 36 |
+
"import torch.nn as nn\n",
|
| 37 |
+
"from datasets import load_from_disk\n",
|
| 38 |
+
"from transformers import CLIPProcessor, CLIPModel\n",
|
| 39 |
+
"from typing import Optional\n",
|
| 40 |
+
"from transformers.models.clip.modeling_clip import CLIPVisionTransformer, CLIPVisionConfig, BaseModelOutputWithPooling\n",
|
| 41 |
+
"\n",
|
| 42 |
+
"\n",
|
| 43 |
+
"# Dataset configuration\n",
|
| 44 |
+
"DATASET_PATH = os.environ.get(\"DATA_PATH\") + \"/test_dataset\"\n",
|
| 45 |
+
"SPLIT = \"test\"\n",
|
| 46 |
+
"\n",
|
| 47 |
+
"# Model Configuration\n",
|
| 48 |
+
"MODEL_PATH = \"./clip-vit-large-patch14\"\n",
|
| 49 |
+
"DEVICE = \"cuda\"\n",
|
| 50 |
+
"BACKGROUND_CLASS = \"other\"\n",
|
| 51 |
+
"\n",
|
| 52 |
+
"# Image and Masking Configuration\n",
|
| 53 |
+
"HEIGHT = 224\n",
|
| 54 |
+
"WIDTH = 224\n",
|
| 55 |
+
"RETAIN_RATIO = 0.0625\n",
|
| 56 |
+
"MEAN_COLOR = (0, 0, 0)\n",
|
| 57 |
+
"STRIDE = 2\n",
|
| 58 |
+
"TOP_K = 3\n",
|
| 59 |
+
"\n",
|
| 60 |
+
"# Load the dataset\n",
|
| 61 |
+
"print(\"Loading dataset...\")\n",
|
| 62 |
+
"dataset_whole = load_from_disk(DATASET_PATH)\n",
|
| 63 |
+
"dataset = dataset_whole[SPLIT]\n",
|
| 64 |
+
"\n",
|
| 65 |
+
"print(f\"Dataset loaded successfully! Total samples: {len(dataset)}\")\n",
|
| 66 |
+
"\n",
|
| 67 |
+
"print(f\"Loading CLIP model and processor: {MODEL_PATH}...\")\n",
|
| 68 |
+
"model = CLIPModel.from_pretrained(MODEL_PATH).to(DEVICE)\n",
|
| 69 |
+
"processor = CLIPProcessor.from_pretrained(MODEL_PATH)\n",
|
| 70 |
+
"print(\"Model and processor loaded successfully.\")\n",
|
| 71 |
+
"\n",
|
| 72 |
+
"\n",
|
| 73 |
+
"def generate_all_rectangular_regions(image_size=224, patch_size=14, retain_ratio=RETAIN_RATIO, max_aspect_ratio=1.2, stride=1):\n",
|
| 74 |
+
" \"\"\"Generate rectangular regions with optimizations for speed\"\"\"\n",
|
| 75 |
+
" max_pixels = int(retain_ratio * image_size * image_size)\n",
|
| 76 |
+
" patches_per_side = image_size // patch_size\n",
|
| 77 |
+
" patch_area = patch_size * patch_size\n",
|
| 78 |
+
" target_patches = max_pixels // patch_area\n",
|
| 79 |
+
" \n",
|
| 80 |
+
" min_patches = max(1, target_patches - 1)\n",
|
| 81 |
+
" max_patches = target_patches + 1\n",
|
| 82 |
+
" \n",
|
| 83 |
+
" regions = []\n",
|
| 84 |
+
" region_to_patches = []\n",
|
| 85 |
+
" \n",
|
| 86 |
+
" # Pre-compute valid rectangle dimensions\n",
|
| 87 |
+
" valid_dims = []\n",
|
| 88 |
+
" for width_patches in range(1, patches_per_side + 1):\n",
|
| 89 |
+
" for height_patches in range(1, patches_per_side + 1):\n",
|
| 90 |
+
" total_patches = width_patches * height_patches\n",
|
| 91 |
+
" if min_patches <= total_patches <= max_patches:\n",
|
| 92 |
+
" aspect_ratio = max(width_patches, height_patches) / min(width_patches, height_patches)\n",
|
| 93 |
+
" if aspect_ratio <= max_aspect_ratio:\n",
|
| 94 |
+
" valid_dims.append((width_patches, height_patches, total_patches))\n",
|
| 95 |
+
" \n",
|
| 96 |
+
" # Generate rectangles using stride for positions\n",
|
| 97 |
+
" for width_patches, height_patches, total_patches in valid_dims:\n",
|
| 98 |
+
" for top_patch in range(0, patches_per_side - height_patches + 1, stride):\n",
|
| 99 |
+
" for left_patch in range(0, patches_per_side - width_patches + 1, stride):\n",
|
| 100 |
+
" bottom_patch = top_patch + height_patches\n",
|
| 101 |
+
" right_patch = left_patch + width_patches\n",
|
| 102 |
+
" \n",
|
| 103 |
+
" pixel_coords = (\n",
|
| 104 |
+
" top_patch * patch_size,\n",
|
| 105 |
+
" left_patch * patch_size,\n",
|
| 106 |
+
" bottom_patch * patch_size,\n",
|
| 107 |
+
" right_patch * patch_size\n",
|
| 108 |
+
" )\n",
|
| 109 |
+
" regions.append(pixel_coords)\n",
|
| 110 |
+
" \n",
|
| 111 |
+
" covered_patches = []\n",
|
| 112 |
+
" for p_row in range(top_patch, bottom_patch):\n",
|
| 113 |
+
" for p_col in range(left_patch, right_patch):\n",
|
| 114 |
+
" patch_idx = p_row * patches_per_side + p_col\n",
|
| 115 |
+
" covered_patches.append(patch_idx)\n",
|
| 116 |
+
" region_to_patches.append(covered_patches)\n",
|
| 117 |
+
" \n",
|
| 118 |
+
" return regions, region_to_patches\n",
|
| 119 |
+
"\n",
|
| 120 |
+
"\n",
|
| 121 |
+
"class MaskCLIPVisionTransformer(CLIPVisionTransformer):\n",
|
| 122 |
+
" \"\"\"Modified CLIP Vision Transformer that supports mask tokens for all possible rectangular regions\"\"\"\n",
|
| 123 |
+
" \n",
|
| 124 |
+
" def __init__(self, config: CLIPVisionConfig, retain_ratio=RETAIN_RATIO):\n",
|
| 125 |
+
" super().__init__(config)\n",
|
| 126 |
+
" self.retain_ratio = retain_ratio\n",
|
| 127 |
+
" self.num_patches = (config.image_size // config.patch_size) ** 2\n",
|
| 128 |
+
" \n",
|
| 129 |
+
" self.regions, self.region_to_patches = generate_all_rectangular_regions(\n",
|
| 130 |
+
" image_size=config.image_size, \n",
|
| 131 |
+
" patch_size=config.patch_size, \n",
|
| 132 |
+
" retain_ratio=retain_ratio,\n",
|
| 133 |
+
" max_aspect_ratio=1.2,\n",
|
| 134 |
+
" stride=STRIDE\n",
|
| 135 |
+
" )\n",
|
| 136 |
+
" self.num_mask_tokens = len(self.regions)\n",
|
| 137 |
+
" \n",
|
| 138 |
+
" self.mask_tokens = nn.Parameter(torch.randn(1, self.num_mask_tokens, config.hidden_size))\n",
|
| 139 |
+
" \n",
|
| 140 |
+
" def create_mask_attention_matrix(self, batch_size):\n",
|
| 141 |
+
" \"\"\"Create attention mask matrix for all rectangular regions\"\"\"\n",
|
| 142 |
+
" N = self.num_patches\n",
|
| 143 |
+
" M = self.num_mask_tokens\n",
|
| 144 |
+
" total_tokens = N + 1 + M\n",
|
| 145 |
+
" \n",
|
| 146 |
+
" attention_mask = torch.zeros(total_tokens, total_tokens, dtype=torch.bool, device=self.mask_tokens.device)\n",
|
| 147 |
+
" \n",
|
| 148 |
+
" # Class token and image patches do NOT attend to mask tokens\n",
|
| 149 |
+
" attention_mask[:N+1, N+1:] = True\n",
|
| 150 |
+
" \n",
|
| 151 |
+
" # Each mask token attends to its specific image patches (not CLS)\n",
|
| 152 |
+
" attention_mask[N+1:, 1:N+1] = True\n",
|
| 153 |
+
" \n",
|
| 154 |
+
" # Then allow each mask token to attend to its assigned patches\n",
|
| 155 |
+
" for mask_idx in range(M):\n",
|
| 156 |
+
" covered_patches = self.region_to_patches[mask_idx]\n",
|
| 157 |
+
" for patch_idx in covered_patches:\n",
|
| 158 |
+
" token_pos = 1 + patch_idx\n",
|
| 159 |
+
" attention_mask[N + 1 + mask_idx, token_pos] = False\n",
|
| 160 |
+
" \n",
|
| 161 |
+
" # Mask tokens do NOT attend to each other\n",
|
| 162 |
+
" attention_mask[N+1:, N+1:] = True\n",
|
| 163 |
+
" # Allow self-attention for each mask token\n",
|
| 164 |
+
" for i in range(M):\n",
|
| 165 |
+
" attention_mask[N + 1 + i, N + 1 + i] = False\n",
|
| 166 |
+
" \n",
|
| 167 |
+
" return attention_mask\n",
|
| 168 |
+
" \n",
|
| 169 |
+
" def forward(\n",
|
| 170 |
+
" self,\n",
|
| 171 |
+
" pixel_values: Optional[torch.FloatTensor] = None,\n",
|
| 172 |
+
" output_attentions: Optional[bool] = None,\n",
|
| 173 |
+
" output_hidden_states: Optional[bool] = None,\n",
|
| 174 |
+
" interpolate_pos_encoding: Optional[bool] = False,\n",
|
| 175 |
+
" use_mask_tokens: bool = False,\n",
|
| 176 |
+
" ) -> BaseModelOutputWithPooling:\n",
|
| 177 |
+
" \"\"\"Forward pass with optional mask tokens\"\"\"\n",
|
| 178 |
+
" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n",
|
| 179 |
+
" output_hidden_states = (\n",
|
| 180 |
+
" output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n",
|
| 181 |
+
" )\n",
|
| 182 |
+
"\n",
|
| 183 |
+
" if pixel_values is None:\n",
|
| 184 |
+
" raise ValueError(\"You have to specify pixel_values\")\n",
|
| 185 |
+
"\n",
|
| 186 |
+
" # Get embeddings (patches + class token)\n",
|
| 187 |
+
" hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)\n",
|
| 188 |
+
" hidden_states = self.pre_layrnorm(hidden_states)\n",
|
| 189 |
+
" \n",
|
| 190 |
+
" if use_mask_tokens:\n",
|
| 191 |
+
" # Add mask tokens to the sequence\n",
|
| 192 |
+
" batch_size = hidden_states.shape[0]\n",
|
| 193 |
+
" \n",
|
| 194 |
+
" cls_token_embedding = hidden_states[:, 0:1, :]\n",
|
| 195 |
+
" mask_tokens_expanded = cls_token_embedding.expand(batch_size, self.num_mask_tokens, -1)\n",
|
| 196 |
+
" \n",
|
| 197 |
+
" if mask_tokens_expanded.device != hidden_states.device:\n",
|
| 198 |
+
" mask_tokens_expanded = mask_tokens_expanded.to(hidden_states.device)\n",
|
| 199 |
+
" \n",
|
| 200 |
+
" hidden_states = torch.cat([hidden_states, mask_tokens_expanded], dim=1)\n",
|
| 201 |
+
" \n",
|
| 202 |
+
" # Create custom attention mask\n",
|
| 203 |
+
" attention_mask = self.create_mask_attention_matrix(batch_size)\n",
|
| 204 |
+
" \n",
|
| 205 |
+
" seq_len = hidden_states.shape[1]\n",
|
| 206 |
+
" attention_mask_4d = attention_mask.unsqueeze(0).unsqueeze(0).expand(batch_size, 1, -1, -1)\n",
|
| 207 |
+
" attention_mask_4d = attention_mask_4d.float()\n",
|
| 208 |
+
" attention_mask_4d = attention_mask_4d.masked_fill(attention_mask_4d == 1, float('-inf'))\n",
|
| 209 |
+
" attention_mask_4d = attention_mask_4d.masked_fill(attention_mask_4d == 0, 0.0)\n",
|
| 210 |
+
" else:\n",
|
| 211 |
+
" attention_mask_4d = None\n",
|
| 212 |
+
"\n",
|
| 213 |
+
" # Process through encoder layers\n",
|
| 214 |
+
" encoder_outputs = self.encoder(\n",
|
| 215 |
+
" inputs_embeds=hidden_states,\n",
|
| 216 |
+
" attention_mask=attention_mask_4d,\n",
|
| 217 |
+
" causal_attention_mask=None,\n",
|
| 218 |
+
" output_attentions=output_attentions,\n",
|
| 219 |
+
" output_hidden_states=output_hidden_states,\n",
|
| 220 |
+
" )\n",
|
| 221 |
+
"\n",
|
| 222 |
+
" last_hidden_state = encoder_outputs.last_hidden_state\n",
|
| 223 |
+
" \n",
|
| 224 |
+
" if use_mask_tokens:\n",
|
| 225 |
+
" # Extract different token types\n",
|
| 226 |
+
" class_token_output = last_hidden_state[:, 0]\n",
|
| 227 |
+
" mask_tokens_output = last_hidden_state[:, self.num_patches + 1:]\n",
|
| 228 |
+
" \n",
|
| 229 |
+
" # Apply post layer norm\n",
|
| 230 |
+
" pooled_output = self.post_layernorm(class_token_output)\n",
|
| 231 |
+
" mask_tokens_output = self.post_layernorm(mask_tokens_output)\n",
|
| 232 |
+
" \n",
|
| 233 |
+
" return {\n",
|
| 234 |
+
" 'last_hidden_state': last_hidden_state,\n",
|
| 235 |
+
" 'pooler_output': pooled_output,\n",
|
| 236 |
+
" 'mask_tokens_output': mask_tokens_output,\n",
|
| 237 |
+
" 'hidden_states': encoder_outputs.hidden_states,\n",
|
| 238 |
+
" 'attentions': encoder_outputs.attentions,\n",
|
| 239 |
+
" }\n",
|
| 240 |
+
" else:\n",
|
| 241 |
+
" # Standard CLIP behavior\n",
|
| 242 |
+
" pooled_output = last_hidden_state[:, 0, :]\n",
|
| 243 |
+
" pooled_output = self.post_layernorm(pooled_output)\n",
|
| 244 |
+
"\n",
|
| 245 |
+
" return BaseModelOutputWithPooling(\n",
|
| 246 |
+
" last_hidden_state=last_hidden_state,\n",
|
| 247 |
+
" pooler_output=pooled_output,\n",
|
| 248 |
+
" hidden_states=encoder_outputs.hidden_states,\n",
|
| 249 |
+
" attentions=encoder_outputs.attentions,\n",
|
| 250 |
+
" )\n",
|
| 251 |
+
"\n",
|
| 252 |
+
"\n",
|
| 253 |
+
"def apply_mask_with_mean(image, mask, mean_rgb=MEAN_COLOR):\n",
|
| 254 |
+
" \"\"\"Apply arbitrary binary mask to image, replacing masked areas with mean values\"\"\"\n",
|
| 255 |
+
" img_array = np.array(image).copy()\n",
|
| 256 |
+
"\n",
|
| 257 |
+
" if isinstance(mask, Image.Image):\n",
|
| 258 |
+
" mask_array = np.array(mask.convert('L')) > 127\n",
|
| 259 |
+
" else:\n",
|
| 260 |
+
" mask_array = mask > 0\n",
|
| 261 |
+
"\n",
|
| 262 |
+
" mask_3d = np.stack([mask_array] * 3, axis=2)\n",
|
| 263 |
+
" mean_values = np.array([int(m * 255) for m in mean_rgb])\n",
|
| 264 |
+
" img_array = np.where(mask_3d, img_array, mean_values.reshape(1, 1, 3))\n",
|
| 265 |
+
"\n",
|
| 266 |
+
" return Image.fromarray(img_array.astype(np.uint8))\n",
|
| 267 |
+
"\n",
|
| 268 |
+
"\n",
|
| 269 |
+
"def compute_vision_features_once(model, image, mask_vision_model):\n",
|
| 270 |
+
" \"\"\"\n",
|
| 271 |
+
" Compute vision features once for efficient reuse across multiple mask selection functions.\n",
|
| 272 |
+
" This eliminates redundant forward passes.\n",
|
| 273 |
+
" \"\"\"\n",
|
| 274 |
+
" image_inputs = processor(images=image, return_tensors=\"pt\").to(DEVICE)\n",
|
| 275 |
+
" \n",
|
| 276 |
+
" with torch.no_grad():\n",
|
| 277 |
+
" vision_outputs = mask_vision_model(\n",
|
| 278 |
+
" pixel_values=image_inputs['pixel_values'],\n",
|
| 279 |
+
" use_mask_tokens=True\n",
|
| 280 |
+
" )\n",
|
| 281 |
+
" \n",
|
| 282 |
+
" full_image_features = vision_outputs['pooler_output']\n",
|
| 283 |
+
" if hasattr(model, 'visual_projection') and model.visual_projection is not None:\n",
|
| 284 |
+
" full_image_features = model.visual_projection(full_image_features)\n",
|
| 285 |
+
" \n",
|
| 286 |
+
" mask_tokens_features = vision_outputs['mask_tokens_output']\n",
|
| 287 |
+
" if hasattr(model, 'visual_projection') and model.visual_projection is not None:\n",
|
| 288 |
+
" batch_size, num_tokens, embed_dim = mask_tokens_features.shape\n",
|
| 289 |
+
" mask_tokens_features = mask_tokens_features.view(-1, embed_dim)\n",
|
| 290 |
+
" mask_tokens_features = model.visual_projection(mask_tokens_features)\n",
|
| 291 |
+
" mask_tokens_features = mask_tokens_features.view(batch_size, num_tokens, -1)\n",
|
| 292 |
+
" \n",
|
| 293 |
+
" full_image_features = full_image_features / full_image_features.norm(dim=-1, keepdim=True)\n",
|
| 294 |
+
" mask_tokens_features = mask_tokens_features / mask_tokens_features.norm(dim=-1, keepdim=True)\n",
|
| 295 |
+
" \n",
|
| 296 |
+
" return vision_outputs, full_image_features, mask_tokens_features\n",
|
| 297 |
+
"\n",
|
| 298 |
+
"\n",
|
| 299 |
+
"def find_best_mask_region_calibrated(model, image, class_names, mask_vision_model, text_features, \n",
|
| 300 |
+
" vision_outputs=None, full_image_features=None, mask_tokens_features=None, \n",
|
| 301 |
+
" return_detailed=False):\n",
|
| 302 |
+
" \"\"\"Find the best mask region using MaskCLIP approach with calibration for black-pixel masking\"\"\"\n",
|
| 303 |
+
" num_mask_tokens = mask_vision_model.num_mask_tokens\n",
|
| 304 |
+
" \n",
|
| 305 |
+
" # Use pre-computed features if provided, otherwise compute them\n",
|
| 306 |
+
" if vision_outputs is None or full_image_features is None or mask_tokens_features is None:\n",
|
| 307 |
+
" image_inputs = processor(images=image, return_tensors=\"pt\").to(DEVICE)\n",
|
| 308 |
+
" \n",
|
| 309 |
+
" with torch.no_grad():\n",
|
| 310 |
+
" vision_outputs = mask_vision_model(\n",
|
| 311 |
+
" pixel_values=image_inputs['pixel_values'],\n",
|
| 312 |
+
" use_mask_tokens=True\n",
|
| 313 |
+
" )\n",
|
| 314 |
+
" \n",
|
| 315 |
+
" full_image_features = vision_outputs['pooler_output']\n",
|
| 316 |
+
" if hasattr(model, 'visual_projection') and model.visual_projection is not None:\n",
|
| 317 |
+
" full_image_features = model.visual_projection(full_image_features)\n",
|
| 318 |
+
" \n",
|
| 319 |
+
" mask_tokens_features = vision_outputs['mask_tokens_output']\n",
|
| 320 |
+
" if hasattr(model, 'visual_projection') and model.visual_projection is not None:\n",
|
| 321 |
+
" batch_size, num_tokens, embed_dim = mask_tokens_features.shape\n",
|
| 322 |
+
" mask_tokens_features = mask_tokens_features.view(-1, embed_dim)\n",
|
| 323 |
+
" mask_tokens_features = model.visual_projection(mask_tokens_features)\n",
|
| 324 |
+
" mask_tokens_features = mask_tokens_features.view(batch_size, num_tokens, -1)\n",
|
| 325 |
+
" \n",
|
| 326 |
+
" full_image_features = full_image_features / full_image_features.norm(dim=-1, keepdim=True)\n",
|
| 327 |
+
" mask_tokens_features = mask_tokens_features / mask_tokens_features.norm(dim=-1, keepdim=True)\n",
|
| 328 |
+
" \n",
|
| 329 |
+
" # Compute similarity between full image and text\n",
|
| 330 |
+
" full_image_similarities = torch.matmul(full_image_features, text_features.T)\n",
|
| 331 |
+
" full_image_prediction = torch.argmax(full_image_similarities, dim=-1)\n",
|
| 332 |
+
" predicted_class_idx = full_image_prediction.item()\n",
|
| 333 |
+
" \n",
|
| 334 |
+
" # Compute similarities for each mask token\n",
|
| 335 |
+
" mask_similarities = torch.matmul(mask_tokens_features.squeeze(0), text_features.T)\n",
|
| 336 |
+
" mask_predictions = torch.argmax(mask_similarities, dim=-1)\n",
|
| 337 |
+
" \n",
|
| 338 |
+
" # Get candidates that predict the same class as full image, sorted by confidence\n",
|
| 339 |
+
" matching_masks = (mask_predictions == predicted_class_idx)\n",
|
| 340 |
+
" \n",
|
| 341 |
+
" if matching_masks.any():\n",
|
| 342 |
+
" candidate_indices = torch.where(matching_masks)[0]\n",
|
| 343 |
+
" candidate_confidences = mask_similarities[candidate_indices, predicted_class_idx]\n",
|
| 344 |
+
" sorted_indices = torch.argsort(candidate_confidences, descending=True)\n",
|
| 345 |
+
" sorted_candidates = candidate_indices[sorted_indices]\n",
|
| 346 |
+
" else:\n",
|
| 347 |
+
" # If no exact matches, use all candidates sorted by confidence for predicted class\n",
|
| 348 |
+
" candidate_confidences = mask_similarities[:, predicted_class_idx]\n",
|
| 349 |
+
" sorted_candidates = torch.topk(candidate_confidences, len(candidate_confidences)).indices\n",
|
| 350 |
+
" \n",
|
| 351 |
+
" # OPTIMIZATION: If TOP_K=1, skip calibration and return best candidate directly\n",
|
| 352 |
+
" if TOP_K == 1:\n",
|
| 353 |
+
" return sorted_candidates[0].item()\n",
|
| 354 |
+
" \n",
|
| 355 |
+
" # CALIBRATION STEP: Test top K candidates, return immediately when one is correct\n",
|
| 356 |
+
" calibration_results = []\n",
|
| 357 |
+
" candidates_to_test = sorted_candidates[:TOP_K]\n",
|
| 358 |
+
" \n",
|
| 359 |
+
" for i, candidate_idx in enumerate(candidates_to_test):\n",
|
| 360 |
+
" candidate_idx_item = candidate_idx.item()\n",
|
| 361 |
+
" \n",
|
| 362 |
+
" # Create masked image for this candidate\n",
|
| 363 |
+
" coordinates = mask_idx_to_coordinates(candidate_idx_item, mask_vision_model)\n",
|
| 364 |
+
" mask = generate_mask_from_coordinates(image, coordinates)\n",
|
| 365 |
+
" masked_image = apply_mask_with_mean(image, mask)\n",
|
| 366 |
+
" \n",
|
| 367 |
+
" # Test with actual forward pass\n",
|
| 368 |
+
" with torch.no_grad():\n",
|
| 369 |
+
" masked_image_inputs = processor(images=masked_image, return_tensors=\"pt\").to(DEVICE)\n",
|
| 370 |
+
" masked_image_features = model.get_image_features(**masked_image_inputs)\n",
|
| 371 |
+
" masked_image_features = masked_image_features / masked_image_features.norm(dim=-1, keepdim=True)\n",
|
| 372 |
+
" \n",
|
| 373 |
+
" masked_similarities = torch.matmul(masked_image_features, text_features.T)\n",
|
| 374 |
+
" masked_prediction = torch.argmax(masked_similarities, dim=-1).item()\n",
|
| 375 |
+
" masked_confidence = masked_similarities[0, predicted_class_idx].item()\n",
|
| 376 |
+
" \n",
|
| 377 |
+
" # If this candidate predicts correctly, return it immediately (early exit optimization)\n",
|
| 378 |
+
" if masked_prediction == predicted_class_idx:\n",
|
| 379 |
+
" return candidate_idx_item\n",
|
| 380 |
+
" \n",
|
| 381 |
+
" # Store failed calibration result\n",
|
| 382 |
+
" calibration_results.append((candidate_idx_item, masked_confidence))\n",
|
| 383 |
+
" \n",
|
| 384 |
+
" # If we reach here, all TOP_K candidates failed calibration\n",
|
| 385 |
+
" # Fall back to the next best candidate from sorted list WITHOUT additional calibration\n",
|
| 386 |
+
" if len(sorted_candidates) > TOP_K:\n",
|
| 387 |
+
" return sorted_candidates[TOP_K].item()\n",
|
| 388 |
+
" else:\n",
|
| 389 |
+
" # If no more candidates available, return the best failed calibration result\n",
|
| 390 |
+
" if calibration_results:\n",
|
| 391 |
+
" return max(calibration_results, key=lambda x: x[1])[0]\n",
|
| 392 |
+
" else:\n",
|
| 393 |
+
" # Ultimate fallback: return the best mask token prediction\n",
|
| 394 |
+
" return sorted_candidates[0].item()\n",
|
| 395 |
+
"\n",
|
| 396 |
+
"\n",
|
| 397 |
+
"def mask_idx_to_coordinates(mask_idx, mask_vision_model):\n",
|
| 398 |
+
" \"\"\"Convert mask token index to image coordinates using the pre-computed regions\"\"\"\n",
|
| 399 |
+
" if mask_idx >= len(mask_vision_model.regions):\n",
|
| 400 |
+
" raise ValueError(f\"mask_idx {mask_idx} is out of range. Only {len(mask_vision_model.regions)} regions available.\")\n",
|
| 401 |
+
" \n",
|
| 402 |
+
" top, left, bottom, right = mask_vision_model.regions[mask_idx]\n",
|
| 403 |
+
" return ((top, left), (bottom, right))\n",
|
| 404 |
+
"\n",
|
| 405 |
+
"\n",
|
| 406 |
+
"def generate_mask_from_coordinates(image, coordinates):\n",
|
| 407 |
+
" \"\"\"Generate a binary mask from crop coordinates\"\"\"\n",
|
| 408 |
+
" H, W = 224, 224\n",
|
| 409 |
+
" mask = np.zeros((H, W), dtype=np.int8)\n",
|
| 410 |
+
" \n",
|
| 411 |
+
" (top, left), (bottom, right) = coordinates\n",
|
| 412 |
+
" mask[top:bottom, left:right] = 1\n",
|
| 413 |
+
" \n",
|
| 414 |
+
" return mask\n",
|
| 415 |
+
"\n",
|
| 416 |
+
"\n",
|
| 417 |
+
"# Create the MaskCLIP model\n",
|
| 418 |
+
"print(\"Creating MaskCLIP model...\")\n",
|
| 419 |
+
"mask_vision_model = MaskCLIPVisionTransformer(model.vision_model.config, retain_ratio=RETAIN_RATIO)\n",
|
| 420 |
+
"mask_vision_model.load_state_dict(model.vision_model.state_dict(), strict=False)\n",
|
| 421 |
+
"mask_vision_model = mask_vision_model.to(DEVICE)\n",
|
| 422 |
+
"mask_vision_model.eval()\n",
|
| 423 |
+
"print(\"MaskCLIP model created successfully.\")\n",
|
| 424 |
+
"\n",
|
| 425 |
+
"dataset_eval = load_from_disk(DATASET_PATH)\n",
|
| 426 |
+
"dataset_eval = dataset_eval[SPLIT]\n",
|
| 427 |
+
"\n",
|
| 428 |
+
"# Get class names from training dataset for consistent evaluation \n",
|
| 429 |
+
"train_dataset = load_from_disk(\"/bohr/train-yzfn/v1/train_dataset\")[\"train\"] # TODO: This part needs changing!\n",
|
| 430 |
+
"class_names_eval = list(set([item['name'] for item in train_dataset])) + [BACKGROUND_CLASS]\n",
|
| 431 |
+
"\n",
|
| 432 |
+
"# Prepare text features once for efficiency\n",
|
| 433 |
+
"print(\"Preparing text features...\")\n",
|
| 434 |
+
"text_inputs_eval = processor(text=class_names_eval, return_tensors=\"pt\", padding=True).to(DEVICE)\n",
|
| 435 |
+
"with torch.no_grad():\n",
|
| 436 |
+
" text_features_eval = model.get_text_features(**text_inputs_eval)\n",
|
| 437 |
+
" text_features_eval = text_features_eval / text_features_eval.norm(dim=-1, keepdim=True)\n",
|
| 438 |
+
"print(\"Text features prepared.\")\n",
|
| 439 |
+
"\n",
|
| 440 |
+
"# Main evaluation loop\n",
|
| 441 |
+
"masks = {}\n",
|
| 442 |
+
"total_correct = 0\n",
|
| 443 |
+
"total_processed = 0\n",
|
| 444 |
+
"\n",
|
| 445 |
+
"for item in tqdm(dataset_eval):\n",
|
| 446 |
+
" image = item['image']\n",
|
| 447 |
+
" total_processed += 1\n",
|
| 448 |
+
"\n",
|
| 449 |
+
" try:\n",
|
| 450 |
+
" # Compute vision features once for efficiency (eliminates redundant forward passes)\n",
|
| 451 |
+
" vision_outputs, full_image_features, mask_tokens_features = compute_vision_features_once(\n",
|
| 452 |
+
" model, image, mask_vision_model\n",
|
| 453 |
+
" )\n",
|
| 454 |
+
" \n",
|
| 455 |
+
" # Get prediction from pre-computed features\n",
|
| 456 |
+
" full_image_similarities = torch.matmul(full_image_features, text_features_eval.T)\n",
|
| 457 |
+
" predicted_class_idx = torch.argmax(full_image_similarities, dim=-1).item()\n",
|
| 458 |
+
" \n",
|
| 459 |
+
" best_mask_idx = find_best_mask_region_calibrated(\n",
|
| 460 |
+
" model, image, class_names_eval, mask_vision_model, text_features_eval,\n",
|
| 461 |
+
" vision_outputs=vision_outputs, full_image_features=full_image_features, \n",
|
| 462 |
+
" mask_tokens_features=mask_tokens_features\n",
|
| 463 |
+
" )\n",
|
| 464 |
+
" \n",
|
| 465 |
+
" coordinates = mask_idx_to_coordinates(best_mask_idx, mask_vision_model)\n",
|
| 466 |
+
" \n",
|
| 467 |
+
" # Validate the mask\n",
|
| 468 |
+
" mask = generate_mask_from_coordinates(image, coordinates)\n",
|
| 469 |
+
" assert mask.shape == (224, 224), \"Mask should be 224x224\"\n",
|
| 470 |
+
" assert mask.sum() <= RETAIN_RATIO * 224 * 224, \"You should leave only 6.25% of pixels\"\n",
|
| 471 |
+
"\n",
|
| 472 |
+
" \n",
|
| 473 |
+
" # Save the coordinates\n",
|
| 474 |
+
" idx = item['idx']\n",
|
| 475 |
+
" masks[idx] = coordinates\n",
|
| 476 |
+
" \n",
|
| 477 |
+
" except Exception as e:\n",
|
| 478 |
+
" print(f\"Error processing image {item['idx']}: {e}\")\n",
|
| 479 |
+
" # Fallback to a small center region if there's an error\n",
|
| 480 |
+
" if len(mask_vision_model.regions) > 0:\n",
|
| 481 |
+
" region_sizes = [(r[2]-r[0])*(r[3]-r[1]) for r in mask_vision_model.regions]\n",
|
| 482 |
+
" min_region_idx = region_sizes.index(min(region_sizes))\n",
|
| 483 |
+
" fallback_coords = mask_idx_to_coordinates(min_region_idx, mask_vision_model)\n",
|
| 484 |
+
" else:\n",
|
| 485 |
+
" fallback_coords = ((84, 84), (140, 140))\n",
|
| 486 |
+
" masks[item['idx']] = fallback_coords\n",
|
| 487 |
+
"\n",
|
| 488 |
+
"# Save as JSONL (one JSON object per line) - much safer than pickle\n",
|
| 489 |
+
"with open('submission.jsonl', 'w') as f:\n",
|
| 490 |
+
" for idx, coordinates in masks.items():\n",
|
| 491 |
+
" json.dump({\"idx\": idx, \"coordinates\": coordinates}, f)\n",
|
| 492 |
+
" f.write('\\n')\n",
|
| 493 |
+
"\n",
|
| 494 |
+
"print(\"Masks saved to masks.jsonl\")\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
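The notebook above ends by writing one JSON object per line to submission.jsonl. As a minimal sketch of what a single entry looks like and how to sanity-check it against the 6.25% retain budget (the file name and the example coordinates are only illustrative, borrowed from the fallback region above; this helper is not part of the contest files):

import json

RETAIN_RATIO = 0.0625  # same budget the notebook enforces

with open('submission.jsonl') as f:
    entry = json.loads(f.readline())  # e.g. {"idx": "0", "coordinates": [[84, 84], [140, 140]]}
(top, left), (bottom, right) = entry['coordinates']
kept = (bottom - top) * (right - left)       # pixels retained from the 224x224 image
assert kept <= RETAIN_RATIO * 224 * 224      # at most 3136 pixels may survive the mask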
Individual-Contest/Pixel/Solution/Scoring/metrics.py
ADDED
|
@@ -0,0 +1,549 @@
import json
from pathlib import Path
from argparse import ArgumentParser
import os

import torch
from transformers import CLIPProcessor, CLIPModel
from datasets import load_from_disk, load_dataset
from PIL import Image
import numpy as np
import math
from tqdm.auto import tqdm  # Progress bar
import random
import matplotlib.pyplot as plt

if os.environ.get('METRIC_PATH'):
    METRIC_PATH = os.environ.get("METRIC_PATH") + "/"
else:
    METRIC_PATH = ""  # Fallback for local testing
H, W = 224, 224
MODEL_PATH = "/bohr/clip-vit-large-patch14-aft9/v1/clip-vit-large-patch14"
DATASET_PATH = METRIC_PATH + "reference_dataset"
MASK_PATH = "masks.jsonl"
SPLIT = "test"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
RETAIN_RATIO = 0.0625
SCORE_OUTPUT_FILE = "score.json"
MEAN_COLOR = (0, 0, 0)


parser = ArgumentParser()
parser.add_argument("--mask-file", default='masks.npy', type=str)
parser.add_argument("--debug", default=False, action='store_true')
args = parser.parse_args()


def write_error_score(error_message):
    """Write error score to JSON file"""
    error_json = {
        "status": False,
        "score": {
            "public_a": 0.0,
            "public_detail": {
                "Score": 0.0,
                "Accuracy": 0.0,
            },
            "private_b": 0.0,
            "private_detail": {
                "Score": 0.0,
                "Accuracy": 0.0,
            },
        },
        "msg": f"Error: {error_message}",
    }

    with open(SCORE_OUTPUT_FILE, 'w') as f:
        json.dump(error_json, f, indent=2)
    print(f"Error written to {SCORE_OUTPUT_FILE}: {error_message}")


def safe_load_masks(mask_file_path, expected_dataset_size):
    """
    Safely load and validate the masks file from contestants.

    Parameters:
        mask_file_path: Path to the masks file
        expected_dataset_size: Expected number of test cases

    Returns:
        dict: Validated masks dictionary or None if invalid
    """
    try:
        # Check if file exists
        if not os.path.exists(mask_file_path):
            write_error_score("Mask file not found.")
            return None

        # Check file size (prevent extremely large files)
        file_size = os.path.getsize(mask_file_path)
        max_file_size = 50 * 1024 * 1024  # 50MB limit
        if file_size > max_file_size:
            write_error_score("Mask file too large.")
            return None

        masks = {}

        # Load based on file extension
        if mask_file_path.endswith('.jsonl'):
            # Load JSONL format (one JSON object per line)
            try:
                with open(mask_file_path, 'r') as f:
                    for line_num, line in enumerate(f, 1):
                        if line.strip():  # Skip empty lines
                            try:
                                data = json.loads(line.strip())
                                idx = data.get('idx')
                                coordinates = data.get('coordinates')

                                if idx is None or coordinates is None:
                                    write_error_score("Invalid JSONL format.")
                                    return None

                                masks[idx] = coordinates
                            except json.JSONDecodeError:
                                write_error_score("Invalid JSON in mask file.")
                                return None
            except Exception:
                write_error_score("Unable to load JSONL mask file.")
                return None

        # Validate it's a dictionary
        if not isinstance(masks, dict):
            write_error_score("Mask data must be a dictionary.")
            return None

        # Check number of entries
        if len(masks) != expected_dataset_size:
            # print(len(masks), expected_dataset_size)
            write_error_score("Incorrect number of mask entries.")
            return None

        # Validate each mask entry
        for idx, coordinates in masks.items():
            # Validate index
            if not isinstance(idx, (int, np.integer, str)):
                write_error_score("Invalid mask index format.")
                return None

            # Validate coordinates structure
            if not isinstance(coordinates, (tuple, list)) or len(coordinates) != 2:
                write_error_score("Invalid mask coordinate structure.")
                return None

            try:
                (top, left), (bottom, right) = coordinates
            except (ValueError, TypeError):
                write_error_score("Invalid mask coordinate format.")
                return None

            # Validate coordinate types and values
            coords = [top, left, bottom, right]
            for coord in coords:
                if not isinstance(coord, (int, np.integer)):
                    write_error_score("Mask coordinates must be integers.")
                    return None

                if not (0 <= coord <= 224):
                    write_error_score("Mask coordinates out of valid range.")
                    return None

            # Validate coordinate ordering
            if not (top < bottom and left < right):
                write_error_score("Invalid mask coordinate ordering.")
                return None

            # Validate area constraint
            crop_area = (bottom - top) * (right - left)
            max_area = RETAIN_RATIO * 224 * 224
            if crop_area > max_area:
                write_error_score("Mask area exceeds allowed limit.")
                return None

            # Additional security: prevent degenerate cases
            if crop_area <= 0:
                write_error_score("Invalid mask area.")
                return None

        return masks

    except Exception as e:
        write_error_score("Unexpected error loading mask file.")
        return None


def check_validity(coordinates):
    """
    Check if coordinates are valid according to the requirements.
    Returns True if valid, False otherwise.
    """
    try:
        # Check if coordinates is a tuple of two tuples
        if not hasattr(coordinates, '__iter__') or len(coordinates) != 2:
            return False

        (top, left), (bottom, right) = coordinates

        # Check if all coordinates are integers
        if not all(isinstance(coord, (int, np.integer)) for coord in [top, left, bottom, right]):
            return False

        # Check if coordinates are within image bounds
        # For slicing mask[top:bottom, left:right], valid ranges are:
        # top, left: [0, 223] (inclusive)
        # bottom, right: [1, 224] (inclusive) since we need top < bottom and left < right
        if not (0 <= top < 224 and 0 <= left < 224 and 1 <= bottom <= 224 and 1 <= right <= 224):
            return False

        # Check if top-left is actually top-left of bottom-right (proper ordering)
        if not (top < bottom and left < right):
            return False

        # Check that the crop area doesn't exceed RETAIN_RATIO
        crop_area = (bottom - top) * (right - left)
        max_area = RETAIN_RATIO * 224 * 224
        if crop_area > max_area:
            return False

        return True
    except Exception:
        return False

def generate_mask_from_coordinates(image, coordinates):
    """
    Generate a binary mask from crop coordinates.

    Parameters:
        image: PIL Image
        coordinates: tuple of ((top, left), (bottom, right))

    Returns:
        numpy array: Binary mask with 1s in the crop area
    """
    H, W = 224, 224  # Standard image size
    mask = np.zeros((H, W), dtype=np.int8)

    (top, left), (bottom, right) = coordinates
    mask[top:bottom, left:right] = 1

    return mask

def apply_mask_with_mean(image, mask, mean_rgb=MEAN_COLOR):
    """
    Apply arbitrary binary mask to image, replacing masked areas with mean values

    Parameters:
    - image: PIL Image (224x224)
    - mask: Binary numpy array or PIL Image (224x224) where 0 is the area to drop and 1 is the area to keep
    - mean_rgb: RGB mean values to use (default: from config)

    Returns: Modified PIL Image
    """
    # Convert images to numpy arrays
    img_array = np.array(image).copy()

    # Ensure mask is numpy array
    if isinstance(mask, Image.Image):
        mask_array = np.array(mask.convert('L')) > 127  # Convert to binary
    else:
        mask_array = mask > 0

    # Reshape mask for broadcasting with RGB
    mask_3d = np.stack([mask_array] * 3, axis=2)

    # Convert mean values to 0-255 range
    mean_values = np.array([int(m * 255) for m in mean_rgb])
    # Apply mask - replace areas where mask is 0 (drop) with mean values, keep areas where mask is 1
    img_array = np.where(mask_3d, img_array, mean_values.reshape(1, 1, 3))

    return Image.fromarray(img_array.astype(np.uint8))


if __name__ == '__main__':
    try:
        # Load the animals dataset using the function from animal_dataset.py
        try:
            dataset = load_from_disk(DATASET_PATH)
            dataset = dataset[SPLIT]
        except Exception:
            write_error_score("Unable to load reference dataset.")
            exit(1)

        # Safely load and validate masks
        masks = safe_load_masks(MASK_PATH, len(dataset))
        if masks is None:
            exit(1)  # Error already written by safe_load_masks

        # Check validity of coordinates and report invalid ones
        invalid_coordinates = []
        valid_coordinates = 0
        for idx, coordinates in masks.items():
            if not check_validity(coordinates):
                invalid_coordinates.append(idx)
            else:
                valid_coordinates += 1

        if invalid_coordinates:
            print(f"Warning: Found {len(invalid_coordinates)} invalid coordinates (indices: {invalid_coordinates[:10]}{'...' if len(invalid_coordinates) > 10 else ''})")
            print(f"Invalid coordinates will be treated as incorrect predictions")
            print(f"Valid coordinates: {valid_coordinates}/{len(masks)}")

        #dataset = dataset.select(range(10)) # debug remove later

        # --- Step 1: Load Model and Processor ---
        print(f"Loading CLIP model and processor: {MODEL_PATH}...")
        try:
            model = CLIPModel.from_pretrained(MODEL_PATH).to(DEVICE)
            processor = CLIPProcessor.from_pretrained(MODEL_PATH)
            model.eval()  # Set to evaluation mode
            print("Model and processor loaded successfully.")
        except Exception as e:
            write_error_score("Unable to load model.")
            exit(1)

        try:
            labels = sorted(list(set(dataset['name']))) + ['other']
            text_inputs = processor(text=labels, return_tensors="pt", padding=True).to(DEVICE)
        except Exception:
            write_error_score("Unable to process labels.")
            exit(1)

        # Map label names to indices for later comparison
        label_to_index = {label: i for i, label in enumerate(labels)}
        index_to_label = {i: label for label, i in label_to_index.items()}  # For mapping prediction back

        def predict_with_coordinates(image, coordinates):
            try:
                # Generate mask from coordinates
                mask = generate_mask_from_coordinates(image, coordinates)
                assert len(mask.shape) == 2

                if image.mode != "RGB":
                    image = image.convert("RGB")
                image = apply_mask_with_mean(image, mask)
                image_processed = processor(images=image, return_tensors="pt").to(DEVICE)
                pixel_values = image_processed['pixel_values']
                outputs_full = model(pixel_values=pixel_values, **text_inputs)
                logits_full = outputs_full.logits_per_image  # Shape: (1, num_labels)
                predicted_index_full = logits_full.argmax(dim=-1).item()
                return predicted_index_full
            except Exception:
                # Return a random prediction if processing fails
                return len(labels) - 1  # Return 'other' class

        def get_accuracy(masks):
            try:
                with torch.no_grad():  # Disable gradient calculations for inference
                    correct = 0
                    for item in tqdm(dataset):
                        idx = item['idx']
                        if idx not in masks:
                            continue
                        coordinates = masks[idx]

                        # Check coordinates validity - if invalid, mark as incorrect
                        if not check_validity(coordinates):
                            print(f"Invalid coordinates for item {idx}")
                            continue  # Skip this item, treating it as incorrect

                        image = item['image']
                        true_label_label = item['name']  # This is now the animal class name

                        # Store true label for confusion matrix
                        true_label_idx = label_to_index[true_label_label]
                        if predict_with_coordinates(image, coordinates) == true_label_idx:
                            correct += 1
                return correct / len(masks)
            except Exception:
                return 0.0

        def get_accuracy_by_sets(masks):
            """Calculate accuracy for A set (smaller) and B set (larger) with 30:70 split"""
            try:
                # Set random seed for reproducible shuffling
                random.seed(42)

                with torch.no_grad():
                    correct_a = 0
                    correct_b = 0
                    total_a = 0
                    total_b = 0

                    # First, collect all valid items that have masks
                    valid_items = []
                    for item in dataset:
                        idx = item['idx']
                        if idx in masks:
                            valid_items.append(item)

                    # Group items by class name for stratified sampling
                    items_by_class = {}
                    for item in valid_items:
                        class_name = item['name']
                        if class_name not in items_by_class:
                            items_by_class[class_name] = []
                        items_by_class[class_name].append(item)

                    # Stratified split: for each class, allocate 30% to A and 70% to B
                    set_a_items = []
                    set_b_items = []

                    for class_name, class_items in items_by_class.items():
                        # Shuffle items within each class for random stratified sampling
                        random.shuffle(class_items)

                        # Calculate split point for this class (30% to A, 70% to B)
                        split_point = int(len(class_items) * 0.3)

                        # Ensure at least one item goes to each set if possible
                        if len(class_items) >= 2:
                            if split_point == 0:
                                split_point = 1
                            elif split_point == len(class_items):
                                split_point = len(class_items) - 1

                        class_a_items = class_items[:split_point]
                        class_b_items = class_items[split_point:]

                        set_a_items.extend(class_a_items)
                        set_b_items.extend(class_b_items)

                        print(f"Class '{class_name}': {len(class_items)} total, {len(class_a_items)} to A, {len(class_b_items)} to B")

                    print(f"Stratified split: Set A has {len(set_a_items)} items, Set B has {len(set_b_items)} items")

                    # Verify class distribution
                    a_class_counts = {}
                    b_class_counts = {}
                    for item in set_a_items:
                        class_name = item['name']
                        a_class_counts[class_name] = a_class_counts.get(class_name, 0) + 1
                    for item in set_b_items:
                        class_name = item['name']
                        b_class_counts[class_name] = b_class_counts.get(class_name, 0) + 1

                    print("Class distribution verification:")
                    for class_name in sorted(labels):
                        a_count = a_class_counts.get(class_name, 0)
                        b_count = b_class_counts.get(class_name, 0)
                        total_count = a_count + b_count
                        if total_count > 0:
                            a_ratio = a_count / total_count
                            b_ratio = b_count / total_count
                            print(f"  {class_name}: A={a_count} ({a_ratio:.1%}), B={b_count} ({b_ratio:.1%})")

                    # Process Set A
                    for item in tqdm(set_a_items, desc="Processing Set A"):
                        idx = item['idx']
                        coordinates = masks[idx]

                        # Check coordinates validity - if invalid, mark as incorrect
                        if not check_validity(coordinates):
                            total_a += 1
                            continue  # Skip prediction, treating as incorrect

                        image = item['image']
                        true_label_label = item['name']  # This is now the animal class name
                        true_label_idx = label_to_index[true_label_label]

                        # Get prediction on masked image
                        masked_pred_idx = predict_with_coordinates(image, coordinates)

                        is_correct = masked_pred_idx == true_label_idx

                        total_a += 1
                        if is_correct:
                            correct_a += 1

                    # Process Set B
                    for item in tqdm(set_b_items, desc="Processing Set B"):
                        idx = item['idx']
                        coordinates = masks[idx]

                        # Check coordinates validity - if invalid, mark as incorrect
                        if not check_validity(coordinates):
                            total_b += 1
                            continue  # Skip prediction, treating as incorrect

                        image = item['image']
                        true_label_label = item['name']  # This is now the animal class name
                        true_label_idx = label_to_index[true_label_label]

                        # Get prediction on masked image
                        masked_pred_idx = predict_with_coordinates(image, coordinates)

                        is_correct = masked_pred_idx == true_label_idx

                        total_b += 1
                        if is_correct:
                            correct_b += 1

                    accuracy_a = correct_a / total_a if total_a > 0 else 0
                    accuracy_b = correct_b / total_b if total_b > 0 else 0

                    print(f"Set A (30%): {total_a} samples, accuracy: {accuracy_a:.4f}")
                    print(f"Set B (70%): {total_b} samples, accuracy: {accuracy_b:.4f}")

                    return accuracy_a, accuracy_b
            except Exception:
                return 0.0, 0.0

        def predict_without_mask(image):
            """Predict on original image without mask"""
            try:
                if image.mode != "RGB":
                    image = image.convert("RGB")
                image_processed = processor(images=image, return_tensors="pt").to(DEVICE)
                pixel_values = image_processed['pixel_values']
                outputs_full = model(pixel_values=pixel_values, **text_inputs)
                logits_full = outputs_full.logits_per_image
                predicted_index_full = logits_full.argmax(dim=-1).item()
                return predicted_index_full
            except Exception:
                return len(labels) - 1  # Return 'other' class

        # Calculate accuracies for A and B sets
        accuracy_a, accuracy_b = get_accuracy_by_sets(masks)

        score_a = accuracy_a
        score_b = accuracy_b

        # Ensure scores are within valid bounds [0.0, 1.0]
        if not (0.0 <= score_a <= 1.0) or not isinstance(score_a, (int, float)) or math.isnan(score_a) or math.isinf(score_a):
            score_a = 0.0
        if not (0.0 <= score_b <= 1.0) or not isinstance(score_b, (int, float)) or math.isnan(score_b) or math.isinf(score_b):
            score_b = 0.0

        print(f"Score A: {score_a}, Score B: {score_b}")

        #----------calculate the score on the leaderboard------------#
        score = {
            "public_a": score_a,
            "public_detail": {
                "Score": score_a,
                "Accuracy": accuracy_a,
            },
            "private_b": score_b,
            "private_detail": {
                "Score": score_b,
                "Accuracy": accuracy_b,
            },
        }

        ret_json = {
            "status": True,
            "score": score,
            "msg": "Success!",
        }

        # Save the score to JSON file
        with open(SCORE_OUTPUT_FILE, 'w') as f:
            json.dump(ret_json, f, indent=2)

        print(f"Score saved to {SCORE_OUTPUT_FILE}")

    except Exception as e:
        # Catch any unexpected errors during execution
        write_error_score("Unexpected error during evaluation.")
        exit(1)
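For a local dry run of this scorer, a minimal sketch under assumptions (masks.jsonl sits in the working directory, MODEL_PATH points at a CLIP checkpoint that exists on your machine, and METRIC_PATH is set to the Scoring folder that contains reference_dataset; the path below is hypothetical):

import os, subprocess, json

os.environ['METRIC_PATH'] = '/path/to/Solution/Scoring'  # hypothetical local path
subprocess.run(['python', 'metrics.py'], check=True)     # writes score.json next to the masks
print(json.load(open('score.json'))['score'])            # keys: public_a, public_detail, private_b, private_detail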
Individual-Contest/Pixel/Solution/Scoring/reference_dataset/dataset_dict.json
ADDED
|
@@ -0,0 +1 @@
{"splits": ["test"]}
Individual-Contest/Pixel/Solution/Scoring/reference_dataset/test/data-00000-of-00001.arrow
ADDED
|
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc613e30c79b865d0ce146cf080f8a0e8f8e62eb9134fe96e73e160a957f65cc
size 66694656
Individual-Contest/Pixel/Solution/Scoring/reference_dataset/test/dataset_info.json
ADDED
|
@@ -0,0 +1,49 @@
{
  "builder_name": "parquet",
  "citation": "",
  "config_name": "default",
  "dataset_name": "private_do_not_share_2025_6_pixel_test_reference",
  "dataset_size": 66689696,
  "description": "",
  "download_checksums": {
    "hf://datasets/fedyanin/PRIVATE_DO_NOT_SHARE_2025_6_pixel_test_reference@70a41c3eda0606d8e873d1e1fa8d19600870a600/data/test-00000-of-00001.parquet": {
      "num_bytes": 66551523,
      "checksum": null
    }
  },
  "download_size": 66551523,
  "features": {
    "image": {
      "_type": "Image"
    },
    "name": {
      "dtype": "string",
      "_type": "Value"
    },
    "meta": {
      "dtype": "string",
      "_type": "Value"
    },
    "idx": {
      "dtype": "string",
      "_type": "Value"
    }
  },
  "homepage": "",
  "license": "",
  "size_in_bytes": 133241219,
  "splits": {
    "test": {
      "name": "test",
      "num_bytes": 66689696,
      "num_examples": 698,
      "dataset_name": "private_do_not_share_2025_6_pixel_test_reference"
    }
  },
  "version": {
    "version_str": "0.0.0",
    "major": 0,
    "minor": 0,
    "patch": 0
  }
}
Individual-Contest/Pixel/Solution/Scoring/reference_dataset/test/state.json
ADDED
|
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "e1588b9c5d07a415",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "test"
}
Individual-Contest/Pixel/Solution/figs/IOAI-Logo.png
ADDED
|
|
Git LFS Details
|
Individual-Contest/Pixel/Solution/test_set/dataset_dict.json
ADDED
|
@@ -0,0 +1 @@
{"splits": ["test"]}
Individual-Contest/Pixel/Solution/test_set/test/data-00000-of-00001.arrow
ADDED
|
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4855d3cef6910dc41ac3f6621d4421534e7fb8330f2df71a04fd2761d0b4f9d0
size 66491192
Individual-Contest/Pixel/Solution/test_set/test/dataset_info.json
ADDED
|
@@ -0,0 +1,41 @@
{
  "builder_name": "parquet",
  "citation": "",
  "config_name": "default",
  "dataset_name": "2025_6_pixel_test",
  "dataset_size": 66487813,
  "description": "",
  "download_checksums": {
    "hf://datasets/InternationalOlympiadAI/2025_6_pixel_test@ba4c48588115e61c9cadb9fa8e1bd073c2837435/data/test-00000-of-00001.parquet": {
      "num_bytes": 66472867,
      "checksum": null
    }
  },
  "download_size": 66472867,
  "features": {
    "image": {
      "_type": "Image"
    },
    "idx": {
      "dtype": "string",
      "_type": "Value"
    }
  },
  "homepage": "",
  "license": "",
  "size_in_bytes": 132960680,
  "splits": {
    "test": {
      "name": "test",
      "num_bytes": 66487813,
      "num_examples": 698,
      "dataset_name": "2025_6_pixel_test"
    }
  },
  "version": {
    "version_str": "0.0.0",
    "major": 0,
    "minor": 0,
    "patch": 0
  }
}
Individual-Contest/Pixel/Solution/test_set/test/state.json
ADDED
|
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "3783220c01b30a7c",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "test"
}
Individual-Contest/Pixel/clip-vit-large-patch14/config.json
ADDED
|
@@ -0,0 +1,46 @@
{
  "architectures": [
    "CLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 768,
  "text_config": {
    "attention_dropout": 0.0,
    "dropout": 0.0,
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "layer_norm_eps": 1e-05,
    "max_position_embeddings": 77,
    "model_type": "clip_text_model",
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "projection_dim": 768,
    "torch_dtype": "float32",
    "vocab_size": 49408
  },
  "torch_dtype": "float32",
  "transformers_version": "4.51.3",
  "vision_config": {
    "attention_dropout": 0.0,
    "dropout": 0.0,
    "hidden_act": "quick_gelu",
    "hidden_size": 1024,
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "layer_norm_eps": 1e-05,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768,
    "torch_dtype": "float32"
  }
}
Individual-Contest/Pixel/clip-vit-large-patch14/merges.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Individual-Contest/Pixel/clip-vit-large-patch14/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:522913128f8e780c6c3755a2451fa2ed52363b562a3e3cf8e1598c050004e910
size 135
Individual-Contest/Pixel/clip-vit-large-patch14/preprocessor_config.json
ADDED
|
@@ -0,0 +1,28 @@
{
  "crop_size": {
    "height": 224,
    "width": 224
  },
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_processor_type": "CLIPImageProcessor",
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "processor_class": "CLIPProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "shortest_edge": 224
  }
}
Individual-Contest/Pixel/clip-vit-large-patch14/special_tokens_map.json
ADDED
|
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
Individual-Contest/Pixel/clip-vit-large-patch14/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Individual-Contest/Pixel/clip-vit-large-patch14/tokenizer_config.json
ADDED
|
@@ -0,0 +1,32 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "49406": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49407": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|startoftext|>",
  "clean_up_tokenization_spaces": false,
  "do_lower_case": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 77,
  "pad_token": "<|endoftext|>",
  "processor_class": "CLIPProcessor",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": "<|endoftext|>"
}
Individual-Contest/Pixel/clip-vit-large-patch14/vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Individual-Contest/Pixel/figs/IOAI-Logo.png
ADDED
|
|
Git LFS Details
|
Individual-Contest/Pixel/figs/Pixel Fig 1.png
ADDED
|
|
Git LFS Details
|
Individual-Contest/Pixel/figs/Pixel Fig 2.png
ADDED
|
|
Git LFS Details
|
Individual-Contest/Pixel/training_set/dataset_dict.json
ADDED
|
@@ -0,0 +1 @@
{"splits": ["train"]}
Individual-Contest/Pixel/training_set/train/data-00000-of-00001.arrow
ADDED
|
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc96339b675cda03d917ae9401f099e705a02fefc7b9bacd37ca4ac5558814ea
size 66293080
Individual-Contest/Pixel/training_set/train/dataset_info.json
ADDED
|
@@ -0,0 +1,45 @@
{
  "builder_name": "parquet",
  "citation": "",
  "config_name": "default",
  "dataset_name": "2025_6_pixel_train",
  "dataset_size": 66289069,
  "description": "",
  "download_checksums": {
    "hf://datasets/InternationalOlympiadAI/2025_6_pixel_train@e559471b7f6f163c671b62ac7fdafd800c378af9/data/train-00000-of-00001.parquet": {
      "num_bytes": 66270582,
      "checksum": null
    }
  },
  "download_size": 66270582,
  "features": {
    "name": {
      "dtype": "string",
      "_type": "Value"
    },
    "image": {
      "_type": "Image"
    },
    "idx": {
      "dtype": "string",
      "_type": "Value"
    }
  },
  "homepage": "",
  "license": "",
  "size_in_bytes": 132559651,
  "splits": {
    "train": {
      "name": "train",
      "num_bytes": 66289069,
      "num_examples": 700,
      "dataset_name": "2025_6_pixel_train"
    }
  },
  "version": {
    "version_str": "0.0.0",
    "major": 0,
    "minor": 0,
    "patch": 0
  }
}
Individual-Contest/Pixel/training_set/train/state.json
ADDED
|
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "55cd60415fe29420",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "train"
}