Add files using upload-large-folder tool
Browse files- README.md +70 -5
- source_code/sam3/examples/sam3_image_batched_inference.ipynb +0 -0
- source_code/sam3/sam3/eval/hota_eval_toolkit/__init__.py +1 -0
- source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/_timing.py +68 -0
- source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/datasets/tao_ow.py +891 -0
- source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/datasets/youtube_vis.py +524 -0
- source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/eval.py +395 -0
- source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/metrics/hota.py +291 -0
- source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/utils.py +195 -0
- source_code/sam3/sam3/eval/teta_eval_toolkit/datasets/__init__.py +5 -0
- source_code/sam3/sam3/eval/teta_eval_toolkit/metrics/__init__.py +4 -0
- source_code/sam3/sam3/model/utils/misc.py +77 -0
- source_code/sam3/sam3/perflib/triton/nms.py +124 -0
- source_code/sam3/sam3/sam/common.py +39 -0
- source_code/sam3/sam3/train/__init__.py +1 -0
- source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_crowded.yaml +66 -0
- source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_food.yaml +66 -0
- source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_sports.yaml +66 -0
- source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_wiki_common.yaml +66 -0
- source_code/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_eval.yaml +539 -0
- source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test.yaml +174 -0
- source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test_noheur.yaml +174 -0
- source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val.yaml +174 -0
- source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test.yaml +174 -0
- source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test.yaml +174 -0
- source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val.yaml +174 -0
- source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val_noheur.yaml +174 -0
- source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_droid.yaml +64 -0
- source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_ego4d.yaml +64 -0
- source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_fathomnet.yaml +64 -0
- source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_geode.yaml +64 -0
- source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_yt1b.yaml +64 -0
- source_code/sam3/sam3/train/data/__init__.py +1 -0
- source_code/sam3/sam3/train/data/sam3_image_dataset.py +528 -0
- source_code/sam3/sam3/train/loss/mask_sampling.py +113 -0
- source_code/sam3/sam3/train/optim/__init__.py +1 -0
- source_code/sam3/sam3/train/optim/optimizer.py +498 -0
- source_code/sam3/sam3/train/transforms/basic_for_api.py +1396 -0
- source_code/sam3/sam3/train/transforms/point_sampling.py +345 -0
- source_code/sam3/sam3/train/utils/distributed.py +585 -0
- source_code/sam3/scripts/eval/gold/README.md +299 -0
- source_code/sam3/scripts/eval/gold/eval_sam3.py +104 -0
- source_code/sam3/scripts/eval/silver/README.md +405 -0
- source_code/sam3/scripts/eval/silver/download_preprocess_nga.py +140 -0
- source_code/sam3/scripts/eval/silver/extract_frames.py +99 -0
- source_code/sam3/scripts/eval/silver/preprocess_silver_geode_bdd100k_food_rec.py +70 -0
- source_code/sam3/scripts/eval/silver/utils.py +148 -0
- source_code/sam3/scripts/eval/standalone_cgf1.py +48 -0
- source_code/sam3/scripts/eval/veval/README.md +248 -0
- source_code/sam3/scripts/eval/veval/saco_yt1b_annot_update.py +136 -0
README.md
CHANGED
|
@@ -1,13 +1,21 @@
|
|
| 1 |
-
# SegMamba & GliomaSAM3-MoE
|
| 2 |
|
| 3 |
-
This repository contains
|
| 4 |
|
| 5 |
## Repository Structure
|
| 6 |
|
| 7 |
```
|
| 8 |
.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
├── gliomasam3_moe/
|
| 10 |
-
│ ├── checkpoints/ # GliomaSAM3-MoE
|
| 11 |
│ │ ├── ckpt_step2000.pt
|
| 12 |
│ │ ├── ckpt_step2600.pt
|
| 13 |
│ │ └── ckpt_step3000.pt # Best checkpoint
|
|
@@ -23,7 +31,7 @@ This repository contains pre-trained model weights, evaluation results, and visu
|
|
| 23 |
│ └── ...
|
| 24 |
│
|
| 25 |
├── segmamba/
|
| 26 |
-
│ ├── checkpoints/ # SegMamba
|
| 27 |
│ │ ├── tmp_model_ep599_0.8295.pt
|
| 28 |
│ │ └── tmp_model_ep799_0.8498.pt # Best checkpoint (Dice=0.8498)
|
| 29 |
│ └── prediction_results/
|
|
@@ -58,6 +66,53 @@ This repository contains pre-trained model weights, evaluation results, and visu
|
|
| 58 |
- Mean Dice: 0.8498
|
| 59 |
- Trained for 800 epochs on BraTS 2023
|
| 60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
## Usage
|
| 62 |
|
| 63 |
### Loading GliomaSAM3-MoE
|
|
@@ -86,7 +141,17 @@ model.load_state_dict(ckpt["model"])
|
|
| 86 |
|
| 87 |
## Data
|
| 88 |
|
| 89 |
-
Models were trained and evaluated on BraTS 2023 GLI Challenge dataset
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
|
| 91 |
## Citation
|
| 92 |
|
|
|
|
| 1 |
+
# SegMamba & GliomaSAM3-MoE: Complete Reproduction Package
|
| 2 |
|
| 3 |
+
This repository contains **everything needed to reproduce** our brain tumor segmentation experiments on BraTS 2023 dataset, including source code, pre-trained weights, and evaluation results.
|
| 4 |
|
| 5 |
## Repository Structure
|
| 6 |
|
| 7 |
```
|
| 8 |
.
|
| 9 |
+
├── source_code/
|
| 10 |
+
│ ├── gliomasam3_moe/ # GliomaSAM3-MoE source code
|
| 11 |
+
│ ├── SegMamba/ # SegMamba source code (with mamba/monai)
|
| 12 |
+
│ └── sam3/ # SAM3 dependency module
|
| 13 |
+
│
|
| 14 |
+
├── pretrained_weights/
|
| 15 |
+
│ └── sam3.pt # SAM3 pretrained weights (3.3GB)
|
| 16 |
+
│
|
| 17 |
├── gliomasam3_moe/
|
| 18 |
+
│ ├── checkpoints/ # GliomaSAM3-MoE trained weights
|
| 19 |
│ │ ├── ckpt_step2000.pt
|
| 20 |
│ │ ├── ckpt_step2600.pt
|
| 21 |
│ │ └── ckpt_step3000.pt # Best checkpoint
|
|
|
|
| 31 |
│ └── ...
|
| 32 |
│
|
| 33 |
├── segmamba/
|
| 34 |
+
│ ├── checkpoints/ # SegMamba trained weights
|
| 35 |
│ │ ├── tmp_model_ep599_0.8295.pt
|
| 36 |
│ │ └── tmp_model_ep799_0.8498.pt # Best checkpoint (Dice=0.8498)
|
| 37 |
│ └── prediction_results/
|
|
|
|
| 66 |
- Mean Dice: 0.8498
|
| 67 |
- Trained for 800 epochs on BraTS 2023
|
| 68 |
|
| 69 |
+
## Quick Start: Reproduction
|
| 70 |
+
|
| 71 |
+
### 1. Download this repository
|
| 72 |
+
|
| 73 |
+
```bash
|
| 74 |
+
# Clone the dataset
|
| 75 |
+
git clone https://huggingface.co/datasets/ChipYTY/segmamba
|
| 76 |
+
cd segmamba
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
### 2. Prepare BraTS 2023 Data
|
| 80 |
+
|
| 81 |
+
Download BraTS 2023 GLI Challenge data from [Synapse](https://www.synapse.org/#!Synapse:syn51514105) and preprocess:
|
| 82 |
+
|
| 83 |
+
```bash
|
| 84 |
+
cd source_code/SegMamba
|
| 85 |
+
python 2_preprocessing_mri.py --input_dir /path/to/BraTS2023 --output_dir /path/to/processed
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
### 3. Run GliomaSAM3-MoE Inference
|
| 89 |
+
|
| 90 |
+
```bash
|
| 91 |
+
cd source_code/gliomasam3_moe
|
| 92 |
+
|
| 93 |
+
# Set SAM3 path
|
| 94 |
+
export PYTHONPATH=/path/to/source_code/sam3:$PYTHONPATH
|
| 95 |
+
export SAM3_CKPT=/path/to/pretrained_weights/sam3.pt
|
| 96 |
+
|
| 97 |
+
# Run inference
|
| 98 |
+
python infer.py \
|
| 99 |
+
--config configs/train.yaml \
|
| 100 |
+
--checkpoint /path/to/gliomasam3_moe/checkpoints/ckpt_step3000.pt \
|
| 101 |
+
--data_dir /path/to/processed \
|
| 102 |
+
--output_dir ./predictions
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
### 4. Run SegMamba Inference
|
| 106 |
+
|
| 107 |
+
```bash
|
| 108 |
+
cd source_code/SegMamba
|
| 109 |
+
|
| 110 |
+
python 4_predict.py \
|
| 111 |
+
--checkpoint /path/to/segmamba/checkpoints/tmp_model_ep799_0.8498.pt \
|
| 112 |
+
--data_dir /path/to/processed \
|
| 113 |
+
--output_dir ./predictions
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
## Usage
|
| 117 |
|
| 118 |
### Loading GliomaSAM3-MoE
|
|
|
|
| 141 |
|
| 142 |
## Data
|
| 143 |
|
| 144 |
+
Models were trained and evaluated on BraTS 2023 GLI Challenge dataset.
|
| 145 |
+
|
| 146 |
+
- **Download**: [Synapse BraTS 2023](https://www.synapse.org/#!Synapse:syn51514105)
|
| 147 |
+
- **Preprocessing**: Use `source_code/SegMamba/2_preprocessing_mri.py`
|
| 148 |
+
|
| 149 |
+
## Requirements
|
| 150 |
+
|
| 151 |
+
- Python 3.10+
|
| 152 |
+
- PyTorch 2.0+
|
| 153 |
+
- CUDA 11.8+ (for SegMamba's Mamba CUDA kernels)
|
| 154 |
+
- See `source_code/gliomasam3_moe/requirements.txt` for full list
|
| 155 |
|
| 156 |
## Citation
|
| 157 |
|
source_code/sam3/examples/sam3_image_batched_inference.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
source_code/sam3/sam3/eval/hota_eval_toolkit/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/_timing.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# flake8: noqa

import inspect
from functools import wraps
from time import perf_counter

# Global switches: timing is off by default; callers (the evaluator) flip these.
DO_TIMING = False
DISPLAY_LESS_PROGRESS = False
# Accumulated wall-clock seconds per qualified function name.
timer_dict = {}
# Running count of timed top-level (non-method) calls, used as a print prefix.
counter = 0


def time(f):
    """Decorator that optionally times *f* and prints/accumulates the result.

    When the module-level ``DO_TIMING`` flag is False (the default) the
    decorated function runs with zero overhead beyond one extra call frame.
    When it is True, each call's duration is accumulated into ``timer_dict``
    keyed by ``Class.method`` or the bare function name, and a per-call line
    is printed; a final summary is printed when ``Evaluator.evaluate``
    finishes. The wrapped function's return value is always passed through
    unchanged.
    """

    @wraps(f)
    def wrap(*args, **kw):
        if not DO_TIMING:
            # Timing disabled (or running in parallel): call through directly.
            return f(*args, **kw)

        # Run the function under a monotonic high-resolution timer.
        ts = perf_counter()
        result = f(*args, **kw)
        te = perf_counter()
        tt = te - ts

        # Build a qualified name for the call. BUGFIX: guard against
        # zero-argument functions, for which arg_names is empty and the
        # original `arg_names[0]` raised IndexError.
        arg_names = inspect.getfullargspec(f)[0]
        is_method = bool(arg_names) and arg_names[0] == "self"
        if is_method and DISPLAY_LESS_PROGRESS:
            return result
        if is_method:
            method_name = type(args[0]).__name__ + "." + f.__name__
        else:
            method_name = f.__name__

        # Record accumulative time in each function for analysis.
        if method_name in timer_dict.keys():
            timer_dict[method_name] += tt
        else:
            timer_dict[method_name] = tt

        if method_name == "Evaluator.evaluate":
            # Top-level evaluation finished: display the timing summary.
            print("")
            print("Timing analysis:")
            for key, value in timer_dict.items():
                print("%-70s %2.4f sec" % (key, value))
        else:
            # Collect values of special arguments of interest for display.
            # BUGFIX: the original `arg_vals.append(args[i])` raised
            # IndexError when such an argument was passed by keyword, and
            # the later join raised TypeError for non-str values; guard the
            # index and coerce to str.
            arg_titles = ["tracker", "seq", "cls"]
            arg_vals = []
            for i, a in enumerate(arg_names):
                if a in arg_titles and i < len(args):
                    arg_vals.append(str(args[i]))
            arg_text = "(" + ", ".join(arg_vals) + ")"

            # Display methods and functions with different indentation.
            if is_method:
                print("%-74s %2.4f sec" % (" " * 4 + method_name + arg_text, tt))
            elif arg_names and arg_names[0] == "test":
                pass
            else:
                global counter
                counter += 1
                print("%i %-70s %2.4f sec" % (counter, method_name + arg_text, tt))

        return result

    return wrap
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/datasets/tao_ow.py
ADDED
|
@@ -0,0 +1,891 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
import itertools
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from scipy.optimize import linear_sum_assignment
|
| 10 |
+
|
| 11 |
+
from .. import _timing, utils
|
| 12 |
+
from ..utils import TrackEvalException
|
| 13 |
+
from ._base_dataset import _BaseDataset
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TAO_OW(_BaseDataset):
|
| 17 |
+
"""Dataset class for TAO tracking"""
|
| 18 |
+
|
| 19 |
+
@staticmethod
|
| 20 |
+
def get_default_dataset_config():
|
| 21 |
+
"""Default class config values"""
|
| 22 |
+
code_path = utils.get_code_path()
|
| 23 |
+
default_config = {
|
| 24 |
+
"GT_FOLDER": os.path.join(
|
| 25 |
+
code_path, "data/gt/tao/tao_training"
|
| 26 |
+
), # Location of GT data
|
| 27 |
+
"TRACKERS_FOLDER": os.path.join(
|
| 28 |
+
code_path, "data/trackers/tao/tao_training"
|
| 29 |
+
), # Trackers location
|
| 30 |
+
"OUTPUT_FOLDER": None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
|
| 31 |
+
"TRACKERS_TO_EVAL": None, # Filenames of trackers to eval (if None, all in folder)
|
| 32 |
+
"CLASSES_TO_EVAL": None, # Classes to eval (if None, all classes)
|
| 33 |
+
"SPLIT_TO_EVAL": "training", # Valid: 'training', 'val'
|
| 34 |
+
"PRINT_CONFIG": True, # Whether to print current config
|
| 35 |
+
"TRACKER_SUB_FOLDER": "data", # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
|
| 36 |
+
"OUTPUT_SUB_FOLDER": "", # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
|
| 37 |
+
"TRACKER_DISPLAY_NAMES": None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
|
| 38 |
+
"MAX_DETECTIONS": 300, # Number of maximal allowed detections per image (0 for unlimited)
|
| 39 |
+
"SUBSET": "all",
|
| 40 |
+
}
|
| 41 |
+
return default_config
|
| 42 |
+
|
| 43 |
+
def __init__(self, config=None):
|
| 44 |
+
"""Initialise dataset, checking that all required files are present"""
|
| 45 |
+
super().__init__()
|
| 46 |
+
# Fill non-given config values with defaults
|
| 47 |
+
self.config = utils.init_config(
|
| 48 |
+
config, self.get_default_dataset_config(), self.get_name()
|
| 49 |
+
)
|
| 50 |
+
self.gt_fol = self.config["GT_FOLDER"]
|
| 51 |
+
self.tracker_fol = self.config["TRACKERS_FOLDER"]
|
| 52 |
+
self.should_classes_combine = True
|
| 53 |
+
self.use_super_categories = False
|
| 54 |
+
|
| 55 |
+
self.tracker_sub_fol = self.config["TRACKER_SUB_FOLDER"]
|
| 56 |
+
self.output_fol = self.config["OUTPUT_FOLDER"]
|
| 57 |
+
if self.output_fol is None:
|
| 58 |
+
self.output_fol = self.tracker_fol
|
| 59 |
+
self.output_sub_fol = self.config["OUTPUT_SUB_FOLDER"]
|
| 60 |
+
|
| 61 |
+
gt_dir_files = [
|
| 62 |
+
file for file in os.listdir(self.gt_fol) if file.endswith(".json")
|
| 63 |
+
]
|
| 64 |
+
if len(gt_dir_files) != 1:
|
| 65 |
+
raise TrackEvalException(
|
| 66 |
+
self.gt_fol + " does not contain exactly one json file."
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
|
| 70 |
+
self.gt_data = json.load(f)
|
| 71 |
+
|
| 72 |
+
self.subset = self.config["SUBSET"]
|
| 73 |
+
if self.subset != "all":
|
| 74 |
+
# Split GT data into `known`, `unknown` or `distractor`
|
| 75 |
+
self._split_known_unknown_distractor()
|
| 76 |
+
self.gt_data = self._filter_gt_data(self.gt_data)
|
| 77 |
+
|
| 78 |
+
# merge categories marked with a merged tag in TAO dataset
|
| 79 |
+
self._merge_categories(self.gt_data["annotations"] + self.gt_data["tracks"])
|
| 80 |
+
|
| 81 |
+
# Get sequences to eval and sequence information
|
| 82 |
+
self.seq_list = [
|
| 83 |
+
vid["name"].replace("/", "-") for vid in self.gt_data["videos"]
|
| 84 |
+
]
|
| 85 |
+
self.seq_name_to_seq_id = {
|
| 86 |
+
vid["name"].replace("/", "-"): vid["id"] for vid in self.gt_data["videos"]
|
| 87 |
+
}
|
| 88 |
+
# compute mappings from videos to annotation data
|
| 89 |
+
self.videos_to_gt_tracks, self.videos_to_gt_images = self._compute_vid_mappings(
|
| 90 |
+
self.gt_data["annotations"]
|
| 91 |
+
)
|
| 92 |
+
# compute sequence lengths
|
| 93 |
+
self.seq_lengths = {vid["id"]: 0 for vid in self.gt_data["videos"]}
|
| 94 |
+
for img in self.gt_data["images"]:
|
| 95 |
+
self.seq_lengths[img["video_id"]] += 1
|
| 96 |
+
self.seq_to_images_to_timestep = self._compute_image_to_timestep_mappings()
|
| 97 |
+
self.seq_to_classes = {
|
| 98 |
+
vid["id"]: {
|
| 99 |
+
"pos_cat_ids": list(
|
| 100 |
+
{
|
| 101 |
+
track["category_id"]
|
| 102 |
+
for track in self.videos_to_gt_tracks[vid["id"]]
|
| 103 |
+
}
|
| 104 |
+
),
|
| 105 |
+
"neg_cat_ids": vid["neg_category_ids"],
|
| 106 |
+
"not_exhaustively_labeled_cat_ids": vid["not_exhaustive_category_ids"],
|
| 107 |
+
}
|
| 108 |
+
for vid in self.gt_data["videos"]
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
# Get classes to eval
|
| 112 |
+
considered_vid_ids = [self.seq_name_to_seq_id[vid] for vid in self.seq_list]
|
| 113 |
+
seen_cats = set(
|
| 114 |
+
[
|
| 115 |
+
cat_id
|
| 116 |
+
for vid_id in considered_vid_ids
|
| 117 |
+
for cat_id in self.seq_to_classes[vid_id]["pos_cat_ids"]
|
| 118 |
+
]
|
| 119 |
+
)
|
| 120 |
+
# only classes with ground truth are evaluated in TAO
|
| 121 |
+
self.valid_classes = [
|
| 122 |
+
cls["name"] for cls in self.gt_data["categories"] if cls["id"] in seen_cats
|
| 123 |
+
]
|
| 124 |
+
# cls_name_to_cls_id_map = {cls['name']: cls['id'] for cls in self.gt_data['categories']}
|
| 125 |
+
|
| 126 |
+
if self.config["CLASSES_TO_EVAL"]:
|
| 127 |
+
# self.class_list = [cls.lower() if cls.lower() in self.valid_classes else None
|
| 128 |
+
# for cls in self.config['CLASSES_TO_EVAL']]
|
| 129 |
+
self.class_list = ["object"] # class-agnostic
|
| 130 |
+
if not all(self.class_list):
|
| 131 |
+
raise TrackEvalException(
|
| 132 |
+
"Attempted to evaluate an invalid class. Only classes "
|
| 133 |
+
+ ", ".join(self.valid_classes)
|
| 134 |
+
+ " are valid (classes present in ground truth data)."
|
| 135 |
+
)
|
| 136 |
+
else:
|
| 137 |
+
# self.class_list = [cls for cls in self.valid_classes]
|
| 138 |
+
self.class_list = ["object"] # class-agnostic
|
| 139 |
+
# self.class_name_to_class_id = {k: v for k, v in cls_name_to_cls_id_map.items() if k in self.class_list}
|
| 140 |
+
self.class_name_to_class_id = {"object": 1} # class-agnostic
|
| 141 |
+
|
| 142 |
+
# Get trackers to eval
|
| 143 |
+
if self.config["TRACKERS_TO_EVAL"] is None:
|
| 144 |
+
self.tracker_list = os.listdir(self.tracker_fol)
|
| 145 |
+
else:
|
| 146 |
+
self.tracker_list = self.config["TRACKERS_TO_EVAL"]
|
| 147 |
+
|
| 148 |
+
if self.config["TRACKER_DISPLAY_NAMES"] is None:
|
| 149 |
+
self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
|
| 150 |
+
elif (self.config["TRACKERS_TO_EVAL"] is not None) and (
|
| 151 |
+
len(self.config["TRACKER_DISPLAY_NAMES"]) == len(self.tracker_list)
|
| 152 |
+
):
|
| 153 |
+
self.tracker_to_disp = dict(
|
| 154 |
+
zip(self.tracker_list, self.config["TRACKER_DISPLAY_NAMES"])
|
| 155 |
+
)
|
| 156 |
+
else:
|
| 157 |
+
raise TrackEvalException(
|
| 158 |
+
"List of tracker files and tracker display names do not match."
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
self.tracker_data = {tracker: dict() for tracker in self.tracker_list}
|
| 162 |
+
|
| 163 |
+
for tracker in self.tracker_list:
|
| 164 |
+
tr_dir_files = [
|
| 165 |
+
file
|
| 166 |
+
for file in os.listdir(
|
| 167 |
+
os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
|
| 168 |
+
)
|
| 169 |
+
if file.endswith(".json")
|
| 170 |
+
]
|
| 171 |
+
if len(tr_dir_files) != 1:
|
| 172 |
+
raise TrackEvalException(
|
| 173 |
+
os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
|
| 174 |
+
+ " does not contain exactly one json file."
|
| 175 |
+
)
|
| 176 |
+
with open(
|
| 177 |
+
os.path.join(
|
| 178 |
+
self.tracker_fol, tracker, self.tracker_sub_fol, tr_dir_files[0]
|
| 179 |
+
)
|
| 180 |
+
) as f:
|
| 181 |
+
curr_data = json.load(f)
|
| 182 |
+
|
| 183 |
+
# limit detections if MAX_DETECTIONS > 0
|
| 184 |
+
if self.config["MAX_DETECTIONS"]:
|
| 185 |
+
curr_data = self._limit_dets_per_image(curr_data)
|
| 186 |
+
|
| 187 |
+
# fill missing video ids
|
| 188 |
+
self._fill_video_ids_inplace(curr_data)
|
| 189 |
+
|
| 190 |
+
# make track ids unique over whole evaluation set
|
| 191 |
+
self._make_track_ids_unique(curr_data)
|
| 192 |
+
|
| 193 |
+
# merge categories marked with a merged tag in TAO dataset
|
| 194 |
+
self._merge_categories(curr_data)
|
| 195 |
+
|
| 196 |
+
# get tracker sequence information
|
| 197 |
+
curr_videos_to_tracker_tracks, curr_videos_to_tracker_images = (
|
| 198 |
+
self._compute_vid_mappings(curr_data)
|
| 199 |
+
)
|
| 200 |
+
self.tracker_data[tracker]["vids_to_tracks"] = curr_videos_to_tracker_tracks
|
| 201 |
+
self.tracker_data[tracker]["vids_to_images"] = curr_videos_to_tracker_images
|
| 202 |
+
|
| 203 |
+
def get_display_name(self, tracker):
|
| 204 |
+
return self.tracker_to_disp[tracker]
|
| 205 |
+
|
| 206 |
+
def _load_raw_file(self, tracker, seq, is_gt):
|
| 207 |
+
"""Load a file (gt or tracker) in the TAO format
|
| 208 |
+
|
| 209 |
+
If is_gt, this returns a dict which contains the fields:
|
| 210 |
+
[gt_ids, gt_classes] : list (for each timestep) of 1D NDArrays (for each det).
|
| 211 |
+
[gt_dets]: list (for each timestep) of lists of detections.
|
| 212 |
+
[classes_to_gt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
|
| 213 |
+
keys and corresponding segmentations as values) for each track
|
| 214 |
+
[classes_to_gt_track_ids, classes_to_gt_track_areas, classes_to_gt_track_lengths]: dictionary with class values
|
| 215 |
+
as keys and lists (for each track) as values
|
| 216 |
+
|
| 217 |
+
if not is_gt, this returns a dict which contains the fields:
|
| 218 |
+
[tracker_ids, tracker_classes, tracker_confidences] : list (for each timestep) of 1D NDArrays (for each det).
|
| 219 |
+
[tracker_dets]: list (for each timestep) of lists of detections.
|
| 220 |
+
[classes_to_dt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
|
| 221 |
+
keys and corresponding segmentations as values) for each track
|
| 222 |
+
[classes_to_dt_track_ids, classes_to_dt_track_areas, classes_to_dt_track_lengths]: dictionary with class values
|
| 223 |
+
as keys and lists as values
|
| 224 |
+
[classes_to_dt_track_scores]: dictionary with class values as keys and 1D numpy arrays as values
|
| 225 |
+
"""
|
| 226 |
+
seq_id = self.seq_name_to_seq_id[seq]
|
| 227 |
+
# File location
|
| 228 |
+
if is_gt:
|
| 229 |
+
imgs = self.videos_to_gt_images[seq_id]
|
| 230 |
+
else:
|
| 231 |
+
imgs = self.tracker_data[tracker]["vids_to_images"][seq_id]
|
| 232 |
+
|
| 233 |
+
# Convert data to required format
|
| 234 |
+
num_timesteps = self.seq_lengths[seq_id]
|
| 235 |
+
img_to_timestep = self.seq_to_images_to_timestep[seq_id]
|
| 236 |
+
data_keys = ["ids", "classes", "dets"]
|
| 237 |
+
if not is_gt:
|
| 238 |
+
data_keys += ["tracker_confidences"]
|
| 239 |
+
raw_data = {key: [None] * num_timesteps for key in data_keys}
|
| 240 |
+
for img in imgs:
|
| 241 |
+
# some tracker data contains images without any ground truth information, these are ignored
|
| 242 |
+
try:
|
| 243 |
+
t = img_to_timestep[img["id"]]
|
| 244 |
+
except KeyError:
|
| 245 |
+
continue
|
| 246 |
+
annotations = img["annotations"]
|
| 247 |
+
raw_data["dets"][t] = np.atleast_2d(
|
| 248 |
+
[ann["bbox"] for ann in annotations]
|
| 249 |
+
).astype(float)
|
| 250 |
+
raw_data["ids"][t] = np.atleast_1d(
|
| 251 |
+
[ann["track_id"] for ann in annotations]
|
| 252 |
+
).astype(int)
|
| 253 |
+
raw_data["classes"][t] = np.atleast_1d([1 for _ in annotations]).astype(
|
| 254 |
+
int
|
| 255 |
+
) # class-agnostic
|
| 256 |
+
if not is_gt:
|
| 257 |
+
raw_data["tracker_confidences"][t] = np.atleast_1d(
|
| 258 |
+
[ann["score"] for ann in annotations]
|
| 259 |
+
).astype(float)
|
| 260 |
+
|
| 261 |
+
for t, d in enumerate(raw_data["dets"]):
|
| 262 |
+
if d is None:
|
| 263 |
+
raw_data["dets"][t] = np.empty((0, 4)).astype(float)
|
| 264 |
+
raw_data["ids"][t] = np.empty(0).astype(int)
|
| 265 |
+
raw_data["classes"][t] = np.empty(0).astype(int)
|
| 266 |
+
if not is_gt:
|
| 267 |
+
raw_data["tracker_confidences"][t] = np.empty(0)
|
| 268 |
+
|
| 269 |
+
if is_gt:
|
| 270 |
+
key_map = {"ids": "gt_ids", "classes": "gt_classes", "dets": "gt_dets"}
|
| 271 |
+
else:
|
| 272 |
+
key_map = {
|
| 273 |
+
"ids": "tracker_ids",
|
| 274 |
+
"classes": "tracker_classes",
|
| 275 |
+
"dets": "tracker_dets",
|
| 276 |
+
}
|
| 277 |
+
for k, v in key_map.items():
|
| 278 |
+
raw_data[v] = raw_data.pop(k)
|
| 279 |
+
|
| 280 |
+
# all_classes = [self.class_name_to_class_id[cls] for cls in self.class_list]
|
| 281 |
+
all_classes = [1] # class-agnostic
|
| 282 |
+
|
| 283 |
+
if is_gt:
|
| 284 |
+
classes_to_consider = all_classes
|
| 285 |
+
all_tracks = self.videos_to_gt_tracks[seq_id]
|
| 286 |
+
else:
|
| 287 |
+
# classes_to_consider = self.seq_to_classes[seq_id]['pos_cat_ids'] \
|
| 288 |
+
# + self.seq_to_classes[seq_id]['neg_cat_ids']
|
| 289 |
+
classes_to_consider = all_classes # class-agnostic
|
| 290 |
+
all_tracks = self.tracker_data[tracker]["vids_to_tracks"][seq_id]
|
| 291 |
+
|
| 292 |
+
# classes_to_tracks = {cls: [track for track in all_tracks if track['category_id'] == cls]
|
| 293 |
+
# if cls in classes_to_consider else [] for cls in all_classes}
|
| 294 |
+
classes_to_tracks = {
|
| 295 |
+
cls: [track for track in all_tracks] if cls in classes_to_consider else []
|
| 296 |
+
for cls in all_classes
|
| 297 |
+
} # class-agnostic
|
| 298 |
+
|
| 299 |
+
# mapping from classes to track information
|
| 300 |
+
raw_data["classes_to_tracks"] = {
|
| 301 |
+
cls: [
|
| 302 |
+
{
|
| 303 |
+
det["image_id"]: np.atleast_1d(det["bbox"])
|
| 304 |
+
for det in track["annotations"]
|
| 305 |
+
}
|
| 306 |
+
for track in tracks
|
| 307 |
+
]
|
| 308 |
+
for cls, tracks in classes_to_tracks.items()
|
| 309 |
+
}
|
| 310 |
+
raw_data["classes_to_track_ids"] = {
|
| 311 |
+
cls: [track["id"] for track in tracks]
|
| 312 |
+
for cls, tracks in classes_to_tracks.items()
|
| 313 |
+
}
|
| 314 |
+
raw_data["classes_to_track_areas"] = {
|
| 315 |
+
cls: [track["area"] for track in tracks]
|
| 316 |
+
for cls, tracks in classes_to_tracks.items()
|
| 317 |
+
}
|
| 318 |
+
raw_data["classes_to_track_lengths"] = {
|
| 319 |
+
cls: [len(track["annotations"]) for track in tracks]
|
| 320 |
+
for cls, tracks in classes_to_tracks.items()
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
if not is_gt:
|
| 324 |
+
raw_data["classes_to_dt_track_scores"] = {
|
| 325 |
+
cls: np.array(
|
| 326 |
+
[
|
| 327 |
+
np.mean([float(x["score"]) for x in track["annotations"]])
|
| 328 |
+
for track in tracks
|
| 329 |
+
]
|
| 330 |
+
)
|
| 331 |
+
for cls, tracks in classes_to_tracks.items()
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
if is_gt:
|
| 335 |
+
key_map = {
|
| 336 |
+
"classes_to_tracks": "classes_to_gt_tracks",
|
| 337 |
+
"classes_to_track_ids": "classes_to_gt_track_ids",
|
| 338 |
+
"classes_to_track_lengths": "classes_to_gt_track_lengths",
|
| 339 |
+
"classes_to_track_areas": "classes_to_gt_track_areas",
|
| 340 |
+
}
|
| 341 |
+
else:
|
| 342 |
+
key_map = {
|
| 343 |
+
"classes_to_tracks": "classes_to_dt_tracks",
|
| 344 |
+
"classes_to_track_ids": "classes_to_dt_track_ids",
|
| 345 |
+
"classes_to_track_lengths": "classes_to_dt_track_lengths",
|
| 346 |
+
"classes_to_track_areas": "classes_to_dt_track_areas",
|
| 347 |
+
}
|
| 348 |
+
for k, v in key_map.items():
|
| 349 |
+
raw_data[v] = raw_data.pop(k)
|
| 350 |
+
|
| 351 |
+
raw_data["num_timesteps"] = num_timesteps
|
| 352 |
+
raw_data["neg_cat_ids"] = self.seq_to_classes[seq_id]["neg_cat_ids"]
|
| 353 |
+
raw_data["not_exhaustively_labeled_cls"] = self.seq_to_classes[seq_id][
|
| 354 |
+
"not_exhaustively_labeled_cat_ids"
|
| 355 |
+
]
|
| 356 |
+
raw_data["seq"] = seq
|
| 357 |
+
return raw_data
|
| 358 |
+
|
| 359 |
+
@_timing.time
def get_preprocessed_seq_data(self, raw_data, cls):
    """Preprocess data for a single sequence for a single class ready for evaluation.
    Inputs:
        - raw_data is a dict containing the data for the sequence already read in by get_raw_seq_data().
        - cls is the class to be evaluated.
    Outputs:
        - data is a dict containing all of the information that metrics need to perform evaluation.
            It contains the following fields:
                [num_timesteps, num_gt_ids, num_tracker_ids, num_gt_dets, num_tracker_dets] : integers.
                [gt_ids, tracker_ids, tracker_confidences]: list (for each timestep) of 1D NDArrays (for each det).
                [gt_dets, tracker_dets]: list (for each timestep) of lists of detections.
                [similarity_scores]: list (for each timestep) of 2D NDArrays.
    Notes:
        General preprocessing (preproc) occurs in 4 steps. Some datasets may not use all of these steps.
            1) Extract only detections relevant for the class to be evaluated (including distractor detections).
            2) Match gt dets and tracker dets. Remove tracker dets that are matched to a gt det that is of a
                distractor class, or otherwise marked as to be removed.
            3) Remove unmatched tracker dets if they fall within a crowd ignore region or don't meet a certain
                other criteria (e.g. are too small).
            4) Remove gt dets that were only useful for preprocessing and not for actual evaluation.
        After the above preprocessing steps, this function also calculates the number of gt and tracker detections
            and unique track ids. It also relabels gt and tracker ids to be contiguous and checks that ids are
            unique within each timestep.
    TAO:
        In TAO, the 4 preproc steps are as follow:
            1) All classes present in the ground truth data are evaluated separately.
            2) No matched tracker detections are removed.
            3) Unmatched tracker detections are removed if there is not ground truth data and the class does not
                belong to the categories marked as negative for this sequence. Additionally, unmatched tracker
                detections for classes which are marked as not exhaustively labeled are removed.
            4) No gt detections are removed.
        Further, for TrackMAP computation track representations for the given class are accessed from a dictionary
        and the tracks from the tracker data are sorted according to the tracker confidence.
    """
    cls_id = self.class_name_to_class_id[cls]
    # Per-sequence flags controlling how unmatched tracker dets are treated below.
    is_not_exhaustively_labeled = cls_id in raw_data["not_exhaustively_labeled_cls"]
    is_neg_category = cls_id in raw_data["neg_cat_ids"]

    data_keys = [
        "gt_ids",
        "tracker_ids",
        "gt_dets",
        "tracker_dets",
        "tracker_confidences",
        "similarity_scores",
    ]
    # One slot per timestep for each key; filled in the loop below.
    data = {key: [None] * raw_data["num_timesteps"] for key in data_keys}
    unique_gt_ids = []
    unique_tracker_ids = []
    num_gt_dets = 0
    num_tracker_dets = 0
    for t in range(raw_data["num_timesteps"]):
        # Only extract relevant dets for this class for preproc and eval (cls)
        gt_class_mask = np.atleast_1d(raw_data["gt_classes"][t] == cls_id)
        gt_class_mask = gt_class_mask.astype(bool)
        gt_ids = raw_data["gt_ids"][t][gt_class_mask]
        gt_dets = raw_data["gt_dets"][t][gt_class_mask]

        tracker_class_mask = np.atleast_1d(raw_data["tracker_classes"][t] == cls_id)
        tracker_class_mask = tracker_class_mask.astype(bool)
        tracker_ids = raw_data["tracker_ids"][t][tracker_class_mask]
        tracker_dets = raw_data["tracker_dets"][t][tracker_class_mask]
        tracker_confidences = raw_data["tracker_confidences"][t][tracker_class_mask]
        # Restrict the similarity matrix to this class: rows = gt, cols = tracker.
        similarity_scores = raw_data["similarity_scores"][t][gt_class_mask, :][
            :, tracker_class_mask
        ]

        # Match tracker and gt dets (with hungarian algorithm).
        unmatched_indices = np.arange(tracker_ids.shape[0])
        if gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0:
            matching_scores = similarity_scores.copy()
            # Zero out pairs below the 0.5 similarity threshold so they cannot match.
            matching_scores[matching_scores < 0.5 - np.finfo("float").eps] = 0
            # linear_sum_assignment minimizes cost, so negate to maximize similarity.
            match_rows, match_cols = linear_sum_assignment(-matching_scores)
            # Keep only assignments with strictly positive (above-threshold) score.
            actually_matched_mask = (
                matching_scores[match_rows, match_cols] > 0 + np.finfo("float").eps
            )
            match_cols = match_cols[actually_matched_mask]
            unmatched_indices = np.delete(unmatched_indices, match_cols, axis=0)

        # TAO preproc step 3: drop all unmatched tracker dets when there is no GT
        # for this class (and it is not a negative category), or when the class is
        # not exhaustively labeled; otherwise keep everything.
        if gt_ids.shape[0] == 0 and not is_neg_category:
            to_remove_tracker = unmatched_indices
        elif is_not_exhaustively_labeled:
            to_remove_tracker = unmatched_indices
        else:
            to_remove_tracker = np.array([], dtype=int)

        # remove all unwanted unmatched tracker detections
        data["tracker_ids"][t] = np.delete(tracker_ids, to_remove_tracker, axis=0)
        data["tracker_dets"][t] = np.delete(tracker_dets, to_remove_tracker, axis=0)
        data["tracker_confidences"][t] = np.delete(
            tracker_confidences, to_remove_tracker, axis=0
        )
        similarity_scores = np.delete(similarity_scores, to_remove_tracker, axis=1)

        data["gt_ids"][t] = gt_ids
        data["gt_dets"][t] = gt_dets
        data["similarity_scores"][t] = similarity_scores

        # Accumulate per-sequence id sets and detection counts.
        unique_gt_ids += list(np.unique(data["gt_ids"][t]))
        unique_tracker_ids += list(np.unique(data["tracker_ids"][t]))
        num_tracker_dets += len(data["tracker_ids"][t])
        num_gt_dets += len(data["gt_ids"][t])

    # Re-label IDs such that there are no empty IDs
    if len(unique_gt_ids) > 0:
        unique_gt_ids = np.unique(unique_gt_ids)
        # Lookup vector: original (sparse) id -> contiguous 0..N-1; NaN marks unused slots.
        gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
        gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
        for t in range(raw_data["num_timesteps"]):
            if len(data["gt_ids"][t]) > 0:
                data["gt_ids"][t] = gt_id_map[data["gt_ids"][t]].astype(int)
    if len(unique_tracker_ids) > 0:
        unique_tracker_ids = np.unique(unique_tracker_ids)
        tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
        tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
        for t in range(raw_data["num_timesteps"]):
            if len(data["tracker_ids"][t]) > 0:
                data["tracker_ids"][t] = tracker_id_map[
                    data["tracker_ids"][t]
                ].astype(int)

    # Record overview statistics.
    data["num_tracker_dets"] = num_tracker_dets
    data["num_gt_dets"] = num_gt_dets
    data["num_tracker_ids"] = len(unique_tracker_ids)
    data["num_gt_ids"] = len(unique_gt_ids)
    data["num_timesteps"] = raw_data["num_timesteps"]
    data["seq"] = raw_data["seq"]

    # get track representations (precomputed per class in get_raw_seq_data)
    data["gt_tracks"] = raw_data["classes_to_gt_tracks"][cls_id]
    data["gt_track_ids"] = raw_data["classes_to_gt_track_ids"][cls_id]
    data["gt_track_lengths"] = raw_data["classes_to_gt_track_lengths"][cls_id]
    data["gt_track_areas"] = raw_data["classes_to_gt_track_areas"][cls_id]
    data["dt_tracks"] = raw_data["classes_to_dt_tracks"][cls_id]
    data["dt_track_ids"] = raw_data["classes_to_dt_track_ids"][cls_id]
    data["dt_track_lengths"] = raw_data["classes_to_dt_track_lengths"][cls_id]
    data["dt_track_areas"] = raw_data["classes_to_dt_track_areas"][cls_id]
    data["dt_track_scores"] = raw_data["classes_to_dt_track_scores"][cls_id]
    data["not_exhaustively_labeled"] = is_not_exhaustively_labeled
    data["iou_type"] = "bbox"

    # sort tracker data tracks by tracker confidence scores
    if data["dt_tracks"]:
        # Negating scores gives a descending argsort; mergesort keeps it stable
        # so equal-score tracks retain their original relative order.
        idx = np.argsort(
            [-score for score in data["dt_track_scores"]], kind="mergesort"
        )
        data["dt_track_scores"] = [data["dt_track_scores"][i] for i in idx]
        data["dt_tracks"] = [data["dt_tracks"][i] for i in idx]
        data["dt_track_ids"] = [data["dt_track_ids"][i] for i in idx]
        data["dt_track_lengths"] = [data["dt_track_lengths"][i] for i in idx]
        data["dt_track_areas"] = [data["dt_track_areas"][i] for i in idx]
    # Ensure that ids are unique per timestep.
    self._check_unique_ids(data)

    return data
|
| 516 |
+
|
| 517 |
+
def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
    """Compute the GT-vs-tracker similarity matrix for a single timestep.

    Similarity is box IoU, delegated to the shared base-class helper.
    """
    return self._calculate_box_ious(gt_dets_t, tracker_dets_t)
|
| 520 |
+
|
| 521 |
+
def _merge_categories(self, annotations):
|
| 522 |
+
"""
|
| 523 |
+
Merges categories with a merged tag. Adapted from https://github.com/TAO-Dataset
|
| 524 |
+
:param annotations: the annotations in which the classes should be merged
|
| 525 |
+
:return: None
|
| 526 |
+
"""
|
| 527 |
+
merge_map = {}
|
| 528 |
+
for category in self.gt_data["categories"]:
|
| 529 |
+
if "merged" in category:
|
| 530 |
+
for to_merge in category["merged"]:
|
| 531 |
+
merge_map[to_merge["id"]] = category["id"]
|
| 532 |
+
|
| 533 |
+
for ann in annotations:
|
| 534 |
+
ann["category_id"] = merge_map.get(ann["category_id"], ann["category_id"])
|
| 535 |
+
|
| 536 |
+
def _compute_vid_mappings(self, annotations):
|
| 537 |
+
"""
|
| 538 |
+
Computes mappings from Videos to corresponding tracks and images.
|
| 539 |
+
:param annotations: the annotations for which the mapping should be generated
|
| 540 |
+
:return: the video-to-track-mapping, the video-to-image-mapping
|
| 541 |
+
"""
|
| 542 |
+
vids_to_tracks = {}
|
| 543 |
+
vids_to_imgs = {}
|
| 544 |
+
vid_ids = [vid["id"] for vid in self.gt_data["videos"]]
|
| 545 |
+
|
| 546 |
+
# compute an mapping from image IDs to images
|
| 547 |
+
images = {}
|
| 548 |
+
for image in self.gt_data["images"]:
|
| 549 |
+
images[image["id"]] = image
|
| 550 |
+
|
| 551 |
+
for ann in annotations:
|
| 552 |
+
ann["area"] = ann["bbox"][2] * ann["bbox"][3]
|
| 553 |
+
|
| 554 |
+
vid = ann["video_id"]
|
| 555 |
+
if ann["video_id"] not in vids_to_tracks.keys():
|
| 556 |
+
vids_to_tracks[ann["video_id"]] = list()
|
| 557 |
+
if ann["video_id"] not in vids_to_imgs.keys():
|
| 558 |
+
vids_to_imgs[ann["video_id"]] = list()
|
| 559 |
+
|
| 560 |
+
# Fill in vids_to_tracks
|
| 561 |
+
tid = ann["track_id"]
|
| 562 |
+
exist_tids = [track["id"] for track in vids_to_tracks[vid]]
|
| 563 |
+
try:
|
| 564 |
+
index1 = exist_tids.index(tid)
|
| 565 |
+
except ValueError:
|
| 566 |
+
index1 = -1
|
| 567 |
+
if tid not in exist_tids:
|
| 568 |
+
curr_track = {
|
| 569 |
+
"id": tid,
|
| 570 |
+
"category_id": ann["category_id"],
|
| 571 |
+
"video_id": vid,
|
| 572 |
+
"annotations": [ann],
|
| 573 |
+
}
|
| 574 |
+
vids_to_tracks[vid].append(curr_track)
|
| 575 |
+
else:
|
| 576 |
+
vids_to_tracks[vid][index1]["annotations"].append(ann)
|
| 577 |
+
|
| 578 |
+
# Fill in vids_to_imgs
|
| 579 |
+
img_id = ann["image_id"]
|
| 580 |
+
exist_img_ids = [img["id"] for img in vids_to_imgs[vid]]
|
| 581 |
+
try:
|
| 582 |
+
index2 = exist_img_ids.index(img_id)
|
| 583 |
+
except ValueError:
|
| 584 |
+
index2 = -1
|
| 585 |
+
if index2 == -1:
|
| 586 |
+
curr_img = {"id": img_id, "annotations": [ann]}
|
| 587 |
+
vids_to_imgs[vid].append(curr_img)
|
| 588 |
+
else:
|
| 589 |
+
vids_to_imgs[vid][index2]["annotations"].append(ann)
|
| 590 |
+
|
| 591 |
+
# sort annotations by frame index and compute track area
|
| 592 |
+
for vid, tracks in vids_to_tracks.items():
|
| 593 |
+
for track in tracks:
|
| 594 |
+
track["annotations"] = sorted(
|
| 595 |
+
track["annotations"],
|
| 596 |
+
key=lambda x: images[x["image_id"]]["frame_index"],
|
| 597 |
+
)
|
| 598 |
+
# Computer average area
|
| 599 |
+
track["area"] = sum(x["area"] for x in track["annotations"]) / len(
|
| 600 |
+
track["annotations"]
|
| 601 |
+
)
|
| 602 |
+
|
| 603 |
+
# Ensure all videos are present
|
| 604 |
+
for vid_id in vid_ids:
|
| 605 |
+
if vid_id not in vids_to_tracks.keys():
|
| 606 |
+
vids_to_tracks[vid_id] = []
|
| 607 |
+
if vid_id not in vids_to_imgs.keys():
|
| 608 |
+
vids_to_imgs[vid_id] = []
|
| 609 |
+
|
| 610 |
+
return vids_to_tracks, vids_to_imgs
|
| 611 |
+
|
| 612 |
+
def _compute_image_to_timestep_mappings(self):
|
| 613 |
+
"""
|
| 614 |
+
Computes a mapping from images to the corresponding timestep in the sequence.
|
| 615 |
+
:return: the image-to-timestep-mapping
|
| 616 |
+
"""
|
| 617 |
+
images = {}
|
| 618 |
+
for image in self.gt_data["images"]:
|
| 619 |
+
images[image["id"]] = image
|
| 620 |
+
|
| 621 |
+
seq_to_imgs_to_timestep = {vid["id"]: dict() for vid in self.gt_data["videos"]}
|
| 622 |
+
for vid in seq_to_imgs_to_timestep:
|
| 623 |
+
curr_imgs = [img["id"] for img in self.videos_to_gt_images[vid]]
|
| 624 |
+
curr_imgs = sorted(curr_imgs, key=lambda x: images[x]["frame_index"])
|
| 625 |
+
seq_to_imgs_to_timestep[vid] = {
|
| 626 |
+
curr_imgs[i]: i for i in range(len(curr_imgs))
|
| 627 |
+
}
|
| 628 |
+
|
| 629 |
+
return seq_to_imgs_to_timestep
|
| 630 |
+
|
| 631 |
+
def _limit_dets_per_image(self, annotations):
|
| 632 |
+
"""
|
| 633 |
+
Limits the number of detections for each image to config['MAX_DETECTIONS']. Adapted from
|
| 634 |
+
https://github.com/TAO-Dataset/
|
| 635 |
+
:param annotations: the annotations in which the detections should be limited
|
| 636 |
+
:return: the annotations with limited detections
|
| 637 |
+
"""
|
| 638 |
+
max_dets = self.config["MAX_DETECTIONS"]
|
| 639 |
+
img_ann = defaultdict(list)
|
| 640 |
+
for ann in annotations:
|
| 641 |
+
img_ann[ann["image_id"]].append(ann)
|
| 642 |
+
|
| 643 |
+
for img_id, _anns in img_ann.items():
|
| 644 |
+
if len(_anns) <= max_dets:
|
| 645 |
+
continue
|
| 646 |
+
_anns = sorted(_anns, key=lambda x: x["score"], reverse=True)
|
| 647 |
+
img_ann[img_id] = _anns[:max_dets]
|
| 648 |
+
|
| 649 |
+
return [ann for anns in img_ann.values() for ann in anns]
|
| 650 |
+
|
| 651 |
+
def _fill_video_ids_inplace(self, annotations):
|
| 652 |
+
"""
|
| 653 |
+
Fills in missing video IDs inplace. Adapted from https://github.com/TAO-Dataset/
|
| 654 |
+
:param annotations: the annotations for which the videos IDs should be filled inplace
|
| 655 |
+
:return: None
|
| 656 |
+
"""
|
| 657 |
+
missing_video_id = [x for x in annotations if "video_id" not in x]
|
| 658 |
+
if missing_video_id:
|
| 659 |
+
image_id_to_video_id = {
|
| 660 |
+
x["id"]: x["video_id"] for x in self.gt_data["images"]
|
| 661 |
+
}
|
| 662 |
+
for x in missing_video_id:
|
| 663 |
+
x["video_id"] = image_id_to_video_id[x["image_id"]]
|
| 664 |
+
|
| 665 |
+
@staticmethod
|
| 666 |
+
def _make_track_ids_unique(annotations):
|
| 667 |
+
"""
|
| 668 |
+
Makes the track IDs unqiue over the whole annotation set. Adapted from https://github.com/TAO-Dataset/
|
| 669 |
+
:param annotations: the annotation set
|
| 670 |
+
:return: the number of updated IDs
|
| 671 |
+
"""
|
| 672 |
+
track_id_videos = {}
|
| 673 |
+
track_ids_to_update = set()
|
| 674 |
+
max_track_id = 0
|
| 675 |
+
for ann in annotations:
|
| 676 |
+
t = ann["track_id"]
|
| 677 |
+
if t not in track_id_videos:
|
| 678 |
+
track_id_videos[t] = ann["video_id"]
|
| 679 |
+
|
| 680 |
+
if ann["video_id"] != track_id_videos[t]:
|
| 681 |
+
# Track id is assigned to multiple videos
|
| 682 |
+
track_ids_to_update.add(t)
|
| 683 |
+
max_track_id = max(max_track_id, t)
|
| 684 |
+
|
| 685 |
+
if track_ids_to_update:
|
| 686 |
+
print("true")
|
| 687 |
+
next_id = itertools.count(max_track_id + 1)
|
| 688 |
+
new_track_ids = defaultdict(lambda: next(next_id))
|
| 689 |
+
for ann in annotations:
|
| 690 |
+
t = ann["track_id"]
|
| 691 |
+
v = ann["video_id"]
|
| 692 |
+
if t in track_ids_to_update:
|
| 693 |
+
ann["track_id"] = new_track_ids[t, v]
|
| 694 |
+
return len(track_ids_to_update)
|
| 695 |
+
|
| 696 |
+
def _split_known_unknown_distractor(self):
|
| 697 |
+
all_ids = set(
|
| 698 |
+
[i for i in range(1, 2000)]
|
| 699 |
+
) # 2000 is larger than the max category id in TAO-OW.
|
| 700 |
+
# `knowns` includes 78 TAO_category_ids that corresponds to 78 COCO classes.
|
| 701 |
+
# (The other 2 COCO classes do not have corresponding classes in TAO).
|
| 702 |
+
self.knowns = {
|
| 703 |
+
4,
|
| 704 |
+
13,
|
| 705 |
+
1038,
|
| 706 |
+
544,
|
| 707 |
+
1057,
|
| 708 |
+
34,
|
| 709 |
+
35,
|
| 710 |
+
36,
|
| 711 |
+
41,
|
| 712 |
+
45,
|
| 713 |
+
58,
|
| 714 |
+
60,
|
| 715 |
+
579,
|
| 716 |
+
1091,
|
| 717 |
+
1097,
|
| 718 |
+
1099,
|
| 719 |
+
78,
|
| 720 |
+
79,
|
| 721 |
+
81,
|
| 722 |
+
91,
|
| 723 |
+
1115,
|
| 724 |
+
1117,
|
| 725 |
+
95,
|
| 726 |
+
1122,
|
| 727 |
+
99,
|
| 728 |
+
1132,
|
| 729 |
+
621,
|
| 730 |
+
1135,
|
| 731 |
+
625,
|
| 732 |
+
118,
|
| 733 |
+
1144,
|
| 734 |
+
126,
|
| 735 |
+
642,
|
| 736 |
+
1155,
|
| 737 |
+
133,
|
| 738 |
+
1162,
|
| 739 |
+
139,
|
| 740 |
+
154,
|
| 741 |
+
174,
|
| 742 |
+
185,
|
| 743 |
+
699,
|
| 744 |
+
1215,
|
| 745 |
+
714,
|
| 746 |
+
717,
|
| 747 |
+
1229,
|
| 748 |
+
211,
|
| 749 |
+
729,
|
| 750 |
+
221,
|
| 751 |
+
229,
|
| 752 |
+
747,
|
| 753 |
+
235,
|
| 754 |
+
237,
|
| 755 |
+
779,
|
| 756 |
+
276,
|
| 757 |
+
805,
|
| 758 |
+
299,
|
| 759 |
+
829,
|
| 760 |
+
852,
|
| 761 |
+
347,
|
| 762 |
+
371,
|
| 763 |
+
382,
|
| 764 |
+
896,
|
| 765 |
+
392,
|
| 766 |
+
926,
|
| 767 |
+
937,
|
| 768 |
+
428,
|
| 769 |
+
429,
|
| 770 |
+
961,
|
| 771 |
+
452,
|
| 772 |
+
979,
|
| 773 |
+
980,
|
| 774 |
+
982,
|
| 775 |
+
475,
|
| 776 |
+
480,
|
| 777 |
+
993,
|
| 778 |
+
1001,
|
| 779 |
+
502,
|
| 780 |
+
1018,
|
| 781 |
+
}
|
| 782 |
+
# `distractors` is defined as in the paper "Opening up Open-World Tracking"
|
| 783 |
+
self.distractors = {
|
| 784 |
+
20,
|
| 785 |
+
63,
|
| 786 |
+
108,
|
| 787 |
+
180,
|
| 788 |
+
188,
|
| 789 |
+
204,
|
| 790 |
+
212,
|
| 791 |
+
247,
|
| 792 |
+
303,
|
| 793 |
+
403,
|
| 794 |
+
407,
|
| 795 |
+
415,
|
| 796 |
+
490,
|
| 797 |
+
504,
|
| 798 |
+
507,
|
| 799 |
+
513,
|
| 800 |
+
529,
|
| 801 |
+
567,
|
| 802 |
+
569,
|
| 803 |
+
588,
|
| 804 |
+
672,
|
| 805 |
+
691,
|
| 806 |
+
702,
|
| 807 |
+
708,
|
| 808 |
+
711,
|
| 809 |
+
720,
|
| 810 |
+
736,
|
| 811 |
+
737,
|
| 812 |
+
798,
|
| 813 |
+
813,
|
| 814 |
+
815,
|
| 815 |
+
827,
|
| 816 |
+
831,
|
| 817 |
+
851,
|
| 818 |
+
877,
|
| 819 |
+
883,
|
| 820 |
+
912,
|
| 821 |
+
971,
|
| 822 |
+
976,
|
| 823 |
+
1130,
|
| 824 |
+
1133,
|
| 825 |
+
1134,
|
| 826 |
+
1169,
|
| 827 |
+
1184,
|
| 828 |
+
1220,
|
| 829 |
+
}
|
| 830 |
+
self.unknowns = all_ids.difference(self.knowns.union(self.distractors))
|
| 831 |
+
|
| 832 |
+
def _filter_gt_data(self, raw_gt_data):
|
| 833 |
+
"""
|
| 834 |
+
Filter out irrelevant data in the raw_gt_data
|
| 835 |
+
Args:
|
| 836 |
+
raw_gt_data: directly loaded from json.
|
| 837 |
+
|
| 838 |
+
Returns:
|
| 839 |
+
filtered gt_data
|
| 840 |
+
"""
|
| 841 |
+
valid_cat_ids = list()
|
| 842 |
+
if self.subset == "known":
|
| 843 |
+
valid_cat_ids = self.knowns
|
| 844 |
+
elif self.subset == "distractor":
|
| 845 |
+
valid_cat_ids = self.distractors
|
| 846 |
+
elif self.subset == "unknown":
|
| 847 |
+
valid_cat_ids = self.unknowns
|
| 848 |
+
# elif self.subset == "test_only_unknowns":
|
| 849 |
+
# valid_cat_ids = test_only_unknowns
|
| 850 |
+
else:
|
| 851 |
+
raise Exception("The parameter `SUBSET` is incorrect")
|
| 852 |
+
|
| 853 |
+
filtered = dict()
|
| 854 |
+
filtered["videos"] = raw_gt_data["videos"]
|
| 855 |
+
# filtered["videos"] = list()
|
| 856 |
+
unwanted_vid = set()
|
| 857 |
+
# for video in raw_gt_data["videos"]:
|
| 858 |
+
# datasrc = video["name"].split('/')[1]
|
| 859 |
+
# if datasrc in data_srcs:
|
| 860 |
+
# filtered["videos"].append(video)
|
| 861 |
+
# else:
|
| 862 |
+
# unwanted_vid.add(video["id"])
|
| 863 |
+
|
| 864 |
+
filtered["annotations"] = list()
|
| 865 |
+
for ann in raw_gt_data["annotations"]:
|
| 866 |
+
if (ann["video_id"] not in unwanted_vid) and (
|
| 867 |
+
ann["category_id"] in valid_cat_ids
|
| 868 |
+
):
|
| 869 |
+
filtered["annotations"].append(ann)
|
| 870 |
+
|
| 871 |
+
filtered["tracks"] = list()
|
| 872 |
+
for track in raw_gt_data["tracks"]:
|
| 873 |
+
if (track["video_id"] not in unwanted_vid) and (
|
| 874 |
+
track["category_id"] in valid_cat_ids
|
| 875 |
+
):
|
| 876 |
+
filtered["tracks"].append(track)
|
| 877 |
+
|
| 878 |
+
filtered["images"] = list()
|
| 879 |
+
for image in raw_gt_data["images"]:
|
| 880 |
+
if image["video_id"] not in unwanted_vid:
|
| 881 |
+
filtered["images"].append(image)
|
| 882 |
+
|
| 883 |
+
filtered["categories"] = list()
|
| 884 |
+
for cat in raw_gt_data["categories"]:
|
| 885 |
+
if cat["id"] in valid_cat_ids:
|
| 886 |
+
filtered["categories"].append(cat)
|
| 887 |
+
|
| 888 |
+
filtered["info"] = raw_gt_data["info"]
|
| 889 |
+
filtered["licenses"] = raw_gt_data["licenses"]
|
| 890 |
+
|
| 891 |
+
return filtered
|
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/datasets/youtube_vis.py
ADDED
|
@@ -0,0 +1,524 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
# note: this file has been modified from its original version in TrackEval in
|
| 4 |
+
# https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/datasets/youtube_vis.py
|
| 5 |
+
# to support the following:
|
| 6 |
+
# 1) bbox evaluation (via `IOU_TYPE`)
|
| 7 |
+
# 2) passing GT and prediction data as Python objects (via `GT_JSON_OBJECT` and `TRACKER_JSON_OBJECT`)
|
| 8 |
+
# 3) specifying a custom dataset name (via `DATASET_NAME`)
|
| 9 |
+
|
| 10 |
+
import json
|
| 11 |
+
import os
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
from .. import _timing, utils
|
| 16 |
+
from ..utils import TrackEvalException
|
| 17 |
+
from ._base_dataset import _BaseDataset
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class YouTubeVIS(_BaseDataset):
|
| 21 |
+
"""Dataset class for YouTubeVIS tracking"""
|
| 22 |
+
|
| 23 |
+
@staticmethod
def get_default_dataset_config():
    """Default class config values for the YouTubeVIS dataset."""
    code_path = utils.get_code_path()
    return dict(
        # Locations of GT data and tracker output
        GT_FOLDER=os.path.join(code_path, "data/gt/youtube_vis/"),
        TRACKERS_FOLDER=os.path.join(code_path, "data/trackers/youtube_vis/"),
        OUTPUT_FOLDER=None,  # where to save eval results (None -> TRACKERS_FOLDER)
        TRACKERS_TO_EVAL=None,  # tracker filenames to eval (None -> all in folder)
        CLASSES_TO_EVAL=None,  # classes to eval (None -> all classes)
        SPLIT_TO_EVAL="train_sub_split",  # valid: 'train', 'val', 'train_sub_split'
        PRINT_CONFIG=True,  # whether to print the current config
        OUTPUT_SUB_FOLDER="",  # output saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
        TRACKER_SUB_FOLDER="data",  # tracker files in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
        TRACKER_DISPLAY_NAMES=None,  # display names (None -> TRACKERS_TO_EVAL)
        # Added for video phrase AP evaluation -- allow directly specifying the
        # GT JSON data and Tracker (result) JSON data as Python objects,
        # without reading from files.
        GT_JSON_OBJECT=None,
        TRACKER_JSON_OBJECT=None,
        IOU_TYPE="segm",
        DATASET_NAME="video",
    )
|
| 49 |
+
|
| 50 |
+
def __init__(self, config=None):
    """Initialise dataset, checking that all required files are present.

    GT and tracker data may come either from the JSON objects passed in the
    config (GT_JSON_OBJECT / TRACKER_JSON_OBJECT) or from single-json folders
    under GT_FOLDER / TRACKERS_FOLDER for the configured split.

    Raises:
        TrackEvalException: if a required folder is missing, a data folder
            does not contain exactly one json file, an invalid class is
            requested, or tracker display names do not match the trackers.
    """
    super().__init__()
    # Fill non-given config values with defaults
    self.config = utils.init_config(config, self.get_default_dataset_config())
    # Folder names are "<base>youtube_vis_<split>" by convention.
    self.gt_fol = (
        self.config["GT_FOLDER"] + "youtube_vis_" + self.config["SPLIT_TO_EVAL"]
    )
    self.tracker_fol = (
        self.config["TRACKERS_FOLDER"]
        + "youtube_vis_"
        + self.config["SPLIT_TO_EVAL"]
    )
    self.use_super_categories = False
    self.should_classes_combine = True
    # Only segmentation-mask or bounding-box IoU are supported.
    assert self.config["IOU_TYPE"] in ["segm", "bbox"]
    self.iou_type = self.config["IOU_TYPE"]
    print("=" * 100)
    print(f"Evaluate annotation type *{self.iou_type}*")
    self.dataset_name = self.config["DATASET_NAME"]

    self.output_fol = self.config["OUTPUT_FOLDER"]
    if self.output_fol is None:
        # Default: write eval results next to the tracker data.
        self.output_fol = self.tracker_fol
    self.output_sub_fol = self.config["OUTPUT_SUB_FOLDER"]
    self.tracker_sub_fol = self.config["TRACKER_SUB_FOLDER"]

    if self.config["GT_JSON_OBJECT"] is not None:
        # allow directly specifying the GT JSON data without reading from files
        gt_json = self.config["GT_JSON_OBJECT"]
        assert isinstance(gt_json, dict)
        assert "videos" in gt_json
        assert "categories" in gt_json
        assert "annotations" in gt_json
        self.gt_data = gt_json
    else:
        # Otherwise, load GT from the single json file in the GT folder.
        if not os.path.exists(self.gt_fol):
            print("GT folder not found: " + self.gt_fol)
            raise TrackEvalException(
                "GT folder not found: " + os.path.basename(self.gt_fol)
            )
        gt_dir_files = [
            file for file in os.listdir(self.gt_fol) if file.endswith(".json")
        ]
        if len(gt_dir_files) != 1:
            raise TrackEvalException(
                self.gt_fol + " does not contain exactly one json file."
            )

        with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
            self.gt_data = json.load(f)

    # Get classes to eval
    self.valid_classes = [cls["name"] for cls in self.gt_data["categories"]]
    cls_name_to_cls_id_map = {
        cls["name"]: cls["id"] for cls in self.gt_data["categories"]
    }

    if self.config["CLASSES_TO_EVAL"]:
        # Requested classes are lower-cased; invalid names become None so the
        # all() check below can reject the whole request with a clear error.
        self.class_list = [
            cls.lower() if cls.lower() in self.valid_classes else None
            for cls in self.config["CLASSES_TO_EVAL"]
        ]
        if not all(self.class_list):
            raise TrackEvalException(
                "Attempted to evaluate an invalid class. Only classes "
                + ", ".join(self.valid_classes)
                + " are valid."
            )
    else:
        self.class_list = [cls["name"] for cls in self.gt_data["categories"]]
    self.class_name_to_class_id = {
        k: v for k, v in cls_name_to_cls_id_map.items() if k in self.class_list
    }

    # Get sequences to eval and check gt files exist
    # Sequence names are the top-level directory of each video's frame paths.
    self.seq_list = [
        vid["file_names"][0].split("/")[0] for vid in self.gt_data["videos"]
    ]
    self.seq_name_to_seq_id = {
        vid["file_names"][0].split("/")[0]: vid["id"]
        for vid in self.gt_data["videos"]
    }
    # Sequence length = number of frames listed for the video.
    self.seq_lengths = {
        vid["id"]: len(vid["file_names"]) for vid in self.gt_data["videos"]
    }

    # encode masks and compute track areas
    self._prepare_gt_annotations()

    # Get trackers to eval
    if self.config["TRACKER_JSON_OBJECT"] is not None:
        # allow directly specifying the tracker JSON data without reading from files
        tracker_json = self.config["TRACKER_JSON_OBJECT"]
        assert isinstance(tracker_json, list)
        # A single synthetic tracker name represents the passed-in results.
        self.tracker_list = ["tracker"]
    elif self.config["TRACKERS_TO_EVAL"] is None:
        self.tracker_list = os.listdir(self.tracker_fol)
    else:
        self.tracker_list = self.config["TRACKERS_TO_EVAL"]

    if self.config["TRACKER_DISPLAY_NAMES"] is None:
        # Default display names are the tracker folder names themselves.
        self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
    elif (self.config["TRACKERS_TO_EVAL"] is not None) and (
        len(self.config["TRACKER_DISPLAY_NAMES"]) == len(self.tracker_list)
    ):
        self.tracker_to_disp = dict(
            zip(self.tracker_list, self.config["TRACKER_DISPLAY_NAMES"])
        )
    else:
        raise TrackEvalException(
            "List of tracker files and tracker display names do not match."
        )

    # counter for globally unique track IDs
    self.global_tid_counter = 0

    # tracker name -> loaded tracker result data
    self.tracker_data = dict()
    if self.config["TRACKER_JSON_OBJECT"] is not None:
        # allow directly specifying the tracker JSON data without reading from files
        tracker = self.tracker_list[0]
        self.tracker_data[tracker] = tracker_json
    else:
        # Each tracker folder must contain exactly one json result file.
        for tracker in self.tracker_list:
            tracker_dir_path = os.path.join(
                self.tracker_fol, tracker, self.tracker_sub_fol
            )
            tr_dir_files = [
                file
                for file in os.listdir(tracker_dir_path)
                if file.endswith(".json")
            ]
            if len(tr_dir_files) != 1:
                raise TrackEvalException(
                    tracker_dir_path + " does not contain exactly one json file."
                )

            with open(os.path.join(tracker_dir_path, tr_dir_files[0])) as f:
                curr_data = json.load(f)

            self.tracker_data[tracker] = curr_data
|
| 191 |
+
|
| 192 |
+
def get_display_name(self, tracker):
    """Return the human-readable display name configured for *tracker*.

    Raises KeyError if the tracker was never registered in ``tracker_to_disp``.
    """
    display_name = self.tracker_to_disp[tracker]
    return display_name
|
| 194 |
+
|
| 195 |
+
def _load_raw_file(self, tracker, seq, is_gt):
    """Load a file (gt or tracker) in the YouTubeVIS format
    If is_gt, this returns a dict which contains the fields:
    [gt_ids, gt_classes] : list (for each timestep) of 1D NDArrays (for each det).
    [gt_dets]: list (for each timestep) of lists of detections.
    [classes_to_gt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
    keys and corresponding segmentations as values) for each track
    [classes_to_gt_track_ids, classes_to_gt_track_areas, classes_to_gt_track_iscrowd]: dictionary with class values
    as keys and lists (for each track) as values

    if not is_gt, this returns a dict which contains the fields:
    [tracker_ids, tracker_classes, tracker_confidences] : list (for each timestep) of 1D NDArrays (for each det).
    [tracker_dets]: list (for each timestep) of lists of detections.
    [classes_to_dt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
    keys and corresponding segmentations as values) for each track
    [classes_to_dt_track_ids, classes_to_dt_track_areas]: dictionary with class values as keys and lists as values
    [classes_to_dt_track_scores]: dictionary with class values as keys and 1D numpy arrays as values
    """
    # select sequence tracks
    seq_id = self.seq_name_to_seq_id[seq]
    if is_gt:
        tracks = [
            ann for ann in self.gt_data["annotations"] if ann["video_id"] == seq_id
        ]
    else:
        # tracker tracks get areas / globally-unique ids assigned here
        tracks = self._get_tracker_seq_tracks(tracker, seq_id)

    # Convert data to required format
    num_timesteps = self.seq_lengths[seq_id]
    data_keys = ["ids", "classes", "dets"]
    if not is_gt:
        data_keys += ["tracker_confidences"]
    raw_data = {key: [None] * num_timesteps for key in data_keys}
    # detections live per-track under "segmentations" (masks) or "bboxes"
    result_key = "segmentations" if self.iou_type == "segm" else "bboxes"
    for t in range(num_timesteps):
        # only tracks with a non-empty (truthy) detection at timestep t contribute
        raw_data["dets"][t] = [
            track[result_key][t] for track in tracks if track[result_key][t]
        ]
        raw_data["ids"][t] = np.atleast_1d(
            [track["id"] for track in tracks if track[result_key][t]]
        ).astype(int)
        raw_data["classes"][t] = np.atleast_1d(
            [track["category_id"] for track in tracks if track[result_key][t]]
        ).astype(int)
        if not is_gt:
            raw_data["tracker_confidences"][t] = np.atleast_1d(
                [track["score"] for track in tracks if track[result_key][t]]
            ).astype(float)

    # rename generic keys to gt_* / tracker_* variants
    if is_gt:
        key_map = {"ids": "gt_ids", "classes": "gt_classes", "dets": "gt_dets"}
    else:
        key_map = {
            "ids": "tracker_ids",
            "classes": "tracker_classes",
            "dets": "tracker_dets",
        }
    for k, v in key_map.items():
        raw_data[v] = raw_data.pop(k)

    all_cls_ids = {self.class_name_to_class_id[cls] for cls in self.class_list}
    classes_to_tracks = {
        cls: [track for track in tracks if track["category_id"] == cls]
        for cls in all_cls_ids
    }

    # mapping from classes to track representations and track information
    # (each track becomes a {frame_index: detection} dict)
    raw_data["classes_to_tracks"] = {
        cls: [
            {i: track[result_key][i] for i in range(len(track[result_key]))}
            for track in tracks
        ]
        for cls, tracks in classes_to_tracks.items()
    }
    raw_data["classes_to_track_ids"] = {
        cls: [track["id"] for track in tracks]
        for cls, tracks in classes_to_tracks.items()
    }
    raw_data["classes_to_track_areas"] = {
        cls: [track["area"] for track in tracks]
        for cls, tracks in classes_to_tracks.items()
    }

    if is_gt:
        raw_data["classes_to_gt_track_iscrowd"] = {
            cls: [track["iscrowd"] for track in tracks]
            for cls, tracks in classes_to_tracks.items()
        }
    else:
        raw_data["classes_to_dt_track_scores"] = {
            cls: np.array([track["score"] for track in tracks])
            for cls, tracks in classes_to_tracks.items()
        }

    # rename the class-level keys to gt/dt variants as well
    if is_gt:
        key_map = {
            "classes_to_tracks": "classes_to_gt_tracks",
            "classes_to_track_ids": "classes_to_gt_track_ids",
            "classes_to_track_areas": "classes_to_gt_track_areas",
        }
    else:
        key_map = {
            "classes_to_tracks": "classes_to_dt_tracks",
            "classes_to_track_ids": "classes_to_dt_track_ids",
            "classes_to_track_areas": "classes_to_dt_track_areas",
        }
    for k, v in key_map.items():
        raw_data[v] = raw_data.pop(k)

    raw_data["num_timesteps"] = num_timesteps
    raw_data["seq"] = seq
    return raw_data
|
| 307 |
+
|
| 308 |
+
@_timing.time
def get_preprocessed_seq_data(self, raw_data, cls):
    """Preprocess data for a single sequence for a single class ready for evaluation.
    Inputs:
        - raw_data is a dict containing the data for the sequence already read in by get_raw_seq_data().
        - cls is the class to be evaluated.
    Outputs:
        - data is a dict containing all of the information that metrics need to perform evaluation.
            It contains the following fields:
                [num_timesteps, num_gt_ids, num_tracker_ids, num_gt_dets, num_tracker_dets] : integers.
                [gt_ids, tracker_ids, tracker_confidences]: list (for each timestep) of 1D NDArrays (for each det).
                [gt_dets, tracker_dets]: list (for each timestep) of lists of detections.
                [similarity_scores]: list (for each timestep) of 2D NDArrays.
    Notes:
        General preprocessing (preproc) occurs in 4 steps. Some datasets may not use all of these steps.
            1) Extract only detections relevant for the class to be evaluated (including distractor detections).
            2) Match gt dets and tracker dets. Remove tracker dets that are matched to a gt det that is of a
                distractor class, or otherwise marked as to be removed.
            3) Remove unmatched tracker dets if they fall within a crowd ignore region or don't meet a certain
                other criteria (e.g. are too small).
            4) Remove gt dets that were only useful for preprocessing and not for actual evaluation.
        After the above preprocessing steps, this function also calculates the number of gt and tracker detections
            and unique track ids. It also relabels gt and tracker ids to be contiguous and checks that ids are
            unique within each timestep.
    YouTubeVIS:
        In YouTubeVIS, the 4 preproc steps are as follow:
            1) There are 40 classes which are evaluated separately.
            2) No matched tracker dets are removed.
            3) No unmatched tracker dets are removed.
            4) No gt dets are removed.
        Further, for TrackMAP computation track representations for the given class are accessed from a dictionary
        and the tracks from the tracker data are sorted according to the tracker confidence.
    """
    cls_id = self.class_name_to_class_id[cls]

    data_keys = [
        "gt_ids",
        "tracker_ids",
        "gt_dets",
        "tracker_dets",
        "similarity_scores",
    ]
    data = {key: [None] * raw_data["num_timesteps"] for key in data_keys}
    unique_gt_ids = []
    unique_tracker_ids = []
    num_gt_dets = 0
    num_tracker_dets = 0

    for t in range(raw_data["num_timesteps"]):
        # Only extract relevant dets for this class for eval (cls)
        gt_class_mask = np.atleast_1d(raw_data["gt_classes"][t] == cls_id)
        gt_class_mask = gt_class_mask.astype(bool)
        gt_ids = raw_data["gt_ids"][t][gt_class_mask]
        gt_dets = [
            raw_data["gt_dets"][t][ind]
            for ind in range(len(gt_class_mask))
            if gt_class_mask[ind]
        ]

        tracker_class_mask = np.atleast_1d(raw_data["tracker_classes"][t] == cls_id)
        tracker_class_mask = tracker_class_mask.astype(bool)
        tracker_ids = raw_data["tracker_ids"][t][tracker_class_mask]
        tracker_dets = [
            raw_data["tracker_dets"][t][ind]
            for ind in range(len(tracker_class_mask))
            if tracker_class_mask[ind]
        ]
        # restrict the per-timestep similarity matrix to the rows/cols of this class
        similarity_scores = raw_data["similarity_scores"][t][gt_class_mask, :][
            :, tracker_class_mask
        ]

        data["tracker_ids"][t] = tracker_ids
        data["tracker_dets"][t] = tracker_dets
        data["gt_ids"][t] = gt_ids
        data["gt_dets"][t] = gt_dets
        data["similarity_scores"][t] = similarity_scores

        unique_gt_ids += list(np.unique(data["gt_ids"][t]))
        unique_tracker_ids += list(np.unique(data["tracker_ids"][t]))
        num_tracker_dets += len(data["tracker_ids"][t])
        num_gt_dets += len(data["gt_ids"][t])

    # Re-label IDs such that there are no empty IDs
    # (build a lookup array mapping old id -> new contiguous id; unused slots stay NaN)
    if len(unique_gt_ids) > 0:
        unique_gt_ids = np.unique(unique_gt_ids)
        gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
        gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
        for t in range(raw_data["num_timesteps"]):
            if len(data["gt_ids"][t]) > 0:
                data["gt_ids"][t] = gt_id_map[data["gt_ids"][t]].astype(int)
    if len(unique_tracker_ids) > 0:
        unique_tracker_ids = np.unique(unique_tracker_ids)
        tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
        tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
        for t in range(raw_data["num_timesteps"]):
            if len(data["tracker_ids"][t]) > 0:
                data["tracker_ids"][t] = tracker_id_map[
                    data["tracker_ids"][t]
                ].astype(int)

    # Ensure that ids are unique per timestep.
    self._check_unique_ids(data)

    # Record overview statistics.
    data["num_tracker_dets"] = num_tracker_dets
    data["num_gt_dets"] = num_gt_dets
    data["num_tracker_ids"] = len(unique_tracker_ids)
    data["num_gt_ids"] = len(unique_gt_ids)
    data["num_timesteps"] = raw_data["num_timesteps"]
    data["seq"] = raw_data["seq"]

    # get track representations
    data["gt_tracks"] = raw_data["classes_to_gt_tracks"][cls_id]
    data["gt_track_ids"] = raw_data["classes_to_gt_track_ids"][cls_id]
    data["gt_track_areas"] = raw_data["classes_to_gt_track_areas"][cls_id]
    data["gt_track_iscrowd"] = raw_data["classes_to_gt_track_iscrowd"][cls_id]
    data["dt_tracks"] = raw_data["classes_to_dt_tracks"][cls_id]
    data["dt_track_ids"] = raw_data["classes_to_dt_track_ids"][cls_id]
    data["dt_track_areas"] = raw_data["classes_to_dt_track_areas"][cls_id]
    data["dt_track_scores"] = raw_data["classes_to_dt_track_scores"][cls_id]
    data["iou_type"] = "mask"

    # sort tracker data tracks by tracker confidence scores
    if data["dt_tracks"]:
        # mergesort is stable, so equal scores keep their original relative order
        idx = np.argsort(
            [-score for score in data["dt_track_scores"]], kind="mergesort"
        )
        data["dt_track_scores"] = [data["dt_track_scores"][i] for i in idx]
        data["dt_tracks"] = [data["dt_tracks"][i] for i in idx]
        data["dt_track_ids"] = [data["dt_track_ids"][i] for i in idx]
        data["dt_track_areas"] = [data["dt_track_areas"][i] for i in idx]

    return data
|
| 441 |
+
|
| 442 |
+
def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
    """Compute the GT-vs-tracker similarity matrix for one timestep.

    Uses mask IoU when ``iou_type`` is "segm", otherwise box IoU on
    xywh-format boxes.
    """
    if self.iou_type != "segm":
        gt_boxes = np.array(gt_dets_t, dtype=np.float32).reshape(-1, 4)
        dt_boxes = np.array(tracker_dets_t, dtype=np.float32).reshape(-1, 4)
        return self._calculate_box_ious(
            gt_boxes, dt_boxes, box_format="xywh", do_ioa=False
        )
    return self._calculate_mask_ious(
        gt_dets_t, tracker_dets_t, is_encoded=True, do_ioa=False
    )
|
| 454 |
+
|
| 455 |
+
def _prepare_gt_annotations(self):
|
| 456 |
+
"""
|
| 457 |
+
Prepares GT data by rle encoding segmentations and computing the average track area.
|
| 458 |
+
:return: None
|
| 459 |
+
"""
|
| 460 |
+
if self.iou_type == "segm":
|
| 461 |
+
# only loaded when needed to reduce minimum requirements
|
| 462 |
+
from pycocotools import mask as mask_utils
|
| 463 |
+
|
| 464 |
+
for track in self.gt_data["annotations"]:
|
| 465 |
+
h = track["height"]
|
| 466 |
+
w = track["width"]
|
| 467 |
+
for i, seg in enumerate(track["segmentations"]):
|
| 468 |
+
if seg is not None and isinstance(seg["counts"], list):
|
| 469 |
+
track["segmentations"][i] = mask_utils.frPyObjects(seg, h, w)
|
| 470 |
+
areas = [a for a in track["areas"] if a]
|
| 471 |
+
if len(areas) == 0:
|
| 472 |
+
track["area"] = 0
|
| 473 |
+
else:
|
| 474 |
+
track["area"] = np.array(areas).mean()
|
| 475 |
+
else:
|
| 476 |
+
for track in self.gt_data["annotations"]:
|
| 477 |
+
# For bbox eval, compute areas from bboxes if not already available
|
| 478 |
+
areas = [a for a in track.get("areas", []) if a]
|
| 479 |
+
if not areas:
|
| 480 |
+
areas = []
|
| 481 |
+
for bbox in track.get("bboxes", []):
|
| 482 |
+
if bbox is not None:
|
| 483 |
+
areas.append(bbox[2] * bbox[3])
|
| 484 |
+
track["area"] = np.array(areas).mean() if areas else 0
|
| 485 |
+
|
| 486 |
+
def _get_tracker_seq_tracks(self, tracker, seq_id):
|
| 487 |
+
"""
|
| 488 |
+
Prepares tracker data for a given sequence. Extracts all annotations for given sequence ID, computes
|
| 489 |
+
average track area and assigns a track ID.
|
| 490 |
+
:param tracker: the given tracker
|
| 491 |
+
:param seq_id: the sequence ID
|
| 492 |
+
:return: the extracted tracks
|
| 493 |
+
"""
|
| 494 |
+
# only loaded when needed to reduce minimum requirements
|
| 495 |
+
from pycocotools import mask as mask_utils
|
| 496 |
+
|
| 497 |
+
tracks = [
|
| 498 |
+
ann for ann in self.tracker_data[tracker] if ann["video_id"] == seq_id
|
| 499 |
+
]
|
| 500 |
+
for track in tracks:
|
| 501 |
+
if "areas" not in track:
|
| 502 |
+
if self.iou_type == "segm":
|
| 503 |
+
for seg in track["segmentations"]:
|
| 504 |
+
if seg:
|
| 505 |
+
track["areas"].append(mask_utils.area(seg))
|
| 506 |
+
else:
|
| 507 |
+
track["areas"].append(None)
|
| 508 |
+
else:
|
| 509 |
+
for bbox in track["bboxes"]:
|
| 510 |
+
if bbox:
|
| 511 |
+
track["areas"].append(bbox[2] * bbox[3])
|
| 512 |
+
else:
|
| 513 |
+
track["areas"].append(None)
|
| 514 |
+
areas = [a for a in track["areas"] if a]
|
| 515 |
+
if len(areas) == 0:
|
| 516 |
+
track["area"] = 0
|
| 517 |
+
else:
|
| 518 |
+
track["area"] = np.array(areas).mean()
|
| 519 |
+
track["id"] = self.global_tid_counter
|
| 520 |
+
self.global_tid_counter += 1
|
| 521 |
+
return tracks
|
| 522 |
+
|
| 523 |
+
def get_name(self):
    """Return the configured name of this dataset instance."""
    name = self.dataset_name
    return name
|
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/eval.py
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
import traceback
|
| 6 |
+
from functools import partial
|
| 7 |
+
from multiprocessing.pool import Pool
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from . import _timing, utils
|
| 12 |
+
from .metrics import Count
|
| 13 |
+
from .utils import TrackEvalException
|
| 14 |
+
|
| 15 |
+
try:
|
| 16 |
+
import tqdm
|
| 17 |
+
|
| 18 |
+
TQDM_IMPORTED = True
|
| 19 |
+
except ImportError as _:
|
| 20 |
+
TQDM_IMPORTED = False
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class Evaluator:
|
| 24 |
+
"""Evaluator class for evaluating different metrics for different datasets"""
|
| 25 |
+
|
| 26 |
+
@staticmethod
def get_default_eval_config():
    """Returns the default config values for evaluation"""
    code_path = utils.get_code_path()
    default_config = {
        # parallel execution
        "USE_PARALLEL": False,
        "NUM_PARALLEL_CORES": 8,
        # error handling
        "BREAK_ON_ERROR": True,  # Raises exception and exits with error
        "RETURN_ON_ERROR": False,  # if not BREAK_ON_ERROR, then returns from function on error
        # if not None, save any errors into a log file.
        "LOG_ON_ERROR": os.path.join(code_path, "error_log.txt"),
        # console output
        "PRINT_RESULTS": True,
        "PRINT_ONLY_COMBINED": False,
        "PRINT_CONFIG": True,
        # timing
        "TIME_PROGRESS": True,
        "DISPLAY_LESS_PROGRESS": True,
        # file output
        "OUTPUT_SUMMARY": True,
        "OUTPUT_EMPTY_CLASSES": True,  # If False, summary files are not output for classes with no detections
        "OUTPUT_DETAILED": True,
        "PLOT_CURVES": True,
    }
    return default_config
|
| 49 |
+
|
| 50 |
+
def __init__(self, config=None):
    """Initialise the evaluator with a config file"""
    # Merge the user-supplied config over the defaults from get_default_eval_config().
    self.config = utils.init_config(config, self.get_default_eval_config(), "Eval")
    # Only run timing analysis if not run in parallel.
    # NOTE: these are module-level flags in _timing, so enabling them here
    # affects every @_timing.time-decorated function in this process.
    if self.config["TIME_PROGRESS"] and not self.config["USE_PARALLEL"]:
        _timing.DO_TIMING = True
        if self.config["DISPLAY_LESS_PROGRESS"]:
            _timing.DISPLAY_LESS_PROGRESS = True
|
| 58 |
+
|
| 59 |
+
def _combine_results(
    self,
    res,
    metrics_list,
    metric_names,
    dataset,
    res_field="COMBINED_SEQ",
    target_tag=None,
):
    """Combine per-sequence results into `res[res_field]`, first over
    sequences per class, then (optionally) over classes and super-classes.

    If `target_tag` is given, only sequences whose GT annotations carry that
    tag contribute to the combination. Returns `(res, combined_cls_keys)`
    where `combined_cls_keys` lists the synthetic class keys added.
    Mutates `res` in place.
    """
    assert res_field.startswith("COMBINED_SEQ")
    # collecting combined cls keys (cls averaged, det averaged, super classes)
    tracker_list, seq_list, class_list = dataset.get_eval_info()
    combined_cls_keys = []
    res[res_field] = {}

    # narrow the target for evaluation
    if target_tag is not None:
        target_video_ids = [
            annot["video_id"]
            for annot in dataset.gt_data["annotations"]
            if target_tag in annot["tags"]
        ]
        # map video id -> sequence name (top-level directory of the first frame)
        vid2name = {
            video["id"]: video["file_names"][0].split("/")[0]
            for video in dataset.gt_data["videos"]
        }
        target_video_ids = set(target_video_ids)
        target_video = [vid2name[video_id] for video_id in target_video_ids]

        if len(target_video) == 0:
            raise TrackEvalException(
                "No sequences found with the tag %s" % target_tag
            )

        target_annotations = [
            annot
            for annot in dataset.gt_data["annotations"]
            if annot["video_id"] in target_video_ids
        ]
        # the tag must apply to whole sequences, not individual annotations
        assert all(target_tag in annot["tags"] for annot in target_annotations), (
            f"Not all annotations in the target sequences have the target tag {target_tag}. "
            "We currently only support a target tag at the sequence level, not at the annotation level."
        )
    else:
        target_video = seq_list

    # combine sequences for each class
    for c_cls in class_list:
        res[res_field][c_cls] = {}
        for metric, metric_name in zip(metrics_list, metric_names):
            # per-sequence results only; skip previously combined entries
            curr_res = {
                seq_key: seq_value[c_cls][metric_name]
                for seq_key, seq_value in res.items()
                if not seq_key.startswith("COMBINED_SEQ")
                and seq_key in target_video
            }
            res[res_field][c_cls][metric_name] = metric.combine_sequences(curr_res)
    # combine classes
    if dataset.should_classes_combine:
        combined_cls_keys += [
            "cls_comb_cls_av",
            "cls_comb_det_av",
            "all",
        ]
        res[res_field]["cls_comb_cls_av"] = {}
        res[res_field]["cls_comb_det_av"] = {}
        for metric, metric_name in zip(metrics_list, metric_names):
            cls_res = {
                cls_key: cls_value[metric_name]
                for cls_key, cls_value in res[res_field].items()
                if cls_key not in combined_cls_keys
            }
            res[res_field]["cls_comb_cls_av"][metric_name] = (
                metric.combine_classes_class_averaged(cls_res)
            )
            res[res_field]["cls_comb_det_av"][metric_name] = (
                metric.combine_classes_det_averaged(cls_res)
            )
    # combine classes to super classes
    if dataset.use_super_categories:
        for cat, sub_cats in dataset.super_categories.items():
            combined_cls_keys.append(cat)
            res[res_field][cat] = {}
            for metric, metric_name in zip(metrics_list, metric_names):
                cat_res = {
                    cls_key: cls_value[metric_name]
                    for cls_key, cls_value in res[res_field].items()
                    if cls_key in sub_cats
                }
                res[res_field][cat][metric_name] = (
                    metric.combine_classes_det_averaged(cat_res)
                )
    return res, combined_cls_keys
|
| 152 |
+
|
| 153 |
+
def _summarize_results(
    self,
    res,
    tracker,
    metrics_list,
    metric_names,
    dataset,
    res_field,
    combined_cls_keys,
):
    """Print and/or write summary, detailed and plot output for one tracker,
    driven by the PRINT_*/OUTPUT_*/PLOT_CURVES config flags.
    """
    config = self.config
    output_fol = dataset.get_output_fol(tracker)
    tracker_display_name = dataset.get_display_name(tracker)
    for c_cls in res[
        res_field
    ].keys():  # class_list + combined classes if calculated
        summaries = []
        details = []
        num_dets = res[res_field][c_cls]["Count"]["Dets"]
        if config["OUTPUT_EMPTY_CLASSES"] or num_dets > 0:
            for metric, metric_name in zip(metrics_list, metric_names):
                # for combined classes there is no per sequence evaluation
                if c_cls in combined_cls_keys:
                    table_res = {res_field: res[res_field][c_cls][metric_name]}
                else:
                    table_res = {
                        seq_key: seq_value[c_cls][metric_name]
                        for seq_key, seq_value in res.items()
                    }

                if config["PRINT_RESULTS"] and config["PRINT_ONLY_COMBINED"]:
                    # suppress per-class tables when class-combination is active
                    dont_print = (
                        dataset.should_classes_combine
                        and c_cls not in combined_cls_keys
                    )
                    if not dont_print:
                        metric.print_table(
                            {res_field: table_res[res_field]},
                            tracker_display_name,
                            c_cls,
                            res_field,
                            res_field,
                        )
                elif config["PRINT_RESULTS"]:
                    metric.print_table(
                        table_res, tracker_display_name, c_cls, res_field, res_field
                    )
                if config["OUTPUT_SUMMARY"]:
                    summaries.append(metric.summary_results(table_res))
                if config["OUTPUT_DETAILED"]:
                    details.append(metric.detailed_results(table_res))
                if config["PLOT_CURVES"]:
                    metric.plot_single_tracker_results(
                        table_res,
                        tracker_display_name,
                        c_cls,
                        output_fol,
                    )
            if config["OUTPUT_SUMMARY"]:
                utils.write_summary_results(summaries, c_cls, output_fol)
            if config["OUTPUT_DETAILED"]:
                utils.write_detailed_results(details, c_cls, output_fol)
|
| 215 |
+
|
| 216 |
+
@_timing.time
def evaluate(self, dataset_list, metrics_list, show_progressbar=False):
    """Evaluate a set of metrics on a set of datasets.

    :param dataset_list: list of dataset objects (each providing
        get_eval_info / get_raw_seq_data / get_preprocessed_seq_data)
    :param metrics_list: list of metric objects; a Count metric is always
        appended automatically
    :param show_progressbar: show a tqdm progress bar if tqdm is installed
    :return: (output_res, output_msg) nested dicts indexed by
        [dataset_name][tracker]; on failure output_res entry is None and
        output_msg carries the error description
    """
    config = self.config
    metrics_list = metrics_list + [Count()]  # Count metrics are always run
    metric_names = utils.validate_metrics_list(metrics_list)
    dataset_names = [dataset.get_name() for dataset in dataset_list]
    output_res = {}
    output_msg = {}

    for dataset, dataset_name in zip(dataset_list, dataset_names):
        # Get dataset info about what to evaluate
        output_res[dataset_name] = {}
        output_msg[dataset_name] = {}
        tracker_list, seq_list, class_list = dataset.get_eval_info()
        print(
            "\nEvaluating %i tracker(s) on %i sequence(s) for %i class(es) on %s dataset using the following "
            "metrics: %s\n"
            % (
                len(tracker_list),
                len(seq_list),
                len(class_list),
                dataset_name,
                ", ".join(metric_names),
            )
        )

        # Evaluate each tracker
        for tracker in tracker_list:
            # if not config['BREAK_ON_ERROR'] then go to next tracker without breaking
            try:
                # Evaluate each sequence in parallel or in series.
                # returns a nested dict (res), indexed like: res[seq][class][metric_name][sub_metric field]
                # e.g. res[seq_0001][pedestrian][hota][DetA]
                print("\nEvaluating %s\n" % tracker)
                time_start = time.time()
                if config["USE_PARALLEL"]:
                    if show_progressbar and TQDM_IMPORTED:
                        seq_list_sorted = sorted(seq_list)

                        with Pool(config["NUM_PARALLEL_CORES"]) as pool, tqdm.tqdm(
                            total=len(seq_list)
                        ) as pbar:
                            _eval_sequence = partial(
                                eval_sequence,
                                dataset=dataset,
                                tracker=tracker,
                                class_list=class_list,
                                metrics_list=metrics_list,
                                metric_names=metric_names,
                            )
                            results = []
                            # imap preserves input order, so zip below is safe
                            for r in pool.imap(
                                _eval_sequence, seq_list_sorted, chunksize=20
                            ):
                                results.append(r)
                                pbar.update()
                            res = dict(zip(seq_list_sorted, results))

                    else:
                        with Pool(config["NUM_PARALLEL_CORES"]) as pool:
                            _eval_sequence = partial(
                                eval_sequence,
                                dataset=dataset,
                                tracker=tracker,
                                class_list=class_list,
                                metrics_list=metrics_list,
                                metric_names=metric_names,
                            )
                            results = pool.map(_eval_sequence, seq_list)
                            res = dict(zip(seq_list, results))
                else:
                    res = {}
                    if show_progressbar and TQDM_IMPORTED:
                        seq_list_sorted = sorted(seq_list)
                        for curr_seq in tqdm.tqdm(seq_list_sorted):
                            res[curr_seq] = eval_sequence(
                                curr_seq,
                                dataset,
                                tracker,
                                class_list,
                                metrics_list,
                                metric_names,
                            )
                    else:
                        for curr_seq in sorted(seq_list):
                            res[curr_seq] = eval_sequence(
                                curr_seq,
                                dataset,
                                tracker,
                                class_list,
                                metrics_list,
                                metric_names,
                            )

                # Combine results over all sequences and then over all classes
                res, combined_cls_keys = self._combine_results(
                    res, metrics_list, metric_names, dataset, "COMBINED_SEQ"
                )

                # NOTE(review): assumes the dataset exposes gt_data — only
                # holds for JSON-backed datasets like YouTubeVIS; confirm for
                # other dataset classes before relying on this path.
                if np.all(
                    ["tags" in annot for annot in dataset.gt_data["annotations"]]
                ):
                    # Combine results over the challenging sequences and then over all classes
                    # currently only support "tracking_challenging_pair"
                    res, _ = self._combine_results(
                        res,
                        metrics_list,
                        metric_names,
                        dataset,
                        "COMBINED_SEQ_CHALLENGING",
                        "tracking_challenging_pair",
                    )

                # Print and output results in various formats
                if config["TIME_PROGRESS"]:
                    print(
                        "\nAll sequences for %s finished in %.2f seconds"
                        % (tracker, time.time() - time_start)
                    )

                self._summarize_results(
                    res,
                    tracker,
                    metrics_list,
                    metric_names,
                    dataset,
                    "COMBINED_SEQ",
                    combined_cls_keys,
                )
                if "COMBINED_SEQ_CHALLENGING" in res:
                    self._summarize_results(
                        res,
                        tracker,
                        metrics_list,
                        metric_names,
                        dataset,
                        "COMBINED_SEQ_CHALLENGING",
                        combined_cls_keys,
                    )

                # Output for returning from function
                output_res[dataset_name][tracker] = res
                output_msg[dataset_name][tracker] = "Success"

            except Exception as err:
                output_res[dataset_name][tracker] = None
                # Fix: use isinstance so TrackEvalException subclasses also
                # report their message (type(err) == ... missed subclasses).
                if isinstance(err, TrackEvalException):
                    output_msg[dataset_name][tracker] = str(err)
                else:
                    output_msg[dataset_name][tracker] = "Unknown error occurred."
                print("Tracker %s was unable to be evaluated." % tracker)
                print(err)
                traceback.print_exc()
                if config["LOG_ON_ERROR"] is not None:
                    with open(config["LOG_ON_ERROR"], "a") as f:
                        print(dataset_name, file=f)
                        print(tracker, file=f)
                        print(traceback.format_exc(), file=f)
                        print("\n\n\n", file=f)
                if config["BREAK_ON_ERROR"]:
                    raise err
                elif config["RETURN_ON_ERROR"]:
                    return output_res, output_msg

    return output_res, output_msg
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
@_timing.time
def eval_sequence(seq, dataset, tracker, class_list, metrics_list, metric_names):
    """Evaluate all metrics for a single sequence, one class at a time.

    Returns a nested dict: {class_name: {metric_name: metric_result}}.
    """
    raw_data = dataset.get_raw_seq_data(tracker, seq)
    seq_res = {}
    for cls in class_list:
        cls_data = dataset.get_preprocessed_seq_data(raw_data, cls)
        seq_res[cls] = {
            met_name: metric.eval_sequence(cls_data)
            for metric, met_name in zip(metrics_list, metric_names)
        }
    return seq_res
|
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/metrics/hota.py
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from scipy.optimize import linear_sum_assignment
|
| 7 |
+
|
| 8 |
+
from .. import _timing
|
| 9 |
+
from ._base_metric import _BaseMetric
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class HOTA(_BaseMetric):
    """Class which implements the HOTA metrics.
    See: https://link.springer.com/article/10.1007/s11263-020-01375-2
    """

    def __init__(self, config=None):
        # NOTE(review): `config` is unused; presumably kept for interface
        # parity with the other metric classes — confirm before removing.
        super().__init__()
        self.plottable = True
        # Localisation thresholds alpha: 0.05, 0.10, ..., 0.95.
        self.array_labels = np.arange(0.05, 0.99, 0.05)
        # Per-alpha raw counts.
        self.integer_array_fields = ["HOTA_TP", "HOTA_FN", "HOTA_FP"]
        # Per-alpha derived scores.
        self.float_array_fields = [
            "HOTA",
            "DetA",
            "AssA",
            "DetRe",
            "DetPr",
            "AssRe",
            "AssPr",
            "LocA",
            "OWTA",
        ]
        # Scalar scores reported at the lowest alpha (index 0, alpha=0.05).
        self.float_fields = ["HOTA(0)", "LocA(0)", "HOTALocA(0)"]
        self.fields = (
            self.float_array_fields + self.integer_array_fields + self.float_fields
        )
        self.summary_fields = self.float_array_fields + self.float_fields

    @_timing.time
    def eval_sequence(self, data):
        """Calculates the HOTA metrics for one sequence.

        `data` is the preprocessed sequence dict (per-timestep id arrays and
        similarity matrices) produced by the dataset class.
        """

        # Initialise results
        res = {}
        for field in self.float_array_fields + self.integer_array_fields:
            res[field] = np.zeros((len(self.array_labels)), dtype=float)
        for field in self.float_fields:
            res[field] = 0

        # Return result quickly if tracker or gt sequence is empty
        if data["num_tracker_dets"] == 0:
            # No predictions: every gt det is a FN at every alpha.
            res["HOTA_FN"] = data["num_gt_dets"] * np.ones(
                (len(self.array_labels)), dtype=float
            )
            res["LocA"] = np.ones((len(self.array_labels)), dtype=float)
            res["LocA(0)"] = 1.0
            return res
        if data["num_gt_dets"] == 0:
            # No ground truth: every predicted det is a FP at every alpha.
            res["HOTA_FP"] = data["num_tracker_dets"] * np.ones(
                (len(self.array_labels)), dtype=float
            )
            res["LocA"] = np.ones((len(self.array_labels)), dtype=float)
            res["LocA(0)"] = 1.0
            return res

        # Variables counting global association
        potential_matches_count = np.zeros(
            (data["num_gt_ids"], data["num_tracker_ids"])
        )
        gt_id_count = np.zeros((data["num_gt_ids"], 1))
        tracker_id_count = np.zeros((1, data["num_tracker_ids"]))

        # First loop through each timestep and accumulate global track information.
        for t, (gt_ids_t, tracker_ids_t) in enumerate(
            zip(data["gt_ids"], data["tracker_ids"])
        ):
            # Count the potential matches between ids in each timestep
            # These are normalised, weighted by the match similarity.
            similarity = data["similarity_scores"][t]
            sim_iou_denom = (
                similarity.sum(0)[np.newaxis, :]
                + similarity.sum(1)[:, np.newaxis]
                - similarity
            )
            sim_iou = np.zeros_like(similarity)
            # Guard against division by ~zero rows/columns of the similarity matrix.
            sim_iou_mask = sim_iou_denom > 0 + np.finfo("float").eps
            sim_iou[sim_iou_mask] = (
                similarity[sim_iou_mask] / sim_iou_denom[sim_iou_mask]
            )
            potential_matches_count[
                gt_ids_t[:, np.newaxis], tracker_ids_t[np.newaxis, :]
            ] += sim_iou

            # Calculate the total number of dets for each gt_id and tracker_id.
            gt_id_count[gt_ids_t] += 1
            tracker_id_count[0, tracker_ids_t] += 1

        # Calculate overall jaccard alignment score (before unique matching) between IDs
        global_alignment_score = potential_matches_count / (
            gt_id_count + tracker_id_count - potential_matches_count
        )
        # One per-alpha matrix accumulating how often each (gt_id, tracker_id)
        # pair is matched across timesteps.
        matches_counts = [
            np.zeros_like(potential_matches_count) for _ in self.array_labels
        ]

        # Calculate scores for each timestep
        for t, (gt_ids_t, tracker_ids_t) in enumerate(
            zip(data["gt_ids"], data["tracker_ids"])
        ):
            # Deal with the case that there are no gt_det/tracker_det in a timestep.
            if len(gt_ids_t) == 0:
                for a, alpha in enumerate(self.array_labels):
                    res["HOTA_FP"][a] += len(tracker_ids_t)
                continue
            if len(tracker_ids_t) == 0:
                for a, alpha in enumerate(self.array_labels):
                    res["HOTA_FN"][a] += len(gt_ids_t)
                continue

            # Get matching scores between pairs of dets for optimizing HOTA.
            # The global alignment score biases the per-frame matching towards
            # pairs that are consistently associated over the whole sequence.
            similarity = data["similarity_scores"][t]
            score_mat = (
                global_alignment_score[
                    gt_ids_t[:, np.newaxis], tracker_ids_t[np.newaxis, :]
                ]
                * similarity
            )

            # Hungarian algorithm to find best matches
            match_rows, match_cols = linear_sum_assignment(-score_mat)

            # Calculate and accumulate basic statistics
            for a, alpha in enumerate(self.array_labels):
                # Keep only assigned pairs whose raw similarity clears alpha
                # (eps tolerance avoids float-comparison artefacts).
                actually_matched_mask = (
                    similarity[match_rows, match_cols] >= alpha - np.finfo("float").eps
                )
                alpha_match_rows = match_rows[actually_matched_mask]
                alpha_match_cols = match_cols[actually_matched_mask]
                num_matches = len(alpha_match_rows)
                res["HOTA_TP"][a] += num_matches
                res["HOTA_FN"][a] += len(gt_ids_t) - num_matches
                res["HOTA_FP"][a] += len(tracker_ids_t) - num_matches
                if num_matches > 0:
                    # LocA accumulates raw similarity of TPs; normalised below.
                    res["LocA"][a] += sum(
                        similarity[alpha_match_rows, alpha_match_cols]
                    )
                    matches_counts[a][
                        gt_ids_t[alpha_match_rows], tracker_ids_t[alpha_match_cols]
                    ] += 1

        # Calculate association scores (AssA, AssRe, AssPr) for the alpha value.
        # First calculate scores per gt_id/tracker_id combo and then average over the number of detections.
        for a, alpha in enumerate(self.array_labels):
            matches_count = matches_counts[a]
            ass_a = matches_count / np.maximum(
                1, gt_id_count + tracker_id_count - matches_count
            )
            res["AssA"][a] = np.sum(matches_count * ass_a) / np.maximum(
                1, res["HOTA_TP"][a]
            )
            ass_re = matches_count / np.maximum(1, gt_id_count)
            res["AssRe"][a] = np.sum(matches_count * ass_re) / np.maximum(
                1, res["HOTA_TP"][a]
            )
            ass_pr = matches_count / np.maximum(1, tracker_id_count)
            res["AssPr"][a] = np.sum(matches_count * ass_pr) / np.maximum(
                1, res["HOTA_TP"][a]
            )

        # Calculate final scores (normalise LocA by TP count, guarding 0/0).
        res["LocA"] = np.maximum(1e-10, res["LocA"]) / np.maximum(1e-10, res["HOTA_TP"])
        res = self._compute_final_fields(res)
        return res

    def combine_sequences(self, all_res):
        """Combines metrics across all sequences"""
        res = {}
        for field in self.integer_array_fields:
            res[field] = self._combine_sum(all_res, field)
        # Association scores are combined as TP-weighted averages.
        for field in ["AssRe", "AssPr", "AssA"]:
            res[field] = self._combine_weighted_av(
                all_res, field, res, weight_field="HOTA_TP"
            )
        loca_weighted_sum = sum(
            [all_res[k]["LocA"] * all_res[k]["HOTA_TP"] for k in all_res.keys()]
        )
        res["LocA"] = np.maximum(1e-10, loca_weighted_sum) / np.maximum(
            1e-10, res["HOTA_TP"]
        )
        res = self._compute_final_fields(res)
        return res

    def combine_classes_class_averaged(self, all_res, ignore_empty_classes=False):
        """Combines metrics across all classes by averaging over the class values.
        If 'ignore_empty_classes' is True, then it only sums over classes with at least one gt or predicted detection.
        """
        res = {}
        for field in self.integer_array_fields:
            if ignore_empty_classes:
                # A class counts as non-empty if it has any TP/FN/FP at any alpha.
                res[field] = self._combine_sum(
                    {
                        k: v
                        for k, v in all_res.items()
                        if (
                            v["HOTA_TP"] + v["HOTA_FN"] + v["HOTA_FP"]
                            > 0 + np.finfo("float").eps
                        ).any()
                    },
                    field,
                )
            else:
                res[field] = self._combine_sum(
                    {k: v for k, v in all_res.items()}, field
                )

        # Float scores are averaged uniformly over (non-empty) classes.
        for field in self.float_fields + self.float_array_fields:
            if ignore_empty_classes:
                res[field] = np.mean(
                    [
                        v[field]
                        for v in all_res.values()
                        if (
                            v["HOTA_TP"] + v["HOTA_FN"] + v["HOTA_FP"]
                            > 0 + np.finfo("float").eps
                        ).any()
                    ],
                    axis=0,
                )
            else:
                res[field] = np.mean([v[field] for v in all_res.values()], axis=0)
        return res

    def combine_classes_det_averaged(self, all_res):
        """Combines metrics across all classes by averaging over the detection values"""
        res = {}
        for field in self.integer_array_fields:
            res[field] = self._combine_sum(all_res, field)
        for field in ["AssRe", "AssPr", "AssA"]:
            res[field] = self._combine_weighted_av(
                all_res, field, res, weight_field="HOTA_TP"
            )
        loca_weighted_sum = sum(
            [all_res[k]["LocA"] * all_res[k]["HOTA_TP"] for k in all_res.keys()]
        )
        res["LocA"] = np.maximum(1e-10, loca_weighted_sum) / np.maximum(
            1e-10, res["HOTA_TP"]
        )
        res = self._compute_final_fields(res)
        return res

    @staticmethod
    def _compute_final_fields(res):
        """Calculate sub-metric ('field') values which only depend on other sub-metric values.
        This function is used both for both per-sequence calculation, and in combining values across sequences.
        """
        res["DetRe"] = res["HOTA_TP"] / np.maximum(1, res["HOTA_TP"] + res["HOTA_FN"])
        res["DetPr"] = res["HOTA_TP"] / np.maximum(1, res["HOTA_TP"] + res["HOTA_FP"])
        res["DetA"] = res["HOTA_TP"] / np.maximum(
            1, res["HOTA_TP"] + res["HOTA_FN"] + res["HOTA_FP"]
        )
        # HOTA is the geometric mean of detection and association accuracy.
        res["HOTA"] = np.sqrt(res["DetA"] * res["AssA"])
        res["OWTA"] = np.sqrt(res["DetRe"] * res["AssA"])

        # Scalar summaries taken at the lowest alpha threshold (index 0).
        res["HOTA(0)"] = res["HOTA"][0]
        res["LocA(0)"] = res["LocA"][0]
        res["HOTALocA(0)"] = res["HOTA(0)"] * res["LocA(0)"]
        return res

    def plot_single_tracker_results(self, table_res, tracker, cls, output_folder):
        """Create plot of results (score curves vs alpha) as PDF and PNG."""

        # Only loaded when run to reduce minimum requirements
        from matplotlib import pyplot as plt

        res = table_res["COMBINED_SEQ"]
        styles_to_plot = ["r", "b", "g", "b--", "b:", "g--", "g:", "m"]
        for name, style in zip(self.float_array_fields, styles_to_plot):
            plt.plot(self.array_labels, res[name], style)
        plt.xlabel("alpha")
        plt.ylabel("score")
        plt.title(tracker + " - " + cls)
        plt.axis([0, 1, 0, 1])
        legend = []
        for name in self.float_array_fields:
            legend += [name + " (" + str(np.round(np.mean(res[name]), 2)) + ")"]
        plt.legend(legend, loc="lower left")
        out_file = os.path.join(output_folder, cls + "_plot.pdf")
        os.makedirs(os.path.dirname(out_file), exist_ok=True)
        plt.savefig(out_file)
        plt.savefig(out_file.replace(".pdf", ".png"))
        plt.clf()
|
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/utils.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import csv
|
| 5 |
+
import os
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def init_config(config, default_config, name=None):
    """Fill any missing keys of ``config`` with values from ``default_config``.

    If ``config`` is None, the defaults are used wholesale. When ``name`` is
    given and the resulting config's PRINT_CONFIG flag is truthy, the final
    config is printed.
    """
    if config is None:
        config = default_config
    else:
        for key, default_value in default_config.items():
            config.setdefault(key, default_value)
    if name and config["PRINT_CONFIG"]:
        print("\n%s Config:" % name)
        for c in config.keys():
            print("%-20s : %-30s" % (c, config[c]))
    return config
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def update_config(config):
    """
    Parse the arguments of a script and update the config values for any
    setting specified in the arguments.

    Each config key becomes an optional ``--KEY`` argument. List-valued (or
    None-valued) settings accept multiple values; boolean settings must be
    passed as the literal strings "True"/"False"; int-valued settings are
    cast to int.

    :param config: the config to update
    :return: the updated config
    :raises Exception: if a boolean setting receives anything other than
        "True" or "False"
    """
    parser = argparse.ArgumentParser()
    for setting in config.keys():
        if type(config[setting]) == list or type(config[setting]) == type(None):
            parser.add_argument("--" + setting, nargs="+")
        else:
            parser.add_argument("--" + setting)
    args = parser.parse_args().__dict__
    for setting in args.keys():
        if args[setting] is not None:
            if type(config[setting]) == type(True):
                if args[setting] == "True":
                    x = True
                elif args[setting] == "False":
                    x = False
                else:
                    # Bug fix: original message was missing the space before
                    # "must" ("parameter FOOmust be True or False").
                    raise Exception(
                        "Command line parameter " + setting + " must be True or False"
                    )
            elif type(config[setting]) == type(1):
                x = int(args[setting])
            elif type(args[setting]) == type(None):
                x = None
            else:
                x = args[setting]
            config[setting] = x
    return config
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def get_code_path():
    """Return the absolute path of the package directory containing this module."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, ".."))
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def validate_metrics_list(metrics_list):
    """Return the names of the given metric objects after sanity-checking them.

    Ensures metric names are unique and that no two metrics declare fields
    with the same name; raises TrackEvalException otherwise.
    """
    metric_names = [metric.get_name() for metric in metrics_list]
    # check metric names are unique
    if len(set(metric_names)) != len(metric_names):
        raise TrackEvalException(
            "Code being run with multiple metrics of the same name"
        )
    all_fields = [field for metric in metrics_list for field in metric.fields]
    # check metric fields are unique
    if len(set(all_fields)) != len(all_fields):
        raise TrackEvalException(
            "Code being run with multiple metrics with fields of the same name"
        )
    return metric_names
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def write_summary_results(summaries, cls, output_folder):
    """Write the one-line summary for a class to ``<cls>_summary.txt``.

    Known fields are emitted first, in the canonical order below; any further
    fields follow in the order each metric family produced them (insertion
    order of the summary dicts).
    """
    all_fields = []
    all_values = []
    for summary in summaries:
        all_fields.extend(summary.keys())
        all_values.extend(summary.values())

    # In order to remain consistent upon new fields being adding, for each of the following fields if they are present
    # they will be output in the summary first in the order below. Any further fields will be output in the order each
    # metric family is called, and within each family either in the order they were added to the dict (python >= 3.6) or
    # randomly (python < 3.6).
    default_order = [
        "HOTA",
        "DetA",
        "AssA",
        "DetRe",
        "DetPr",
        "AssRe",
        "AssPr",
        "LocA",
        "OWTA",
        "HOTA(0)",
        "LocA(0)",
        "HOTALocA(0)",
        "MOTA",
        "MOTP",
        "MODA",
        "CLR_Re",
        "CLR_Pr",
        "MTR",
        "PTR",
        "MLR",
        "CLR_TP",
        "CLR_FN",
        "CLR_FP",
        "IDSW",
        "MT",
        "PT",
        "ML",
        "Frag",
        "sMOTA",
        "IDF1",
        "IDR",
        "IDP",
        "IDTP",
        "IDFN",
        "IDFP",
        "Dets",
        "GT_Dets",
        "IDs",
        "GT_IDs",
    ]
    # Seed an ordered dict with None placeholders so known fields keep their
    # canonical position; unknown fields append at the end.
    ordered = OrderedDict((key, None) for key in default_order)
    for field, value in zip(all_fields, all_values):
        ordered[field] = value
    # Drop placeholders for known fields that were never supplied.
    for key in default_order:
        if ordered[key] is None:
            del ordered[key]

    out_file = os.path.join(output_folder, cls + "_summary.txt")
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    with open(out_file, "w", newline="") as f:
        writer = csv.writer(f, delimiter=" ")
        writer.writerow(list(ordered.keys()))
        writer.writerow(list(ordered.values()))
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def write_detailed_results(details, cls, output_folder):
    """Write per-sequence detailed results for a class to ``<cls>_detailed.csv``.

    One row per sequence (sorted by name), plus a final COMBINED row built
    from each metric family's "COMBINED_SEQ" entry.
    """
    sequences = details[0].keys()
    header = ["seq"]
    for detail in details:
        header.extend(detail["COMBINED_SEQ"].keys())
    out_file = os.path.join(output_folder, cls + "_detailed.csv")
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    with open(out_file, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for seq in sorted(sequences):
            if seq == "COMBINED_SEQ":
                continue
            row = [seq]
            for detail in details:
                row.extend(detail[seq].values())
            writer.writerow(row)
        combined_row = ["COMBINED"]
        for detail in details:
            combined_row.extend(detail["COMBINED_SEQ"].values())
        writer.writerow(combined_row)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def load_detail(file):
    """Load the per-sequence detailed results previously written for a tracker.

    Returns {sequence_name: {field: float_value}}; the "COMBINED" row is
    stored under the key "COMBINED_SEQ".
    """
    data = {}
    keys = []
    with open(file) as f:
        for line_num, raw_line in enumerate(f):
            cells = raw_line.replace("\r", "").replace("\n", "").split(",")
            if line_num == 0:
                # Header row: first column is the sequence name.
                keys = cells[1:]
                continue
            seq = cells[0]
            values = cells[1:]
            if seq == "COMBINED":
                seq = "COMBINED_SEQ"
            # Skip malformed or blank rows.
            if seq != "" and len(values) == len(keys):
                data[seq] = dict(zip(keys, map(float, values)))
    return data
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class TrackEvalException(Exception):
    """Custom exception for catching expected errors.

    Raised for anticipated failure modes (bad configs, duplicate metrics,
    unreadable tracker data) so callers can distinguish them from bugs.
    """

    ...
|
source_code/sam3/sam3/eval/teta_eval_toolkit/datasets/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# fmt: off
|
| 2 |
+
# flake8: noqa
|
| 3 |
+
"""Datasets."""
|
| 4 |
+
from .coco import COCO
|
| 5 |
+
from .tao import TAO
|
source_code/sam3/sam3/eval/teta_eval_toolkit/metrics/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# fmt: off
|
| 2 |
+
# flake8: noqa
|
| 3 |
+
|
| 4 |
+
from .teta import TETA
|
source_code/sam3/sam3/model/utils/misc.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from dataclasses import fields, is_dataclass
|
| 5 |
+
from typing import Any, Mapping, Protocol, runtime_checkable
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _is_named_tuple(x) -> bool:
|
| 11 |
+
return isinstance(x, tuple) and hasattr(x, "_asdict") and hasattr(x, "_fields")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@runtime_checkable
class _CopyableData(Protocol):
    """Structural type for objects that can be moved between devices via ``.to``.

    Matches (at runtime, via isinstance) anything exposing a ``to`` method,
    e.g. torch tensors and modules.
    """

    def to(self, device: torch.device, *args: Any, **kwargs: Any):
        """Copy data to the specified device"""
        ...
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def copy_data_to_device(data, device: torch.device, *args: Any, **kwargs: Any):
    """Function that recursively copies data to a torch.device.

    Args:
        data: The data to copy to device
        device: The device to which the data should be copied
        args: positional arguments that will be passed to the `to` call
        kwargs: keyword arguments that will be passed to the `to` call

    Returns:
        The data on the correct device
    """

    # NamedTuples must be rebuilt from keyword arguments, so they are handled
    # before the generic tuple branch below.
    if _is_named_tuple(data):
        return type(data)(
            **copy_data_to_device(data._asdict(), device, *args, **kwargs)
        )
    elif isinstance(data, (list, tuple)):
        return type(data)(copy_data_to_device(e, device, *args, **kwargs) for e in data)
    # defaultdict needs its default_factory preserved, so it is handled before
    # the generic Mapping branch.
    elif isinstance(data, defaultdict):
        return type(data)(
            data.default_factory,
            {
                k: copy_data_to_device(v, device, *args, **kwargs)
                for k, v in data.items()
            },
        )
    elif isinstance(data, Mapping):
        return type(data)(
            {
                k: copy_data_to_device(v, device, *args, **kwargs)
                for k, v in data.items()
            }
        )
    # Dataclass *instances* (not dataclass types): rebuild via the init
    # fields, then copy any non-init fields over with setattr.
    elif is_dataclass(data) and not isinstance(data, type):
        new_data_class = type(data)(
            **{
                field.name: copy_data_to_device(
                    getattr(data, field.name), device, *args, **kwargs
                )
                for field in fields(data)
                if field.init
            }
        )
        for field in fields(data):
            if not field.init:
                # NOTE(review): setattr raises on frozen dataclasses with
                # non-init fields — presumably not used here; confirm.
                setattr(
                    new_data_class,
                    field.name,
                    copy_data_to_device(
                        getattr(data, field.name), device, *args, **kwargs
                    ),
                )
        return new_data_class
    # Anything exposing `.to(device)` (tensors, modules, custom containers).
    elif isinstance(data, _CopyableData):
        return data.to(device, *args, **kwargs)
    # Non-copyable leaf values (ints, strings, ...) pass through unchanged.
    return data
|
source_code/sam3/sam3/perflib/triton/nms.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
# Adapted from https://github.com/stackav-oss/conch/blob/main/conch/kernels/vision/nms.py
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import triton
|
| 7 |
+
import triton.language as tl
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@triton.autotune(
    configs=[
        triton.Config({"cxpr_block_size": 128}),
        triton.Config({"cxpr_block_size": 256}),
        triton.Config({"cxpr_block_size": 512}),
        triton.Config({"cxpr_block_size": 1024}),
        triton.Config({"cxpr_block_size": 2048}),
        triton.Config({"cxpr_block_size": 4096}),
        triton.Config({"cxpr_block_size": 8192}),
    ],
    key=["num_boxes"],
)
@triton.jit
def _nms_suppression_kernel(
    # Tensors
    iou_mask_ptr: tl.tensor,  # [N, N]
    keep_mask_ptr: tl.tensor,  # [N]
    # Scalars
    num_boxes: tl.int32,
    # Strides
    iou_mask_stride: tl.int32,
    # Constexprs
    cxpr_block_size: tl.constexpr,
) -> None:
    """NMS suppression kernel.

    Expects rows/columns of the IoU mask to already be sorted by descending
    score (done by the caller); launched as a single program.

    Args:
        iou_mask_ptr: Pointer to precomputed IoU mask, shape: (N, N).
        keep_mask_ptr: Pointer to keep mask tensor, shape: (N,).
        num_boxes: Number of boxes.
        iou_mask_stride: Stride for IoU mask tensor.
        cxpr_block_size: Block size for processing.
    """
    # Sequential NMS: for each box in sorted order, suppress later boxes
    for current_box_idx in range(num_boxes - 1):
        # Check if current box is still kept
        is_kept = tl.load(keep_mask_ptr + current_box_idx)
        if is_kept:
            # IoU mask row offset for the current box
            # Because the IoU mask is sorted by score, we will only consider boxes that come after the current box.
            # This means we only need to read the upper triangular part of the IoU mask.
            iou_row_offset = current_box_idx * iou_mask_stride

            # Only process boxes that come after the current box
            next_box_idx = current_box_idx + 1
            remaining_boxes = num_boxes - next_box_idx

            # Iterate blockwise through the columns
            for block_idx in range(tl.cdiv(remaining_boxes, cxpr_block_size)):
                # Masked load of indices for the target boxes in the current block
                block_start = next_box_idx + block_idx * cxpr_block_size
                target_box_offsets = block_start + tl.arange(0, cxpr_block_size)
                target_box_mask = target_box_offsets < num_boxes

                # Suppress boxes with lower scores that have high IoU
                suppression_mask = tl.load(
                    iou_mask_ptr + iou_row_offset + target_box_offsets,
                    mask=target_box_mask,
                    other=False,
                )
                suppression_mask = tl.cast(suppression_mask, tl.int1)

                # Conditionally store suppression result for high-IoU boxes
                tl.store(
                    keep_mask_ptr + target_box_offsets, False, mask=suppression_mask
                )

    # Potential race condition: we need to ensure all threads complete the store before the next
    # iteration otherwise we may load stale data for whether or not a box has been suppressed.
    tl.debug_barrier()
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def nms_triton(
    ious: torch.Tensor,
    scores: torch.Tensor,
    iou_threshold: float,
) -> torch.Tensor:
    """Perform NMS given the iou matrix, the scores and the iou threshold

    Args:
        ious: Pairwise IoU tensor of shape (N, N).
        scores: Scores tensor of shape (N,).
        iou_threshold: IoU threshold for suppression.

    Returns:
        Tensor: Indices of kept boxes, sorted by decreasing score.
    """
    assert scores.dim() == 1, "Scores must be 1D"

    over_threshold = ious > iou_threshold
    assert over_threshold.dim() == 2
    assert over_threshold.shape[0] == over_threshold.shape[1] == scores.shape[0]
    assert over_threshold.device == scores.device
    assert over_threshold.dtype == torch.bool

    num_boxes = scores.size(0)
    keep = torch.ones(num_boxes, device=scores.device, dtype=torch.bool)

    # Reorder the mask so row/column 0 is the highest-scoring box (stable sort
    # keeps deterministic tie-breaking).
    _, order = torch.sort(scores, dim=0, stable=True, descending=True)
    over_threshold = over_threshold[order][:, order].contiguous()

    # Suppression is inherently sequential, so launch a single program that
    # still exploits parallelism by processing columns block-wise.
    _nms_suppression_kernel[(1,)](
        # Tensors
        iou_mask_ptr=over_threshold,
        keep_mask_ptr=keep,
        # Scalars
        num_boxes=num_boxes,
        # Strides
        iou_mask_stride=over_threshold.stride(0),
    )
    # Map surviving sorted positions back to original box indices.
    return order[keep]
|
source_code/sam3/sam3/sam/common.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
from typing import Type
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class MLPBlock(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> Linear.

    Expands the last dimension from ``embedding_dim`` to ``mlp_dim``, applies
    the activation, and projects back, leaving the input shape unchanged.
    """

    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,
    ) -> None:
        super().__init__()
        # Layer creation order is significant for RNG-reproducible init.
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.lin1(x)
        hidden = self.act(hidden)
        return self.lin2(hidden)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
|
| 26 |
+
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
|
| 27 |
+
class LayerNorm2d(nn.Module):
    """Channel-wise LayerNorm for NCHW tensors.

    Each spatial position is normalized across the channel dimension (dim 1)
    using a biased variance estimate, then scaled and shifted by learned
    per-channel parameters.
    """

    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps  # added under the sqrt for numerical stability

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mean = x.mean(1, keepdim=True)
        centered = x - mean
        var = centered.pow(2).mean(1, keepdim=True)
        normed = centered / torch.sqrt(var + self.eps)
        # Broadcast the per-channel affine params over the spatial dims.
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
|
source_code/sam3/sam3/train/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_crowded.yaml
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- /configs/eval_base.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
|
| 6 |
+
# ============================================================================
|
| 7 |
+
# Paths Configuration (you can override here; no further changes should be needed if eval_base.yaml is correct)
|
| 8 |
+
# ============================================================================
|
| 9 |
+
paths:
|
| 10 |
+
experiment_log_dir: ${paths.base_experiment_log_dir}/gold_crowded/
|
| 11 |
+
coco_gt: ${paths.base_annotation_path}/gold_crowded_merged_a_release_test.json
|
| 12 |
+
coco_gts:
|
| 13 |
+
- ${paths.base_annotation_path}/gold_crowded_merged_a_release_test.json
|
| 14 |
+
- ${paths.base_annotation_path}/gold_crowded_merged_b_release_test.json
|
| 15 |
+
- ${paths.base_annotation_path}/gold_crowded_merged_c_release_test.json
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# ============================================================================
|
| 19 |
+
# Trainer Configuration
|
| 20 |
+
# ============================================================================
|
| 21 |
+
|
| 22 |
+
trainer:
|
| 23 |
+
data:
|
| 24 |
+
val:
|
| 25 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 26 |
+
dataset:
|
| 27 |
+
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
|
| 28 |
+
coco_json_loader:
|
| 29 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
|
| 30 |
+
_partial_: true
|
| 31 |
+
img_folder: ${paths.metaclip_img_path}
|
| 32 |
+
ann_file: ${paths.coco_gt}
|
| 33 |
+
transforms: ${scratch.base_val_transform}
|
| 34 |
+
max_ann_per_img: 100000
|
| 35 |
+
multiplier: 1
|
| 36 |
+
training: false
|
| 37 |
+
|
| 38 |
+
shuffle: False
|
| 39 |
+
batch_size: ${scratch.val_batch_size}
|
| 40 |
+
num_workers: ${scratch.num_val_workers}
|
| 41 |
+
pin_memory: False
|
| 42 |
+
drop_last: False
|
| 43 |
+
collate_fn:
|
| 44 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 45 |
+
_partial_: true
|
| 46 |
+
repeats: ${scratch.hybrid_repeats}
|
| 47 |
+
dict_key: gold_crowded
|
| 48 |
+
|
| 49 |
+
meters:
|
| 50 |
+
val:
|
| 51 |
+
gold_crowded: # this key matches the "dict_key" in the dataloader's collate function
|
| 52 |
+
cgf1:
|
| 53 |
+
_target_: sam3.eval.coco_writer.PredictionDumper
|
| 54 |
+
iou_type: "segm"
|
| 55 |
+
dump_dir: ${launcher.experiment_log_dir}/dumps/gold_crowded
|
| 56 |
+
merge_predictions: True
|
| 57 |
+
postprocessor: ${scratch.mask_postprocessor_thresholded}
|
| 58 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 59 |
+
maxdets: 1000000 # no limit
|
| 60 |
+
pred_file_evaluators:
|
| 61 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 62 |
+
gt_path: ${paths.coco_gts}
|
| 63 |
+
iou_type: "bbox"
|
| 64 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 65 |
+
gt_path: ${paths.coco_gts}
|
| 66 |
+
iou_type: "segm"
|
source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_food.yaml
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- /configs/eval_base.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
|
| 6 |
+
# ============================================================================
|
| 7 |
+
# Paths Configuration (you can override here; no further changes should be needed if eval_base.yaml is correct)
|
| 8 |
+
# ============================================================================
|
| 9 |
+
paths:
|
| 10 |
+
experiment_log_dir: ${paths.base_experiment_log_dir}/gold_fg_food/
|
| 11 |
+
coco_gt: ${paths.base_annotation_path}/gold_fg_food_merged_a_release_test.json
|
| 12 |
+
coco_gts:
|
| 13 |
+
- ${paths.base_annotation_path}/gold_fg_food_merged_a_release_test.json
|
| 14 |
+
- ${paths.base_annotation_path}/gold_fg_food_merged_b_release_test.json
|
| 15 |
+
- ${paths.base_annotation_path}/gold_fg_food_merged_c_release_test.json
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# ============================================================================
|
| 19 |
+
# Trainer Configuration
|
| 20 |
+
# ============================================================================
|
| 21 |
+
|
| 22 |
+
trainer:
|
| 23 |
+
data:
|
| 24 |
+
val:
|
| 25 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 26 |
+
dataset:
|
| 27 |
+
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
|
| 28 |
+
coco_json_loader:
|
| 29 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
|
| 30 |
+
_partial_: true
|
| 31 |
+
img_folder: ${paths.metaclip_img_path}
|
| 32 |
+
ann_file: ${paths.coco_gt}
|
| 33 |
+
transforms: ${scratch.base_val_transform}
|
| 34 |
+
max_ann_per_img: 100000
|
| 35 |
+
multiplier: 1
|
| 36 |
+
training: false
|
| 37 |
+
|
| 38 |
+
shuffle: False
|
| 39 |
+
batch_size: ${scratch.val_batch_size}
|
| 40 |
+
num_workers: ${scratch.num_val_workers}
|
| 41 |
+
pin_memory: False
|
| 42 |
+
drop_last: False
|
| 43 |
+
collate_fn:
|
| 44 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 45 |
+
_partial_: true
|
| 46 |
+
repeats: ${scratch.hybrid_repeats}
|
| 47 |
+
dict_key: gold_fg_food
|
| 48 |
+
|
| 49 |
+
meters:
|
| 50 |
+
val:
|
| 51 |
+
gold_fg_food: # this key matches the "dict_key" in the dataloader's collate function
|
| 52 |
+
cgf1:
|
| 53 |
+
_target_: sam3.eval.coco_writer.PredictionDumper
|
| 54 |
+
iou_type: "segm"
|
| 55 |
+
dump_dir: ${launcher.experiment_log_dir}/dumps/gold_fg_food
|
| 56 |
+
merge_predictions: True
|
| 57 |
+
postprocessor: ${scratch.mask_postprocessor_thresholded}
|
| 58 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 59 |
+
maxdets: 1000000 # no limit
|
| 60 |
+
pred_file_evaluators:
|
| 61 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 62 |
+
gt_path: ${paths.coco_gts}
|
| 63 |
+
iou_type: "bbox"
|
| 64 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 65 |
+
gt_path: ${paths.coco_gts}
|
| 66 |
+
iou_type: "segm"
|
source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_fg_sports.yaml
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- /configs/eval_base.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
|
| 6 |
+
# ============================================================================
|
| 7 |
+
# Paths Configuration (you can override here; no further changes should be needed if eval_base.yaml is correct)
|
| 8 |
+
# ============================================================================
|
| 9 |
+
paths:
|
| 10 |
+
experiment_log_dir: ${paths.base_experiment_log_dir}/gold_fg_sports_equipment/
|
| 11 |
+
coco_gt: ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_a_release_test.json
|
| 12 |
+
coco_gts:
|
| 13 |
+
- ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_a_release_test.json
|
| 14 |
+
- ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_b_release_test.json
|
| 15 |
+
- ${paths.base_annotation_path}/gold_fg_sports_equipment_merged_c_release_test.json
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# ============================================================================
|
| 19 |
+
# Trainer Configuration
|
| 20 |
+
# ============================================================================
|
| 21 |
+
|
| 22 |
+
trainer:
|
| 23 |
+
data:
|
| 24 |
+
val:
|
| 25 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 26 |
+
dataset:
|
| 27 |
+
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
|
| 28 |
+
coco_json_loader:
|
| 29 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
|
| 30 |
+
_partial_: true
|
| 31 |
+
img_folder: ${paths.metaclip_img_path}
|
| 32 |
+
ann_file: ${paths.coco_gt}
|
| 33 |
+
transforms: ${scratch.base_val_transform}
|
| 34 |
+
max_ann_per_img: 100000
|
| 35 |
+
multiplier: 1
|
| 36 |
+
training: false
|
| 37 |
+
|
| 38 |
+
shuffle: False
|
| 39 |
+
batch_size: ${scratch.val_batch_size}
|
| 40 |
+
num_workers: ${scratch.num_val_workers}
|
| 41 |
+
pin_memory: False
|
| 42 |
+
drop_last: False
|
| 43 |
+
collate_fn:
|
| 44 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 45 |
+
_partial_: true
|
| 46 |
+
repeats: ${scratch.hybrid_repeats}
|
| 47 |
+
dict_key: gold_fg_sports_equipment
|
| 48 |
+
|
| 49 |
+
meters:
|
| 50 |
+
val:
|
| 51 |
+
gold_fg_sports_equipment: # this key matches the "dict_key" in the dataloader's collate function
|
| 52 |
+
cgf1:
|
| 53 |
+
_target_: sam3.eval.coco_writer.PredictionDumper
|
| 54 |
+
iou_type: "segm"
|
| 55 |
+
dump_dir: ${launcher.experiment_log_dir}/dumps/gold_fg_sports_equipment
|
| 56 |
+
merge_predictions: True
|
| 57 |
+
postprocessor: ${scratch.mask_postprocessor_thresholded}
|
| 58 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 59 |
+
maxdets: 1000000 # no limit
|
| 60 |
+
pred_file_evaluators:
|
| 61 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 62 |
+
gt_path: ${paths.coco_gts}
|
| 63 |
+
iou_type: "bbox"
|
| 64 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 65 |
+
gt_path: ${paths.coco_gts}
|
| 66 |
+
iou_type: "segm"
|
source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_wiki_common.yaml
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- /configs/eval_base.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
|
| 6 |
+
# ============================================================================
|
| 7 |
+
# Paths Configuration (you can override here; no further changes should be needed if eval_base.yaml is correct)
|
| 8 |
+
# ============================================================================
|
| 9 |
+
paths:
|
| 10 |
+
experiment_log_dir: ${paths.base_experiment_log_dir}/gold_wiki_common/
|
| 11 |
+
coco_gt: ${paths.base_annotation_path}/gold_wiki_common_merged_a_release_test.json
|
| 12 |
+
coco_gts:
|
| 13 |
+
- ${paths.base_annotation_path}/gold_wiki_common_merged_a_release_test.json
|
| 14 |
+
- ${paths.base_annotation_path}/gold_wiki_common_merged_b_release_test.json
|
| 15 |
+
- ${paths.base_annotation_path}/gold_wiki_common_merged_c_release_test.json
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# ============================================================================
|
| 19 |
+
# Trainer Configuration
|
| 20 |
+
# ============================================================================
|
| 21 |
+
|
| 22 |
+
trainer:
|
| 23 |
+
data:
|
| 24 |
+
val:
|
| 25 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 26 |
+
dataset:
|
| 27 |
+
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
|
| 28 |
+
coco_json_loader:
|
| 29 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
|
| 30 |
+
_partial_: true
|
| 31 |
+
img_folder: ${paths.metaclip_img_path}
|
| 32 |
+
ann_file: ${paths.coco_gt}
|
| 33 |
+
transforms: ${scratch.base_val_transform}
|
| 34 |
+
max_ann_per_img: 100000
|
| 35 |
+
multiplier: 1
|
| 36 |
+
training: false
|
| 37 |
+
|
| 38 |
+
shuffle: False
|
| 39 |
+
batch_size: ${scratch.val_batch_size}
|
| 40 |
+
num_workers: ${scratch.num_val_workers}
|
| 41 |
+
pin_memory: False
|
| 42 |
+
drop_last: False
|
| 43 |
+
collate_fn:
|
| 44 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 45 |
+
_partial_: true
|
| 46 |
+
repeats: ${scratch.hybrid_repeats}
|
| 47 |
+
dict_key: gold_wiki_common
|
| 48 |
+
|
| 49 |
+
meters:
|
| 50 |
+
val:
|
| 51 |
+
gold_wiki_common: # this key matches the "dict_key" in the dataloader's collate function
|
| 52 |
+
cgf1:
|
| 53 |
+
_target_: sam3.eval.coco_writer.PredictionDumper
|
| 54 |
+
iou_type: "segm"
|
| 55 |
+
dump_dir: ${launcher.experiment_log_dir}/dumps/gold_wiki_common
|
| 56 |
+
merge_predictions: True
|
| 57 |
+
postprocessor: ${scratch.mask_postprocessor_thresholded}
|
| 58 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 59 |
+
maxdets: 1000000 # no limit
|
| 60 |
+
pred_file_evaluators:
|
| 61 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 62 |
+
gt_path: ${paths.coco_gts}
|
| 63 |
+
iou_type: "bbox"
|
| 64 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 65 |
+
gt_path: ${paths.coco_gts}
|
| 66 |
+
iou_type: "segm"
|
source_code/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_eval.yaml
ADDED
|
@@ -0,0 +1,539 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
roboflow_vl_100_root: <YOUR_DATASET_DIR>
|
| 10 |
+
experiment_log_dir: <YOUR_EXPERIMENT_LOG_DIR>
|
| 11 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 12 |
+
|
| 13 |
+
# Roboflow dataset configuration
|
| 14 |
+
roboflow_train:
|
| 15 |
+
num_images: 100 # Note: This is the number of images used for training. If null, all images are used.
|
| 16 |
+
supercategory: ${all_roboflow_supercategories.${string:${submitit.job_array.task_index}}}
|
| 17 |
+
|
| 18 |
+
# Training transforms pipeline
|
| 19 |
+
train_transforms:
|
| 20 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 21 |
+
transforms:
|
| 22 |
+
- _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
|
| 23 |
+
query_filter:
|
| 24 |
+
_target_: sam3.train.transforms.filter_query_transforms.FilterCrowds
|
| 25 |
+
- _target_: sam3.train.transforms.point_sampling.RandomizeInputBbox
|
| 26 |
+
box_noise_std: 0.1
|
| 27 |
+
box_noise_max: 20
|
| 28 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 29 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 30 |
+
sizes:
|
| 31 |
+
_target_: sam3.train.transforms.basic.get_random_resize_scales
|
| 32 |
+
size: ${scratch.resolution}
|
| 33 |
+
min_size: 480
|
| 34 |
+
rounded: false
|
| 35 |
+
max_size:
|
| 36 |
+
_target_: sam3.train.transforms.basic.get_random_resize_max_size
|
| 37 |
+
size: ${scratch.resolution}
|
| 38 |
+
square: true
|
| 39 |
+
consistent_transform: ${scratch.consistent_transform}
|
| 40 |
+
- _target_: sam3.train.transforms.basic_for_api.PadToSizeAPI
|
| 41 |
+
size: ${scratch.resolution}
|
| 42 |
+
consistent_transform: ${scratch.consistent_transform}
|
| 43 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 44 |
+
- _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
|
| 45 |
+
query_filter:
|
| 46 |
+
_target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets
|
| 47 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 48 |
+
mean: ${scratch.train_norm_mean}
|
| 49 |
+
std: ${scratch.train_norm_std}
|
| 50 |
+
- _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
|
| 51 |
+
query_filter:
|
| 52 |
+
_target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets
|
| 53 |
+
- _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
|
| 54 |
+
query_filter:
|
| 55 |
+
_target_: sam3.train.transforms.filter_query_transforms.FilterFindQueriesWithTooManyOut
|
| 56 |
+
max_num_objects: ${scratch.max_ann_per_img}
|
| 57 |
+
|
| 58 |
+
# Validation transforms pipeline
|
| 59 |
+
val_transforms:
|
| 60 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 61 |
+
transforms:
|
| 62 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 63 |
+
sizes: ${scratch.resolution}
|
| 64 |
+
max_size:
|
| 65 |
+
_target_: sam3.train.transforms.basic.get_random_resize_max_size
|
| 66 |
+
size: ${scratch.resolution}
|
| 67 |
+
square: true
|
| 68 |
+
consistent_transform: False
|
| 69 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 70 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 71 |
+
mean: ${scratch.train_norm_mean}
|
| 72 |
+
std: ${scratch.train_norm_std}
|
| 73 |
+
|
| 74 |
+
# loss config (no mask loss)
|
| 75 |
+
loss:
|
| 76 |
+
_target_: sam3.train.loss.sam3_loss.Sam3LossWrapper
|
| 77 |
+
matcher: ${scratch.matcher}
|
| 78 |
+
o2m_weight: 2.0
|
| 79 |
+
o2m_matcher:
|
| 80 |
+
_target_: sam3.train.matcher.BinaryOneToManyMatcher
|
| 81 |
+
alpha: 0.3
|
| 82 |
+
threshold: 0.4
|
| 83 |
+
topk: 4
|
| 84 |
+
use_o2m_matcher_on_o2m_aux: false # Another option is true
|
| 85 |
+
loss_fns_find:
|
| 86 |
+
- _target_: sam3.train.loss.loss_fns.Boxes
|
| 87 |
+
weight_dict:
|
| 88 |
+
loss_bbox: 5.0
|
| 89 |
+
loss_giou: 2.0
|
| 90 |
+
- _target_: sam3.train.loss.loss_fns.IABCEMdetr
|
| 91 |
+
weak_loss: False
|
| 92 |
+
weight_dict:
|
| 93 |
+
loss_ce: 20.0 # Another option is 100.0
|
| 94 |
+
presence_loss: 20.0
|
| 95 |
+
pos_weight: 10.0 # Another option is 5.0
|
| 96 |
+
alpha: 0.25
|
| 97 |
+
gamma: 2
|
| 98 |
+
use_presence: True # Change
|
| 99 |
+
pos_focal: false
|
| 100 |
+
pad_n_queries: 200
|
| 101 |
+
pad_scale_pos: 1.0
|
| 102 |
+
|
| 103 |
+
loss_fn_semantic_seg: null
|
| 104 |
+
scale_by_find_batch_size: ${scratch.scale_by_find_batch_size}
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# NOTE: Loss to be used for training in case of segmentation
|
| 108 |
+
# loss:
|
| 109 |
+
# _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper
|
| 110 |
+
# matcher: ${scratch.matcher}
|
| 111 |
+
# o2m_weight: 2.0
|
| 112 |
+
# o2m_matcher:
|
| 113 |
+
# _target_: sam3.train.matcher.BinaryOneToManyMatcher
|
| 114 |
+
# alpha: 0.3
|
| 115 |
+
# threshold: 0.4
|
| 116 |
+
# topk: 4
|
| 117 |
+
# use_o2m_matcher_on_o2m_aux: false
|
| 118 |
+
# loss_fns_find:
|
| 119 |
+
# - _target_: sam3.train.loss.loss_fns.Boxes
|
| 120 |
+
# weight_dict:
|
| 121 |
+
# loss_bbox: 5.0
|
| 122 |
+
# loss_giou: 2.0
|
| 123 |
+
# - _target_: sam3.train.loss.loss_fns.IABCEMdetr
|
| 124 |
+
# weak_loss: False
|
| 125 |
+
# weight_dict:
|
| 126 |
+
# loss_ce: 20.0 # Another option is 100.0
|
| 127 |
+
# presence_loss: 20.0
|
| 128 |
+
# pos_weight: 10.0 # Another option is 5.0
|
| 129 |
+
# alpha: 0.25
|
| 130 |
+
# gamma: 2
|
| 131 |
+
# use_presence: True # Change
|
| 132 |
+
# pos_focal: false
|
| 133 |
+
# pad_n_queries: 200
|
| 134 |
+
# pad_scale_pos: 1.0
|
| 135 |
+
# - _target_: sam3.train.loss.loss_fns.Masks
|
| 136 |
+
# focal_alpha: 0.25
|
| 137 |
+
# focal_gamma: 2.0
|
| 138 |
+
# weight_dict:
|
| 139 |
+
# loss_mask: 200.0
|
| 140 |
+
# loss_dice: 10.0
|
| 141 |
+
# compute_aux: false
|
| 142 |
+
# loss_fn_semantic_seg:
|
| 143 |
+
# _target_: sam3.losses.loss_fns.SemanticSegCriterion
|
| 144 |
+
# presence_head: True
|
| 145 |
+
# presence_loss: False # Change
|
| 146 |
+
# focal: True
|
| 147 |
+
# focal_alpha: 0.6
|
| 148 |
+
# focal_gamma: 2.0
|
| 149 |
+
# downsample: False
|
| 150 |
+
# weight_dict:
|
| 151 |
+
# loss_semantic_seg: 20.0
|
| 152 |
+
# loss_semantic_presence: 1.0
|
| 153 |
+
# loss_semantic_dice: 30.0
|
| 154 |
+
# scale_by_find_batch_size: ${scratch.scale_by_find_batch_size}
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Different helper parameters and functions
|
| 158 |
+
# ============================================================================
|
| 159 |
+
scratch:
|
| 160 |
+
enable_segmentation: False # NOTE: Set to True to enable segmentation (mask loading, mask losses, and the segmentation head).
|
| 161 |
+
# Model parameters
|
| 162 |
+
d_model: 256
|
| 163 |
+
pos_embed:
|
| 164 |
+
_target_: sam3.model.position_encoding.PositionEmbeddingSine
|
| 165 |
+
num_pos_feats: ${scratch.d_model}
|
| 166 |
+
normalize: true
|
| 167 |
+
scale: null
|
| 168 |
+
temperature: 10000
|
| 169 |
+
|
| 170 |
+
# Box processing
|
| 171 |
+
use_presence_eval: True
|
| 172 |
+
original_box_postprocessor:
|
| 173 |
+
_target_: sam3.eval.postprocessors.PostProcessImage
|
| 174 |
+
max_dets_per_img: -1 # infinite detections
|
| 175 |
+
use_original_ids: true
|
| 176 |
+
use_original_sizes_box: true
|
| 177 |
+
use_presence: ${scratch.use_presence_eval}
|
| 178 |
+
|
| 179 |
+
# Matcher configuration
|
| 180 |
+
matcher:
|
| 181 |
+
_target_: sam3.train.matcher.BinaryHungarianMatcherV2
|
| 182 |
+
focal: true # with `focal: true` it is equivalent to BinaryFocalHungarianMatcher
|
| 183 |
+
cost_class: 2.0
|
| 184 |
+
cost_bbox: 5.0
|
| 185 |
+
cost_giou: 2.0
|
| 186 |
+
alpha: 0.25
|
| 187 |
+
gamma: 2
|
| 188 |
+
stable: False
|
| 189 |
+
scale_by_find_batch_size: True
|
| 190 |
+
|
| 191 |
+
# Image processing parameters
|
| 192 |
+
resolution: 1008
|
| 193 |
+
consistent_transform: False
|
| 194 |
+
max_ann_per_img: 200
|
| 195 |
+
|
| 196 |
+
# Normalization parameters
|
| 197 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 198 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 199 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 200 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 201 |
+
|
| 202 |
+
# Training parameters
|
| 203 |
+
num_train_workers: 10
|
| 204 |
+
num_val_workers: 0
|
| 205 |
+
max_data_epochs: 20
|
| 206 |
+
target_epoch_size: 1500
|
| 207 |
+
hybrid_repeats: 1
|
| 208 |
+
context_length: 2
|
| 209 |
+
gather_pred_via_filesys: false
|
| 210 |
+
|
| 211 |
+
# Learning rate and scheduler parameters
|
| 212 |
+
lr_scale: 0.1
|
| 213 |
+
lr_transformer: ${times:8e-4,${scratch.lr_scale}}
|
| 214 |
+
lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}}
|
| 215 |
+
lr_language_backbone: ${times:5e-5,${scratch.lr_scale}}
|
| 216 |
+
lrd_vision_backbone: 0.9
|
| 217 |
+
wd: 0.1
|
| 218 |
+
scheduler_timescale: 20
|
| 219 |
+
scheduler_warmup: 20
|
| 220 |
+
scheduler_cooldown: 20
|
| 221 |
+
|
| 222 |
+
val_batch_size: 1
|
| 223 |
+
collate_fn_val:
|
| 224 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 225 |
+
_partial_: true
|
| 226 |
+
repeats: ${scratch.hybrid_repeats}
|
| 227 |
+
dict_key: roboflow100
|
| 228 |
+
with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks!
|
| 229 |
+
|
| 230 |
+
gradient_accumulation_steps: 1
|
| 231 |
+
train_batch_size: 1
|
| 232 |
+
collate_fn:
|
| 233 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 234 |
+
_partial_: true
|
| 235 |
+
repeats: ${scratch.hybrid_repeats}
|
| 236 |
+
dict_key: all
|
| 237 |
+
with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks!
|
| 238 |
+
|
| 239 |
+
# ============================================================================
|
| 240 |
+
# Trainer Configuration
|
| 241 |
+
# ============================================================================
|
| 242 |
+
|
| 243 |
+
trainer:
|
| 244 |
+
|
| 245 |
+
_target_: sam3.train.trainer.Trainer
|
| 246 |
+
skip_saving_ckpts: true
|
| 247 |
+
empty_gpu_mem_cache_after_eval: True
|
| 248 |
+
skip_first_val: True
|
| 249 |
+
max_epochs: 20
|
| 250 |
+
accelerator: cuda
|
| 251 |
+
seed_value: 123
|
| 252 |
+
val_epoch_freq: 10
|
| 253 |
+
mode: val
|
| 254 |
+
gradient_accumulation_steps: ${scratch.gradient_accumulation_steps}
|
| 255 |
+
|
| 256 |
+
distributed:
|
| 257 |
+
backend: nccl
|
| 258 |
+
find_unused_parameters: True
|
| 259 |
+
gradient_as_bucket_view: True
|
| 260 |
+
|
| 261 |
+
loss:
|
| 262 |
+
all: ${roboflow_train.loss}
|
| 263 |
+
default:
|
| 264 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 265 |
+
|
| 266 |
+
data:
|
| 267 |
+
train:
|
| 268 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 269 |
+
dataset:
|
| 270 |
+
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
|
| 271 |
+
limit_ids: ${roboflow_train.num_images}
|
| 272 |
+
transforms: ${roboflow_train.train_transforms}
|
| 273 |
+
load_segmentation: ${scratch.enable_segmentation}
|
| 274 |
+
max_ann_per_img: 500000
|
| 275 |
+
multiplier: 1
|
| 276 |
+
max_train_queries: 50000
|
| 277 |
+
max_val_queries: 50000
|
| 278 |
+
training: true
|
| 279 |
+
use_caching: False
|
| 280 |
+
img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/
|
| 281 |
+
ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/_annotations.coco.json
|
| 282 |
+
|
| 283 |
+
shuffle: True
|
| 284 |
+
batch_size: ${scratch.train_batch_size}
|
| 285 |
+
num_workers: ${scratch.num_train_workers}
|
| 286 |
+
pin_memory: True
|
| 287 |
+
drop_last: True
|
| 288 |
+
collate_fn: ${scratch.collate_fn}
|
| 289 |
+
|
| 290 |
+
val:
|
| 291 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 292 |
+
dataset:
|
| 293 |
+
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
|
| 294 |
+
load_segmentation: ${scratch.enable_segmentation}
|
| 295 |
+
coco_json_loader:
|
| 296 |
+
_target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON
|
| 297 |
+
include_negatives: true
|
| 298 |
+
category_chunk_size: 2 # Note: You can increase this based on the memory of your GPU.
|
| 299 |
+
_partial_: true
|
| 300 |
+
img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/
|
| 301 |
+
ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json
|
| 302 |
+
transforms: ${roboflow_train.val_transforms}
|
| 303 |
+
max_ann_per_img: 100000
|
| 304 |
+
multiplier: 1
|
| 305 |
+
training: false
|
| 306 |
+
|
| 307 |
+
shuffle: False
|
| 308 |
+
batch_size: ${scratch.val_batch_size}
|
| 309 |
+
num_workers: ${scratch.num_val_workers}
|
| 310 |
+
pin_memory: True
|
| 311 |
+
drop_last: False
|
| 312 |
+
collate_fn: ${scratch.collate_fn_val}
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
model:
|
| 316 |
+
_target_: sam3.model_builder.build_sam3_image_model
|
| 317 |
+
bpe_path: ${paths.bpe_path}
|
| 318 |
+
device: cpu
|
| 319 |
+
eval_mode: true
|
| 320 |
+
enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation.
|
| 321 |
+
|
| 322 |
+
meters:
|
| 323 |
+
val:
|
| 324 |
+
roboflow100:
|
| 325 |
+
detection:
|
| 326 |
+
_target_: sam3.eval.coco_writer.PredictionDumper
|
| 327 |
+
iou_type: "bbox"
|
| 328 |
+
dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${roboflow_train.supercategory}
|
| 329 |
+
merge_predictions: True
|
| 330 |
+
postprocessor: ${scratch.original_box_postprocessor}
|
| 331 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 332 |
+
maxdets: 100
|
| 333 |
+
pred_file_evaluators:
|
| 334 |
+
- _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators
|
| 335 |
+
gt_path: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json
|
| 336 |
+
tide: False
|
| 337 |
+
iou_type: "bbox"
|
| 338 |
+
|
| 339 |
+
optim:
|
| 340 |
+
amp:
|
| 341 |
+
enabled: True
|
| 342 |
+
amp_dtype: bfloat16
|
| 343 |
+
|
| 344 |
+
optimizer:
|
| 345 |
+
_target_: torch.optim.AdamW
|
| 346 |
+
|
| 347 |
+
gradient_clip:
|
| 348 |
+
_target_: sam3.train.optim.optimizer.GradientClipper
|
| 349 |
+
max_norm: 0.1
|
| 350 |
+
norm_type: 2
|
| 351 |
+
|
| 352 |
+
param_group_modifiers:
|
| 353 |
+
- _target_: sam3.train.optim.optimizer.layer_decay_param_modifier
|
| 354 |
+
_partial_: True
|
| 355 |
+
layer_decay_value: ${scratch.lrd_vision_backbone}
|
| 356 |
+
apply_to: 'backbone.vision_backbone.trunk'
|
| 357 |
+
overrides:
|
| 358 |
+
- pattern: '*pos_embed*'
|
| 359 |
+
value: 1.0
|
| 360 |
+
|
| 361 |
+
options:
|
| 362 |
+
lr:
|
| 363 |
+
- scheduler: # transformer and class_embed
|
| 364 |
+
_target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
|
| 365 |
+
base_lr: ${scratch.lr_transformer}
|
| 366 |
+
timescale: ${scratch.scheduler_timescale}
|
| 367 |
+
warmup_steps: ${scratch.scheduler_warmup}
|
| 368 |
+
cooldown_steps: ${scratch.scheduler_cooldown}
|
| 369 |
+
- scheduler:
|
| 370 |
+
_target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
|
| 371 |
+
base_lr: ${scratch.lr_vision_backbone}
|
| 372 |
+
timescale: ${scratch.scheduler_timescale}
|
| 373 |
+
warmup_steps: ${scratch.scheduler_warmup}
|
| 374 |
+
cooldown_steps: ${scratch.scheduler_cooldown}
|
| 375 |
+
param_names:
|
| 376 |
+
- 'backbone.vision_backbone.*'
|
| 377 |
+
- scheduler:
|
| 378 |
+
_target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
|
| 379 |
+
base_lr: ${scratch.lr_language_backbone}
|
| 380 |
+
timescale: ${scratch.scheduler_timescale}
|
| 381 |
+
warmup_steps: ${scratch.scheduler_warmup}
|
| 382 |
+
cooldown_steps: ${scratch.scheduler_cooldown}
|
| 383 |
+
param_names:
|
| 384 |
+
- 'backbone.language_backbone.*'
|
| 385 |
+
|
| 386 |
+
weight_decay:
|
| 387 |
+
- scheduler:
|
| 388 |
+
_target_: fvcore.common.param_scheduler.ConstantParamScheduler
|
| 389 |
+
value: ${scratch.wd}
|
| 390 |
+
- scheduler:
|
| 391 |
+
_target_: fvcore.common.param_scheduler.ConstantParamScheduler
|
| 392 |
+
value: 0.0
|
| 393 |
+
param_names:
|
| 394 |
+
- '*bias*'
|
| 395 |
+
module_cls_names: ['torch.nn.LayerNorm']
|
| 396 |
+
|
| 397 |
+
checkpoint:
|
| 398 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 399 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 400 |
+
|
| 401 |
+
logging:
|
| 402 |
+
tensorboard_writer:
|
| 403 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 404 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 405 |
+
flush_secs: 120
|
| 406 |
+
should_log: True
|
| 407 |
+
wandb_writer: null
|
| 408 |
+
log_dir: ${launcher.experiment_log_dir}/logs/${roboflow_train.supercategory}
|
| 409 |
+
log_freq: 10
|
| 410 |
+
|
| 411 |
+
# ============================================================================
|
| 412 |
+
# Launcher and Submitit Configuration
|
| 413 |
+
# ============================================================================
|
| 414 |
+
|
| 415 |
+
launcher:
|
| 416 |
+
num_nodes: 1
|
| 417 |
+
gpus_per_node: 2
|
| 418 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 419 |
+
multiprocessing_context: forkserver
|
| 420 |
+
|
| 421 |
+
submitit:
|
| 422 |
+
account: null
|
| 423 |
+
partition: null
|
| 424 |
+
qos: null
|
| 425 |
+
timeout_hour: 72
|
| 426 |
+
use_cluster: True
|
| 427 |
+
cpus_per_task: 10
|
| 428 |
+
port_range: [10000, 65000]
|
| 429 |
+
constraint: null
|
| 430 |
+
# Uncomment for job array configuration
|
| 431 |
+
job_array:
|
| 432 |
+
num_tasks: 100
|
| 433 |
+
task_index: 0
|
| 434 |
+
|
| 435 |
+
# ============================================================================
|
| 436 |
+
# Available Roboflow Supercategories (for reference)
|
| 437 |
+
# ============================================================================
|
| 438 |
+
|
| 439 |
+
all_roboflow_supercategories:
|
| 440 |
+
- -grccs
|
| 441 |
+
- zebrasatasturias
|
| 442 |
+
- cod-mw-warzone
|
| 443 |
+
- canalstenosis
|
| 444 |
+
- label-printing-defect-version-2
|
| 445 |
+
- new-defects-in-wood
|
| 446 |
+
- orionproducts
|
| 447 |
+
- aquarium-combined
|
| 448 |
+
- varroa-mites-detection--test-set
|
| 449 |
+
- clashroyalechardetector
|
| 450 |
+
- stomata-cells
|
| 451 |
+
- halo-infinite-angel-videogame
|
| 452 |
+
- pig-detection
|
| 453 |
+
- urine-analysis1
|
| 454 |
+
- aerial-sheep
|
| 455 |
+
- orgharvest
|
| 456 |
+
- actions
|
| 457 |
+
- mahjong
|
| 458 |
+
- liver-disease
|
| 459 |
+
- needle-base-tip-min-max
|
| 460 |
+
- wheel-defect-detection
|
| 461 |
+
- aircraft-turnaround-dataset
|
| 462 |
+
- xray
|
| 463 |
+
- wildfire-smoke
|
| 464 |
+
- spinefrxnormalvindr
|
| 465 |
+
- ufba-425
|
| 466 |
+
- speech-bubbles-detection
|
| 467 |
+
- train
|
| 468 |
+
- pill
|
| 469 |
+
- truck-movement
|
| 470 |
+
- car-logo-detection
|
| 471 |
+
- inbreast
|
| 472 |
+
- sea-cucumbers-new-tiles
|
| 473 |
+
- uavdet-small
|
| 474 |
+
- penguin-finder-seg
|
| 475 |
+
- aerial-airport
|
| 476 |
+
- bibdetection
|
| 477 |
+
- taco-trash-annotations-in-context
|
| 478 |
+
- bees
|
| 479 |
+
- recode-waste
|
| 480 |
+
- screwdetectclassification
|
| 481 |
+
- wine-labels
|
| 482 |
+
- aerial-cows
|
| 483 |
+
- into-the-vale
|
| 484 |
+
- gwhd2021
|
| 485 |
+
- lacrosse-object-detection
|
| 486 |
+
- defect-detection
|
| 487 |
+
- dataconvert
|
| 488 |
+
- x-ray-id
|
| 489 |
+
- ball
|
| 490 |
+
- tube
|
| 491 |
+
- 2024-frc
|
| 492 |
+
- crystal-clean-brain-tumors-mri-dataset
|
| 493 |
+
- grapes-5
|
| 494 |
+
- human-detection-in-floods
|
| 495 |
+
- buoy-onboarding
|
| 496 |
+
- apoce-aerial-photographs-for-object-detection-of-construction-equipment
|
| 497 |
+
- l10ul502
|
| 498 |
+
- floating-waste
|
| 499 |
+
- deeppcb
|
| 500 |
+
- ism-band-packet-detection
|
| 501 |
+
- weeds4
|
| 502 |
+
- invoice-processing
|
| 503 |
+
- thermal-cheetah
|
| 504 |
+
- tomatoes-2
|
| 505 |
+
- marine-sharks
|
| 506 |
+
- peixos-fish
|
| 507 |
+
- sssod
|
| 508 |
+
- aerial-pool
|
| 509 |
+
- countingpills
|
| 510 |
+
- asphaltdistressdetection
|
| 511 |
+
- roboflow-trained-dataset
|
| 512 |
+
- everdaynew
|
| 513 |
+
- underwater-objects
|
| 514 |
+
- soda-bottles
|
| 515 |
+
- dentalai
|
| 516 |
+
- jellyfish
|
| 517 |
+
- deepfruits
|
| 518 |
+
- activity-diagrams
|
| 519 |
+
- circuit-voltages
|
| 520 |
+
- all-elements
|
| 521 |
+
- macro-segmentation
|
| 522 |
+
- exploratorium-daphnia
|
| 523 |
+
- signatures
|
| 524 |
+
- conveyor-t-shirts
|
| 525 |
+
- fruitjes
|
| 526 |
+
- grass-weeds
|
| 527 |
+
- infraredimageofpowerequipment
|
| 528 |
+
- 13-lkc01
|
| 529 |
+
- wb-prova
|
| 530 |
+
- flir-camera-objects
|
| 531 |
+
- paper-parts
|
| 532 |
+
- football-player-detection
|
| 533 |
+
- trail-camera
|
| 534 |
+
- smd-components
|
| 535 |
+
- water-meter
|
| 536 |
+
- nih-xray
|
| 537 |
+
- the-dreidel-project
|
| 538 |
+
- electric-pylon-detection-in-rsi
|
| 539 |
+
- cable-damage
|
source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test.yaml
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
|
| 10 |
+
dump_file_name: saco_veval_sav_test
|
| 11 |
+
experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
|
| 12 |
+
ytvis_json: <YOUR_GT_PATH>/saco_veval_sav_test.json
|
| 13 |
+
ytvis_dir : <YOUR_VIDEO_JPG_DIR>
|
| 14 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 15 |
+
num_videos: null
|
| 16 |
+
|
| 17 |
+
# ============================================================================
|
| 18 |
+
# Different helper parameters and functions
|
| 19 |
+
# ============================================================================
|
| 20 |
+
scratch:
|
| 21 |
+
vid_mask_postprocessor:
|
| 22 |
+
_target_: sam3.eval.postprocessors.PostProcessNullOp
|
| 23 |
+
|
| 24 |
+
use_presence_eval: True
|
| 25 |
+
|
| 26 |
+
video_transforms_val:
|
| 27 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 28 |
+
transforms:
|
| 29 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 30 |
+
# resize the image to a square of size ${scratch.resolution} (1008x1008 here)
|
| 31 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 32 |
+
sizes: ${scratch.resolution} # originally `resolution: 1024`
|
| 33 |
+
square: true
|
| 34 |
+
consistent_transform: true
|
| 35 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 36 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 37 |
+
mean: ${scratch.val_norm_mean}
|
| 38 |
+
std: ${scratch.val_norm_std}
|
| 39 |
+
|
| 40 |
+
# Model parameters
|
| 41 |
+
d_model: 256
|
| 42 |
+
|
| 43 |
+
# Image processing parameters
|
| 44 |
+
resolution: 1008
|
| 45 |
+
|
| 46 |
+
# Normalization parameters
|
| 47 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 48 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 49 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 50 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 51 |
+
|
| 52 |
+
val_batch_size: 1
|
| 53 |
+
num_val_workers: 0
|
| 54 |
+
max_data_epochs: 20
|
| 55 |
+
hybrid_repeats: 1
|
| 56 |
+
gather_pred_via_filesys: false
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ============================================================================
|
| 60 |
+
# Trainer Configuration
|
| 61 |
+
# ============================================================================
|
| 62 |
+
|
| 63 |
+
trainer:
|
| 64 |
+
_target_: sam3.train.trainer.Trainer
|
| 65 |
+
skip_saving_ckpts: true
|
| 66 |
+
empty_gpu_mem_cache_after_eval: True
|
| 67 |
+
skip_first_val: True
|
| 68 |
+
max_epochs: ${scratch.max_data_epochs}
|
| 69 |
+
accelerator: cuda
|
| 70 |
+
seed_value: 123
|
| 71 |
+
val_epoch_freq: 10
|
| 72 |
+
mode: val
|
| 73 |
+
|
| 74 |
+
distributed:
|
| 75 |
+
backend: nccl
|
| 76 |
+
find_unused_parameters: True
|
| 77 |
+
gradient_as_bucket_view: True
|
| 78 |
+
|
| 79 |
+
loss:
|
| 80 |
+
all:
|
| 81 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 82 |
+
default:
|
| 83 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 84 |
+
|
| 85 |
+
data:
|
| 86 |
+
train: null
|
| 87 |
+
val:
|
| 88 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 89 |
+
dataset:
|
| 90 |
+
_target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset
|
| 91 |
+
limit_ids: ${paths.num_videos}
|
| 92 |
+
img_folder: ${paths.ytvis_dir}
|
| 93 |
+
ann_file: ${paths.ytvis_json}
|
| 94 |
+
coco_json_loader:
|
| 95 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP
|
| 96 |
+
_partial_: true
|
| 97 |
+
|
| 98 |
+
transforms: ${scratch.video_transforms_val}
|
| 99 |
+
max_ann_per_img: 100000 # filtered in transforms
|
| 100 |
+
max_val_queries: 100000
|
| 101 |
+
multiplier: 1
|
| 102 |
+
load_segmentation: true
|
| 103 |
+
training: false
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
shuffle: False
|
| 107 |
+
batch_size: ${scratch.val_batch_size}
|
| 108 |
+
num_workers: ${scratch.num_val_workers}
|
| 109 |
+
pin_memory: True
|
| 110 |
+
drop_last: False
|
| 111 |
+
collate_fn:
|
| 112 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 113 |
+
_partial_: true
|
| 114 |
+
repeats: ${scratch.hybrid_repeats}
|
| 115 |
+
dict_key: ytvis_val
|
| 116 |
+
with_seg_masks: true
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
model:
|
| 120 |
+
_target_: sam3.model_builder.build_sam3_video_model
|
| 121 |
+
bpe_path: ${paths.bpe_path}
|
| 122 |
+
has_presence_token: True
|
| 123 |
+
geo_encoder_use_img_cross_attn: True
|
| 124 |
+
apply_temporal_disambiguation: True
|
| 125 |
+
|
| 126 |
+
meters:
|
| 127 |
+
val:
|
| 128 |
+
ytvis_val:
|
| 129 |
+
pred_file: # key
|
| 130 |
+
_target_: sam3.eval.ytvis_eval.YTVISResultsWriter
|
| 131 |
+
dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json
|
| 132 |
+
postprocessor: ${scratch.vid_mask_postprocessor}
|
| 133 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 134 |
+
|
| 135 |
+
optim:
|
| 136 |
+
amp:
|
| 137 |
+
enabled: True
|
| 138 |
+
amp_dtype: bfloat16
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 143 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
logging:
|
| 147 |
+
tensorboard_writer:
|
| 148 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 149 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 150 |
+
flush_secs: 120
|
| 151 |
+
should_log: True
|
| 152 |
+
wandb_writer: null
|
| 153 |
+
log_dir: ${launcher.experiment_log_dir}/logs/
|
| 154 |
+
log_freq: 10
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Launcher and Submitit Configuration
|
| 158 |
+
# ============================================================================
|
| 159 |
+
|
| 160 |
+
launcher:
|
| 161 |
+
num_nodes: 8
|
| 162 |
+
gpus_per_node: 8
|
| 163 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 164 |
+
multiprocessing_context: forkserver
|
| 165 |
+
|
| 166 |
+
submitit:
|
| 167 |
+
account: null
|
| 168 |
+
partition: null
|
| 169 |
+
qos: null
|
| 170 |
+
timeout_hour: 72
|
| 171 |
+
use_cluster: True
|
| 172 |
+
cpus_per_task: 10
|
| 173 |
+
port_range: [10000, 65000]
|
| 174 |
+
constraint: null
|
source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_test_noheur.yaml
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
|
| 10 |
+
dump_file_name: saco_veval_sav_test
|
| 11 |
+
experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
|
| 12 |
+
ytvis_json: <YOUR_GT_PATH>/saco_veval_sav_test.json
|
| 13 |
+
ytvis_dir : <YOUR_VIDEO_JPG_DIR>
|
| 14 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 15 |
+
num_videos: null
|
| 16 |
+
|
| 17 |
+
# ============================================================================
|
| 18 |
+
# Different helper parameters and functions
|
| 19 |
+
# ============================================================================
|
| 20 |
+
scratch:
|
| 21 |
+
vid_mask_postprocessor:
|
| 22 |
+
_target_: sam3.eval.postprocessors.PostProcessNullOp
|
| 23 |
+
|
| 24 |
+
use_presence_eval: True
|
| 25 |
+
|
| 26 |
+
video_transforms_val:
|
| 27 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 28 |
+
transforms:
|
| 29 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 30 |
+
# resize the image to a square of size ${scratch.resolution} (1008x1008 here)
|
| 31 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 32 |
+
sizes: ${scratch.resolution} # originally `resolution: 1024`
|
| 33 |
+
square: true
|
| 34 |
+
consistent_transform: true
|
| 35 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 36 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 37 |
+
mean: ${scratch.val_norm_mean}
|
| 38 |
+
std: ${scratch.val_norm_std}
|
| 39 |
+
|
| 40 |
+
# Model parameters
|
| 41 |
+
d_model: 256
|
| 42 |
+
|
| 43 |
+
# Image processing parameters
|
| 44 |
+
resolution: 1008
|
| 45 |
+
|
| 46 |
+
# Normalization parameters
|
| 47 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 48 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 49 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 50 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 51 |
+
|
| 52 |
+
val_batch_size: 1
|
| 53 |
+
num_val_workers: 0
|
| 54 |
+
max_data_epochs: 20
|
| 55 |
+
hybrid_repeats: 1
|
| 56 |
+
gather_pred_via_filesys: false
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ============================================================================
|
| 60 |
+
# Trainer Configuration
|
| 61 |
+
# ============================================================================
|
| 62 |
+
|
| 63 |
+
trainer:
|
| 64 |
+
_target_: sam3.train.trainer.Trainer
|
| 65 |
+
skip_saving_ckpts: true
|
| 66 |
+
empty_gpu_mem_cache_after_eval: True
|
| 67 |
+
skip_first_val: True
|
| 68 |
+
max_epochs: ${scratch.max_data_epochs}
|
| 69 |
+
accelerator: cuda
|
| 70 |
+
seed_value: 123
|
| 71 |
+
val_epoch_freq: 10
|
| 72 |
+
mode: val
|
| 73 |
+
|
| 74 |
+
distributed:
|
| 75 |
+
backend: nccl
|
| 76 |
+
find_unused_parameters: True
|
| 77 |
+
gradient_as_bucket_view: True
|
| 78 |
+
|
| 79 |
+
loss:
|
| 80 |
+
all:
|
| 81 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 82 |
+
default:
|
| 83 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 84 |
+
|
| 85 |
+
data:
|
| 86 |
+
train: null
|
| 87 |
+
val:
|
| 88 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 89 |
+
dataset:
|
| 90 |
+
_target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset
|
| 91 |
+
limit_ids: ${paths.num_videos}
|
| 92 |
+
img_folder: ${paths.ytvis_dir}
|
| 93 |
+
ann_file: ${paths.ytvis_json}
|
| 94 |
+
coco_json_loader:
|
| 95 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP
|
| 96 |
+
_partial_: true
|
| 97 |
+
|
| 98 |
+
transforms: ${scratch.video_transforms_val}
|
| 99 |
+
max_ann_per_img: 100000 # filtered in transforms
|
| 100 |
+
max_val_queries: 100000
|
| 101 |
+
multiplier: 1
|
| 102 |
+
load_segmentation: true
|
| 103 |
+
training: false
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
shuffle: False
|
| 107 |
+
batch_size: ${scratch.val_batch_size}
|
| 108 |
+
num_workers: ${scratch.num_val_workers}
|
| 109 |
+
pin_memory: True
|
| 110 |
+
drop_last: False
|
| 111 |
+
collate_fn:
|
| 112 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 113 |
+
_partial_: true
|
| 114 |
+
repeats: ${scratch.hybrid_repeats}
|
| 115 |
+
dict_key: ytvis_val
|
| 116 |
+
with_seg_masks: true
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
model:
|
| 120 |
+
_target_: sam3.model_builder.build_sam3_video_model
|
| 121 |
+
bpe_path: ${paths.bpe_path}
|
| 122 |
+
has_presence_token: True
|
| 123 |
+
geo_encoder_use_img_cross_attn: True
|
| 124 |
+
apply_temporal_disambiguation: False
|
| 125 |
+
|
| 126 |
+
meters:
|
| 127 |
+
val:
|
| 128 |
+
ytvis_val:
|
| 129 |
+
pred_file: # key
|
| 130 |
+
_target_: sam3.eval.ytvis_eval.YTVISResultsWriter
|
| 131 |
+
dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json
|
| 132 |
+
postprocessor: ${scratch.vid_mask_postprocessor}
|
| 133 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 134 |
+
|
| 135 |
+
optim:
|
| 136 |
+
amp:
|
| 137 |
+
enabled: True
|
| 138 |
+
amp_dtype: bfloat16
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 143 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
logging:
|
| 147 |
+
tensorboard_writer:
|
| 148 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 149 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 150 |
+
flush_secs: 120
|
| 151 |
+
should_log: True
|
| 152 |
+
wandb_writer: null
|
| 153 |
+
log_dir: ${launcher.experiment_log_dir}/logs/
|
| 154 |
+
log_freq: 10
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Launcher and Submitit Configuration
|
| 158 |
+
# ============================================================================
|
| 159 |
+
|
| 160 |
+
launcher:
|
| 161 |
+
num_nodes: 8
|
| 162 |
+
gpus_per_node: 8
|
| 163 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 164 |
+
multiprocessing_context: forkserver
|
| 165 |
+
|
| 166 |
+
submitit:
|
| 167 |
+
account: null
|
| 168 |
+
partition: null
|
| 169 |
+
qos: null
|
| 170 |
+
timeout_hour: 72
|
| 171 |
+
use_cluster: True
|
| 172 |
+
cpus_per_task: 10
|
| 173 |
+
port_range: [10000, 65000]
|
| 174 |
+
constraint: null
|
source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_sav_val.yaml
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
|
| 10 |
+
dump_file_name: saco_veval_sav_val
|
| 11 |
+
experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
|
| 12 |
+
ytvis_json: <YOUR_GT_PATH>/saco_veval_sav_val.json
|
| 13 |
+
ytvis_dir : <YOUR_VIDEO_JPG_DIR>
|
| 14 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 15 |
+
num_videos: null
|
| 16 |
+
|
| 17 |
+
# ============================================================================
|
| 18 |
+
# Different helper parameters and functions
|
| 19 |
+
# ============================================================================
|
| 20 |
+
scratch:
|
| 21 |
+
vid_mask_postprocessor:
|
| 22 |
+
_target_: sam3.eval.postprocessors.PostProcessNullOp
|
| 23 |
+
|
| 24 |
+
use_presence_eval: True
|
| 25 |
+
|
| 26 |
+
video_transforms_val:
|
| 27 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 28 |
+
transforms:
|
| 29 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 30 |
+
# resize the image to a square of size ${scratch.resolution} (1008x1008 here)
|
| 31 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 32 |
+
sizes: ${scratch.resolution} # originally `resolution: 1024`
|
| 33 |
+
square: true
|
| 34 |
+
consistent_transform: true
|
| 35 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 36 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 37 |
+
mean: ${scratch.val_norm_mean}
|
| 38 |
+
std: ${scratch.val_norm_std}
|
| 39 |
+
|
| 40 |
+
# Model parameters
|
| 41 |
+
d_model: 256
|
| 42 |
+
|
| 43 |
+
# Image processing parameters
|
| 44 |
+
resolution: 1008
|
| 45 |
+
|
| 46 |
+
# Normalization parameters
|
| 47 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 48 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 49 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 50 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 51 |
+
|
| 52 |
+
val_batch_size: 1
|
| 53 |
+
num_val_workers: 0
|
| 54 |
+
max_data_epochs: 20
|
| 55 |
+
hybrid_repeats: 1
|
| 56 |
+
gather_pred_via_filesys: false
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ============================================================================
|
| 60 |
+
# Trainer Configuration
|
| 61 |
+
# ============================================================================
|
| 62 |
+
|
| 63 |
+
trainer:
|
| 64 |
+
_target_: sam3.train.trainer.Trainer
|
| 65 |
+
skip_saving_ckpts: true
|
| 66 |
+
empty_gpu_mem_cache_after_eval: True
|
| 67 |
+
skip_first_val: True
|
| 68 |
+
max_epochs: ${scratch.max_data_epochs}
|
| 69 |
+
accelerator: cuda
|
| 70 |
+
seed_value: 123
|
| 71 |
+
val_epoch_freq: 10
|
| 72 |
+
mode: val
|
| 73 |
+
|
| 74 |
+
distributed:
|
| 75 |
+
backend: nccl
|
| 76 |
+
find_unused_parameters: True
|
| 77 |
+
gradient_as_bucket_view: True
|
| 78 |
+
|
| 79 |
+
loss:
|
| 80 |
+
all:
|
| 81 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 82 |
+
default:
|
| 83 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 84 |
+
|
| 85 |
+
data:
|
| 86 |
+
train: null
|
| 87 |
+
val:
|
| 88 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 89 |
+
dataset:
|
| 90 |
+
_target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset
|
| 91 |
+
limit_ids: ${paths.num_videos}
|
| 92 |
+
img_folder: ${paths.ytvis_dir}
|
| 93 |
+
ann_file: ${paths.ytvis_json}
|
| 94 |
+
coco_json_loader:
|
| 95 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP
|
| 96 |
+
_partial_: true
|
| 97 |
+
|
| 98 |
+
transforms: ${scratch.video_transforms_val}
|
| 99 |
+
max_ann_per_img: 100000 # filtered in transforms
|
| 100 |
+
max_val_queries: 100000
|
| 101 |
+
multiplier: 1
|
| 102 |
+
load_segmentation: true
|
| 103 |
+
training: false
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
shuffle: False
|
| 107 |
+
batch_size: ${scratch.val_batch_size}
|
| 108 |
+
num_workers: ${scratch.num_val_workers}
|
| 109 |
+
pin_memory: True
|
| 110 |
+
drop_last: False
|
| 111 |
+
collate_fn:
|
| 112 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 113 |
+
_partial_: true
|
| 114 |
+
repeats: ${scratch.hybrid_repeats}
|
| 115 |
+
dict_key: ytvis_val
|
| 116 |
+
with_seg_masks: true
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
model:
|
| 120 |
+
_target_: sam3.model_builder.build_sam3_video_model
|
| 121 |
+
bpe_path: ${paths.bpe_path}
|
| 122 |
+
has_presence_token: True
|
| 123 |
+
geo_encoder_use_img_cross_attn: True
|
| 124 |
+
apply_temporal_disambiguation: True
|
| 125 |
+
|
| 126 |
+
meters:
|
| 127 |
+
val:
|
| 128 |
+
ytvis_val:
|
| 129 |
+
pred_file: # key
|
| 130 |
+
_target_: sam3.eval.ytvis_eval.YTVISResultsWriter
|
| 131 |
+
dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json
|
| 132 |
+
postprocessor: ${scratch.vid_mask_postprocessor}
|
| 133 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 134 |
+
|
| 135 |
+
optim:
|
| 136 |
+
amp:
|
| 137 |
+
enabled: True
|
| 138 |
+
amp_dtype: bfloat16
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 143 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
logging:
|
| 147 |
+
tensorboard_writer:
|
| 148 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 149 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 150 |
+
flush_secs: 120
|
| 151 |
+
should_log: True
|
| 152 |
+
wandb_writer: null
|
| 153 |
+
log_dir: ${launcher.experiment_log_dir}/logs/
|
| 154 |
+
log_freq: 10
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Launcher and Submitit Configuration
|
| 158 |
+
# ============================================================================
|
| 159 |
+
|
| 160 |
+
launcher:
|
| 161 |
+
num_nodes: 8
|
| 162 |
+
gpus_per_node: 8
|
| 163 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 164 |
+
multiprocessing_context: forkserver
|
| 165 |
+
|
| 166 |
+
submitit:
|
| 167 |
+
account: null
|
| 168 |
+
partition: null
|
| 169 |
+
qos: null
|
| 170 |
+
timeout_hour: 72
|
| 171 |
+
use_cluster: True
|
| 172 |
+
cpus_per_task: 10
|
| 173 |
+
port_range: [10000, 65000]
|
| 174 |
+
constraint: null
|
source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_test.yaml
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
|
| 10 |
+
dump_file_name: saco_veval_smartglasses_test
|
| 11 |
+
experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
|
| 12 |
+
ytvis_json: <YOUR_GT_PATH>/saco_veval_smartglasses_test.json
|
| 13 |
+
ytvis_dir : <YOUR_VIDEO_JPG_DIR>
|
| 14 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 15 |
+
num_videos: null
|
| 16 |
+
|
| 17 |
+
# ============================================================================
|
| 18 |
+
# Different helper parameters and functions
|
| 19 |
+
# ============================================================================
|
| 20 |
+
scratch:
|
| 21 |
+
vid_mask_postprocessor:
|
| 22 |
+
_target_: sam3.eval.postprocessors.PostProcessNullOp
|
| 23 |
+
|
| 24 |
+
use_presence_eval: True
|
| 25 |
+
|
| 26 |
+
video_transforms_val:
|
| 27 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 28 |
+
transforms:
|
| 29 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 30 |
+
# resize the image to a square ${scratch.resolution}x${scratch.resolution} resolution
|
| 31 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 32 |
+
sizes: ${scratch.resolution} # originally `resolution: 1024`
|
| 33 |
+
square: true
|
| 34 |
+
consistent_transform: true
|
| 35 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 36 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 37 |
+
mean: ${scratch.val_norm_mean}
|
| 38 |
+
std: ${scratch.val_norm_std}
|
| 39 |
+
|
| 40 |
+
# Model parameters
|
| 41 |
+
d_model: 256
|
| 42 |
+
|
| 43 |
+
# Image processing parameters
|
| 44 |
+
resolution: 1008
|
| 45 |
+
|
| 46 |
+
# Normalization parameters
|
| 47 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 48 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 49 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 50 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 51 |
+
|
| 52 |
+
val_batch_size: 1
|
| 53 |
+
num_val_workers: 0
|
| 54 |
+
max_data_epochs: 20
|
| 55 |
+
hybrid_repeats: 1
|
| 56 |
+
gather_pred_via_filesys: false
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ============================================================================
|
| 60 |
+
# Trainer Configuration
|
| 61 |
+
# ============================================================================
|
| 62 |
+
|
| 63 |
+
trainer:
|
| 64 |
+
_target_: sam3.train.trainer.Trainer
|
| 65 |
+
skip_saving_ckpts: true
|
| 66 |
+
empty_gpu_mem_cache_after_eval: True
|
| 67 |
+
skip_first_val: True
|
| 68 |
+
max_epochs: ${scratch.max_data_epochs}
|
| 69 |
+
accelerator: cuda
|
| 70 |
+
seed_value: 123
|
| 71 |
+
val_epoch_freq: 10
|
| 72 |
+
mode: val
|
| 73 |
+
|
| 74 |
+
distributed:
|
| 75 |
+
backend: nccl
|
| 76 |
+
find_unused_parameters: True
|
| 77 |
+
gradient_as_bucket_view: True
|
| 78 |
+
|
| 79 |
+
loss:
|
| 80 |
+
all:
|
| 81 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 82 |
+
default:
|
| 83 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 84 |
+
|
| 85 |
+
data:
|
| 86 |
+
train: null
|
| 87 |
+
val:
|
| 88 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 89 |
+
dataset:
|
| 90 |
+
_target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset
|
| 91 |
+
limit_ids: ${paths.num_videos}
|
| 92 |
+
img_folder: ${paths.ytvis_dir}
|
| 93 |
+
ann_file: ${paths.ytvis_json}
|
| 94 |
+
coco_json_loader:
|
| 95 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP
|
| 96 |
+
_partial_: true
|
| 97 |
+
|
| 98 |
+
transforms: ${scratch.video_transforms_val}
|
| 99 |
+
max_ann_per_img: 100000 # filtered in transforms
|
| 100 |
+
max_val_queries: 100000
|
| 101 |
+
multiplier: 1
|
| 102 |
+
load_segmentation: true
|
| 103 |
+
training: false
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
shuffle: False
|
| 107 |
+
batch_size: ${scratch.val_batch_size}
|
| 108 |
+
num_workers: ${scratch.num_val_workers}
|
| 109 |
+
pin_memory: True
|
| 110 |
+
drop_last: False
|
| 111 |
+
collate_fn:
|
| 112 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 113 |
+
_partial_: true
|
| 114 |
+
repeats: ${scratch.hybrid_repeats}
|
| 115 |
+
dict_key: ytvis_val
|
| 116 |
+
with_seg_masks: true
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
model:
|
| 120 |
+
_target_: sam3.model_builder.build_sam3_video_model
|
| 121 |
+
bpe_path: ${paths.bpe_path}
|
| 122 |
+
has_presence_token: True
|
| 123 |
+
geo_encoder_use_img_cross_attn: True
|
| 124 |
+
apply_temporal_disambiguation: True
|
| 125 |
+
|
| 126 |
+
meters:
|
| 127 |
+
val:
|
| 128 |
+
ytvis_val:
|
| 129 |
+
pred_file: # key
|
| 130 |
+
_target_: sam3.eval.ytvis_eval.YTVISResultsWriter
|
| 131 |
+
dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json
|
| 132 |
+
postprocessor: ${scratch.vid_mask_postprocessor}
|
| 133 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 134 |
+
|
| 135 |
+
optim:
|
| 136 |
+
amp:
|
| 137 |
+
enabled: True
|
| 138 |
+
amp_dtype: bfloat16
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 143 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
logging:
|
| 147 |
+
tensorboard_writer:
|
| 148 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 149 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 150 |
+
flush_secs: 120
|
| 151 |
+
should_log: True
|
| 152 |
+
wandb_writer: null
|
| 153 |
+
log_dir: ${launcher.experiment_log_dir}/logs/
|
| 154 |
+
log_freq: 10
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Launcher and Submitit Configuration
|
| 158 |
+
# ============================================================================
|
| 159 |
+
|
| 160 |
+
launcher:
|
| 161 |
+
num_nodes: 8
|
| 162 |
+
gpus_per_node: 8
|
| 163 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 164 |
+
multiprocessing_context: forkserver
|
| 165 |
+
|
| 166 |
+
submitit:
|
| 167 |
+
account: null
|
| 168 |
+
partition: null
|
| 169 |
+
qos: null
|
| 170 |
+
timeout_hour: 72
|
| 171 |
+
use_cluster: True
|
| 172 |
+
cpus_per_task: 10
|
| 173 |
+
port_range: [10000, 65000]
|
| 174 |
+
constraint: null
|
source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test.yaml
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
|
| 10 |
+
dump_file_name: saco_veval_yt1b_test
|
| 11 |
+
experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
|
| 12 |
+
ytvis_json: <YOUR_GT_PATH>/saco_veval_yt1b_test.json
|
| 13 |
+
ytvis_dir : <YOUR_VIDEO_JPG_DIR>
|
| 14 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 15 |
+
num_videos: null
|
| 16 |
+
|
| 17 |
+
# ============================================================================
|
| 18 |
+
# Different helper parameters and functions
|
| 19 |
+
# ============================================================================
|
| 20 |
+
scratch:
|
| 21 |
+
vid_mask_postprocessor:
|
| 22 |
+
_target_: sam3.eval.postprocessors.PostProcessNullOp
|
| 23 |
+
|
| 24 |
+
use_presence_eval: True
|
| 25 |
+
|
| 26 |
+
video_transforms_val:
|
| 27 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 28 |
+
transforms:
|
| 29 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 30 |
+
# resize the image to a square ${scratch.resolution}x${scratch.resolution} resolution
|
| 31 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 32 |
+
sizes: ${scratch.resolution} # originally `resolution: 1024`
|
| 33 |
+
square: true
|
| 34 |
+
consistent_transform: true
|
| 35 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 36 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 37 |
+
mean: ${scratch.val_norm_mean}
|
| 38 |
+
std: ${scratch.val_norm_std}
|
| 39 |
+
|
| 40 |
+
# Model parameters
|
| 41 |
+
d_model: 256
|
| 42 |
+
|
| 43 |
+
# Image processing parameters
|
| 44 |
+
resolution: 1008
|
| 45 |
+
|
| 46 |
+
# Normalization parameters
|
| 47 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 48 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 49 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 50 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 51 |
+
|
| 52 |
+
val_batch_size: 1
|
| 53 |
+
num_val_workers: 0
|
| 54 |
+
max_data_epochs: 20
|
| 55 |
+
hybrid_repeats: 1
|
| 56 |
+
gather_pred_via_filesys: false
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ============================================================================
|
| 60 |
+
# Trainer Configuration
|
| 61 |
+
# ============================================================================
|
| 62 |
+
|
| 63 |
+
trainer:
|
| 64 |
+
_target_: sam3.train.trainer.Trainer
|
| 65 |
+
skip_saving_ckpts: true
|
| 66 |
+
empty_gpu_mem_cache_after_eval: True
|
| 67 |
+
skip_first_val: True
|
| 68 |
+
max_epochs: ${scratch.max_data_epochs}
|
| 69 |
+
accelerator: cuda
|
| 70 |
+
seed_value: 123
|
| 71 |
+
val_epoch_freq: 10
|
| 72 |
+
mode: val
|
| 73 |
+
|
| 74 |
+
distributed:
|
| 75 |
+
backend: nccl
|
| 76 |
+
find_unused_parameters: True
|
| 77 |
+
gradient_as_bucket_view: True
|
| 78 |
+
|
| 79 |
+
loss:
|
| 80 |
+
all:
|
| 81 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 82 |
+
default:
|
| 83 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 84 |
+
|
| 85 |
+
data:
|
| 86 |
+
train: null
|
| 87 |
+
val:
|
| 88 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 89 |
+
dataset:
|
| 90 |
+
_target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset
|
| 91 |
+
limit_ids: ${paths.num_videos}
|
| 92 |
+
img_folder: ${paths.ytvis_dir}
|
| 93 |
+
ann_file: ${paths.ytvis_json}
|
| 94 |
+
coco_json_loader:
|
| 95 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP
|
| 96 |
+
_partial_: true
|
| 97 |
+
|
| 98 |
+
transforms: ${scratch.video_transforms_val}
|
| 99 |
+
max_ann_per_img: 100000 # filtered in transforms
|
| 100 |
+
max_val_queries: 100000
|
| 101 |
+
multiplier: 1
|
| 102 |
+
load_segmentation: true
|
| 103 |
+
training: false
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
shuffle: False
|
| 107 |
+
batch_size: ${scratch.val_batch_size}
|
| 108 |
+
num_workers: ${scratch.num_val_workers}
|
| 109 |
+
pin_memory: True
|
| 110 |
+
drop_last: False
|
| 111 |
+
collate_fn:
|
| 112 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 113 |
+
_partial_: true
|
| 114 |
+
repeats: ${scratch.hybrid_repeats}
|
| 115 |
+
dict_key: ytvis_val
|
| 116 |
+
with_seg_masks: true
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
model:
|
| 120 |
+
_target_: sam3.model_builder.build_sam3_video_model
|
| 121 |
+
bpe_path: ${paths.bpe_path}
|
| 122 |
+
has_presence_token: True
|
| 123 |
+
geo_encoder_use_img_cross_attn: True
|
| 124 |
+
apply_temporal_disambiguation: True
|
| 125 |
+
|
| 126 |
+
meters:
|
| 127 |
+
val:
|
| 128 |
+
ytvis_val:
|
| 129 |
+
pred_file: # key
|
| 130 |
+
_target_: sam3.eval.ytvis_eval.YTVISResultsWriter
|
| 131 |
+
dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json
|
| 132 |
+
postprocessor: ${scratch.vid_mask_postprocessor}
|
| 133 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 134 |
+
|
| 135 |
+
optim:
|
| 136 |
+
amp:
|
| 137 |
+
enabled: True
|
| 138 |
+
amp_dtype: bfloat16
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 143 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
logging:
|
| 147 |
+
tensorboard_writer:
|
| 148 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 149 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 150 |
+
flush_secs: 120
|
| 151 |
+
should_log: True
|
| 152 |
+
wandb_writer: null
|
| 153 |
+
log_dir: ${launcher.experiment_log_dir}/logs/
|
| 154 |
+
log_freq: 10
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Launcher and Submitit Configuration
|
| 158 |
+
# ============================================================================
|
| 159 |
+
|
| 160 |
+
launcher:
|
| 161 |
+
num_nodes: 8
|
| 162 |
+
gpus_per_node: 8
|
| 163 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 164 |
+
multiprocessing_context: forkserver
|
| 165 |
+
|
| 166 |
+
submitit:
|
| 167 |
+
account: null
|
| 168 |
+
partition: null
|
| 169 |
+
qos: null
|
| 170 |
+
timeout_hour: 72
|
| 171 |
+
use_cluster: True
|
| 172 |
+
cpus_per_task: 10
|
| 173 |
+
port_range: [10000, 65000]
|
| 174 |
+
constraint: null
|
source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val.yaml
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
|
| 10 |
+
dump_file_name: saco_veval_yt1b_val
|
| 11 |
+
experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
|
| 12 |
+
ytvis_json: <YOUR_GT_PATH>/saco_veval_yt1b_val.json
|
| 13 |
+
ytvis_dir : <YOUR_VIDEO_JPG_DIR>
|
| 14 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 15 |
+
num_videos: null
|
| 16 |
+
|
| 17 |
+
# ============================================================================
|
| 18 |
+
# Different helper parameters and functions
|
| 19 |
+
# ============================================================================
|
| 20 |
+
scratch:
|
| 21 |
+
vid_mask_postprocessor:
|
| 22 |
+
_target_: sam3.eval.postprocessors.PostProcessNullOp
|
| 23 |
+
|
| 24 |
+
use_presence_eval: True
|
| 25 |
+
|
| 26 |
+
video_transforms_val:
|
| 27 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 28 |
+
transforms:
|
| 29 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 30 |
+
# resize the image to a square ${scratch.resolution}x${scratch.resolution} resolution
|
| 31 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 32 |
+
sizes: ${scratch.resolution} # originally `resolution: 1024`
|
| 33 |
+
square: true
|
| 34 |
+
consistent_transform: true
|
| 35 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 36 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 37 |
+
mean: ${scratch.val_norm_mean}
|
| 38 |
+
std: ${scratch.val_norm_std}
|
| 39 |
+
|
| 40 |
+
# Model parameters
|
| 41 |
+
d_model: 256
|
| 42 |
+
|
| 43 |
+
# Image processing parameters
|
| 44 |
+
resolution: 1008
|
| 45 |
+
|
| 46 |
+
# Normalization parameters
|
| 47 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 48 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 49 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 50 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 51 |
+
|
| 52 |
+
val_batch_size: 1
|
| 53 |
+
num_val_workers: 0
|
| 54 |
+
max_data_epochs: 20
|
| 55 |
+
hybrid_repeats: 1
|
| 56 |
+
gather_pred_via_filesys: false
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ============================================================================
|
| 60 |
+
# Trainer Configuration
|
| 61 |
+
# ============================================================================
|
| 62 |
+
|
| 63 |
+
trainer:
|
| 64 |
+
_target_: sam3.train.trainer.Trainer
|
| 65 |
+
skip_saving_ckpts: true
|
| 66 |
+
empty_gpu_mem_cache_after_eval: True
|
| 67 |
+
skip_first_val: True
|
| 68 |
+
max_epochs: ${scratch.max_data_epochs}
|
| 69 |
+
accelerator: cuda
|
| 70 |
+
seed_value: 123
|
| 71 |
+
val_epoch_freq: 10
|
| 72 |
+
mode: val
|
| 73 |
+
|
| 74 |
+
distributed:
|
| 75 |
+
backend: nccl
|
| 76 |
+
find_unused_parameters: True
|
| 77 |
+
gradient_as_bucket_view: True
|
| 78 |
+
|
| 79 |
+
loss:
|
| 80 |
+
all:
|
| 81 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 82 |
+
default:
|
| 83 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 84 |
+
|
| 85 |
+
data:
|
| 86 |
+
train: null
|
| 87 |
+
val:
|
| 88 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 89 |
+
dataset:
|
| 90 |
+
_target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset
|
| 91 |
+
limit_ids: ${paths.num_videos}
|
| 92 |
+
img_folder: ${paths.ytvis_dir}
|
| 93 |
+
ann_file: ${paths.ytvis_json}
|
| 94 |
+
coco_json_loader:
|
| 95 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP
|
| 96 |
+
_partial_: true
|
| 97 |
+
|
| 98 |
+
transforms: ${scratch.video_transforms_val}
|
| 99 |
+
max_ann_per_img: 100000 # filtered in transforms
|
| 100 |
+
max_val_queries: 100000
|
| 101 |
+
multiplier: 1
|
| 102 |
+
load_segmentation: true
|
| 103 |
+
training: false
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
shuffle: False
|
| 107 |
+
batch_size: ${scratch.val_batch_size}
|
| 108 |
+
num_workers: ${scratch.num_val_workers}
|
| 109 |
+
pin_memory: True
|
| 110 |
+
drop_last: False
|
| 111 |
+
collate_fn:
|
| 112 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 113 |
+
_partial_: true
|
| 114 |
+
repeats: ${scratch.hybrid_repeats}
|
| 115 |
+
dict_key: ytvis_val
|
| 116 |
+
with_seg_masks: true
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
model:
|
| 120 |
+
_target_: sam3.model_builder.build_sam3_video_model
|
| 121 |
+
bpe_path: ${paths.bpe_path}
|
| 122 |
+
has_presence_token: True
|
| 123 |
+
geo_encoder_use_img_cross_attn: True
|
| 124 |
+
apply_temporal_disambiguation: True
|
| 125 |
+
|
| 126 |
+
meters:
|
| 127 |
+
val:
|
| 128 |
+
ytvis_val:
|
| 129 |
+
pred_file: # key
|
| 130 |
+
_target_: sam3.eval.ytvis_eval.YTVISResultsWriter
|
| 131 |
+
dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json
|
| 132 |
+
postprocessor: ${scratch.vid_mask_postprocessor}
|
| 133 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 134 |
+
|
| 135 |
+
optim:
|
| 136 |
+
amp:
|
| 137 |
+
enabled: True
|
| 138 |
+
amp_dtype: bfloat16
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 143 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
logging:
|
| 147 |
+
tensorboard_writer:
|
| 148 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 149 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 150 |
+
flush_secs: 120
|
| 151 |
+
should_log: True
|
| 152 |
+
wandb_writer: null
|
| 153 |
+
log_dir: ${launcher.experiment_log_dir}/logs/
|
| 154 |
+
log_freq: 10
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Launcher and Submitit Configuration
|
| 158 |
+
# ============================================================================
|
| 159 |
+
|
| 160 |
+
launcher:
|
| 161 |
+
num_nodes: 8
|
| 162 |
+
gpus_per_node: 8
|
| 163 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 164 |
+
multiprocessing_context: forkserver
|
| 165 |
+
|
| 166 |
+
submitit:
|
| 167 |
+
account: null
|
| 168 |
+
partition: null
|
| 169 |
+
qos: null
|
| 170 |
+
timeout_hour: 72
|
| 171 |
+
use_cluster: True
|
| 172 |
+
cpus_per_task: 10
|
| 173 |
+
port_range: [10000, 65000]
|
| 174 |
+
constraint: null
|
source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_val_noheur.yaml
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- _self_
|
| 4 |
+
|
| 5 |
+
# ============================================================================
|
| 6 |
+
# Paths Configuration (Change this to your own paths)
|
| 7 |
+
# ============================================================================
|
| 8 |
+
paths:
|
| 9 |
+
|
| 10 |
+
dump_file_name: saco_veval_yt1b_val
|
| 11 |
+
experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
|
| 12 |
+
ytvis_json: <YOUR_GT_PATH>/saco_veval_yt1b_val.json
|
| 13 |
+
ytvis_dir : <YOUR_VIDEO_JPG_DIR>
|
| 14 |
+
bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
|
| 15 |
+
num_videos: null
|
| 16 |
+
|
| 17 |
+
# ============================================================================
|
| 18 |
+
# Different helper parameters and functions
|
| 19 |
+
# ============================================================================
|
| 20 |
+
scratch:
|
| 21 |
+
vid_mask_postprocessor:
|
| 22 |
+
_target_: sam3.eval.postprocessors.PostProcessNullOp
|
| 23 |
+
|
| 24 |
+
use_presence_eval: True
|
| 25 |
+
|
| 26 |
+
video_transforms_val:
|
| 27 |
+
- _target_: sam3.train.transforms.basic_for_api.ComposeAPI
|
| 28 |
+
transforms:
|
| 29 |
+
- _target_: sam3.train.transforms.segmentation.DecodeRle
|
| 30 |
+
# resize the image to a square ${scratch.resolution}x${scratch.resolution} resolution
|
| 31 |
+
- _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
|
| 32 |
+
sizes: ${scratch.resolution} # originally `resolution: 1024`
|
| 33 |
+
square: true
|
| 34 |
+
consistent_transform: true
|
| 35 |
+
- _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
|
| 36 |
+
- _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
|
| 37 |
+
mean: ${scratch.val_norm_mean}
|
| 38 |
+
std: ${scratch.val_norm_std}
|
| 39 |
+
|
| 40 |
+
# Model parameters
|
| 41 |
+
d_model: 256
|
| 42 |
+
|
| 43 |
+
# Image processing parameters
|
| 44 |
+
resolution: 1008
|
| 45 |
+
|
| 46 |
+
# Normalization parameters
|
| 47 |
+
train_norm_mean: [0.5, 0.5, 0.5]
|
| 48 |
+
train_norm_std: [0.5, 0.5, 0.5]
|
| 49 |
+
val_norm_mean: [0.5, 0.5, 0.5]
|
| 50 |
+
val_norm_std: [0.5, 0.5, 0.5]
|
| 51 |
+
|
| 52 |
+
val_batch_size: 1
|
| 53 |
+
num_val_workers: 0
|
| 54 |
+
max_data_epochs: 20
|
| 55 |
+
hybrid_repeats: 1
|
| 56 |
+
gather_pred_via_filesys: false
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ============================================================================
|
| 60 |
+
# Trainer Configuration
|
| 61 |
+
# ============================================================================
|
| 62 |
+
|
| 63 |
+
trainer:
|
| 64 |
+
_target_: sam3.train.trainer.Trainer
|
| 65 |
+
skip_saving_ckpts: true
|
| 66 |
+
empty_gpu_mem_cache_after_eval: True
|
| 67 |
+
skip_first_val: True
|
| 68 |
+
max_epochs: ${scratch.max_data_epochs}
|
| 69 |
+
accelerator: cuda
|
| 70 |
+
seed_value: 123
|
| 71 |
+
val_epoch_freq: 10
|
| 72 |
+
mode: val
|
| 73 |
+
|
| 74 |
+
distributed:
|
| 75 |
+
backend: nccl
|
| 76 |
+
find_unused_parameters: True
|
| 77 |
+
gradient_as_bucket_view: True
|
| 78 |
+
|
| 79 |
+
loss:
|
| 80 |
+
all:
|
| 81 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 82 |
+
default:
|
| 83 |
+
_target_: sam3.train.loss.sam3_loss.DummyLoss
|
| 84 |
+
|
| 85 |
+
data:
|
| 86 |
+
train: null
|
| 87 |
+
val:
|
| 88 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 89 |
+
dataset:
|
| 90 |
+
_target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset
|
| 91 |
+
limit_ids: ${paths.num_videos}
|
| 92 |
+
img_folder: ${paths.ytvis_dir}
|
| 93 |
+
ann_file: ${paths.ytvis_json}
|
| 94 |
+
coco_json_loader:
|
| 95 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP
|
| 96 |
+
_partial_: true
|
| 97 |
+
|
| 98 |
+
transforms: ${scratch.video_transforms_val}
|
| 99 |
+
max_ann_per_img: 100000 # filtered in transforms
|
| 100 |
+
max_val_queries: 100000
|
| 101 |
+
multiplier: 1
|
| 102 |
+
load_segmentation: true
|
| 103 |
+
training: false
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
shuffle: False
|
| 107 |
+
batch_size: ${scratch.val_batch_size}
|
| 108 |
+
num_workers: ${scratch.num_val_workers}
|
| 109 |
+
pin_memory: True
|
| 110 |
+
drop_last: False
|
| 111 |
+
collate_fn:
|
| 112 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 113 |
+
_partial_: true
|
| 114 |
+
repeats: ${scratch.hybrid_repeats}
|
| 115 |
+
dict_key: ytvis_val
|
| 116 |
+
with_seg_masks: true
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
model:
|
| 120 |
+
_target_: sam3.model_builder.build_sam3_video_model
|
| 121 |
+
bpe_path: ${paths.bpe_path}
|
| 122 |
+
has_presence_token: True
|
| 123 |
+
geo_encoder_use_img_cross_attn: True
|
| 124 |
+
apply_temporal_disambiguation: False
|
| 125 |
+
|
| 126 |
+
meters:
|
| 127 |
+
val:
|
| 128 |
+
ytvis_val:
|
| 129 |
+
pred_file: # key
|
| 130 |
+
_target_: sam3.eval.ytvis_eval.YTVISResultsWriter
|
| 131 |
+
dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json
|
| 132 |
+
postprocessor: ${scratch.vid_mask_postprocessor}
|
| 133 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 134 |
+
|
| 135 |
+
optim:
|
| 136 |
+
amp:
|
| 137 |
+
enabled: True
|
| 138 |
+
amp_dtype: bfloat16
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
checkpoint:
|
| 142 |
+
save_dir: ${launcher.experiment_log_dir}/checkpoints
|
| 143 |
+
save_freq: 0 # 0 only last checkpoint is saved.
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
logging:
|
| 147 |
+
tensorboard_writer:
|
| 148 |
+
_target_: sam3.train.utils.logger.make_tensorboard_logger
|
| 149 |
+
log_dir: ${launcher.experiment_log_dir}/tensorboard
|
| 150 |
+
flush_secs: 120
|
| 151 |
+
should_log: True
|
| 152 |
+
wandb_writer: null
|
| 153 |
+
log_dir: ${launcher.experiment_log_dir}/logs/
|
| 154 |
+
log_freq: 10
|
| 155 |
+
|
| 156 |
+
# ============================================================================
|
| 157 |
+
# Launcher and Submitit Configuration
|
| 158 |
+
# ============================================================================
|
| 159 |
+
|
| 160 |
+
launcher:
|
| 161 |
+
num_nodes: 8
|
| 162 |
+
gpus_per_node: 8
|
| 163 |
+
experiment_log_dir: ${paths.experiment_log_dir}
|
| 164 |
+
multiprocessing_context: forkserver
|
| 165 |
+
|
| 166 |
+
submitit:
|
| 167 |
+
account: null
|
| 168 |
+
partition: null
|
| 169 |
+
qos: null
|
| 170 |
+
timeout_hour: 72
|
| 171 |
+
use_cluster: True
|
| 172 |
+
cpus_per_task: 10
|
| 173 |
+
port_range: [10000, 65000]
|
| 174 |
+
constraint: null
|
source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_droid.yaml
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
|
| 2 |
+
defaults:
|
| 3 |
+
- /configs/eval_base.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
|
| 6 |
+
# ============================================================================
|
| 7 |
+
# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct)
|
| 8 |
+
# ============================================================================
|
| 9 |
+
paths:
|
| 10 |
+
experiment_log_dir: ${paths.base_experiment_log_dir}/silver_droid/
|
| 11 |
+
coco_gt: ${paths.base_annotation_path_silver}/silver_droid_merged_test.json
|
| 12 |
+
img_path: ${paths.silver_img_path}/droid/
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# ============================================================================
|
| 17 |
+
# Trainer Configuration
|
| 18 |
+
# ============================================================================
|
| 19 |
+
|
| 20 |
+
trainer:
|
| 21 |
+
data:
|
| 22 |
+
val:
|
| 23 |
+
_target_: sam3.train.data.torch_dataset.TorchDataset
|
| 24 |
+
dataset:
|
| 25 |
+
_target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
|
| 26 |
+
coco_json_loader:
|
| 27 |
+
_target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
|
| 28 |
+
_partial_: true
|
| 29 |
+
img_folder: ${paths.img_path}
|
| 30 |
+
ann_file: ${paths.coco_gt}
|
| 31 |
+
transforms: ${scratch.base_val_transform}
|
| 32 |
+
max_ann_per_img: 100000
|
| 33 |
+
multiplier: 1
|
| 34 |
+
training: false
|
| 35 |
+
|
| 36 |
+
shuffle: False
|
| 37 |
+
batch_size: ${scratch.val_batch_size}
|
| 38 |
+
num_workers: ${scratch.num_val_workers}
|
| 39 |
+
pin_memory: False
|
| 40 |
+
drop_last: False
|
| 41 |
+
collate_fn:
|
| 42 |
+
_target_: sam3.train.data.collator.collate_fn_api
|
| 43 |
+
_partial_: true
|
| 44 |
+
repeats: ${scratch.hybrid_repeats}
|
| 45 |
+
dict_key: silver_droid
|
| 46 |
+
|
| 47 |
+
meters:
|
| 48 |
+
val:
|
| 49 |
+
silver_droid: # this key matches the "dict_key" in the dataloader's collate function
|
| 50 |
+
cgf1:
|
| 51 |
+
_target_: sam3.eval.coco_writer.PredictionDumper
|
| 52 |
+
iou_type: "segm"
|
| 53 |
+
dump_dir: ${launcher.experiment_log_dir}/dumps/silver_droid
|
| 54 |
+
merge_predictions: True
|
| 55 |
+
postprocessor: ${scratch.mask_postprocessor_thresholded}
|
| 56 |
+
gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
|
| 57 |
+
maxdets: 1000000 # no limit
|
| 58 |
+
pred_file_evaluators:
|
| 59 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 60 |
+
gt_path: ${paths.coco_gt}
|
| 61 |
+
iou_type: "bbox"
|
| 62 |
+
- _target_: sam3.eval.cgf1_eval.CGF1Evaluator
|
| 63 |
+
gt_path: ${paths.coco_gt}
|
| 64 |
+
iou_type: "segm"
|
source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_ego4d.yaml
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
# Silver-image eval config for the Ego4D split (CGF1 bbox + segm metrics).
defaults:
  - /configs/eval_base.yaml
  - _self_

# ============================================================================
# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct
# ============================================================================
paths:
  experiment_log_dir: ${paths.base_experiment_log_dir}/silver_ego4d/
  coco_gt: ${paths.base_annotation_path_silver}/silver_ego4d_merged_test.json
  img_path: ${paths.silver_img_path}/ego4d/


# ============================================================================
# Trainer Configuration
# ============================================================================

trainer:
  data:
    val:
      _target_: sam3.train.data.torch_dataset.TorchDataset
      dataset:
        _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
        coco_json_loader:
          _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
          _partial_: true
        img_folder: ${paths.img_path}
        ann_file: ${paths.coco_gt}
        transforms: ${scratch.base_val_transform}
        max_ann_per_img: 100000
        multiplier: 1
        training: false

      shuffle: False
      batch_size: ${scratch.val_batch_size}
      num_workers: ${scratch.num_val_workers}
      pin_memory: False
      drop_last: False
      collate_fn:
        _target_: sam3.train.data.collator.collate_fn_api
        _partial_: true
        repeats: ${scratch.hybrid_repeats}
        dict_key: silver_ego4d

  meters:
    val:
      silver_ego4d: # this key matches the "dict_key" in the dataloader's collate function
        cgf1:
          _target_: sam3.eval.coco_writer.PredictionDumper
          iou_type: "segm"
          dump_dir: ${launcher.experiment_log_dir}/dumps/silver_ego4d
          merge_predictions: True
          postprocessor: ${scratch.mask_postprocessor_thresholded}
          gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
          maxdets: 1000000 # no limit
          pred_file_evaluators:
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "bbox"
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "segm"
|
source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_fathomnet.yaml
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
# Silver-image eval config for the FathomNet split (CGF1 bbox + segm metrics).
defaults:
  - /configs/eval_base.yaml
  - _self_

# ============================================================================
# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct
# ============================================================================
paths:
  experiment_log_dir: ${paths.base_experiment_log_dir}/silver_fathomnet/
  coco_gt: ${paths.base_annotation_path_silver}/silver_fathomnet_test.json
  img_path: ${paths.silver_img_path}/fathomnet/


# ============================================================================
# Trainer Configuration
# ============================================================================

trainer:
  data:
    val:
      _target_: sam3.train.data.torch_dataset.TorchDataset
      dataset:
        _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
        coco_json_loader:
          _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
          _partial_: true
        img_folder: ${paths.img_path}
        ann_file: ${paths.coco_gt}
        transforms: ${scratch.base_val_transform}
        max_ann_per_img: 100000
        multiplier: 1
        training: false

      shuffle: False
      batch_size: ${scratch.val_batch_size}
      num_workers: ${scratch.num_val_workers}
      pin_memory: False
      drop_last: False
      collate_fn:
        _target_: sam3.train.data.collator.collate_fn_api
        _partial_: true
        repeats: ${scratch.hybrid_repeats}
        dict_key: silver_fathomnet

  meters:
    val:
      silver_fathomnet: # this key matches the "dict_key" in the dataloader's collate function
        cgf1:
          _target_: sam3.eval.coco_writer.PredictionDumper
          iou_type: "segm"
          dump_dir: ${launcher.experiment_log_dir}/dumps/silver_fathomnet
          merge_predictions: True
          postprocessor: ${scratch.mask_postprocessor_thresholded}
          gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
          maxdets: 1000000 # no limit
          pred_file_evaluators:
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "bbox"
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "segm"
|
source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_geode.yaml
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
# Silver-image eval config for the GeoDE split (CGF1 bbox + segm metrics).
defaults:
  - /configs/eval_base.yaml
  - _self_

# ============================================================================
# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct
# ============================================================================
paths:
  experiment_log_dir: ${paths.base_experiment_log_dir}/silver_geode/
  coco_gt: ${paths.base_annotation_path_silver}/silver_geode_merged_test.json
  img_path: ${paths.silver_img_path}/geode/


# ============================================================================
# Trainer Configuration
# ============================================================================

trainer:
  data:
    val:
      _target_: sam3.train.data.torch_dataset.TorchDataset
      dataset:
        _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
        coco_json_loader:
          _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
          _partial_: true
        img_folder: ${paths.img_path}
        ann_file: ${paths.coco_gt}
        transforms: ${scratch.base_val_transform}
        max_ann_per_img: 100000
        multiplier: 1
        training: false

      shuffle: False
      batch_size: ${scratch.val_batch_size}
      num_workers: ${scratch.num_val_workers}
      pin_memory: False
      drop_last: False
      collate_fn:
        _target_: sam3.train.data.collator.collate_fn_api
        _partial_: true
        repeats: ${scratch.hybrid_repeats}
        dict_key: silver_geode

  meters:
    val:
      silver_geode: # this key matches the "dict_key" in the dataloader's collate function
        cgf1:
          _target_: sam3.eval.coco_writer.PredictionDumper
          iou_type: "segm"
          dump_dir: ${launcher.experiment_log_dir}/dumps/silver_geode
          merge_predictions: True
          postprocessor: ${scratch.mask_postprocessor_thresholded}
          gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
          maxdets: 1000000 # no limit
          pred_file_evaluators:
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "bbox"
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "segm"
|
source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_yt1b.yaml
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# @package _global_
# Silver-image eval config for the YT-1B split (CGF1 bbox + segm metrics).
defaults:
  - /configs/eval_base.yaml
  - _self_

# ============================================================================
# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct
# ============================================================================
paths:
  experiment_log_dir: ${paths.base_experiment_log_dir}/silver_yt1b/
  coco_gt: ${paths.base_annotation_path_silver}/silver_yt1b_merged_test.json
  img_path: ${paths.silver_img_path}/yt1b/


# ============================================================================
# Trainer Configuration
# ============================================================================

trainer:
  data:
    val:
      _target_: sam3.train.data.torch_dataset.TorchDataset
      dataset:
        _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
        coco_json_loader:
          _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
          _partial_: true
        img_folder: ${paths.img_path}
        ann_file: ${paths.coco_gt}
        transforms: ${scratch.base_val_transform}
        max_ann_per_img: 100000
        multiplier: 1
        training: false

      shuffle: False
      batch_size: ${scratch.val_batch_size}
      num_workers: ${scratch.num_val_workers}
      pin_memory: False
      drop_last: False
      collate_fn:
        _target_: sam3.train.data.collator.collate_fn_api
        _partial_: true
        repeats: ${scratch.hybrid_repeats}
        dict_key: silver_yt1b

  meters:
    val:
      silver_yt1b: # this key matches the "dict_key" in the dataloader's collate function
        cgf1:
          _target_: sam3.eval.coco_writer.PredictionDumper
          iou_type: "segm"
          dump_dir: ${launcher.experiment_log_dir}/dumps/silver_yt1b
          merge_predictions: True
          postprocessor: ${scratch.mask_postprocessor_thresholded}
          gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
          maxdets: 1000000 # no limit
          pred_file_evaluators:
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "bbox"
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gt}
              iou_type: "segm"
|
source_code/sam3/sam3/train/data/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
source_code/sam3/sam3/train/data/sam3_image_dataset.py
ADDED
|
@@ -0,0 +1,528 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
"""Dataset class for modulated detection"""
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
import random
|
| 8 |
+
import sys
|
| 9 |
+
import traceback
|
| 10 |
+
from collections import Counter
|
| 11 |
+
from dataclasses import dataclass
|
| 12 |
+
from enum import Enum
|
| 13 |
+
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
import torch.utils.data
|
| 17 |
+
import torchvision
|
| 18 |
+
from decord import cpu, VideoReader
|
| 19 |
+
from iopath.common.file_io import g_pathmgr
|
| 20 |
+
|
| 21 |
+
from PIL import Image as PILImage
|
| 22 |
+
from PIL.Image import DecompressionBombError
|
| 23 |
+
|
| 24 |
+
from sam3.model.box_ops import box_xywh_to_xyxy
|
| 25 |
+
from torchvision.datasets.vision import VisionDataset
|
| 26 |
+
|
| 27 |
+
from .coco_json_loaders import COCO_FROM_JSON
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@dataclass
class InferenceMetadata:
    """Metadata required for postprocessing"""

    # Coco id that corresponds to the "image" for evaluation by the coco evaluator
    # This is used for our own "class agnostic" evaluation
    coco_image_id: int

    # id in the original dataset, such that we can use the original evaluator
    original_image_id: int

    # Original category id (if we want to use the original evaluator)
    original_category_id: int

    # Size of the raw image (height, width)
    original_size: Tuple[int, int]

    # Id of the object in the media
    object_id: int

    # Index of the frame in the media (0 if single image)
    frame_index: int

    # Whether it is for conditioning only, e.g., 0-th frame in TA is for conditioning
    # as we assume GT available in frame 0.
    is_conditioning_only: Optional[bool] = False
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@dataclass
class FindQuery:
    """A single "find" request: a text prompt (optionally with geometric
    prompts) over one image, plus the indices of the objects it should find."""

    # The text prompt for this query (may be empty for purely geometric prompts).
    query_text: str

    # Index of the target image within the datapoint.
    image_id: int

    # In case of a find query, the list of object ids that have to be predicted
    object_ids_output: List[int]

    # This is "instance exhaustivity".
    # true iff all instances are separable and annotated
    # See below the slightly different "pixel exhaustivity"
    is_exhaustive: bool

    # The order in which the queries are processed (only meaningful for video)
    query_processing_order: int = 0

    # Input geometry, initially in denormalized XYXY format. Then
    # 1. converted to normalized CxCyWH by the Normalize transform
    input_bbox: Optional[torch.Tensor] = None
    # Per-box labels for input_bbox (1 = positive box by convention of the
    # loader, which fills ones when labels are absent).
    input_bbox_label: Optional[torch.Tensor] = None

    # Only for the PVS task
    input_points: Optional[torch.Tensor] = None

    # Optional dense (semantic) supervision target.
    semantic_target: Optional[torch.Tensor] = None

    # pixel exhaustivity: true iff the union of all segments (including crowds)
    # covers every pixel belonging to the target class
    # Note that instance_exhaustive implies pixel_exhaustive
    is_pixel_exhaustive: Optional[bool] = None
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@dataclass
class FindQueryLoaded(FindQuery):
    """A FindQuery augmented with the inference metadata attached at load time."""

    # Must have default value since FindQuery has entries with default values
    inference_metadata: Optional[InferenceMetadata] = None
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@dataclass
class Object:
    """One annotated object instance (box, area, optional mask) in an image/frame."""

    # Initially in denormalized XYXY format, gets converted to normalized CxCyWH by the Normalize transform
    bbox: torch.Tensor
    area: float

    # Id of the object in the media
    object_id: Optional[int] = -1

    # Index of the frame in the media (0 if single image)
    frame_index: Optional[int] = -1

    segment: Optional[Union[torch.Tensor, dict]] = None  # RLE dict or binary mask

    # NOTE(review): declared bool, but the loader passes None when the
    # annotation carries no "is_crowd" key — treat as tri-state in practice.
    is_crowd: bool = False

    # Provenance tag of the annotation ("" when absent in the source JSON).
    source: Optional[str] = None
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@dataclass
class Image:
    """An image (or video frame) together with its object annotations."""

    # The pixel data; a PIL image before transforms, a tensor afterwards.
    data: Union[torch.Tensor, PILImage.Image]
    objects: List[Object]
    size: Tuple[int, int]  # (height, width)

    # For blurring augmentation
    blurring_mask: Optional[Dict[str, Any]] = None
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
@dataclass
class Datapoint:
    """Refers to an image/video and all its annotations"""

    find_queries: List[FindQueryLoaded]
    images: List[Image]
    # Untransformed PIL images, kept alongside the (possibly transformed) images.
    raw_images: Optional[List[PILImage.Image]] = None
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class CustomCocoDetectionAPI(VisionDataset):
|
| 136 |
+
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
|
| 137 |
+
|
| 138 |
+
Args:
|
| 139 |
+
root (string): Root directory where images are downloaded to.
|
| 140 |
+
annFile (string): Path to json annotation file.
|
| 141 |
+
transform (callable, optional): A function/transform that takes in an PIL image
|
| 142 |
+
and returns a transformed version. E.g, ``transforms.ToTensor``
|
| 143 |
+
target_transform (callable, optional): A function/transform that takes in the
|
| 144 |
+
target and transforms it.
|
| 145 |
+
transforms (callable, optional): A function/transform that takes input sample and its target as entry
|
| 146 |
+
and returns a transformed version.
|
| 147 |
+
"""
|
| 148 |
+
|
| 149 |
+
def __init__(
|
| 150 |
+
self,
|
| 151 |
+
root: str,
|
| 152 |
+
annFile: str,
|
| 153 |
+
load_segmentation: bool,
|
| 154 |
+
fix_fname: bool = False,
|
| 155 |
+
training: bool = True,
|
| 156 |
+
blurring_masks_path: Optional[str] = None,
|
| 157 |
+
use_caching: bool = True,
|
| 158 |
+
zstd_dict_path=None,
|
| 159 |
+
filter_query=None,
|
| 160 |
+
coco_json_loader: Callable = COCO_FROM_JSON,
|
| 161 |
+
limit_ids: int = None,
|
| 162 |
+
) -> None:
|
| 163 |
+
super().__init__(root)
|
| 164 |
+
|
| 165 |
+
self.annFile = annFile
|
| 166 |
+
self.use_caching = use_caching
|
| 167 |
+
self.zstd_dict_path = zstd_dict_path
|
| 168 |
+
|
| 169 |
+
self.curr_epoch = 0 # Used in case data loader behavior changes across epochs
|
| 170 |
+
self.load_segmentation = load_segmentation
|
| 171 |
+
self.fix_fname = fix_fname
|
| 172 |
+
self.filter_query = filter_query
|
| 173 |
+
|
| 174 |
+
self.coco = None
|
| 175 |
+
self.coco_json_loader = coco_json_loader
|
| 176 |
+
self.limit_ids = limit_ids
|
| 177 |
+
self.set_sharded_annotation_file(0)
|
| 178 |
+
self.training = training
|
| 179 |
+
self.blurring_masks_path = blurring_masks_path
|
| 180 |
+
|
| 181 |
+
def _load_images(
|
| 182 |
+
self, datapoint_id: int, img_ids_to_load: Optional[Set[int]] = None
|
| 183 |
+
) -> Tuple[List[Tuple[int, PILImage.Image]], List[Dict[str, Any]]]:
|
| 184 |
+
all_images = []
|
| 185 |
+
all_img_metadata = []
|
| 186 |
+
for current_meta in self.coco.loadImagesFromDatapoint(datapoint_id):
|
| 187 |
+
img_id = current_meta["id"]
|
| 188 |
+
if img_ids_to_load is not None and img_id not in img_ids_to_load:
|
| 189 |
+
continue
|
| 190 |
+
if self.fix_fname:
|
| 191 |
+
current_meta["file_name"] = current_meta["file_name"].split("/")[-1]
|
| 192 |
+
path = current_meta["file_name"]
|
| 193 |
+
if self.blurring_masks_path is not None:
|
| 194 |
+
mask_fname = os.path.basename(path).replace(".jpg", "-mask.json")
|
| 195 |
+
mask_path = os.path.join(self.blurring_masks_path, mask_fname)
|
| 196 |
+
if os.path.exists(mask_path):
|
| 197 |
+
with open(mask_path, "r") as fopen:
|
| 198 |
+
current_meta["blurring_mask"] = json.load(fopen)
|
| 199 |
+
|
| 200 |
+
all_img_metadata.append(current_meta)
|
| 201 |
+
path = os.path.join(self.root, path)
|
| 202 |
+
try:
|
| 203 |
+
if ".mp4" in path and path[-4:] == ".mp4":
|
| 204 |
+
# Going to load a video frame
|
| 205 |
+
video_path, frame = path.split("@")
|
| 206 |
+
video = VideoReader(video_path, ctx=cpu(0))
|
| 207 |
+
# Convert to PIL image
|
| 208 |
+
all_images.append(
|
| 209 |
+
(
|
| 210 |
+
img_id,
|
| 211 |
+
torchvision.transforms.ToPILImage()(
|
| 212 |
+
video[int(frame)].asnumpy()
|
| 213 |
+
),
|
| 214 |
+
)
|
| 215 |
+
)
|
| 216 |
+
else:
|
| 217 |
+
with g_pathmgr.open(path, "rb") as fopen:
|
| 218 |
+
all_images.append((img_id, PILImage.open(fopen).convert("RGB")))
|
| 219 |
+
except FileNotFoundError as e:
|
| 220 |
+
print(f"File not found: {path} from dataset: {self.annFile}")
|
| 221 |
+
raise e
|
| 222 |
+
|
| 223 |
+
return all_images, all_img_metadata
|
| 224 |
+
|
| 225 |
+
def set_curr_epoch(self, epoch: int):
|
| 226 |
+
self.curr_epoch = epoch
|
| 227 |
+
|
| 228 |
+
def set_epoch(self, epoch: int):
|
| 229 |
+
pass
|
| 230 |
+
|
| 231 |
+
def set_sharded_annotation_file(self, data_epoch: int):
|
| 232 |
+
if self.coco is not None:
|
| 233 |
+
return
|
| 234 |
+
|
| 235 |
+
assert g_pathmgr.isfile(
|
| 236 |
+
self.annFile
|
| 237 |
+
), f"please provide valid annotation file. Missing: {self.annFile}"
|
| 238 |
+
annFile = g_pathmgr.get_local_path(self.annFile)
|
| 239 |
+
|
| 240 |
+
if self.coco is not None:
|
| 241 |
+
del self.coco
|
| 242 |
+
|
| 243 |
+
self.coco = self.coco_json_loader(annFile)
|
| 244 |
+
# Use a torch tensor here to optimize memory usage when using several dataloaders
|
| 245 |
+
ids_list = list(sorted(self.coco.getDatapointIds()))
|
| 246 |
+
if self.limit_ids is not None:
|
| 247 |
+
local_random = random.Random(len(ids_list))
|
| 248 |
+
local_random.shuffle(ids_list)
|
| 249 |
+
ids_list = ids_list[: self.limit_ids]
|
| 250 |
+
self.ids = torch.as_tensor(ids_list, dtype=torch.long)
|
| 251 |
+
|
| 252 |
+
def __getitem__(self, index: int) -> Datapoint:
|
| 253 |
+
return self._load_datapoint(index)
|
| 254 |
+
|
| 255 |
+
def _load_datapoint(self, index: int) -> Datapoint:
|
| 256 |
+
"""A separate method for easy overriding in subclasses."""
|
| 257 |
+
id = self.ids[index].item()
|
| 258 |
+
pil_images, img_metadata = self._load_images(id)
|
| 259 |
+
queries, annotations = self.coco.loadQueriesAndAnnotationsFromDatapoint(id)
|
| 260 |
+
return self.load_queries(pil_images, annotations, queries, img_metadata)
|
| 261 |
+
|
| 262 |
+
def load_queries(self, pil_images, annotations, queries, img_metadata):
|
| 263 |
+
"""Transform the raw image and queries into a Datapoint sample."""
|
| 264 |
+
images: List[Image] = []
|
| 265 |
+
id2index_img = {}
|
| 266 |
+
id2index_obj = {}
|
| 267 |
+
id2index_find_query = {}
|
| 268 |
+
id2imsize = {}
|
| 269 |
+
assert len(pil_images) == len(img_metadata)
|
| 270 |
+
for i in range(len(pil_images)):
|
| 271 |
+
w, h = pil_images[i][1].size
|
| 272 |
+
blurring_mask = None
|
| 273 |
+
if "blurring_mask" in img_metadata[i]:
|
| 274 |
+
blurring_mask = img_metadata[i]["blurring_mask"]
|
| 275 |
+
images.append(
|
| 276 |
+
Image(
|
| 277 |
+
data=pil_images[i][1],
|
| 278 |
+
objects=[],
|
| 279 |
+
size=(h, w),
|
| 280 |
+
blurring_mask=blurring_mask,
|
| 281 |
+
)
|
| 282 |
+
)
|
| 283 |
+
id2index_img[pil_images[i][0]] = i
|
| 284 |
+
id2imsize[pil_images[i][0]] = (h, w)
|
| 285 |
+
|
| 286 |
+
for annotation in annotations:
|
| 287 |
+
image_id = id2index_img[annotation["image_id"]]
|
| 288 |
+
bbox = box_xywh_to_xyxy(torch.as_tensor(annotation["bbox"])).view(1, 4)
|
| 289 |
+
h, w = id2imsize[annotation["image_id"]]
|
| 290 |
+
bbox[:, 0::2].mul_(w).clamp_(min=0, max=w)
|
| 291 |
+
bbox[:, 1::2].mul_(h).clamp_(min=0, max=h)
|
| 292 |
+
segment = None
|
| 293 |
+
if self.load_segmentation and "segmentation" in annotation:
|
| 294 |
+
# We're not decoding the RLE here, a transform will do it lazily later
|
| 295 |
+
segment = annotation["segmentation"]
|
| 296 |
+
images[image_id].objects.append(
|
| 297 |
+
Object(
|
| 298 |
+
bbox=bbox[0],
|
| 299 |
+
area=annotation["area"],
|
| 300 |
+
object_id=(
|
| 301 |
+
annotation["object_id"] if "object_id" in annotation else -1
|
| 302 |
+
),
|
| 303 |
+
frame_index=(
|
| 304 |
+
annotation["frame_index"] if "frame_index" in annotation else -1
|
| 305 |
+
),
|
| 306 |
+
segment=segment,
|
| 307 |
+
is_crowd=(
|
| 308 |
+
annotation["is_crowd"] if "is_crowd" in annotation else None
|
| 309 |
+
),
|
| 310 |
+
source=annotation["source"] if "source" in annotation else "",
|
| 311 |
+
)
|
| 312 |
+
)
|
| 313 |
+
id2index_obj[annotation["id"]] = len(images[image_id].objects) - 1
|
| 314 |
+
|
| 315 |
+
find_queries = []
|
| 316 |
+
stage2num_queries = Counter()
|
| 317 |
+
for i, query in enumerate(queries):
|
| 318 |
+
stage2num_queries[query["query_processing_order"]] += 1
|
| 319 |
+
id2index_find_query[query["id"]] = i
|
| 320 |
+
|
| 321 |
+
# Sanity check: all the stages should have the same number of queries
|
| 322 |
+
if len(stage2num_queries) == 0:
|
| 323 |
+
num_queries_per_stage = 0
|
| 324 |
+
else:
|
| 325 |
+
num_queries_per_stage = stage2num_queries.most_common(1)[0][1]
|
| 326 |
+
for stage, num_queries in stage2num_queries.items():
|
| 327 |
+
assert (
|
| 328 |
+
num_queries == num_queries_per_stage
|
| 329 |
+
), f"Number of queries in stage {stage} is {num_queries}, expected {num_queries_per_stage}"
|
| 330 |
+
|
| 331 |
+
for query_id, query in enumerate(queries):
|
| 332 |
+
h, w = id2imsize[query["image_id"]]
|
| 333 |
+
if (
|
| 334 |
+
"input_box" in query
|
| 335 |
+
and query["input_box"] is not None
|
| 336 |
+
and len(query["input_box"]) > 0
|
| 337 |
+
):
|
| 338 |
+
bbox = box_xywh_to_xyxy(torch.as_tensor(query["input_box"])).view(-1, 4)
|
| 339 |
+
bbox[:, 0::2].mul_(w).clamp_(min=0, max=w)
|
| 340 |
+
bbox[:, 1::2].mul_(h).clamp_(min=0, max=h)
|
| 341 |
+
if "input_box_label" in query and query["input_box_label"] is not None:
|
| 342 |
+
bbox_label = torch.as_tensor(
|
| 343 |
+
query["input_box_label"], dtype=torch.long
|
| 344 |
+
).view(-1)
|
| 345 |
+
assert len(bbox_label) == len(bbox)
|
| 346 |
+
else:
|
| 347 |
+
# assume the boxes are positives
|
| 348 |
+
bbox_label = torch.ones(len(bbox), dtype=torch.long)
|
| 349 |
+
else:
|
| 350 |
+
bbox = None
|
| 351 |
+
bbox_label = None
|
| 352 |
+
|
| 353 |
+
if "input_points" in query and query["input_points"] is not None:
|
| 354 |
+
points = torch.as_tensor(query["input_points"]).view(1, -1, 3)
|
| 355 |
+
points[:, :, 0:1].mul_(w).clamp_(min=0, max=w)
|
| 356 |
+
points[:, :, 1:2].mul_(h).clamp_(min=0, max=h)
|
| 357 |
+
else:
|
| 358 |
+
points = None
|
| 359 |
+
|
| 360 |
+
try:
|
| 361 |
+
original_image_id = int(
|
| 362 |
+
img_metadata[id2index_img[query["image_id"]]]["original_img_id"]
|
| 363 |
+
)
|
| 364 |
+
except ValueError:
|
| 365 |
+
original_image_id = -1
|
| 366 |
+
|
| 367 |
+
try:
|
| 368 |
+
img_metadata_query = img_metadata[id2index_img[query["image_id"]]]
|
| 369 |
+
coco_image_id = (
|
| 370 |
+
int(img_metadata_query["coco_img_id"])
|
| 371 |
+
if "coco_img_id" in img_metadata_query
|
| 372 |
+
else query["id"]
|
| 373 |
+
)
|
| 374 |
+
except KeyError:
|
| 375 |
+
coco_image_id = -1
|
| 376 |
+
|
| 377 |
+
try:
|
| 378 |
+
original_category_id = int(query["original_cat_id"])
|
| 379 |
+
except (ValueError, KeyError):
|
| 380 |
+
original_category_id = -1
|
| 381 |
+
|
| 382 |
+
# For evaluation, we associate the ids of the object to be tracked to the query
|
| 383 |
+
if query["object_ids_output"]:
|
| 384 |
+
obj_id = query["object_ids_output"][0]
|
| 385 |
+
obj_idx = id2index_obj[obj_id]
|
| 386 |
+
image_idx = id2index_img[query["image_id"]]
|
| 387 |
+
object_id = images[image_idx].objects[obj_idx].object_id
|
| 388 |
+
frame_index = images[image_idx].objects[obj_idx].frame_index
|
| 389 |
+
else:
|
| 390 |
+
object_id = -1
|
| 391 |
+
frame_index = -1
|
| 392 |
+
|
| 393 |
+
find_queries.append(
|
| 394 |
+
FindQueryLoaded(
|
| 395 |
+
# id=query["id"],
|
| 396 |
+
# query_type=qtype,
|
| 397 |
+
query_text=(
|
| 398 |
+
query["query_text"] if query["query_text"] is not None else ""
|
| 399 |
+
),
|
| 400 |
+
image_id=id2index_img[query["image_id"]],
|
| 401 |
+
input_bbox=bbox,
|
| 402 |
+
input_bbox_label=bbox_label,
|
| 403 |
+
input_points=points,
|
| 404 |
+
object_ids_output=[
|
| 405 |
+
id2index_obj[obj_id] for obj_id in query["object_ids_output"]
|
| 406 |
+
],
|
| 407 |
+
is_exhaustive=query["is_exhaustive"],
|
| 408 |
+
is_pixel_exhaustive=(
|
| 409 |
+
query["is_pixel_exhaustive"]
|
| 410 |
+
if "is_pixel_exhaustive" in query
|
| 411 |
+
else (
|
| 412 |
+
query["is_exhaustive"] if query["is_exhaustive"] else None
|
| 413 |
+
)
|
| 414 |
+
),
|
| 415 |
+
query_processing_order=query["query_processing_order"],
|
| 416 |
+
inference_metadata=InferenceMetadata(
|
| 417 |
+
coco_image_id=-1 if self.training else coco_image_id,
|
| 418 |
+
original_image_id=(-1 if self.training else original_image_id),
|
| 419 |
+
frame_index=frame_index,
|
| 420 |
+
original_category_id=original_category_id,
|
| 421 |
+
original_size=(h, w),
|
| 422 |
+
object_id=object_id,
|
| 423 |
+
),
|
| 424 |
+
)
|
| 425 |
+
)
|
| 426 |
+
|
| 427 |
+
return Datapoint(
|
| 428 |
+
find_queries=find_queries,
|
| 429 |
+
images=images,
|
| 430 |
+
raw_images=[p[1] for p in pil_images],
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
    def __len__(self) -> int:
        """Return the dataset size.

        This is the number of image ids exposed by the COCO-style base class
        (after any filtering/limiting done there — see `limit_ids` in
        subclasses).
        """
        return len(self.ids)
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
class Sam3ImageDataset(CustomCocoDetectionAPI):
    """COCO-style detection dataset with per-datapoint transforms and
    skip-and-retry loading for oversized or corrupt samples.

    On top of `CustomCocoDetectionAPI`, this class:
      - applies a sequence of transforms to each loaded datapoint,
      - rejects datapoints with too many annotations or find queries
        (limits differ between training and evaluation),
      - retries with the next index when loading fails, up to
        `self._MAX_RETRIES` times.
    """

    def __init__(
        self,
        img_folder,
        ann_file,
        transforms,
        max_ann_per_img: int,
        multiplier: int,
        training: bool,
        load_segmentation: bool = False,
        max_train_queries: int = 81,
        max_val_queries: int = 300,
        fix_fname: bool = False,
        # NOTE(review): `is_sharded_annotation_dir` is accepted but never used
        # in this class and is not forwarded to the base class — confirm intent.
        is_sharded_annotation_dir: bool = False,
        blurring_masks_path: Optional[str] = None,
        use_caching: bool = True,
        zstd_dict_path=None,
        filter_query=None,
        coco_json_loader: Callable = COCO_FROM_JSON,
        limit_ids: int = None,
    ):
        """
        Args:
            img_folder: Root folder containing the images.
            ann_file: Path to the COCO-style annotation file.
            transforms: Iterable of callables applied in order to each
                datapoint; each is called as `transform(datapoint, epoch=...)`.
            max_ann_per_img: Reject datapoints containing a find query with
                more output objects than this.
            multiplier: Uniform repeat factor applied to every sample
                (used by repeat-factor sampling).
            training: Selects the train vs. val query-count limit.
            max_train_queries: Max find queries per datapoint when training.
            max_val_queries: Max find queries per datapoint when evaluating.
            Remaining arguments are forwarded to `CustomCocoDetectionAPI`.
        """
        super(Sam3ImageDataset, self).__init__(
            img_folder,
            ann_file,
            fix_fname=fix_fname,
            load_segmentation=load_segmentation,
            training=training,
            blurring_masks_path=blurring_masks_path,
            use_caching=use_caching,
            zstd_dict_path=zstd_dict_path,
            filter_query=filter_query,
            coco_json_loader=coco_json_loader,
            limit_ids=limit_ids,
        )

        self._transforms = transforms
        self.training = training
        self.max_ann_per_img = max_ann_per_img
        self.max_train_queries = max_train_queries
        self.max_val_queries = max_val_queries

        # One repeat factor per sample; a uniform multiplier here simply
        # duplicates every sample `multiplier` times in repeat-factor sampling.
        self.repeat_factors = torch.ones(len(self.ids), dtype=torch.float32)

        self.repeat_factors *= multiplier
        print(f"Raw dataset length = {len(self.ids)}")

        # Upper bound on skip-and-retry attempts in __orig_getitem__.
        self._MAX_RETRIES = 100

    def __getitem__(self, idx):
        # Kept as a thin alias so subclasses/wrappers can override one or the
        # other independently.
        return self.__orig_getitem__(idx)

    def __orig_getitem__(self, idx):
        """Load datapoint `idx`, skipping to the next index on failure.

        Raises:
            RuntimeError: If `self._MAX_RETRIES` consecutive indices fail.
        """
        for _ in range(self._MAX_RETRIES):
            try:
                datapoint = super(Sam3ImageDataset, self).__getitem__(idx)

                # Oversized datapoints are rejected wholesale (by raising and
                # retrying with another index) rather than trimmed.
                # This can be done better by filtering the offending find queries
                # However, this requires care:
                # - Delete any find/get query that may depend on the deleted one
                # - Re-compute the indexes in the pointers to account for the deleted finds
                for q in datapoint.find_queries:
                    if len(q.object_ids_output) > self.max_ann_per_img:
                        # DecompressionBombError is reused here as a generic
                        # "skip this sample" signal; it is caught below.
                        raise DecompressionBombError(
                            f"Too many outputs ({len(q.object_ids_output)})"
                        )

                max_queries = (
                    self.max_train_queries if self.training else self.max_val_queries
                )

                if len(datapoint.find_queries) > max_queries:
                    raise DecompressionBombError(
                        f"Too many find queries ({len(datapoint.find_queries)})"
                    )

                if len(datapoint.find_queries) == 0:
                    raise DecompressionBombError("No find queries")
                for transform in self._transforms:
                    datapoint = transform(datapoint, epoch=self.curr_epoch)

                break
            except (DecompressionBombError, OSError, ValueError) as error:
                # Log and advance to the next sample (wrapping around) so a
                # single bad file does not kill the whole epoch.
                sys.stderr.write(f"ERROR: got loading error on datapoint {idx}\n")
                sys.stderr.write(f"Exception: {error}\n")
                sys.stderr.write(traceback.format_exc())
                idx = (idx + 1) % len(self)
        else:
            # for/else: only reached when the loop exhausted all retries
            # without hitting `break`.
            raise RuntimeError(
                f"Failed {self._MAX_RETRIES} times trying to load an image."
            )

        return datapoint
|
source_code/sam3/sam3/train/loss/mask_sampling.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
from typing import Callable
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch.nn import functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Adapted from https://github.com/facebookresearch/detectron2/blob/main/projects/PointRend/point_rend/point_features.py
|
| 10 |
+
def point_sample(input, point_coords, **kwargs):
    """
    Sample features of `input` at normalized 2D locations.

    A thin wrapper around :func:`torch.nn.functional.grid_sample` that accepts
    coordinates in the [0, 1] x [0, 1] square (instead of grid_sample's
    [-1, 1] convention) and supports (N, P, 2) point tensors in addition to
    (N, Hgrid, Wgrid, 2) grids.

    Args:
        input (Tensor): (N, C, H, W) feature map on an H x W grid.
        point_coords (Tensor): (N, P, 2) or (N, Hgrid, Wgrid, 2) coordinates
            normalized to [0, 1] x [0, 1].

    Returns:
        Tensor: (N, C, P) or (N, C, Hgrid, Wgrid) features, bilinearly
        interpolated from `input` exactly as grid_sample would.
    """
    # grid_sample needs a 4D grid, so lift (N, P, 2) to (N, P, 1, 2) and
    # remember to drop the extra axis on the way out.
    squeeze_back = point_coords.dim() == 3
    if squeeze_back:
        point_coords = point_coords.unsqueeze(2)
    # Map [0, 1] coordinates onto grid_sample's [-1, 1] coordinate system.
    sampled = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
    return sampled.squeeze(3) if squeeze_back else sampled
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Adapted from https://github.com/facebookresearch/detectron2/blob/main/projects/PointRend/point_rend/point_features.py
|
| 38 |
+
def get_uncertain_point_coords_with_randomness(
    logits: torch.Tensor,
    uncertainty_func: Callable,
    num_points: int,
    oversample_ratio: int,
    importance_sample_ratio: float,
) -> torch.Tensor:
    """
    Sample points in [0, 1] x [0, 1] based on prediction uncertainty.

    A pool of `num_points * oversample_ratio` candidate locations is drawn
    uniformly; the `importance_sample_ratio` most uncertain candidates (per
    `uncertainty_func`) are kept and the remainder are drawn uniformly at
    random. See the PointRend paper for details.

    Args:
        logits (Tensor): (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask)
            class-specific or class-agnostic predictions.
        uncertainty_func: Maps a (N, C, P) or (N, 1, P) tensor of point logits
            to (N, 1, P) uncertainties.
        num_points (int): Number of points P to sample.
        oversample_ratio (int): Oversampling factor for the candidate pool.
        importance_sample_ratio (float): Fraction of points picked by
            importance (uncertainty) sampling rather than uniformly.

    Returns:
        Tensor: (N, P, 2) sampled point coordinates.
    """
    assert oversample_ratio >= 1
    assert 0 <= importance_sample_ratio <= 1
    n_boxes = logits.shape[0]
    n_candidates = int(num_points * oversample_ratio)
    candidates = torch.rand(n_boxes, n_candidates, 2, device=logits.device)
    # Sample logits at the candidate locations (inlined point_sample: lift to a
    # 4D grid, remap [0, 1] -> [-1, 1], grid_sample, drop the extra axis).
    candidate_logits = F.grid_sample(
        logits, 2.0 * candidates.unsqueeze(2) - 1.0, align_corners=False
    ).squeeze(3)
    # Uncertainty must be computed on the *sampled* logits, never sampled from
    # a precomputed uncertainty map. E.g. with uncertainty = -abs(logit), a
    # point interpolated between logits -1 and 1 has logit 0 (uncertainty 0,
    # maximally uncertain), whereas interpolating the per-pixel uncertainties
    # would wrongly give -1.
    uncertainties = uncertainty_func(candidate_logits)
    n_importance = int(importance_sample_ratio * num_points)
    n_uniform = num_points - n_importance
    top_idx = torch.topk(uncertainties[:, 0, :], k=n_importance, dim=1)[1]
    # Offset each box's indices so they address the flattened candidate tensor.
    top_idx += n_candidates * torch.arange(
        n_boxes, dtype=torch.long, device=logits.device
    )[:, None]
    picked = candidates.view(-1, 2)[top_idx.view(-1), :].view(
        n_boxes, n_importance, 2
    )
    if n_uniform > 0:
        picked = torch.cat(
            [picked, torch.rand(n_boxes, n_uniform, 2, device=logits.device)],
            dim=1,
        )
    return picked
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
# Adapted from https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/criterion.py
|
| 102 |
+
def calculate_uncertainty(logits: torch.Tensor) -> torch.Tensor:
    """
    Estimate uncertainty as the negative L1 distance of a logit from 0.0,
    so logits closest to the decision boundary score highest.

    Args:
        logits (Tensor): (R, 1, ...) class-agnostic predicted mask logits.

    Returns:
        Tensor: (R, 1, ...) uncertainty scores; larger means more uncertain.
    """
    assert logits.shape[1] == 1  # class-agnostic predictions only
    return logits.abs().neg()
|
source_code/sam3/sam3/train/optim/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
source_code/sam3/sam3/train/optim/optimizer.py
ADDED
|
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
import fnmatch
|
| 4 |
+
import inspect
|
| 5 |
+
import itertools
|
| 6 |
+
import logging
|
| 7 |
+
import types
|
| 8 |
+
from typing import (
|
| 9 |
+
Any,
|
| 10 |
+
Callable,
|
| 11 |
+
Dict,
|
| 12 |
+
Iterable,
|
| 13 |
+
List,
|
| 14 |
+
Mapping,
|
| 15 |
+
Optional,
|
| 16 |
+
Set,
|
| 17 |
+
Tuple,
|
| 18 |
+
Type,
|
| 19 |
+
Union,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
import hydra
|
| 23 |
+
|
| 24 |
+
import torch
|
| 25 |
+
import torch.nn as nn
|
| 26 |
+
from omegaconf import DictConfig
|
| 27 |
+
from torch import Tensor
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class Optimizer:
|
| 31 |
+
def __init__(self, optimizer, schedulers=None) -> None:
|
| 32 |
+
self.optimizer = optimizer
|
| 33 |
+
self.schedulers = schedulers
|
| 34 |
+
self._validate_optimizer_schedulers()
|
| 35 |
+
self.step_schedulers(0.0, 0)
|
| 36 |
+
|
| 37 |
+
def _validate_optimizer_schedulers(self):
|
| 38 |
+
if self.schedulers is None:
|
| 39 |
+
return
|
| 40 |
+
for _, set_of_schedulers in enumerate(self.schedulers):
|
| 41 |
+
for option, _ in set_of_schedulers.items():
|
| 42 |
+
assert option in self.optimizer.defaults, (
|
| 43 |
+
"Optimizer option "
|
| 44 |
+
f"{option} not found in {self.optimizer}. Valid options are "
|
| 45 |
+
f"{self.optimizer.defaults.keys()}"
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
def step_schedulers(self, where: float, step: int) -> None:
|
| 49 |
+
if self.schedulers is None:
|
| 50 |
+
return
|
| 51 |
+
for i, param_group in enumerate(self.optimizer.param_groups):
|
| 52 |
+
for option, scheduler in self.schedulers[i].items():
|
| 53 |
+
if "step" in inspect.signature(scheduler.__call__).parameters:
|
| 54 |
+
new_value = scheduler(step=step, where=where)
|
| 55 |
+
elif (
|
| 56 |
+
hasattr(scheduler, "scheduler")
|
| 57 |
+
and "step"
|
| 58 |
+
in inspect.signature(scheduler.scheduler.__call__).parameters
|
| 59 |
+
):
|
| 60 |
+
# To handle ValueScaler wrappers
|
| 61 |
+
new_value = scheduler(step=step, where=where)
|
| 62 |
+
else:
|
| 63 |
+
new_value = scheduler(where)
|
| 64 |
+
param_group[option] = new_value
|
| 65 |
+
|
| 66 |
+
def step(self, where, step, closure=None):
|
| 67 |
+
self.step_schedulers(where, step)
|
| 68 |
+
return self.optimizer.step(closure)
|
| 69 |
+
|
| 70 |
+
def zero_grad(self, *args, **kwargs):
|
| 71 |
+
return self.optimizer.zero_grad(*args, **kwargs)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def set_default_parameters(
|
| 75 |
+
scheduler_cfgs: List[DictConfig], all_parameter_names: Set[str]
|
| 76 |
+
) -> None:
|
| 77 |
+
"""Set up the "default" scheduler with the right parameters.
|
| 78 |
+
|
| 79 |
+
Args:
|
| 80 |
+
scheduler_cgfs: A list of scheduler configs, where each scheduler also
|
| 81 |
+
specifies which parameters it applies to, based on the names of parameters
|
| 82 |
+
or the class of the modules. At most one scheduler is allowed to skip this
|
| 83 |
+
specification, which is used as a "default" specification for any remaining
|
| 84 |
+
parameters.
|
| 85 |
+
all_parameter_names: Names of all the parameters to consider.
|
| 86 |
+
"""
|
| 87 |
+
constraints = [
|
| 88 |
+
scheduler_cfg.parameter_names
|
| 89 |
+
for scheduler_cfg in scheduler_cfgs
|
| 90 |
+
if scheduler_cfg.parameter_names is not None
|
| 91 |
+
]
|
| 92 |
+
if len(constraints) == 0:
|
| 93 |
+
default_params = set(all_parameter_names)
|
| 94 |
+
else:
|
| 95 |
+
default_params = all_parameter_names - set.union(*constraints)
|
| 96 |
+
default_count = 0
|
| 97 |
+
for scheduler_cfg in scheduler_cfgs:
|
| 98 |
+
if scheduler_cfg.parameter_names is None:
|
| 99 |
+
scheduler_cfg.parameter_names = default_params
|
| 100 |
+
default_count += 1
|
| 101 |
+
assert default_count <= 1, "Only one scheduler per option can be default"
|
| 102 |
+
if default_count == 0:
|
| 103 |
+
# No default scheduler specified, add a default, but without any scheduler
|
| 104 |
+
# for that option
|
| 105 |
+
scheduler_cfgs.append({"parameter_names": default_params})
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def name_constraints_to_parameters(
    param_constraints: List[Set[str]], named_parameters: Dict[str, Tensor]
) -> List[torch.nn.Parameter]:
    """Return the parameters whose names satisfy *every* constraint set.

    Note that the parameter objects themselves are returned, not their names.

    Args:
        param_constraints: Each element is a set of allowed parameter names.
        named_parameters: Mapping from parameter name to the parameter itself.

    Returns:
        The parameters whose names lie in the intersection of all constraint
        sets, in `named_parameters` iteration order.
    """
    selected = set.intersection(*param_constraints)
    return [param for name, param in named_parameters.items() if name in selected]
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def map_scheduler_cfgs_to_param_groups(
    all_scheduler_cfgs: Iterable[List[Dict]],
    named_parameters: Dict[str, Tensor],
) -> Tuple[List[Dict[Any, Any]], List[Dict[str, List[torch.nn.Parameter]]]]:
    """Build optimizer param groups from per-option scheduler configs.

    Each option (e.g. "lr", "weight_decay") contributes a list of scheduler
    configs, each claiming a set of parameter names. The cartesian product of
    those lists yields every combination of one scheduler per option; a
    combination becomes a param group containing the parameters claimed by
    *all* of its schedulers. Combinations that select no parameters are
    dropped.

    Args:
        all_scheduler_cfgs: All the scheduler configs covering every option.
        named_parameters: Mapping from parameter name to the parameter itself.

    Returns:
        Tuple (schedulers, param_groups) where schedulers[i] applies to
        param_groups[i].
    """
    schedulers = []
    param_groups = []
    for combo in itertools.product(*all_scheduler_cfgs):
        # Parameters governed by this combination: those named by every
        # scheduler in it (inlined name_constraints_to_parameters).
        allowed = set.intersection(*[cfg["parameter_names"] for cfg in combo])
        matching = [
            param for name, param in named_parameters.items() if name in allowed
        ]
        if len(matching) == 0:  # If no overlap of parameters, skip
            continue
        schedulers.append(
            {
                cfg["option"]: cfg["scheduler"]
                for cfg in combo
                if "option" in cfg
            }
        )
        param_groups.append({"params": matching})
    return schedulers, param_groups
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def validate_param_group_params(param_groups: List[Dict], model: nn.Module):
    """Assert the param groups are disjoint and cover all model parameters.

    Args:
        param_groups: List of all param groups.
        model: Model to validate against; every one of its parameters must
            appear in exactly one group.

    Raises:
        AssertionError: If a group repeats a parameter, two groups overlap,
            or the union of all groups misses some model parameter.
    """
    for group in param_groups:
        # A parameter may not appear twice within a single group.
        assert len(group["params"]) == len(set(group["params"]))
    group_sets = [set(group["params"]) for group in param_groups]
    model_parameters = {parameter for _, parameter in model.named_parameters()}
    for p1, p2 in itertools.permutations(group_sets, 2):
        assert p1.isdisjoint(p2), "Scheduler generated param_groups should be disjoint"
    assert set.union(*group_sets) == model_parameters, (
        "Scheduler generated param_groups must include all parameters of the model."
        f" Found {len(set.union(*group_sets))} params whereas model has"
        f" {len(model_parameters)} params"
    )
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def unix_module_cls_pattern_to_parameter_names(
    filter_module_cls_names: List[str],
    module_cls_to_param_names: Dict[Type, str],
) -> Union[None, Set[str]]:
    """Collect the parameter names owned by the listed module classes.

    Args:
        filter_module_cls_names: Fully-qualified class names, e.g.
            ["torch.nn.LayerNorm", "torch.nn.BatchNorm2d"], or None.
        module_cls_to_param_names: Mapping from module class to the parameter
            names it owns. See `get_module_cls_to_param_names`.

    Returns:
        The union of parameter names across all listed classes; an empty set
        when the filter is None.

    Raises:
        AssertionError: If a listed class matches no module in the model or
            owns no parameters.
    """
    if filter_module_cls_names is None:
        return set()
    per_cls_matches = []
    for module_cls_name in filter_module_cls_names:
        module_cls = hydra.utils.get_class(module_cls_name)
        matching_parameters = module_cls_to_param_names.get(module_cls)
        if matching_parameters is None:
            raise AssertionError(
                f"module_cls_name {module_cls_name} does not "
                "match any classes in the model"
            )
        assert (
            len(matching_parameters) > 0
        ), f"module_cls_name {module_cls_name} does not contain any parameters in the model"
        logging.info(
            f"Matches for module_cls_name [{module_cls_name}]: {matching_parameters} "
        )
        per_cls_matches.append(matching_parameters)
    return set.union(*per_cls_matches)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def unix_param_pattern_to_parameter_names(
    filter_param_names: Optional[List[str]],
    parameter_names: Dict[str, torch.Tensor],
) -> Union[None, Set[str]]:
    """Expand unix-style name patterns into the parameter names they match.

    Args:
        filter_param_names: Unix-style patterns with optional wildcards, like
            ["block.2.*", "block.2.linear.weight"], or None.
        parameter_names: Mapping whose keys are the candidate parameter names.

    Returns:
        The union of names matched by any pattern; an empty set when the
        filter is None.

    Raises:
        AssertionError: If some pattern matches no parameter at all.
    """
    if filter_param_names is None:
        return set()
    matches_per_pattern = []
    for param_name in filter_param_names:
        matched = set(fnmatch.filter(parameter_names, param_name))
        assert (
            len(matched) >= 1
        ), f"param_name {param_name} does not match any parameters in the model"
        logging.info(f"Matches for param_name [{param_name}]: {matched}")
        matches_per_pattern.append(matched)
    return set.union(*matches_per_pattern)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def _unix_pattern_to_parameter_names(
    scheduler_cfg: DictConfig,
    parameter_names: Set[str],
    module_cls_to_param_names: Dict[Type, str],
) -> Union[None, Set[str]]:
    """Resolve a scheduler config's filters to a set of parameter names.

    Args:
        scheduler_cfg: The config for the scheduler.
        parameter_names: The set of all parameter names which will be filtered.
        module_cls_to_param_names: Mapping from module class to owned params.

    Returns:
        None when the config declares neither a "param_names" nor a
        "module_cls_names" filter (marking it as a candidate default);
        otherwise the union of names selected by both filter kinds.
    """
    has_name_filter = "param_names" in scheduler_cfg
    has_cls_filter = "module_cls_names" in scheduler_cfg
    if not has_name_filter and not has_cls_filter:
        return None
    from_patterns = unix_param_pattern_to_parameter_names(
        scheduler_cfg.get("param_names"), parameter_names
    )
    from_classes = unix_module_cls_pattern_to_parameter_names(
        scheduler_cfg.get("module_cls_names"), module_cls_to_param_names
    )
    return from_patterns.union(from_classes)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def get_module_cls_to_param_names(
    model: nn.Module, param_allowlist: Set[str] = None
) -> Dict[Type, str]:
    """Map each module class in `model` to the parameter names it directly owns.

    A parameter is attributed only to its immediate parent module (recursion
    is off), so pure container modules map to an empty set.

    Args:
        model: Model to iterate over.
        param_allowlist: If specified, only these param names are recorded.
    """
    cls_to_params = {}
    for module_name, module in model.named_modules():
        # setdefault ensures even parameter-less classes appear in the map.
        owned = cls_to_params.setdefault(type(module), set())
        for param_name, _ in module.named_parameters(recurse=False):
            # Inlined get_full_parameter_name: prefix with the module path
            # unless this is the root module (empty name).
            full_param_name = (
                param_name if module_name == "" else f"{module_name}.{param_name}"
            )
            if param_allowlist is None or full_param_name in param_allowlist:
                owned.add(full_param_name)
    return cls_to_params
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def construct_optimizer(
    model: torch.nn.Module,
    optimizer_conf: Any,
    options_conf: Mapping[str, List] = None,
    param_group_modifiers_conf: List[Callable] = None,
    param_allowlist: Optional[Set[str]] = None,
    validate_param_groups=True,
) -> Optimizer:
    """
    Constructs a stochastic gradient descent or ADAM (or ADAMw) optimizer
    with momentum. i.e, constructs a torch.optim.Optimizer with zero-weight decay
    Batchnorm and/or no-update 1-D parameters support, based on the config.

    Supports wrapping the optimizer with Layer-wise Adaptive Rate Scaling
    (LARS): https://arxiv.org/abs/1708.03888

    Args:
        model: model to perform stochastic gradient descent
            optimization or ADAM optimization.
        optimizer_conf: Hydra config consisting a partial torch optimizer like SGD or
            ADAM, still missing the params argument which this function provides to
            produce the final optimizer
        options_conf: Hydra config mapping each optimizer option (e.g. "lr",
            "weight_decay") to a list of scheduler configs; when omitted, the
            optimizer is built over all allowed parameters with no schedulers.
        param_group_modifiers_conf: Optional user specified functions which can modify
            the final scheduler configs before the optimizer's param groups are built
        param_allowlist: The parameters to optimize. Parameters which are not part of
            this allowlist will be skipped.
        validate_param_groups: If enabled, valides that the produced param_groups don't
            overlap and cover all the model parameters.

    Returns:
        An `Optimizer` wrapper holding the instantiated torch optimizer and
        (when `options_conf` is given) its per-param-group schedulers.
    """
    if param_allowlist is None:
        # Default: optimize every parameter of the model.
        param_allowlist = {name for name, _ in model.named_parameters()}

    named_parameters = {
        name: param
        for name, param in model.named_parameters()
        if name in param_allowlist
    }

    if not options_conf:
        # No scheduled options: a single param group, no schedulers.
        optimizer = hydra.utils.instantiate(optimizer_conf, named_parameters.values())
        return Optimizer(optimizer)

    all_parameter_names = {
        name for name, _ in model.named_parameters() if name in param_allowlist
    }
    module_cls_to_all_param_names = get_module_cls_to_param_names(
        model, param_allowlist
    )

    scheduler_cfgs_per_option = hydra.utils.instantiate(options_conf)
    all_scheduler_cfgs = []
    for option, scheduler_cfgs in scheduler_cfgs_per_option.items():
        for config in scheduler_cfgs:
            # Resolve each config's name/class filters into a concrete set of
            # parameter names; None marks the config as the default candidate.
            config.option = option
            config.parameter_names = _unix_pattern_to_parameter_names(
                config, all_parameter_names, module_cls_to_all_param_names
            )
        # Ensure unclaimed parameters are assigned to a default config.
        set_default_parameters(scheduler_cfgs, all_parameter_names)
        all_scheduler_cfgs.append(scheduler_cfgs)

    if param_group_modifiers_conf:
        # User hooks (e.g. layer-decay) may rewrite the scheduler configs
        # before the param groups are materialized.
        for custom_param_modifier in param_group_modifiers_conf:
            custom_param_modifier = hydra.utils.instantiate(custom_param_modifier)
            all_scheduler_cfgs = custom_param_modifier(
                scheduler_cfgs=all_scheduler_cfgs, model=model
            )
    schedulers, param_groups = map_scheduler_cfgs_to_param_groups(
        all_scheduler_cfgs, named_parameters
    )
    if validate_param_groups:
        validate_param_group_params(param_groups, model)
    optimizer = hydra.utils.instantiate(optimizer_conf, param_groups)
    return Optimizer(optimizer, schedulers)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def get_full_parameter_name(module_name, param_name):
    """Join a module path and a parameter name into a dotted full name.

    The root module has an empty name, in which case the parameter name is
    already the full name.
    """
    return param_name if module_name == "" else f"{module_name}.{param_name}"
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
class GradientClipper:
    """Clip a model's gradients by global norm; works under DDP.

    A `max_norm` of None turns the clipper into a no-op.
    """

    def __init__(self, max_norm: float = 1.0, norm_type: int = 2):
        assert max_norm is None or isinstance(max_norm, (int, float))
        self.max_norm = None if max_norm is None else float(max_norm)
        self.norm_type = norm_type

    def __call__(self, model: nn.Module):
        """Clip `model`'s gradients in place; no-op when clipping is disabled."""
        if self.max_norm is None:
            return  # no-op
        nn.utils.clip_grad_norm_(
            model.parameters(), max_norm=self.max_norm, norm_type=self.norm_type
        )
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
class ValueScaler:
    """Wrap a scheduler and scale whatever it returns by a constant factor."""

    def __init__(self, scheduler, mult_val: float):
        self.scheduler = scheduler  # the wrapped scheduler callable
        self.mult_val = mult_val  # constant multiplier applied to its output

    def __call__(self, *args, **kwargs):
        return self.scheduler(*args, **kwargs) * self.mult_val
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def rgetattr(obj, rattrs: str = None):
    """
    Like getattr(), but follows dotted paths into nested objects.

    For `rattrs` of the form 'attr1.attr2', returns obj.attr1.attr2.
    A None path returns `obj` itself.
    """
    if rattrs is None:
        return obj
    current = obj
    for attr in rattrs.split("."):
        current = getattr(current, attr)
    return current
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def layer_decay_param_modifier(
    scheduler_cfgs: List[List[Dict]],
    model,
    layer_decay_value: float,
    layer_decay_min: Optional[float] = None,
    apply_to: Optional[str] = None,
    overrides: List[Dict] = (),
) -> List[List[Dict]]:
    """
    Apply layer-wise learning-rate decay by wrapping each "lr" scheduler in a
    ValueScaler whose multiplier depends on the parameter's layer id.

    Args
    - scheduler_cfgs: a list of omegaconf.ListConfigs.
        Each element in the list is a omegaconfg.DictConfig with the following structure
        {
            "scheduler": <some fvcore scheduler>
            "option": <value> possible options are "lr", "weight_decay" etc.
            "parameter_names": Set of str indicating param names that this scheduler applies to
        }
    - model: a model that implements a method `get_layer_id` that maps layer_name to an integer and
            and a method get_num_layers.
            Alternatively, use apply_to argument to select a specific component of the model.
    - layer_decay_value: float
    - layer_decay_min: min val for layer decay
    - apply_to: optional arg to select which component of the model to apply the the layer decay modifier to
    - overrides: to manually override lr for specific patterns. Is a list of dicts. Each dict, has keys "pattern", "value".
    Returns
    - scheduler_configs: same structure as the input, elements can be modified
    """
    # Resolve the (sub-)module that defines get_num_layers / get_layer_id.
    # apply_to=None keeps the whole model (rgetattr is a no-op for None).
    model = rgetattr(model, apply_to)
    # +1 so params outside `apply_to` get a slot past the last real layer
    # (they receive decay layer_decays[num_layers] == layer_decay_value**0 == 1).
    num_layers = model.get_num_layers() + 1
    layer_decays = [
        layer_decay_value ** (num_layers - i) for i in range(num_layers + 1)
    ]
    if layer_decay_min is not None:
        # Clamp so very deep stacks don't decay the earliest layers to ~0.
        layer_decays = [max(val, layer_decay_min) for val in layer_decays]
    final_scheduler_cfgs = []
    # scheduler_cfgs is a list of lists
    for scheduler_cfg_group in scheduler_cfgs:
        curr_cfg_group = []
        # scheduler_cfg_group is a list of dictionaries
        for scheduler_cfg in scheduler_cfg_group:
            # Only "lr" schedulers are decayed; others pass through untouched.
            if scheduler_cfg["option"] != "lr":
                curr_cfg_group.append(scheduler_cfg)
                continue
            # Need sorted so that the list of parameter names is deterministic and consistent
            # across re-runs of this job. Else it was causing issues with loading the optimizer
            # state during a job restart
            parameter_names = sorted(scheduler_cfg["parameter_names"])

            # Only want one cfg group per layer
            layer_cfg_groups = {}
            for param_name in parameter_names:
                # Default: the "extra" slot (scale 1.0) for params outside apply_to.
                layer_id = num_layers
                this_scale = layer_decays[layer_id]
                if param_name.startswith(apply_to):
                    layer_id = model.get_layer_id(param_name)
                    this_scale = layer_decays[layer_id]
                # Overrides
                # A matching override wins over the layer-derived scale; the
                # pattern string itself is used as the grouping key so all
                # params matching one pattern share a single cfg entry.
                for override in overrides:
                    if fnmatch.fnmatchcase(param_name, override["pattern"]):
                        this_scale = float(override["value"])
                        layer_id = override["pattern"]
                        break

                if layer_id not in layer_cfg_groups:
                    curr_param = {
                        "option": scheduler_cfg["option"],
                        "scheduler": ValueScaler(
                            scheduler_cfg["scheduler"], this_scale
                        ),
                        "parameter_names": {param_name},
                    }
                else:
                    curr_param = layer_cfg_groups[layer_id]
                    curr_param["parameter_names"].add(param_name)
                layer_cfg_groups[layer_id] = curr_param

            for layer_cfg in layer_cfg_groups.values():
                curr_cfg_group.append(layer_cfg)

        final_scheduler_cfgs.append(curr_cfg_group)
    return final_scheduler_cfgs
|
source_code/sam3/sam3/train/transforms/basic_for_api.py
ADDED
|
@@ -0,0 +1,1396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
Transforms and data augmentation for both image + bbox.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
|
| 9 |
+
import numbers
|
| 10 |
+
import random
|
| 11 |
+
from collections.abc import Sequence
|
| 12 |
+
from typing import Iterable
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torchvision.transforms as T
|
| 16 |
+
import torchvision.transforms.functional as F
|
| 17 |
+
import torchvision.transforms.v2.functional as Fv2
|
| 18 |
+
|
| 19 |
+
from PIL import Image as PILImage
|
| 20 |
+
|
| 21 |
+
from sam3.model.box_ops import box_xyxy_to_cxcywh, masks_to_boxes
|
| 22 |
+
from sam3.train.data.sam3_image_dataset import Datapoint
|
| 23 |
+
from torchvision.transforms import InterpolationMode
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def crop(
    datapoint,
    index,
    region,
    v2=False,
    check_validity=True,
    check_input_validity=True,
    recompute_box_from_mask=False,
):
    """Crop image ``index`` of ``datapoint`` to ``region`` and update all
    dependent annotations (object masks/boxes, query targets/boxes/points)
    in place.

    Args:
        datapoint: the sample to mutate (returned for chaining).
        index: which image in ``datapoint.images`` to crop.
        region: (top, left, height, width) crop rectangle.
        v2: use torchvision v2 functional ops (tensor images) instead of v1.
        check_validity: assert every object box still has positive area.
        check_input_validity: currently unused for boxes (the area check is
            commented out below); kept for interface compatibility.
        recompute_box_from_mask: derive boxes from the cropped masks instead
            of geometrically cropping the stored boxes.
    """
    if v2:
        # v2 functional requires integer crop params.
        rtop, rleft, rheight, rwidth = (int(round(r)) for r in region)
        datapoint.images[index].data = Fv2.crop(
            datapoint.images[index].data,
            top=rtop,
            left=rleft,
            height=rheight,
            width=rwidth,
        )
    else:
        datapoint.images[index].data = F.crop(datapoint.images[index].data, *region)

    i, j, h, w = region

    # should we do something wrt the original size?
    datapoint.images[index].size = (h, w)

    for obj in datapoint.images[index].objects:
        # crop the mask
        if obj.segment is not None:
            obj.segment = F.crop(obj.segment, int(i), int(j), int(h), int(w))

        # crop the bounding box
        if recompute_box_from_mask and obj.segment is not None:
            # here the boxes are still in XYXY format with absolute coordinates (they are
            # converted to CxCyWH with relative coordinates in basic_for_api.NormalizeAPI)
            obj.bbox, obj.area = get_bbox_xyxy_abs_coords_from_mask(obj.segment)
        else:
            if recompute_box_from_mask and obj.segment is None and obj.area > 0:
                logging.warning(
                    "Cannot recompute bounding box from mask since `obj.segment` is None. "
                    "Falling back to directly cropping from the input bounding box."
                )
            # Shift the box into crop coordinates, then clamp it to the crop
            # rectangle [0, w] x [0, h].
            boxes = obj.bbox.view(1, 4)
            max_size = torch.as_tensor([w, h], dtype=torch.float32)
            cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32)
            cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
            cropped_boxes = cropped_boxes.clamp(min=0)
            obj.area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
            obj.bbox = cropped_boxes.reshape(-1, 4)

    for query in datapoint.find_queries:
        if query.semantic_target is not None:
            query.semantic_target = F.crop(
                query.semantic_target, int(i), int(j), int(h), int(w)
            )
        if query.image_id == index and query.input_bbox is not None:
            # Same shift-and-clamp as for object boxes above.
            boxes = query.input_bbox
            max_size = torch.as_tensor([w, h], dtype=torch.float32)
            cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32)
            cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
            cropped_boxes = cropped_boxes.clamp(min=0)

            # cur_area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
            # if check_input_validity:
            #     assert (
            #         (cur_area > 0).all().item()
            #     ), "Some input box got cropped out by the crop transform"

            query.input_bbox = cropped_boxes.reshape(-1, 4)
        if query.image_id == index and query.input_points is not None:
            print(
                "Warning! Point cropping with this function may lead to unexpected results"
            )
            points = query.input_points
            # Unlike right-lower box edges, which are exclusive, the
            # point must be in [0, length-1], hence the -1
            max_size = torch.as_tensor([w, h], dtype=torch.float32) - 1
            # NOTE(review): points appear to be (x, y, label) triples — the
            # label channel is shifted by 0 and left unclamped; confirm.
            cropped_points = points - torch.as_tensor([j, i, 0], dtype=torch.float32)
            cropped_points[:, :, :2] = torch.min(cropped_points[:, :, :2], max_size)
            cropped_points[:, :, :2] = cropped_points[:, :, :2].clamp(min=0)
            query.input_points = cropped_points

    if check_validity:
        # Check that all boxes are still valid
        for obj in datapoint.images[index].objects:
            assert obj.area > 0, "Box {} has no area".format(obj.bbox)

    return datapoint
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def hflip(datapoint, index):
    """Horizontally flip image ``index`` of ``datapoint`` in place, mirroring
    object boxes/masks and query targets/boxes/points accordingly.

    Boxes are XYXY in absolute pixels: flipping swaps x0/x1 (column reorder
    [2, 1, 0, 3]) and maps x -> w - x. Returns the mutated datapoint.
    """
    datapoint.images[index].data = F.hflip(datapoint.images[index].data)

    # PIL-style .size is (width, height); assumes a PIL image here.
    w, h = datapoint.images[index].data.size
    for obj in datapoint.images[index].objects:
        boxes = obj.bbox.view(1, 4)
        # Swap x-min/x-max, negate, then shift by image width.
        boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor(
            [-1, 1, -1, 1]
        ) + torch.as_tensor([w, 0, w, 0])
        obj.bbox = boxes
        if obj.segment is not None:
            obj.segment = F.hflip(obj.segment)

    for query in datapoint.find_queries:
        if query.semantic_target is not None:
            query.semantic_target = F.hflip(query.semantic_target)
        if query.image_id == index and query.input_bbox is not None:
            boxes = query.input_bbox
            # Same x-mirroring as for object boxes.
            boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor(
                [-1, 1, -1, 1]
            ) + torch.as_tensor([w, 0, w, 0])
            query.input_bbox = boxes
        if query.image_id == index and query.input_points is not None:
            points = query.input_points
            # Mirror x only; y and the third channel stay unchanged.
            points = points * torch.as_tensor([-1, 1, 1]) + torch.as_tensor([w, 0, 0])
            query.input_points = points
    return datapoint
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def get_size_with_aspect_ratio(image_size, size, max_size=None):
    """Return the (height, width) reached by scaling ``image_size`` so that
    its shorter side equals ``size`` while preserving aspect ratio.

    ``image_size`` is (width, height). When ``max_size`` is given and the
    scaled longer side would exceed it, the target ``size`` is reduced so the
    longer side lands exactly on ``max_size``.
    """
    w, h = image_size
    if max_size is not None:
        shorter = float(min((w, h)))
        longer = float(max((w, h)))
        # Cap the longer side: shrink the target proportionally if needed.
        if longer / shorter * size > max_size:
            size = max_size * shorter / longer

    # Shorter side already matches the target — keep dimensions as-is.
    if (w <= h and w == size) or (h <= w and h == size):
        return (h, w)

    if w < h:
        out_w = int(round(size))
        out_h = int(round(size * h / w))
        return (out_h, out_w)

    out_h = int(round(size))
    out_w = int(round(size * w / h))
    return (out_h, out_w)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def resize(datapoint, index, size, max_size=None, square=False, v2=False):
    """Resize image ``index`` of ``datapoint`` and rescale all dependent
    annotations (boxes, areas, masks, query targets/boxes/points) in place.

    Args:
        datapoint: the sample to mutate (returned for chaining).
        index: which image in ``datapoint.images`` to resize.
        size: scalar shorter-side target, or a (w, h) pair.
        max_size: optional cap on the longer side (scalar ``size`` only).
        square: resize to (size, size), ignoring aspect ratio.
        v2: treat image data as tensors (torchvision v2) rather than PIL.
    """
    # size can be min_size (scalar) or (w, h) tuple

    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            # Caller passes (w, h); torchvision expects (h, w).
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)

    if square:
        size = size, size
    else:
        # (w, h): tensors expose (..., H, W), PIL exposes .size == (W, H).
        cur_size = (
            datapoint.images[index].data.size()[-2:][::-1]
            if v2
            else datapoint.images[index].data.size
        )
        size = get_size(cur_size, size, max_size)

    old_size = (
        datapoint.images[index].data.size()[-2:][::-1]
        if v2
        else datapoint.images[index].data.size
    )
    if v2:
        datapoint.images[index].data = Fv2.resize(
            datapoint.images[index].data, size, antialias=True
        )
    else:
        datapoint.images[index].data = F.resize(datapoint.images[index].data, size)

    new_size = (
        datapoint.images[index].data.size()[-2:][::-1]
        if v2
        else datapoint.images[index].data.size
    )
    # Actual per-axis scale factors, measured from the real output size so
    # rounding inside the resize op is accounted for.
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(new_size, old_size))
    ratio_width, ratio_height = ratios

    for obj in datapoint.images[index].objects:
        boxes = obj.bbox.view(1, 4)
        scaled_boxes = boxes * torch.as_tensor(
            [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32
        )
        obj.bbox = scaled_boxes
        obj.area *= ratio_width * ratio_height
        if obj.segment is not None:
            # Add batch/channel dims for F.resize, then drop them again.
            obj.segment = F.resize(obj.segment[None, None], size).squeeze()

    for query in datapoint.find_queries:
        if query.semantic_target is not None:
            query.semantic_target = F.resize(
                query.semantic_target[None, None], size
            ).squeeze()
        if query.image_id == index and query.input_bbox is not None:
            boxes = query.input_bbox
            scaled_boxes = boxes * torch.as_tensor(
                [ratio_width, ratio_height, ratio_width, ratio_height],
                dtype=torch.float32,
            )
            query.input_bbox = scaled_boxes
        if query.image_id == index and query.input_points is not None:
            points = query.input_points
            # Third channel (point label, presumably) is left unscaled.
            scaled_points = points * torch.as_tensor(
                [ratio_width, ratio_height, 1],
                dtype=torch.float32,
            )
            query.input_points = scaled_points

    h, w = size
    datapoint.images[index].size = (h, w)
    return datapoint
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def pad(datapoint, index, padding, v2=False):
    """Pad image ``index`` of ``datapoint`` and shift/pad all dependent
    annotations in place.

    Args:
        datapoint: the sample to mutate (returned for chaining).
        index: which image in ``datapoint.images`` to pad.
        padding: either (right, bottom) — pad only the bottom-right — or
            (left, top, right, bottom).
        v2: use torchvision v2 functional ops (tensor images) instead of v1.

    Note: boxes and points only need translating when padding has a
    left/top component, i.e. only in the 4-tuple case.
    """
    old_h, old_w = datapoint.images[index].size
    h, w = old_h, old_w
    if len(padding) == 2:
        # assumes that we only pad on the bottom right corners
        if v2:
            datapoint.images[index].data = Fv2.pad(
                datapoint.images[index].data, (0, 0, padding[0], padding[1])
            )
        else:
            datapoint.images[index].data = F.pad(
                datapoint.images[index].data, (0, 0, padding[0], padding[1])
            )
        h += padding[1]
        w += padding[0]
    else:
        if v2:
            # left, top, right, bottom
            datapoint.images[index].data = Fv2.pad(
                datapoint.images[index].data,
                (padding[0], padding[1], padding[2], padding[3]),
            )
        else:
            # left, top, right, bottom
            datapoint.images[index].data = F.pad(
                datapoint.images[index].data,
                (padding[0], padding[1], padding[2], padding[3]),
            )
        h += padding[1] + padding[3]
        w += padding[0] + padding[2]

    datapoint.images[index].size = (h, w)

    for obj in datapoint.images[index].objects:
        if len(padding) != 2:
            # Left/top padding shifts absolute box coordinates.
            obj.bbox += torch.as_tensor(
                [padding[0], padding[1], padding[0], padding[1]], dtype=torch.float32
            )
        if obj.segment is not None:
            if v2:
                if len(padding) == 2:
                    obj.segment = Fv2.pad(
                        obj.segment[None], (0, 0, padding[0], padding[1])
                    ).squeeze(0)
                else:
                    obj.segment = Fv2.pad(obj.segment[None], tuple(padding)).squeeze(0)
            else:
                if len(padding) == 2:
                    obj.segment = F.pad(obj.segment, (0, 0, padding[0], padding[1]))
                else:
                    obj.segment = F.pad(obj.segment, tuple(padding))

    for query in datapoint.find_queries:
        if query.semantic_target is not None:
            if v2:
                if len(padding) == 2:
                    query.semantic_target = Fv2.pad(
                        query.semantic_target[None, None],
                        (0, 0, padding[0], padding[1]),
                    ).squeeze()
                else:
                    query.semantic_target = Fv2.pad(
                        query.semantic_target[None, None], tuple(padding)
                    ).squeeze()
            else:
                if len(padding) == 2:
                    query.semantic_target = F.pad(
                        query.semantic_target[None, None],
                        (0, 0, padding[0], padding[1]),
                    ).squeeze()
                else:
                    query.semantic_target = F.pad(
                        query.semantic_target[None, None], tuple(padding)
                    ).squeeze()
        if query.image_id == index and query.input_bbox is not None:
            if len(padding) != 2:
                query.input_bbox += torch.as_tensor(
                    [padding[0], padding[1], padding[0], padding[1]],
                    dtype=torch.float32,
                )
        if query.image_id == index and query.input_points is not None:
            if len(padding) != 2:
                # Shift x/y only; the third point channel is untouched.
                query.input_points += torch.as_tensor(
                    [padding[0], padding[1], 0], dtype=torch.float32
                )

    return datapoint
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
class RandomSizeCropAPI:
    """Random-size crop transform for Datapoints.

    Samples a crop rectangle per datapoint (or per image) and applies it via
    ``crop``. When ``respect_boxes``/``respect_input_boxes`` is set, the crop
    is sampled so that no annotated box or input point is dropped entirely.
    """

    def __init__(
        self,
        min_size: int,
        max_size: int,
        respect_boxes: bool,
        consistent_transform: bool,
        respect_input_boxes: bool = True,
        v2: bool = False,
        recompute_box_from_mask: bool = False,
    ):
        # min_size/max_size bound the sampled crop's width and height.
        self.min_size = min_size
        self.max_size = max_size
        self.respect_boxes = respect_boxes  # if True we can't crop a box out
        self.respect_input_boxes = respect_input_boxes
        # consistent_transform: one shared crop for all images of a datapoint.
        self.consistent_transform = consistent_transform
        self.v2 = v2
        self.recompute_box_from_mask = recompute_box_from_mask

    def _sample_no_respect_boxes(self, img):
        # Plain uniform crop; may remove annotations entirely.
        w = random.randint(self.min_size, min(img.width, self.max_size))
        h = random.randint(self.min_size, min(img.height, self.max_size))
        return T.RandomCrop.get_params(img, (h, w))

    def _sample_respect_boxes(self, img, boxes, points, min_box_size=10.0):
        """
        Assure that no box or point is dropped via cropping, though portions
        of boxes may be removed.
        """
        if len(boxes) == 0 and len(points) == 0:
            return self._sample_no_respect_boxes(img)

        if self.v2:
            img_height, img_width = img.size()[-2:]
        else:
            img_width, img_height = img.size

        minW, minH, maxW, maxH = (
            min(img_width, self.min_size),
            min(img_height, self.min_size),
            min(img_width, self.max_size),
            min(img_height, self.max_size),
        )

        # The crop box must extend one pixel beyond points to the bottom/right
        # to assure the exclusive box contains the points.
        # minX/minY: the crop's right/bottom edge must reach at least here so
        # every box keeps >= min_box_size pixels and every point is inside.
        minX = (
            torch.cat([boxes[:, 0] + min_box_size, points[:, 0] + 1], dim=0)
            .max()
            .item()
        )
        minY = (
            torch.cat([boxes[:, 1] + min_box_size, points[:, 1] + 1], dim=0)
            .max()
            .item()
        )
        minX = min(img_width, minX)
        minY = min(img_height, minY)
        # maxX/maxY: the crop's left/top edge must not pass this, or some
        # box/point would fall outside the crop.
        maxX = torch.cat([boxes[:, 2] - min_box_size, points[:, 0]], dim=0).min().item()
        maxY = torch.cat([boxes[:, 3] - min_box_size, points[:, 1]], dim=0).min().item()
        maxX = max(0.0, maxX)
        maxY = max(0.0, maxY)
        # The crop must span from the left-edge bound to the right-edge bound,
        # so its size is at least the gap between them.
        minW = max(minW, minX - maxX)
        minH = max(minH, minY - maxY)
        w = random.uniform(minW, max(minW, maxW))
        h = random.uniform(minH, max(minH, maxH))
        if minX > maxX:
            # i = random.uniform(max(0, minX - w + 1), max(maxX, max(0, minX - w + 1)))
            i = random.uniform(max(0, minX - w), max(maxX, max(0, minX - w)))
        else:
            i = random.uniform(
                max(0, minX - w + 1), max(maxX - 1, max(0, minX - w + 1))
            )
        if minY > maxY:
            # j = random.uniform(max(0, minY - h + 1), max(maxY, max(0, minY - h + 1)))
            j = random.uniform(max(0, minY - h), max(maxY, max(0, minY - h)))
        else:
            j = random.uniform(
                max(0, minY - h + 1), max(maxY - 1, max(0, minY - h + 1))
            )

        # Returned as (top, left, height, width) — the order ``crop`` expects.
        return [j, i, h, w]

    def __call__(self, datapoint, **kwargs):
        if self.respect_boxes or self.respect_input_boxes:
            if self.consistent_transform:
                # Check that all the images are the same size
                w, h = datapoint.images[0].data.size
                for img in datapoint.images:
                    assert img.data.size == (w, h)

                all_boxes = []
                # Getting all boxes in all the images
                if self.respect_boxes:
                    all_boxes += [
                        obj.bbox.view(-1, 4)
                        for img in datapoint.images
                        for obj in img.objects
                    ]
                # Get all the boxes in the find queries
                if self.respect_input_boxes:
                    all_boxes += [
                        q.input_bbox.view(-1, 4)
                        for q in datapoint.find_queries
                        if q.input_bbox is not None
                    ]
                if all_boxes:
                    all_boxes = torch.cat(all_boxes, 0)
                else:
                    all_boxes = torch.empty(0, 4)

                all_points = [
                    q.input_points.view(-1, 3)[:, :2]
                    for q in datapoint.find_queries
                    if q.input_points is not None
                ]
                if all_points:
                    all_points = torch.cat(all_points, 0)
                else:
                    all_points = torch.empty(0, 2)

                # One crop sampled against the union of all constraints,
                # applied identically to every image.
                crop_param = self._sample_respect_boxes(
                    datapoint.images[0].data, all_boxes, all_points
                )
                for i in range(len(datapoint.images)):
                    datapoint = crop(
                        datapoint,
                        i,
                        crop_param,
                        v2=self.v2,
                        check_validity=self.respect_boxes,
                        check_input_validity=self.respect_input_boxes,
                        recompute_box_from_mask=self.recompute_box_from_mask,
                    )
                return datapoint
            else:
                # Independent crop per image, constrained only by that
                # image's own boxes (points are taken from all queries).
                for i in range(len(datapoint.images)):
                    all_boxes = []
                    # Get all boxes in the current image
                    if self.respect_boxes:
                        all_boxes += [
                            obj.bbox.view(-1, 4) for obj in datapoint.images[i].objects
                        ]
                    # Get all the boxes in the find queries that correspond to this image
                    if self.respect_input_boxes:
                        all_boxes += [
                            q.input_bbox.view(-1, 4)
                            for q in datapoint.find_queries
                            if q.image_id == i and q.input_bbox is not None
                        ]
                    if all_boxes:
                        all_boxes = torch.cat(all_boxes, 0)
                    else:
                        all_boxes = torch.empty(0, 4)

                    all_points = [
                        q.input_points.view(-1, 3)[:, :2]
                        for q in datapoint.find_queries
                        if q.input_points is not None
                    ]
                    if all_points:
                        all_points = torch.cat(all_points, 0)
                    else:
                        all_points = torch.empty(0, 2)

                    crop_param = self._sample_respect_boxes(
                        datapoint.images[i].data, all_boxes, all_points
                    )
                    datapoint = crop(
                        datapoint,
                        i,
                        crop_param,
                        v2=self.v2,
                        check_validity=self.respect_boxes,
                        check_input_validity=self.respect_input_boxes,
                        recompute_box_from_mask=self.recompute_box_from_mask,
                    )
                return datapoint
        else:
            if self.consistent_transform:
                # Check that all the images are the same size
                w, h = datapoint.images[0].data.size
                for img in datapoint.images:
                    assert img.data.size == (w, h)

                crop_param = self._sample_no_respect_boxes(datapoint.images[0].data)
                for i in range(len(datapoint.images)):
                    datapoint = crop(
                        datapoint,
                        i,
                        crop_param,
                        v2=self.v2,
                        check_validity=self.respect_boxes,
                        check_input_validity=self.respect_input_boxes,
                        recompute_box_from_mask=self.recompute_box_from_mask,
                    )
                return datapoint
            else:
                for i in range(len(datapoint.images)):
                    crop_param = self._sample_no_respect_boxes(datapoint.images[i].data)
                    datapoint = crop(
                        datapoint,
                        i,
                        crop_param,
                        v2=self.v2,
                        check_validity=self.respect_boxes,
                        check_input_validity=self.respect_input_boxes,
                        recompute_box_from_mask=self.recompute_box_from_mask,
                    )
                return datapoint
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
class CenterCropAPI:
    """Deterministically crop the centered ``size`` region from every frame.

    Args:
        size: (crop_height, crop_width) of the output crop.
        consistent_transform: if True, all frames must share one size and get
            the same crop window; otherwise the window is computed per frame.
        recompute_box_from_mask: forwarded to ``crop`` so boxes can be rebuilt
            from the cropped masks.
    """

    def __init__(self, size, consistent_transform, recompute_box_from_mask=False):
        self.size = size
        self.consistent_transform = consistent_transform
        self.recompute_box_from_mask = recompute_box_from_mask

    def _sample_crop(self, image_width, image_height):
        """Return the centered (top, left, height, width) crop window."""
        crop_height, crop_width = self.size
        crop_top = int(round((image_height - crop_height) / 2.0))
        crop_left = int(round((image_width - crop_width) / 2.0))
        return crop_top, crop_left, crop_height, crop_width

    def __call__(self, datapoint, **kwargs):
        if self.consistent_transform:
            # Check that all the images are the same size.
            w, h = datapoint.images[0].data.size
            for img in datapoint.images:
                # Fix: compare the underlying image data size (PIL (w, h)),
                # consistent with the size read above and with the sibling
                # crop transforms in this file (which assert on img.data.size).
                assert img.data.size == (w, h)

            crop_top, crop_left, crop_height, crop_width = self._sample_crop(w, h)
            for i in range(len(datapoint.images)):
                datapoint = crop(
                    datapoint,
                    i,
                    (crop_top, crop_left, crop_height, crop_width),
                    recompute_box_from_mask=self.recompute_box_from_mask,
                )
            return datapoint

        for i in range(len(datapoint.images)):
            w, h = datapoint.images[i].data.size
            crop_top, crop_left, crop_height, crop_width = self._sample_crop(w, h)
            datapoint = crop(
                datapoint,
                i,
                (crop_top, crop_left, crop_height, crop_width),
                recompute_box_from_mask=self.recompute_box_from_mask,
            )

        return datapoint
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
class RandomHorizontalFlip:
    """Flip frames horizontally at random with probability ``p`` per decision."""

    def __init__(self, consistent_transform, p=0.5):
        self.p = p
        self.consistent_transform = consistent_transform

    def __call__(self, datapoint, **kwargs):
        num_frames = len(datapoint.images)
        if self.consistent_transform:
            # One coin toss decides the flip for every frame.
            if random.random() < self.p:
                for frame_idx in range(num_frames):
                    datapoint = hflip(datapoint, frame_idx)
            return datapoint
        # Otherwise: an independent coin toss per frame.
        for frame_idx in range(num_frames):
            if random.random() < self.p:
                datapoint = hflip(datapoint, frame_idx)
        return datapoint
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
class RandomResizeAPI:
    """Resize frames to a target size drawn uniformly from ``sizes``."""

    def __init__(
        self, sizes, consistent_transform, max_size=None, square=False, v2=False
    ):
        # A single int means a one-element candidate list.
        if isinstance(sizes, int):
            sizes = (sizes,)
        assert isinstance(sizes, Iterable)
        self.sizes = list(sizes)
        self.max_size = max_size
        self.square = square
        self.consistent_transform = consistent_transform
        self.v2 = v2

    def __call__(self, datapoint, **kwargs):
        if self.consistent_transform:
            # Draw one target size and apply it to every frame.
            target = random.choice(self.sizes)
            for idx in range(len(datapoint.images)):
                datapoint = resize(
                    datapoint, idx, target, self.max_size, square=self.square, v2=self.v2
                )
            return datapoint
        # Draw a fresh target size for each frame.
        for idx in range(len(datapoint.images)):
            target = random.choice(self.sizes)
            datapoint = resize(
                datapoint, idx, target, self.max_size, square=self.square, v2=self.v2
            )
        return datapoint
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
class ScheduledRandomResizeAPI(RandomResizeAPI):
    """RandomResizeAPI whose candidate sizes follow an epoch-based scheduler."""

    def __init__(self, size_scheduler, consistent_transform, square=False):
        self.size_scheduler = size_scheduler
        # Seed the parent with the epoch-0 schedule; refreshed on every call.
        init_params = self.size_scheduler(epoch_num=0)
        super().__init__(
            init_params["sizes"],
            consistent_transform,
            max_size=init_params["max_size"],
            square=square,
        )

    def __call__(self, datapoint, **kwargs):
        assert "epoch" in kwargs, "Param scheduler needs to know the current epoch"
        # Refresh the size candidates from the schedule for this epoch.
        sched = self.size_scheduler(kwargs["epoch"])
        self.sizes = sched["sizes"]
        self.max_size = sched["max_size"]
        return super().__call__(datapoint, **kwargs)
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
class RandomPadAPI:
    """Pad each frame by random (x, y) amounts drawn from [0, max_pad]."""

    def __init__(self, max_pad, consistent_transform):
        self.max_pad = max_pad
        self.consistent_transform = consistent_transform

    def _sample_pad(self):
        """Draw independent horizontal and vertical pad amounts."""
        return random.randint(0, self.max_pad), random.randint(0, self.max_pad)

    def __call__(self, datapoint, **kwargs):
        if self.consistent_transform:
            # One padding amount shared by every frame.
            pads = self._sample_pad()
            for frame_idx in range(len(datapoint.images)):
                datapoint = pad(datapoint, frame_idx, pads)
            return datapoint

        # Fresh padding amount per frame.
        for frame_idx in range(len(datapoint.images)):
            pads = self._sample_pad()
            datapoint = pad(datapoint, frame_idx, pads)
        return datapoint
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
class PadToSizeAPI:
    """Pad every frame up to a ``size`` x ``size`` canvas.

    If ``bottom_right`` is True the image is anchored at the top-left corner and
    all padding goes to the bottom/right edges; otherwise the split of the
    required padding across the four sides is sampled uniformly at random.
    """

    def __init__(self, size, consistent_transform, bottom_right=False, v2=False):
        self.size = size
        self.consistent_transform = consistent_transform
        self.v2 = v2
        self.bottom_right = bottom_right

    def _sample_pad(self, w, h):
        """Randomly split the required padding across the four sides.

        Returns (pad_left, pad_top, pad_right, pad_bottom); the image must not
        be larger than the target size.
        """
        pad_x = self.size - w
        pad_y = self.size - h
        assert pad_x >= 0 and pad_y >= 0
        pad_left = random.randint(0, pad_x)
        pad_right = pad_x - pad_left
        pad_top = random.randint(0, pad_y)
        pad_bottom = pad_y - pad_top
        return pad_left, pad_top, pad_right, pad_bottom

    def __call__(self, datapoint, **kwargs):
        if self.consistent_transform:
            # Check that all the images are the same size.
            w, h = datapoint.images[0].data.size
            for img in datapoint.images:
                # Fix: compare the underlying image data size (PIL (w, h)),
                # consistent with the size read above and with the other
                # transforms in this file that assert on img.data.size.
                assert img.data.size == (w, h)
            if self.bottom_right:
                pad_right = self.size - w
                pad_bottom = self.size - h
                padding = (pad_right, pad_bottom)
            else:
                padding = self._sample_pad(w, h)
            for i in range(len(datapoint.images)):
                datapoint = pad(datapoint, i, padding, v2=self.v2)
            return datapoint

        for i, img in enumerate(datapoint.images):
            w, h = img.data.size
            if self.bottom_right:
                pad_right = self.size - w
                pad_bottom = self.size - h
                padding = (pad_right, pad_bottom)
            else:
                padding = self._sample_pad(w, h)
            datapoint = pad(datapoint, i, padding, v2=self.v2)
        return datapoint
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
class RandomMosaicVideoAPI:
    """With probability ``prob``, turn every frame into a grid_h x grid_w mosaic."""

    def __init__(self, prob=0.15, grid_h=2, grid_w=2, use_random_hflip=False):
        self.prob = prob
        self.grid_h = grid_h
        self.grid_w = grid_w
        self.use_random_hflip = use_random_hflip

    def __call__(self, datapoint, **kwargs):
        if random.random() > self.prob:
            return datapoint

        # select a random location to place the target mask in the mosaic
        target_grid_y = random.randint(0, self.grid_h - 1)
        target_grid_x = random.randint(0, self.grid_w - 1)
        # whether to flip each grid in the mosaic horizontally
        should_hflip = (
            torch.rand(self.grid_h, self.grid_w) < 0.5
            if self.use_random_hflip
            else torch.zeros(self.grid_h, self.grid_w, dtype=torch.bool)
        )
        for frame_idx in range(len(datapoint.images)):
            datapoint = random_mosaic_frame(
                datapoint,
                frame_idx,
                grid_h=self.grid_h,
                grid_w=self.grid_w,
                target_grid_y=target_grid_y,
                target_grid_x=target_grid_x,
                should_hflip=should_hflip,
            )

        return datapoint
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
def random_mosaic_frame(
    datapoint,
    index,
    grid_h,
    grid_w,
    target_grid_y,
    target_grid_x,
    should_hflip,
):
    """Tile frame ``index`` into a ``grid_h`` x ``grid_w`` mosaic of downsized copies.

    The frame image is replaced by a mosaic built from downsized (and optionally
    horizontally flipped) copies of itself. Object masks are downsized and pasted
    only into the (``target_grid_y``, ``target_grid_x``) cell, so each object
    ends up in exactly one mosaic tile.

    Args:
        datapoint: datapoint whose ``images[index]`` is modified in place.
        index: frame index to transform.
        grid_h, grid_w: mosaic grid dimensions.
        target_grid_y, target_grid_x: grid cell that receives the object masks.
        should_hflip: (grid_h, grid_w) bool tensor; True flips that tile.

    Returns:
        The (mutated) datapoint.
    """
    # Step 1: downsize the images and paste them into a mosaic
    image_data = datapoint.images[index].data
    # Handle both PIL images and CHW tensors.
    is_pil = isinstance(image_data, PILImage.Image)
    if is_pil:
        H_im = image_data.height
        W_im = image_data.width
        image_data_output = PILImage.new("RGB", (W_im, H_im))
    else:
        H_im = image_data.size(-2)
        W_im = image_data.size(-1)
        image_data_output = torch.zeros_like(image_data)

    # Cache resized copies: grid cells frequently share the same downsized shape.
    downsize_cache = {}
    for grid_y in range(grid_h):
        for grid_x in range(grid_w):
            # Integer cell boundaries; cells may differ by one pixel in size.
            y_offset_b = grid_y * H_im // grid_h
            x_offset_b = grid_x * W_im // grid_w
            y_offset_e = (grid_y + 1) * H_im // grid_h
            x_offset_e = (grid_x + 1) * W_im // grid_w
            H_im_downsize = y_offset_e - y_offset_b
            W_im_downsize = x_offset_e - x_offset_b

            if (H_im_downsize, W_im_downsize) in downsize_cache:
                image_data_downsize = downsize_cache[(H_im_downsize, W_im_downsize)]
            else:
                image_data_downsize = F.resize(
                    image_data,
                    size=(H_im_downsize, W_im_downsize),
                    interpolation=InterpolationMode.BILINEAR,
                    antialias=True,  # antialiasing for downsizing
                )
                downsize_cache[(H_im_downsize, W_im_downsize)] = image_data_downsize
            if should_hflip[grid_y, grid_x].item():
                image_data_downsize = F.hflip(image_data_downsize)

            if is_pil:
                image_data_output.paste(image_data_downsize, (x_offset_b, y_offset_b))
            else:
                image_data_output[:, y_offset_b:y_offset_e, x_offset_b:x_offset_e] = (
                    image_data_downsize
                )

    datapoint.images[index].data = image_data_output

    # Step 2: downsize the masks and paste them into the target grid of the mosaic
    # (note that we don't scale input/target boxes since they are not used in TA)
    for obj in datapoint.images[index].objects:
        if obj.segment is None:
            continue
        assert obj.segment.shape == (H_im, W_im) and obj.segment.dtype == torch.uint8
        segment_output = torch.zeros_like(obj.segment)

        # Boundaries of the target cell (same integer arithmetic as step 1).
        target_y_offset_b = target_grid_y * H_im // grid_h
        target_x_offset_b = target_grid_x * W_im // grid_w
        target_y_offset_e = (target_grid_y + 1) * H_im // grid_h
        target_x_offset_e = (target_grid_x + 1) * W_im // grid_w
        target_H_im_downsize = target_y_offset_e - target_y_offset_b
        target_W_im_downsize = target_x_offset_e - target_x_offset_b

        segment_downsize = F.resize(
            obj.segment[None, None],
            size=(target_H_im_downsize, target_W_im_downsize),
            interpolation=InterpolationMode.BILINEAR,
            antialias=True,  # antialiasing for downsizing
        )[0, 0]
        # Flip the mask iff its target tile was flipped in step 1.
        if should_hflip[target_grid_y, target_grid_x].item():
            segment_downsize = F.hflip(segment_downsize[None, None])[0, 0]

        segment_output[
            target_y_offset_b:target_y_offset_e, target_x_offset_b:target_x_offset_e
        ] = segment_downsize
        obj.segment = segment_output

    return datapoint
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
class ScheduledPadToSizeAPI(PadToSizeAPI):
    """PadToSizeAPI whose target canvas size follows an epoch-based scheduler."""

    def __init__(self, size_scheduler, consistent_transform):
        self.size_scheduler = size_scheduler
        # Seed the parent with the epoch-0 schedule; refreshed on every call.
        super().__init__(self.size_scheduler(epoch_num=0)["sizes"], consistent_transform)

    def __call__(self, datapoint, **kwargs):
        assert "epoch" in kwargs, "Param scheduler needs to know the current epoch"
        # NOTE(review): __init__ reads the schedule's "sizes" key while this
        # reads "resolution" — presumably the scheduler provides both; verify
        # against the scheduler implementation.
        self.size = self.size_scheduler(kwargs["epoch"])["resolution"]
        return super().__call__(datapoint, **kwargs)
|
| 844 |
+
|
| 845 |
+
|
| 846 |
+
class IdentityAPI:
    """No-op transform: hands the datapoint back untouched."""

    def __call__(self, datapoint, **kwargs):
        return datapoint
|
| 849 |
+
|
| 850 |
+
|
| 851 |
+
class RandomSelectAPI:
    """Apply ``transforms1`` with probability ``p``, otherwise ``transforms2``.

    A missing transform defaults to the identity transform.
    """

    def __init__(self, transforms1=None, transforms2=None, p=0.5):
        self.transforms1 = transforms1 or IdentityAPI()
        self.transforms2 = transforms2 or IdentityAPI()
        self.p = p

    def __call__(self, datapoint, **kwargs):
        chosen = self.transforms1 if random.random() < self.p else self.transforms2
        return chosen(datapoint, **kwargs)
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
class ToTensorAPI:
    """Convert each frame's image data from PIL to a tensor."""

    def __init__(self, v2=False):
        # v2: use the torchvision.transforms.v2 conversion op.
        self.v2 = v2

    def __call__(self, datapoint: Datapoint, **kwargs):
        for frame in datapoint.images:
            if self.v2:
                frame.data = Fv2.to_image_tensor(frame.data)
            else:
                frame.data = F.to_tensor(frame.data)
        return datapoint
|
| 881 |
+
|
| 882 |
+
|
| 883 |
+
class NormalizeAPI:
    """Normalize pixel values and convert boxes/points to relative coordinates.

    Images are normalized with ``mean``/``std``. Object and query boxes are
    converted from absolute XYXY to relative (cx, cy, w, h); query points of
    the form (x, y, label) are scaled to relative x/y with the label left
    untouched.
    """

    def __init__(self, mean, std, v2=False):
        self.mean = mean
        self.std = std
        # v2: use torchvision.transforms.v2 functional ops (requires an
        # explicit dtype conversion before normalizing).
        self.v2 = v2

    def __call__(self, datapoint: Datapoint, **kwargs):
        for img in datapoint.images:
            if self.v2:
                # v2 tensors may still be uint8; convert to float first.
                img.data = Fv2.convert_image_dtype(img.data, torch.float32)
                img.data = Fv2.normalize(img.data, mean=self.mean, std=self.std)
            else:
                img.data = F.normalize(img.data, mean=self.mean, std=self.std)
            for obj in img.objects:
                boxes = obj.bbox
                cur_h, cur_w = img.data.shape[-2:]
                # Absolute XYXY -> relative (cx, cy, w, h) in [0, 1].
                boxes = box_xyxy_to_cxcywh(boxes)
                boxes = boxes / torch.tensor(
                    [cur_w, cur_h, cur_w, cur_h], dtype=torch.float32
                )
                obj.bbox = boxes

        for query in datapoint.find_queries:
            if query.input_bbox is not None:
                boxes = query.input_bbox
                cur_h, cur_w = datapoint.images[query.image_id].data.shape[-2:]
                # Same conversion as object boxes above.
                boxes = box_xyxy_to_cxcywh(boxes)
                boxes = boxes / torch.tensor(
                    [cur_w, cur_h, cur_w, cur_h], dtype=torch.float32
                )
                query.input_bbox = boxes
            if query.input_points is not None:
                points = query.input_points
                cur_h, cur_w = datapoint.images[query.image_id].data.shape[-2:]
                # Points are (x, y, label); dividing by 1.0 keeps the label as-is.
                points = points / torch.tensor([cur_w, cur_h, 1.0], dtype=torch.float32)
                query.input_points = points

        return datapoint
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
class ComposeAPI:
    """Apply a sequence of datapoint transforms in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, datapoint, **kwargs):
        for transform in self.transforms:
            datapoint = transform(datapoint, **kwargs)
        return datapoint

    def __repr__(self):
        # One indented line per transform, wrapped in "ClassName( ... )".
        body = "".join(f"\n    {t}" for t in self.transforms)
        return self.__class__.__name__ + "(" + body + "\n)"
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
class RandomGrayscale:
    """Convert frames to 3-channel grayscale at random with probability ``p``."""

    def __init__(self, consistent_transform, p=0.5):
        self.p = p
        self.consistent_transform = consistent_transform
        self.Grayscale = T.Grayscale(num_output_channels=3)

    def __call__(self, datapoint: Datapoint, **kwargs):
        if self.consistent_transform:
            # One coin toss decides grayscale for every frame.
            if random.random() < self.p:
                for frame in datapoint.images:
                    frame.data = self.Grayscale(frame.data)
            return datapoint
        # Otherwise: an independent toss per frame.
        for frame in datapoint.images:
            if random.random() < self.p:
                frame.data = self.Grayscale(frame.data)
        return datapoint
|
| 957 |
+
|
| 958 |
+
|
| 959 |
+
class ColorJitter:
    """Randomly jitter brightness, contrast, saturation, and hue of each frame.

    Scalar arguments are expanded to ranges the same way torchvision's
    ``T.ColorJitter`` does: a factor ``f`` becomes ``[max(0, 1 - f), 1 + f]``
    and a hue ``h`` becomes ``[-h, h]``. If ``consistent_transform`` is True,
    one set of factors (and one op order) is sampled and applied to every
    frame; otherwise fresh factors are sampled per frame.
    """

    def __init__(self, consistent_transform, brightness, contrast, saturation, hue):
        self.consistent_transform = consistent_transform
        self.brightness = (
            brightness
            if isinstance(brightness, list)
            else [max(0, 1 - brightness), 1 + brightness]
        )
        self.contrast = (
            contrast
            if isinstance(contrast, list)
            else [max(0, 1 - contrast), 1 + contrast]
        )
        self.saturation = (
            saturation
            if isinstance(saturation, list)
            else [max(0, 1 - saturation), 1 + saturation]
        )
        # Hue may be None (disabled), a list range, or a scalar half-width.
        self.hue = hue if isinstance(hue, list) or hue is None else ([-hue, hue])

    def __call__(self, datapoint: Datapoint, **kwargs):
        if self.consistent_transform:
            # Sample one set of jitter factors (and op order) for all frames.
            (
                fn_idx,
                brightness_factor,
                contrast_factor,
                saturation_factor,
                hue_factor,
            ) = T.ColorJitter.get_params(
                self.brightness, self.contrast, self.saturation, self.hue
            )
        for img in datapoint.images:
            if not self.consistent_transform:
                # Re-sample factors (and op order) independently for this frame.
                (
                    fn_idx,
                    brightness_factor,
                    contrast_factor,
                    saturation_factor,
                    hue_factor,
                ) = T.ColorJitter.get_params(
                    self.brightness, self.contrast, self.saturation, self.hue
                )
            # fn_idx is a random permutation of the four ops; apply in that
            # order, skipping any op whose factor was not sampled (None).
            for fn_id in fn_idx:
                if fn_id == 0 and brightness_factor is not None:
                    img.data = F.adjust_brightness(img.data, brightness_factor)
                elif fn_id == 1 and contrast_factor is not None:
                    img.data = F.adjust_contrast(img.data, contrast_factor)
                elif fn_id == 2 and saturation_factor is not None:
                    img.data = F.adjust_saturation(img.data, saturation_factor)
                elif fn_id == 3 and hue_factor is not None:
                    img.data = F.adjust_hue(img.data, hue_factor)
        return datapoint
|
| 1012 |
+
|
| 1013 |
+
|
| 1014 |
+
class RandomAffine:
    def __init__(
        self,
        degrees,
        consistent_transform,
        scale=None,
        translate=None,
        shear=None,
        image_mean=(123, 116, 103),
        log_warning=True,
        num_tentatives=1,
        image_interpolation="bicubic",
    ):
        """
        Randomly warp frames (rotation/translation/scale/shear).

        The mask is required for this transform. If ``consistent_transform`` is
        True, the same random affine is applied to all frames and masks.
        """
        # Scalar degrees/shear are expanded to symmetric ranges [-x, x].
        self.degrees = degrees if isinstance(degrees, list) else ([-degrees, degrees])
        self.scale = scale
        self.shear = (
            shear if isinstance(shear, list) else ([-shear, shear] if shear else None)
        )
        self.translate = translate
        # Fill value for image regions the warp exposes.
        self.fill_img = image_mean
        self.consistent_transform = consistent_transform
        self.log_warning = log_warning
        # Number of attempts before giving up (an attempt fails when the warped
        # mask of the first frame ends up empty).
        self.num_tentatives = num_tentatives

        if image_interpolation == "bicubic":
            self.image_interpolation = InterpolationMode.BICUBIC
        elif image_interpolation == "bilinear":
            self.image_interpolation = InterpolationMode.BILINEAR
        else:
            raise NotImplementedError

    def __call__(self, datapoint: Datapoint, **kwargs):
        # Retry until a sampled affine keeps the first-frame mask non-empty.
        for _tentative in range(self.num_tentatives):
            res = self.transform_datapoint(datapoint)
            if res is not None:
                return res

        if self.log_warning:
            logging.warning(
                f"Skip RandomAffine for zero-area mask in first frame after {self.num_tentatives} tentatives"
            )
        # All attempts failed: return the datapoint untransformed.
        return datapoint

    def transform_datapoint(self, datapoint: Datapoint):
        """Apply one sampled affine to all frames; return None if the first
        frame's warped mask is empty (the caller will retry)."""
        _, height, width = F.get_dimensions(datapoint.images[0].data)
        img_size = [width, height]

        if self.consistent_transform:
            # Create a random affine transformation
            affine_params = T.RandomAffine.get_params(
                degrees=self.degrees,
                translate=self.translate,
                scale_ranges=self.scale,
                shears=self.shear,
                img_size=img_size,
            )

        for img_idx, img in enumerate(datapoint.images):
            this_masks = [
                obj.segment.unsqueeze(0) if obj.segment is not None else None
                for obj in img.objects
            ]
            if not self.consistent_transform:
                # if not consistent we create a new affine params for every frame&mask pair Create a random affine transformation
                affine_params = T.RandomAffine.get_params(
                    degrees=self.degrees,
                    translate=self.translate,
                    scale_ranges=self.scale,
                    shears=self.shear,
                    img_size=img_size,
                )

            transformed_bboxes, transformed_masks = [], []
            for i in range(len(img.objects)):
                if this_masks[i] is None:
                    transformed_masks.append(None)
                    # Dummy bbox for a dummy target
                    transformed_bboxes.append(torch.tensor([[0, 0, 0, 0]]))
                else:
                    # Warp the mask with nearest-neighbor so it stays binary.
                    transformed_mask = F.affine(
                        this_masks[i],
                        *affine_params,
                        interpolation=InterpolationMode.NEAREST,
                        fill=0.0,
                    )
                    if img_idx == 0 and transformed_mask.max() == 0:
                        # We are dealing with a video and the object is not visible in the first frame
                        # Return the datapoint without transformation
                        return None
                    transformed_bbox = masks_to_boxes(transformed_mask)
                    transformed_bboxes.append(transformed_bbox)
                    transformed_masks.append(transformed_mask.squeeze())

            # Commit boxes/masks only after all objects warped successfully.
            for i in range(len(img.objects)):
                img.objects[i].bbox = transformed_bboxes[i]
                img.objects[i].segment = transformed_masks[i]

            img.data = F.affine(
                img.data,
                *affine_params,
                interpolation=self.image_interpolation,
                fill=self.fill_img,
            )
        return datapoint
|
| 1122 |
+
|
| 1123 |
+
|
| 1124 |
+
class RandomResizedCrop:
    def __init__(
        self,
        consistent_transform,
        size,
        scale=None,
        ratio=None,
        log_warning=True,
        num_tentatives=4,
        keep_aspect_ratio=False,
    ):
        """
        Randomly crop a region and resize it to ``size``.

        The mask is required for this transform. If ``consistent_transform`` is
        True, the same random resized crop is applied to all frames and masks.
        """
        # Normalize ``size`` to an (h, w) pair.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        elif len(size) != 2:
            raise ValueError("Please provide only two dimensions (h, w) for size.")
        else:
            self.size = size

        # Defaults mirror torchvision's RandomResizedCrop.
        self.scale = scale if scale is not None else (0.08, 1.0)
        self.ratio = ratio if ratio is not None else (3.0 / 4.0, 4.0 / 3.0)
        self.consistent_transform = consistent_transform
        self.log_warning = log_warning
        # Number of attempts before giving up (an attempt fails when the
        # cropped mask of the first frame ends up empty).
        self.num_tentatives = num_tentatives
        self.keep_aspect_ratio = keep_aspect_ratio

    def __call__(self, datapoint: Datapoint, **kwargs):
        # Retry until a sampled crop keeps the first-frame mask non-empty.
        for _tentative in range(self.num_tentatives):
            res = self.transform_datapoint(datapoint)
            if res is not None:
                return res

        if self.log_warning:
            logging.warning(
                f"Skip RandomResizeCrop for zero-area mask in first frame after {self.num_tentatives} tentatives"
            )
        return datapoint

    def transform_datapoint(self, datapoint: Datapoint):
        """Apply one sampled resized crop to all frames; return None if the
        first frame's cropped mask is empty (the caller will retry)."""
        if self.keep_aspect_ratio:
            # Rescale the aspect-ratio range by the original image's ratio.
            # NOTE(review): assumes images[0].size is ordered so that
            # size[1] / size[0] gives the intended ratio — verify against the
            # image wrapper, since other transforms in this file store (h, w).
            original_size = datapoint.images[0].size
            original_ratio = original_size[1] / original_size[0]
            ratio = [r * original_ratio for r in self.ratio]
        else:
            ratio = self.ratio

        if self.consistent_transform:
            # Create a random crop transformation
            crop_params = T.RandomResizedCrop.get_params(
                img=datapoint.images[0].data,
                scale=self.scale,
                ratio=ratio,
            )

        for img_idx, img in enumerate(datapoint.images):
            if not self.consistent_transform:
                # Create a random crop transformation
                crop_params = T.RandomResizedCrop.get_params(
                    img=img.data,
                    scale=self.scale,
                    ratio=ratio,
                )

            this_masks = [
                obj.segment.unsqueeze(0) if obj.segment is not None else None
                for obj in img.objects
            ]

            transformed_bboxes, transformed_masks = [], []
            for i in range(len(img.objects)):
                if this_masks[i] is None:
                    transformed_masks.append(None)
                    # Dummy bbox for a dummy target
                    transformed_bboxes.append(torch.tensor([[0, 0, 0, 0]]))
                else:
                    # Nearest-neighbor keeps the mask binary after resizing.
                    transformed_mask = F.resized_crop(
                        this_masks[i],
                        *crop_params,
                        size=self.size,
                        interpolation=InterpolationMode.NEAREST,
                    )
                    if img_idx == 0 and transformed_mask.max() == 0:
                        # We are dealing with a video and the object is not visible in the first frame
                        # Return the datapoint without transformation
                        return None
                    transformed_masks.append(transformed_mask.squeeze())
                    transformed_bbox = masks_to_boxes(transformed_mask)
                    transformed_bboxes.append(transformed_bbox)

            # Set the new boxes and masks if all transformed masks and boxes are good.
            for i in range(len(img.objects)):
                img.objects[i].bbox = transformed_bboxes[i]
                img.objects[i].segment = transformed_masks[i]

            img.data = F.resized_crop(
                img.data,
                *crop_params,
                size=self.size,
                interpolation=InterpolationMode.BILINEAR,
            )
        return datapoint
|
| 1230 |
+
|
| 1231 |
+
|
| 1232 |
+
class ResizeToMaxIfAbove:
    """Downscale all frames so the longer side is at most ``max_size``.

    Frames already within the limit are returned unchanged. Masks are resized
    with nearest-neighbor interpolation, and each image's recorded size is
    updated to the new (h, w).
    """

    def __init__(
        self,
        max_size=None,
    ):
        # NOTE: max_size must be set to an int; the default None would fail the
        # comparisons in __call__.
        self.max_size = max_size

    def __call__(self, datapoint: Datapoint, **kwargs):
        _, height, width = F.get_dimensions(datapoint.images[0].data)

        if height <= self.max_size and width <= self.max_size:
            # The original frames are small enough
            return datapoint
        elif height >= width:
            # Pin the longer side to max_size and keep the aspect ratio.
            new_height = self.max_size
            new_width = int(round(self.max_size * width / height))
        else:
            new_height = int(round(self.max_size * height / width))
            new_width = self.max_size

        size = new_height, new_width

        for index in range(len(datapoint.images)):
            datapoint.images[index].data = F.resize(datapoint.images[index].data, size)

            for obj in datapoint.images[index].objects:
                # Robustness fix: objects may carry no mask — skip them, as the
                # other transforms in this file do (e.g. random_mosaic_frame).
                if obj.segment is None:
                    continue
                obj.segment = F.resize(
                    obj.segment[None, None],
                    size,
                    interpolation=InterpolationMode.NEAREST,
                ).squeeze()

            h, w = size
            datapoint.images[index].size = (h, w)
        return datapoint
|
| 1268 |
+
|
| 1269 |
+
|
| 1270 |
+
def get_bbox_xyxy_abs_coords_from_mask(mask):
    """Get the bounding box (XYXY format w/ absolute coordinates) of a binary mask."""
    assert mask.dim() == 2
    # Indices of rows/columns containing at least one foreground pixel.
    occupied_rows = torch.any(mask, dim=1).nonzero().view(-1)
    occupied_cols = torch.any(mask, dim=0).nonzero().view(-1)
    if occupied_rows.numel() == 0:
        # Empty mask: degenerate zero box with zero area.
        return torch.zeros(1, 4, dtype=torch.float32), 0.0
    ymin, ymax = occupied_rows.min(), occupied_rows.max()
    xmin, xmax = occupied_cols.min(), occupied_cols.max()
    box = torch.tensor([xmin, ymin, xmax, ymax], dtype=torch.float32).view(1, 4)
    area = float((ymax - ymin) * (xmax - xmin))
    return box, area
|
| 1287 |
+
|
| 1288 |
+
|
| 1289 |
+
class MotionBlur:
    """Apply a random directional (motion) blur to frames with probability ``p``."""

    def __init__(self, kernel_size=5, consistent_transform=True, p=0.5):
        assert kernel_size % 2 == 1, "Kernel size must be odd."
        self.kernel_size = kernel_size
        self.consistent_transform = consistent_transform
        self.p = p

    def __call__(self, datapoint: Datapoint, **kwargs):
        # One coin toss gates the whole datapoint.
        if random.random() >= self.p:
            return datapoint
        if self.consistent_transform:
            # Generate a single motion blur kernel for all images
            kernel = self._generate_motion_blur_kernel()
        for img in datapoint.images:
            if not self.consistent_transform:
                # Generate a new motion blur kernel for each image
                kernel = self._generate_motion_blur_kernel()
            img.data = self._apply_motion_blur(img.data, kernel)

        return datapoint

    def _generate_motion_blur_kernel(self):
        """Build a normalized kernel with ones along a randomly chosen
        direction (horizontal, vertical, or main diagonal)."""
        kernel = torch.zeros((self.kernel_size, self.kernel_size))
        direction = random.choice(["horizontal", "vertical", "diagonal"])
        if direction == "horizontal":
            kernel[self.kernel_size // 2, :] = 1.0
        elif direction == "vertical":
            kernel[:, self.kernel_size // 2] = 1.0
        elif direction == "diagonal":
            for i in range(self.kernel_size):
                kernel[i, i] = 1.0
        # Normalize so the blur preserves overall brightness.
        kernel /= kernel.sum()
        return kernel

    def _apply_motion_blur(self, image, kernel):
        """Depthwise-convolve the image with the blur kernel.

        NOTE(review): this always returns a PIL image, even when the input was
        a tensor — confirm callers expect that conversion.
        """
        if isinstance(image, PILImage.Image):
            image = F.to_tensor(image)
        channels = image.shape[0]
        kernel = kernel.to(image.device).unsqueeze(0).unsqueeze(0)
        # groups=channels with a per-channel kernel copy -> depthwise conv.
        blurred_image = torch.nn.functional.conv2d(
            image.unsqueeze(0),
            kernel.repeat(channels, 1, 1, 1),
            padding=self.kernel_size // 2,
            groups=channels,
        )
        return F.to_pil_image(blurred_image.squeeze(0))
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
class LargeScaleJitter:
    def __init__(
        self,
        scale_range=(0.1, 2.0),
        aspect_ratio_range=(0.75, 1.33),
        crop_size=(640, 640),
        consistent_transform=True,
        p=0.5,
    ):
        """
        Args:
            scale_range (tuple): Range of scaling factors (min_scale, max_scale).
            aspect_ratio_range (tuple): Range of aspect ratios
                (min_aspect_ratio, max_aspect_ratio).
            crop_size (tuple): Target size of the cropped region (width, height).
            consistent_transform (bool): Whether to apply the same transformation
                across all frames.
            p (float): Probability of applying the transformation.
        """
        self.scale_range = scale_range
        self.aspect_ratio_range = aspect_ratio_range
        self.crop_size = crop_size
        self.consistent_transform = consistent_transform
        self.p = p

    def _sample_scale_and_ratio(self):
        """Sample a scale factor uniformly and an aspect ratio log-uniformly."""
        log_ratio = torch.log(torch.tensor(self.aspect_ratio_range))
        scale_factor = torch.empty(1).uniform_(*self.scale_range).item()
        aspect_ratio = torch.exp(
            torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
        ).item()
        return scale_factor, aspect_ratio

    def __call__(self, datapoint: Datapoint, **kwargs):
        if random.random() >= self.p:
            return datapoint

        # Sample a single scale factor and aspect ratio for all frames
        scale_factor, aspect_ratio = self._sample_scale_and_ratio()

        for idx, img in enumerate(datapoint.images):
            if not self.consistent_transform:
                # Sample a new scale factor and aspect ratio for each frame
                scale_factor, aspect_ratio = self._sample_scale_and_ratio()

            # Compute the dimensions of the jittered crop
            original_width, original_height = img.data.size
            target_area = original_width * original_height * scale_factor
            crop_width = int(round((target_area * aspect_ratio) ** 0.5))
            crop_height = int(round((target_area / aspect_ratio) ** 0.5))

            # Randomly select the top-left corner of the crop
            crop_x = random.randint(0, max(0, original_width - crop_width))
            crop_y = random.randint(0, max(0, original_height - crop_height))

            # Extract the cropped region
            datapoint = crop(datapoint, idx, (crop_x, crop_y, crop_width, crop_height))

            # Resize the cropped region to the target crop size
            datapoint = resize(datapoint, idx, self.crop_size)

        return datapoint
|
source_code/sam3/sam3/train/transforms/point_sampling.py
ADDED
|
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from PIL import Image as PILImage
|
| 7 |
+
from pycocotools import mask as mask_util
|
| 8 |
+
|
| 9 |
+
from sam3.train.data.sam3_image_dataset import Datapoint
|
| 10 |
+
from torchvision.ops import masks_to_boxes
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def sample_points_from_rle(rle, n_points, mode, box=None, normalize=True):
    """
    Sample random points from a mask provided in COCO RLE format.
    'mode' is in ["centered", "random_mask", "random_box"]:
        "centered": points are sampled farthest from the mask edges and each other
        "random_mask": points are sampled uniformly from the mask
        "random_box": points are sampled uniformly from the annotation's box
    'box' must be provided if 'mode' is "random_box".
    If 'normalize' is true, points are in [0,1], relative to mask h,w.

    Returns an (n_points, 3) array of (x, y, label) rows.
    """
    # decode() may return a non-contiguous array; make it contiguous for
    # downstream consumers (e.g. cv2 in center_positive_sample).
    mask = np.ascontiguousarray(mask_util.decode(rle))
    points = sample_points_from_mask(mask, n_points, mode, box)

    if normalize:
        h, w = mask.shape
        # Normalize x by width and y by height; leave the label column untouched.
        norm = np.array([w, h, 1.0])[None, :]
        points = points / norm

    return points
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def sample_points_from_mask(mask, n_points, mode, box=None):
    """Dispatch to the point sampler selected by `mode`.

    `mode` is one of "centered", "random_mask", "random_box"; `box` is
    required only for "random_box". Returns (n_points, 3) rows of (x, y, label).
    """
    if mode == "centered":
        return center_positive_sample(mask, n_points)
    if mode == "random_mask":
        return uniform_positive_sample(mask, n_points)
    if mode == "random_box":
        assert box is not None, "'random_box' mode requires a provided box."
        return uniform_sample_from_box(mask, box, n_points)
    raise ValueError(f"Unknown point sampling mode {mode}.")
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def uniform_positive_sample(mask, n_points):
    """
    Samples positive points uniformly from the mask. Only integer pixel
    values are sampled. Returns (n_points, 3) rows of (x, y, 1).
    """
    # Sampling directly from the uncompressed RLE would be faster but is
    # likely unnecessary.
    coords_yx = np.argwhere(mask)  # one (row, col) pair per foreground pixel
    assert len(coords_yx) > 0, "Can't sample positive points from an empty mask."
    picks = np.random.randint(low=0, high=len(coords_yx), size=n_points)
    coords_xy = coords_yx[picks][:, ::-1]  # (y, x) -> (x, y)

    positive_labels = np.ones((len(coords_xy), 1))
    return np.concatenate([coords_xy, positive_labels], axis=1)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def center_positive_sample(mask, n_points):
    """
    Samples points farthest from mask edges (by distance transform)
    and subsequent points also farthest from each other. Each new point
    sampled is treated as an edge for future points. Edges of the image are
    treated as edges of the mask.

    Returns an (n_points, 3) array of (x, y, label) rows with label == 1.
    """

    # Pad mask by one pixel on each end to assure distance transform
    # avoids edges
    # assumes mask is uint8 (cv2.distanceTransform requires 8-bit input) -- TODO confirm
    padded_mask = np.pad(mask, 1)

    points = []
    for _ in range(n_points):
        # NOTE(review): this checks the original `mask`, not `padded_mask`, so
        # it only guards against an initially empty mask; if n_points exceeds
        # the number of foreground pixels, later iterations operate on a fully
        # zeroed padded mask -- confirm this is intended.
        assert np.max(mask) > 0, "Can't sample positive points from an empty mask."
        # Distance of every foreground pixel to the nearest background pixel.
        dist = cv2.distanceTransform(padded_mask, cv2.DIST_L2, 0)
        point = np.unravel_index(dist.argmax(), dist.shape)
        # Mark selected point as background so next point avoids it
        padded_mask[point[0], point[1]] = 0
        points.append(point[::-1])  # (y, x) -> (x, y)

    points = np.stack(points, axis=0)
    points = points - 1  # Subtract left/top padding of 1
    labels = np.ones((len(points), 1))
    points = np.concatenate([points, labels], axis=1)

    return points
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def uniform_sample_from_box(mask, box, n_points):
    """
    Sample points uniformly from the provided box. The points' labels
    are determined by the provided mask. Does not guarantee a positive
    point is sampled. The box is assumed unnormalized in XYXY format.
    Points are sampled at integer values.
    """
    # Since lower/right edges are exclusive, ceil can be applied to all edges
    x0, y0, x1, y1 = np.ceil(box)

    xs = np.random.randint(low=x0, high=x1, size=n_points)
    ys = np.random.randint(low=y0, high=y1, size=n_points)
    # Label each point by whether it lands on the mask (0 or 1).
    point_labels = mask[ys, xs]
    return np.stack([xs, ys, point_labels], axis=1)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def rescale_box_xyxy(box, factor, imsize=None):
    """
    Rescale a box providing in unnormalized XYXY format, fixing the center.
    If imsize is provided, clamp to the image.
    """
    x0, y0, x1, y1 = box[0], box[1], box[2], box[3]
    center_x, center_y = (x0 + x1) / 2, (y0 + y1) / 2

    # Grow/shrink the half-extents around the fixed center.
    half_w = factor * (x1 - x0) / 2
    half_h = factor * (y1 - y0) / 2

    left, right = center_x - half_w, center_x + half_w
    top, bottom = center_y - half_h, center_y + half_h

    if imsize is not None:
        # imsize is (h, w); clamp x into [0, w] and y into [0, h].
        left = min(max(left, 0), imsize[1])
        right = min(max(right, 0), imsize[1])
        top = min(max(top, 0), imsize[0])
        bottom = min(max(bottom, 0), imsize[0])

    return [left, top, right, bottom]
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def noise_box(box, im_size, box_noise_std, box_noise_max, min_box_area):
    """Jitter an XYXY box tensor with Gaussian noise scaled by its side lengths.

    im_size is (h, w). The noised box is clamped to the image; if the result's
    area is <= min_box_area, the original box is returned unchanged. A
    box_noise_std <= 0 disables noising entirely.
    """
    if box_noise_std <= 0.0:
        return box
    jitter = box_noise_std * torch.randn(size=(4,))
    width, height = box[2] - box[0], box[3] - box[1]
    # Noise is expressed relative to the corresponding side length.
    jitter = jitter * torch.tensor([width, height, width, height])
    if box_noise_max is not None:
        jitter = torch.clamp(jitter, -box_noise_max, box_noise_max)
    noised = box + jitter
    # Clamp to maximum image size
    upper = torch.tensor([im_size[1], im_size[0], im_size[1], im_size[0]])
    noised = torch.maximum(noised, torch.zeros_like(noised))
    noised = torch.minimum(noised, upper)
    # Degenerate result: fall back to the original box.
    if (noised[2] - noised[0]) * (noised[3] - noised[1]) <= min_box_area:
        return box

    return noised
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class RandomGeometricInputsAPI:
    """
    For geometric queries, replaces the input box or points with a random
    one sampled from the GT mask. Segments must be provided for objects
    that are targets of geometric queries, and must be binary masks. Existing
    point and box queries in the datapoint will be ignored and completely replaced.
    Will sample points and boxes in XYXY format in absolute pixel space.

    Geometry queries are currently determined by taking any query whose
    query text is a set value.

    Args:
        num_points (int or (int, int)): how many points to sample. If a tuple,
            sample a random number of points uniformly over the inclusive range.
        box_chance (float): fraction of time a box is sampled. A box will replace
            one sampled point.
        box_noise_std (float): if greater than 0, add noise to the sampled boxes
            with this std. Noise is relative to the length of the box side.
        box_noise_max (int): if not none, truncate any box noise larger than this
            in terms of absolute pixels.
        resample_box_from_mask (bool): if True, any sampled box will be determined
            by finding the extrema of the provided mask. If False, the bbox provided
            in the target object will be used.
        point_sample_mode (str): In ["centered", "random_mask", "random_box"],
            controlling how points are sampled:
            "centered": points are sampled farthest from the mask edges and each other
            "random_mask": points are sampled uniformly from the mask
            "random_box": points are sampled uniformly from the annotation's box
            Note that "centered" may be too slow for on-line generation.
        geometric_query_str (str): what string in query_text indicates a
            geometry query.
        minimum_box_area (float): sampled boxes with area this size or smaller after
            noising will use the original box instead. It is the input's responsibility
            to avoid original boxes that violate necessary area bounds.
        concat_points (bool): if True, any sampled points will be added to existing
            ones instead of replacing them.
    """

    def __init__(
        self,
        num_points,
        box_chance,
        box_noise_std=0.0,
        box_noise_max=None,
        minimum_box_area=0.0,
        resample_box_from_mask=False,
        point_sample_mode="random_mask",
        sample_box_scale_factor=1.0,
        geometric_query_str="geometric",
        concat_points=False,
    ):
        self.num_points = num_points
        if not isinstance(self.num_points, int):
            # Convert from inclusive range to exclusive range expected by torch.
            # Build a new tuple rather than mutating in place: the previous
            # `self.num_points[1] += 1` modified the caller's list (so repeated
            # construction from a shared config kept widening the range) and
            # raised TypeError when a tuple was passed.
            self.num_points = (self.num_points[0], self.num_points[1] + 1)
        self.box_chance = box_chance
        self.box_noise_std = box_noise_std
        self.box_noise_max = box_noise_max
        self.minimum_box_area = minimum_box_area
        self.resample_box_from_mask = resample_box_from_mask
        self.point_sample_mode = point_sample_mode
        assert point_sample_mode in [
            "centered",
            "random_mask",
            "random_box",
        ], "Unknown point sample mode."
        self.geometric_query_str = geometric_query_str
        self.concat_points = concat_points
        self.sample_box_scale_factor = sample_box_scale_factor

    def _sample_num_points_and_if_box(self):
        """Draw the point count for this query and whether a box stands in for one point."""
        if isinstance(self.num_points, tuple):
            n_points = torch.randint(
                low=self.num_points[0], high=self.num_points[1], size=(1,)
            ).item()
        else:
            n_points = self.num_points
        if self.box_chance > 0.0:
            use_box = torch.rand(size=(1,)).item() < self.box_chance
            n_points -= int(use_box)  # box stands in for one point
        else:
            use_box = False
        return n_points, use_box

    def _get_original_box(self, target_object):
        """Return the reference box, optionally recomputed from the mask extrema."""
        if not self.resample_box_from_mask:
            return target_object.bbox
        mask = target_object.segment
        return masks_to_boxes(mask[None, :, :])[0]

    def _get_target_object(self, datapoint, query):
        """Fetch the single object this geometric query targets."""
        img = datapoint.images[query.image_id]
        targets = query.object_ids_output
        assert (
            len(targets) == 1
        ), "Geometric queries only support a single target object."
        target_idx = targets[0]
        return img.objects[target_idx]

    def __call__(self, datapoint, **kwargs):
        for query in datapoint.find_queries:
            if query.query_text != self.geometric_query_str:
                continue

            target_object = self._get_target_object(datapoint, query)
            n_points, use_box = self._sample_num_points_and_if_box()
            box = self._get_original_box(target_object)

            mask = target_object.segment
            if n_points > 0:
                # FIXME: The conversion to numpy and back to reuse code
                # is awkward, but this is all in the dataloader worker anyway
                # on CPU and so I don't think it should matter.
                if self.sample_box_scale_factor != 1.0:
                    sample_box = rescale_box_xyxy(
                        box.numpy(), self.sample_box_scale_factor, mask.shape
                    )
                else:
                    sample_box = box.numpy()
                input_points = sample_points_from_mask(
                    mask.numpy(),
                    n_points,
                    self.point_sample_mode,
                    sample_box,
                )
                input_points = torch.as_tensor(input_points)
                input_points = input_points[None, :, :]
                if self.concat_points and query.input_points is not None:
                    input_points = torch.cat([query.input_points, input_points], dim=1)
            else:
                input_points = query.input_points if self.concat_points else None

            if use_box:
                # assumes the image object exposes PIL-style .size (w, h) -- TODO confirm
                w, h = datapoint.images[query.image_id].size
                input_box = noise_box(
                    box,
                    (h, w),
                    box_noise_std=self.box_noise_std,
                    box_noise_max=self.box_noise_max,
                    min_box_area=self.minimum_box_area,
                )
                input_box = input_box[None, :]
            else:
                input_box = query.input_bbox if self.concat_points else None

            query.input_points = input_points
            query.input_bbox = input_box

        return datapoint
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
class RandomizeInputBbox:
    """
    Simplified version of the geometric transform that only deals with input boxes
    """

    def __init__(
        self,
        box_noise_std=0.0,
        box_noise_max=None,
        minimum_box_area=0.0,
    ):
        # Std of the Gaussian jitter, relative to each box side length (see noise_box).
        self.box_noise_std = box_noise_std
        # Optional absolute-pixel cap on the jitter magnitude.
        self.box_noise_max = box_noise_max
        # Boxes whose noised area is at or below this keep their original coordinates.
        self.minimum_box_area = minimum_box_area

    def __call__(self, datapoint: Datapoint, **kwargs):
        """Noise every input bbox of every find-query in place; returns the datapoint."""
        for query in datapoint.find_queries:
            if query.input_bbox is None:
                continue

            # Determine the image extent regardless of PIL vs tensor storage.
            img = datapoint.images[query.image_id].data
            if isinstance(img, PILImage.Image):
                w, h = img.size
            else:
                assert isinstance(img, torch.Tensor)
                h, w = img.shape[-2:]

            for box_id in range(query.input_bbox.shape[0]):
                # noise_box may return its input unchanged (when noising is
                # disabled or collapses the box); the row assignment is then
                # effectively a no-op. The (1, 4) result broadcasts into the
                # (4,) row slice.
                query.input_bbox[box_id, :] = noise_box(
                    query.input_bbox[box_id, :].view(4),
                    (h, w),
                    box_noise_std=self.box_noise_std,
                    box_noise_max=self.box_noise_max,
                    min_box_area=self.minimum_box_area,
                ).view(1, 4)

        return datapoint
|
source_code/sam3/sam3/train/utils/distributed.py
ADDED
|
@@ -0,0 +1,585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
|
| 6 |
+
# This source code is licensed under the license found in the
|
| 7 |
+
# LICENSE file in the root directory of this source tree.
|
| 8 |
+
|
| 9 |
+
import datetime
|
| 10 |
+
import functools
|
| 11 |
+
import io
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import random
|
| 15 |
+
import tempfile
|
| 16 |
+
import time
|
| 17 |
+
from typing import Any, Callable, List, Tuple
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
import torch.autograd as autograd
|
| 21 |
+
import torch.distributed as dist
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Default to GPU 0
|
| 25 |
+
_cuda_device_index: int = 0
|
| 26 |
+
|
| 27 |
+
# Setting _cuda_device_index to -1 internally implies that we should use CPU
|
| 28 |
+
_CPU_DEVICE_INDEX = -1
|
| 29 |
+
_PRIMARY_RANK = 0
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    """

    if dist.get_backend() == "nccl":
        # Increase timeout from 1800 sec to 43200 sec (12 hr) to avoid some
        # processes being much slower than others causing a timeout (which can
        # happen in relation to LVIS class mAP evaluation).
        timeout = 43200
        return dist.new_group(
            backend="gloo",
            timeout=datetime.timedelta(seconds=timeout),
        )

    # Non-NCCL default backends already handle CPU tensors, so the
    # world group can be reused directly.
    return dist.group.WORLD
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def is_main_process():
    """True when this process has distributed rank 0 (the main process)."""
    rank = get_rank()
    return rank == 0
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def all_gather_via_filesys(data, filesys_save_dir=None, gather_to_rank_0_only=False):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors), similar to
    `all_gather` above, but using filesystem instead of collective ops.

    If gather_to_rank_0_only is True, only rank 0 will load the gathered object list
    (and other ranks will have an empty list).
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    print("gathering via files")
    cpu_group = _get_global_gloo_group()

    # if unspecified, we will save to the current python file dir
    if filesys_save_dir is not None:
        save_dir = filesys_save_dir
    elif "EXP_DIR" in os.environ:
        save_dir = os.environ["EXP_DIR"]
    else:
        # try the same directory where the code is stored
        # (filesys_save_dir is known to be None on this branch, so the previous
        # `filesys_save_dir or ...` was redundant)
        save_dir = os.path.dirname(__file__)
    save_dir = os.path.join(save_dir, "all_gather_via_filesys")
    if is_main_process():
        os.makedirs(save_dir, exist_ok=True)

    # use a timestamp and salt to distinguish different all_gather
    timestamp = int(time.time()) if is_main_process() else 0
    salt = random.randint(0, 2**31 - 1) if is_main_process() else 0
    # broadcast the timestamp and salt across ranks
    # (all-reduce will do the broadcasting since only rank 0 is non-zero)
    timestamp_and_salt = torch.tensor([timestamp, salt], dtype=torch.long)
    dist.all_reduce(timestamp_and_salt, group=cpu_group)
    timestamp, salt = timestamp_and_salt.tolist()

    # save the data to a file on the disk
    rank_save = get_rank()
    save_data_filename = f"data_to_gather_{timestamp}_{salt}_{rank_save}.pkl"
    save_data_path = os.path.join(save_dir, save_data_filename)
    assert not os.path.exists(save_data_path), f"{save_data_path} already exists"
    torch.save(data, save_data_path)
    dist.barrier(group=cpu_group)

    # read the data from the files
    data_list = []
    if rank_save == 0 or not gather_to_rank_0_only:
        for rank_load in range(world_size):
            load_data_filename = f"data_to_gather_{timestamp}_{salt}_{rank_load}.pkl"
            load_data_path = os.path.join(save_dir, load_data_filename)
            # Fixed: the error message previously reported save_data_path
            # (this rank's own file) instead of the file that is missing.
            assert os.path.exists(load_data_path), f"cannot read {load_data_path}"
            data_list.append(torch.load(load_data_path, weights_only=False))
    dist.barrier(group=cpu_group)

    # delete the saved file
    os.remove(save_data_path)
    return data_list
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def all_gather(data, force_cpu=False, force_filesys=False, filesys_save_dir=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
        force_cpu: gather over the cached gloo (CPU) group instead of the default backend
        force_filesys: gather via the shared filesystem instead of collective ops
        filesys_save_dir: directory used by the filesystem-based gather
    Returns:
        list[data]: list of data gathered from each rank
    """

    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # Environment-variable escape hatches for clusters where collective
    # gathers of large pickled objects are unreliable.
    if os.getenv("MDETR_FILESYS_REDUCE_RANK_0_ONLY") == "1":
        return all_gather_via_filesys(
            data, filesys_save_dir, gather_to_rank_0_only=True
        )

    if os.getenv("MDETR_FILESYS_REDUCE") == "1" or force_filesys:
        return all_gather_via_filesys(data, filesys_save_dir)

    cpu_group = None
    if os.getenv("MDETR_CPU_REDUCE") == "1" or force_cpu:
        cpu_group = _get_global_gloo_group()

    # Serialize the object into a flat byte tensor on the gather device.
    buffer = io.BytesIO()
    torch.save(data, buffer)
    data_view = buffer.getbuffer()
    device = "cuda" if cpu_group is None else "cpu"
    tensor = torch.ByteTensor(data_view).to(device)

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
    size_list = [
        torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)
    ]
    if cpu_group is None:
        dist.all_gather(size_list, local_size)
    else:
        print("gathering on cpu")
        dist.all_gather(size_list, local_size, group=cpu_group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    assert isinstance(local_size.item(), int)
    local_size = int(local_size.item())

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
    if local_size != max_size:
        padding = torch.empty(
            size=(max_size - local_size,), dtype=torch.uint8, device=device
        )
        tensor = torch.cat((tensor, padding), dim=0)
    if cpu_group is None:
        dist.all_gather(tensor_list, tensor)
    else:
        dist.all_gather(tensor_list, tensor, group=cpu_group)

    # Strip each rank's padding back to its true byte length and unpickle.
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        tensor = torch.split(tensor, [size, max_size - size], dim=0)[0]
        buffer = io.BytesIO(tensor.cpu().numpy())
        obj = torch.load(buffer, weights_only=False)
        data_list.append(obj)

    return data_list
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def convert_to_distributed_tensor(tensor: torch.Tensor) -> Tuple[torch.Tensor, str]:
    """
    For some backends, such as NCCL, communication only works if the
    tensor is on the GPU. This helper function converts to the correct
    device and returns the tensor + original device.

    Args:
        tensor: tensor about to take part in a collective op.

    Returns:
        Tuple of (possibly CUDA-moved tensor, original device tag
        "cpu" or "gpu").
    """
    orig_device = "cpu" if not tensor.is_cuda else "gpu"
    if (
        torch.distributed.is_available()
        # Fix: get_backend() raises RuntimeError when the default process
        # group is not initialized, so check is_initialized() first.
        and torch.distributed.is_initialized()
        and torch.distributed.get_backend() == torch.distributed.Backend.NCCL
        and not tensor.is_cuda
    ):
        tensor = tensor.cuda()
    return (tensor, orig_device)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def convert_to_normal_tensor(tensor: torch.Tensor, orig_device: str) -> torch.Tensor:
    """
    For some backends, such as NCCL, communication only works if the
    tensor is on the GPU. This converts the tensor back to original device.
    """
    moved_for_comms = tensor.is_cuda and orig_device == "cpu"
    return tensor.cpu() if moved_for_comms else tensor
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def is_distributed_training_run() -> bool:
    """True iff torch.distributed is available, initialized, and world size > 1."""
    dist_mod = torch.distributed
    if not dist_mod.is_available():
        return False
    if not dist_mod.is_initialized():
        return False
    return dist_mod.get_world_size() > 1
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def is_primary() -> bool:
    """
    Returns True if this is rank 0 of a distributed training job OR if it is
    a single trainer job. Otherwise False.
    """
    # get_rank() falls back to 0 when torch.distributed is not initialized,
    # so single-process runs are also treated as primary.
    return get_rank() == _PRIMARY_RANK
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
    """
    Wrapper over torch.distributed.all_reduce that computes the element-wise
    mean over all processes: a SUM all-reduce followed by division by the
    world size.
    """

    def _divide_by_world_size(reduced: torch.Tensor) -> torch.Tensor:
        # Applied only after the distributed SUM has completed.
        return reduced / torch.distributed.get_world_size()

    return all_reduce_op(
        tensor, torch.distributed.ReduceOp.SUM, _divide_by_world_size
    )
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def all_reduce_sum(tensor: torch.Tensor) -> torch.Tensor:
    """
    Wrapper over torch.distributed.all_reduce for performing sum
    reduction of tensor over all processes in both distributed /
    non-distributed scenarios.
    """
    # Identity in single-process runs (all_reduce_op skips the collective).
    return all_reduce_op(tensor, torch.distributed.ReduceOp.SUM)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def all_reduce_min(tensor: torch.Tensor) -> torch.Tensor:
    """
    Wrapper over torch.distributed.all_reduce for performing min
    reduction of tensor over all processes in both distributed /
    non-distributed scenarios.
    """
    # Identity in single-process runs (all_reduce_op skips the collective).
    return all_reduce_op(tensor, torch.distributed.ReduceOp.MIN)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def all_reduce_max(tensor: torch.Tensor) -> torch.Tensor:
    """
    Wrapper over torch.distributed.all_reduce for performing max
    reduction of tensor over all processes in both distributed /
    non-distributed scenarios.
    """
    # Identity in single-process runs (all_reduce_op skips the collective).
    return all_reduce_op(tensor, torch.distributed.ReduceOp.MAX)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def all_reduce_op(
    tensor: torch.Tensor,
    op: torch.distributed.ReduceOp,
    after_op_func: Callable[[torch.Tensor], torch.Tensor] = None,
) -> torch.Tensor:
    """
    Wrapper over torch.distributed.all_reduce for performing
    reduction of tensor over all processes in both distributed /
    non-distributed scenarios.

    Args:
        tensor: tensor to reduce across all ranks.
        op: reduction operation (e.g. ReduceOp.SUM / MIN / MAX).
        after_op_func: optional post-processing applied to the reduced
            tensor before it is moved back to its original device.

    Returns:
        The reduced tensor; returned unchanged when not running distributed.
    """
    if is_distributed_training_run():
        # NCCL only communicates CUDA tensors; remember the original device
        # so the result can be moved back afterwards.
        tensor, orig_device = convert_to_distributed_tensor(tensor)
        torch.distributed.all_reduce(tensor, op)
        if after_op_func is not None:
            tensor = after_op_func(tensor)
        tensor = convert_to_normal_tensor(tensor, orig_device)
    return tensor
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def gather_tensors_from_all(tensor: torch.Tensor) -> List[torch.Tensor]:
    """
    Wrapper over torch.distributed.all_gather: returns a list with one
    tensor per rank (a single-element list when not running distributed).
    """
    # all_gather cannot handle 0-dim tensors, so promote to shape (1,).
    if tensor.ndim == 0:
        tensor = tensor.unsqueeze(0)

    if not is_distributed_training_run():
        return [tensor]

    tensor, orig_device = convert_to_distributed_tensor(tensor)
    num_ranks = torch.distributed.get_world_size()
    collected = [torch.zeros_like(tensor) for _ in range(num_ranks)]
    torch.distributed.all_gather(collected, tensor)
    return [convert_to_normal_tensor(part, orig_device) for part in collected]
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def gather_from_all(tensor: torch.Tensor) -> torch.Tensor:
    """Gather ``tensor`` from every rank and concatenate along dim 0."""
    return torch.cat(gather_tensors_from_all(tensor), dim=0)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def broadcast(tensor: torch.Tensor, src: int = 0) -> torch.Tensor:
    """
    Wrapper over torch.distributed.broadcast that sends ``tensor`` from
    rank ``src`` to every process; a no-op in non-distributed runs.
    """
    if not is_distributed_training_run():
        return tensor
    tensor, orig_device = convert_to_distributed_tensor(tensor)
    torch.distributed.broadcast(tensor, src)
    return convert_to_normal_tensor(tensor, orig_device)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def barrier() -> None:
    """
    Wrapper over torch.distributed.barrier; returns immediately (instead of
    raising) when the distributed process group is not initialized.
    """
    dist_mod = torch.distributed
    if dist_mod.is_available() and dist_mod.is_initialized():
        dist_mod.barrier()
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def get_world_size() -> int:
    """
    Return the world size of the current job, or 1 when torch.distributed
    is unavailable or not initialized.
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        return torch.distributed.get_world_size()
    return 1
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def get_rank() -> int:
    """
    Return this process's rank, or 0 when torch.distributed is unavailable
    or not initialized.
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        return torch.distributed.get_rank()
    return 0
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def get_primary_rank() -> int:
    """Return the rank designated as primary (the module-level _PRIMARY_RANK)."""
    return _PRIMARY_RANK
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def set_cuda_device_index(idx: int) -> None:
    """Record ``idx`` as this process's CUDA device and make it current in torch."""
    global _cuda_device_index
    _cuda_device_index = idx
    torch.cuda.set_device(_cuda_device_index)
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def set_cpu_device() -> None:
    """Mark this process as CPU-only by storing the CPU sentinel device index."""
    global _cuda_device_index
    _cuda_device_index = _CPU_DEVICE_INDEX
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def get_cuda_device_index() -> int:
    """Return the selected device index (or the CPU sentinel _CPU_DEVICE_INDEX)."""
    return _cuda_device_index
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def init_distributed_data_parallel_model(
    model: torch.nn.Module,
    broadcast_buffers: bool = False,
    find_unused_parameters: bool = True,
    bucket_cap_mb: int = 25,
) -> torch.nn.parallel.DistributedDataParallel:
    """
    Wrap ``model`` in DistributedDataParallel, honoring the module-level
    device selection made via set_cuda_device_index / set_cpu_device.

    Args:
        model: module to wrap.
        broadcast_buffers: forwarded to DDP.
        find_unused_parameters: forwarded to DDP.
        bucket_cap_mb: forwarded to DDP.

    Returns:
        The DDP-wrapped model.
    """
    global _cuda_device_index

    if _cuda_device_index == _CPU_DEVICE_INDEX:
        # CPU-only model, don't specify device
        return torch.nn.parallel.DistributedDataParallel(
            model,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=find_unused_parameters,
            bucket_cap_mb=bucket_cap_mb,
        )
    else:
        # GPU model: pin both inputs and outputs to the selected device.
        return torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[_cuda_device_index],
            output_device=_cuda_device_index,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=find_unused_parameters,
            bucket_cap_mb=bucket_cap_mb,
        )
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def broadcast_object(obj: Any, src: int = _PRIMARY_RANK, use_disk: bool = True) -> Any:
    """Broadcast an object from a source to all workers.

    Args:
        obj: Object to broadcast, must be serializable
        src: Source rank for broadcast (default is primary)
        use_disk: If enabled, removes redundant CPU memory copies by writing to
            disk
    """
    # Either broadcast from primary to the fleet (default),
    # or use the src setting as the original rank
    if get_rank() == src:
        # Emit data: serialize with torch.save, then broadcast length first
        # so receivers can pre-allocate the payload buffer.
        buffer = io.BytesIO()
        torch.save(obj, buffer)
        data_view = buffer.getbuffer()
        length_tensor = torch.LongTensor([len(data_view)])
        length_tensor = broadcast(length_tensor, src=src)
        data_tensor = torch.ByteTensor(data_view)
        data_tensor = broadcast(data_tensor, src=src)
    else:
        # Fetch from the source: first the length, then the payload bytes.
        length_tensor = torch.LongTensor([0])
        length_tensor = broadcast(length_tensor, src=src)
        data_tensor = torch.empty([length_tensor.item()], dtype=torch.uint8)
        data_tensor = broadcast(data_tensor, src=src)
        if use_disk:
            with tempfile.TemporaryFile("r+b") as f:
                f.write(data_tensor.numpy())
                # remove reference to the data tensor and hope that Python garbage
                # collects it
                del data_tensor
                f.seek(0)
                obj = torch.load(f, weights_only=False)
        else:
            buffer = io.BytesIO(data_tensor.numpy())
            obj = torch.load(buffer, weights_only=False)
    return obj
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def all_gather_tensor(tensor: torch.Tensor, world_size=None):
    """
    All-gather ``tensor`` across ranks and return a list of per-rank tensors.

    Args:
        tensor: contiguous tensor to gather.
        world_size: number of participating ranks; defaults to the current
            world size when None.
    """
    if world_size is None:
        world_size = get_world_size()
    # make contiguous because NCCL won't gather the tensor otherwise
    assert tensor.is_contiguous(), f"{tensor.shape} is not contiguous!"
    tensor, orig_device = convert_to_distributed_tensor(tensor)
    tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
    dist.all_gather(tensor_all, tensor, async_op=False)  # performance opt
    tensor_all = [
        convert_to_normal_tensor(tensor, orig_device) for tensor in tensor_all
    ]
    return tensor_all
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def all_gather_batch(tensors: List[torch.Tensor]):
    """
    Performs all_gather operation on the provided tensors.
    """
    world_size = get_world_size()
    # Single process: gathering is the identity, so skip the collectives.
    if world_size == 1:
        return tensors
    return [
        torch.cat(all_gather_tensor(t, world_size), dim=0) for t in tensors
    ]
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
class GatherLayer(autograd.Function):
    """
    Gather tensors from all workers with support for backward propagation:
    This implementation does not cut the gradients as torch.distributed.all_gather does.
    """

    @staticmethod
    def forward(ctx, x):
        # Collect x from every rank; the output tuple has world_size entries.
        output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
        dist.all_gather(output, x)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        # Sum the per-output gradients across all ranks, then return the
        # slice that corresponds to this rank's original input.
        all_gradients = torch.stack(grads)
        dist.all_reduce(all_gradients)
        return all_gradients[dist.get_rank()]
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def all_gather_batch_with_grad(tensors):
    """
    Performs all_gather operation on the provided tensors.
    Graph remains connected for backward grad computation.
    """
    world_size = get_world_size()
    # Single process: gathering is the identity, so skip the collectives.
    if world_size == 1:
        return tensors
    return [
        torch.cat(GatherLayer.apply(t), dim=0) for t in tensors
    ]
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def unwrap_ddp_if_wrapped(model):
    """Return the underlying module when ``model`` is DDP-wrapped, else ``model``."""
    is_wrapped = isinstance(model, torch.nn.parallel.DistributedDataParallel)
    return model.module if is_wrapped else model
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
def create_new_process_group(group_size):
    """
    Creates process groups of a given `group_size` and returns
    process group that current GPU participates in.

    `group_size` must divide the total number of GPUs (world_size).

    Modified from
    https://github.com/NVIDIA/apex/blob/4e1ae43f7f7ac69113ef426dd15f37123f0a2ed3/apex/parallel/__init__.py#L60

    Args:
        group_size (int): number of GPU's to collaborate for sync bn
    """

    assert group_size > 0

    world_size = torch.distributed.get_world_size()
    if world_size <= 8:
        if group_size > world_size:
            logging.warning(
                f"Requested group size [{group_size}] > world size [{world_size}]. "
                "Assuming local debug run and capping it to world size."
            )
            group_size = world_size
    assert world_size >= group_size
    assert world_size % group_size == 0

    group = None
    for group_num in range(world_size // group_size):
        group_ids = range(group_num * group_size, (group_num + 1) * group_size)
        # new_group is a collective call: every rank must execute it for
        # every subgroup, even the ones it does not belong to.
        cur_group = torch.distributed.new_group(ranks=group_ids)
        if torch.distributed.get_rank() // group_size == group_num:
            group = cur_group
        # can not drop out and return here, every process must go through creation of all subgroups

    assert group is not None
    return group
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
def gather_to_rank_0_via_filesys(data, filesys_save_dir=None):
    """
    Gather any picklable data to rank 0 via filesystem, using all_gather_via_filesys.

    Args:
        data: picklable object contributed by the local rank.
        filesys_save_dir: shared directory used for the file-based exchange;
            forwarded to all_gather_via_filesys.
    """
    # NOTE(review): what non-zero ranks receive is determined by
    # all_gather_via_filesys's handling of gather_to_rank_0_only — verify there.
    return all_gather_via_filesys(data, filesys_save_dir, gather_to_rank_0_only=True)
|
source_code/sam3/scripts/eval/gold/README.md
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SA-Co/Gold benchmark
|
| 2 |
+
|
| 3 |
+
SA-Co/Gold is a benchmark for promptable concept segmentation (PCS) in images. The benchmark contains images paired with text labels, also referred to as Noun Phrases (NPs), each annotated exhaustively with masks on all object instances that match the label. SA-Co/Gold comprises 7 subsets, each targeting a different annotation domain: MetaCLIP captioner NPs, SA-1B captioner NPs, Attributes, Crowded Scenes, Wiki-Common1K, Wiki-Food/Drink, Wiki-Sports Equipment. The images are originally from the MetaCLIP and SA-1B datasets.
|
| 4 |
+
|
| 5 |
+
For each subset, the annotations are multi-reviewed by 3 independent human annotators. Each row in the figure shows an image and noun phrase pair from
|
| 6 |
+
one of the domains, and masks from the 3 annotators. Dashed borders indicate special group masks that cover more than a single instance, used when separating into instances is deemed too difficult. Annotators sometimes disagree on precise mask borders, the number of instances, and whether the phrase exists. Having 3 independent annotations allows us to measure human agreement on the task, which serves as an upper bound for model performance.
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
<p align="center">
|
| 10 |
+
<img src="../../../assets/saco_gold_annotation.png?" style="width:80%;" />
|
| 11 |
+
</p>
|
| 12 |
+
# Preparation
|
| 13 |
+
|
| 14 |
+
## Download annotations
|
| 15 |
+
|
| 16 |
+
The GT annotations can be downloaded from [Hugging Face](https://huggingface.co/datasets/facebook/SACo-Gold) or [Roboflow](https://universe.roboflow.com/sa-co-gold)
|
| 17 |
+
|
| 18 |
+
## Download images
|
| 19 |
+
|
| 20 |
+
There are two image sources for the evaluation dataset: MetaCLIP and SA-1B.
|
| 21 |
+
|
| 22 |
+
1) The MetaCLIP images are referred in 6 out of 7 subsets (MetaCLIP captioner NPs, Attributes, Crowded Scenes, Wiki-Common1K, Wiki-Food/Drink, Wiki-Sports Equipment) and can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-gold/gold-metaclip-merged-a-release-test/).
|
| 23 |
+
|
| 24 |
+
2) The SA-1B images are referred in 1 out of 7 subsets (SA-1B captioner NPs) and can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-gold/gold-sa-1b-merged-a-release-test/). Alternatively, they can be downloaded from [here](https://ai.meta.com/datasets/segment-anything-downloads/). Please access the link for `sa_co_gold.tar` from dynamic links available under `Download text file` to download the SA-1B images referred in SA-Co/Gold.
|
| 25 |
+
|
| 26 |
+
# Usage
|
| 27 |
+
## Visualization
|
| 28 |
+
|
| 29 |
+
- Visualize GT annotations: [saco_gold_silver_vis_example.ipynb](https://github.com/facebookresearch/sam3/blob/main/examples/saco_gold_silver_vis_example.ipynb)
|
| 30 |
+
- Visualize GT annotations and sample predictions side-by-side: [sam3_data_and_predictions_visualization.ipynb](https://github.com/facebookresearch/sam3/blob/main/examples/sam3_data_and_predictions_visualization.ipynb)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
## Run evaluation
|
| 34 |
+
|
| 35 |
+
The official metric for SA-Co/Gold is cgF1. Please refer to the SAM3 paper for details.
|
| 36 |
+
Our evaluator inherits from the official COCO evaluator, with some modifications. Recall that in the Gold subset, there are three annotations for each datapoint. We evaluate against each of them and pick the most favorable (oracle setting). It has minimal dependencies (pycocotools, numpy and scipy), to help reusability in other projects. In this section we provide several pointers to run evaluation of SAM3 or third-party models.
|
| 37 |
+
|
| 38 |
+
### Evaluate SAM3
|
| 39 |
+
|
| 40 |
+
We provide inference configurations to reproduce the evaluation of SAM3.
|
| 41 |
+
First, please edit the file [eval_base.yaml](https://github.com/facebookresearch/sam3/blob/main/sam3/train/configs/eval_base.yaml) with the paths where you downloaded the images and annotations above.
|
| 42 |
+
|
| 43 |
+
There are 7 subsets and as many configurations to be run.
|
| 44 |
+
Let's take the first subset as an example. The inference can be run locally using the following command (you can adjust the number of gpus):
|
| 45 |
+
```bash
|
| 46 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml --use-cluster 0 --num-gpus 1
|
| 47 |
+
```
|
| 48 |
+
The predictions will be dumped in the folder specified in eval_base.yaml.
|
| 49 |
+
|
| 50 |
+
We also provide support for SLURM-based cluster inference. Edit the eval_base.yaml file to reflect your slurm configuration (partition, qos, ...), then run
|
| 51 |
+
|
| 52 |
+
```bash
|
| 53 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml --use-cluster 1
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
We provide the commands for all subsets below
|
| 57 |
+
#### MetaCLIP captioner NPs
|
| 58 |
+
|
| 59 |
+
```bash
|
| 60 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml --use-cluster 1
|
| 61 |
+
```
|
| 62 |
+
#### SA-1B captioner NPs
|
| 63 |
+
|
| 64 |
+
Refer to SA-1B images for this subset. For the other 6 subsets, refer to MetaCLIP images.
|
| 65 |
+
|
| 66 |
+
```bash
|
| 67 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_sa1b_nps.yaml --use-cluster 1
|
| 68 |
+
```
|
| 69 |
+
#### Attributes
|
| 70 |
+
|
| 71 |
+
```bash
|
| 72 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_attributes.yaml --use-cluster 1
|
| 73 |
+
```
|
| 74 |
+
#### Crowded Scenes
|
| 75 |
+
|
| 76 |
+
```bash
|
| 77 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_crowded.yaml --use-cluster 1
|
| 78 |
+
```
|
| 79 |
+
#### Wiki-Common1K
|
| 80 |
+
|
| 81 |
+
```bash
|
| 82 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_wiki_common.yaml --use-cluster 1
|
| 83 |
+
```
|
| 84 |
+
#### Wiki-Food/Drink
|
| 85 |
+
|
| 86 |
+
```bash
|
| 87 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_fg_food.yaml --use-cluster 1
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
#### Wiki-Sports Equipment
|
| 91 |
+
|
| 92 |
+
```bash
|
| 93 |
+
python sam3/train/train.py -c configs/gold_image_evals/sam3_gold_image_fg_sports.yaml --use-cluster 1
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
### Offline evaluation
|
| 97 |
+
|
| 98 |
+
If you have the predictions in the COCO result format (see [here](https://cocodataset.org/#format-results)), then we provide scripts to easily run the evaluation.
|
| 99 |
+
|
| 100 |
+
For an example on how to run the evaluator on all subsets and aggregate results, see the following notebook: [saco_gold_silver_eval_example.ipynb](https://github.com/facebookresearch/sam3/blob/main/examples/saco_gold_silver_eval_example.ipynb)
|
| 101 |
+
Alternatively, you can run `python scripts/eval/gold/eval_sam3.py`
|
| 102 |
+
|
| 103 |
+
If you have a prediction file for a given subset, you can run the evaluator specifically for that one using the standalone script. Example:
|
| 104 |
+
```bash
|
| 105 |
+
python scripts/eval/standalone_cgf1.py --pred_file /path/to/coco_predictions_segm.json --gt_files /path/to/annotations/gold_metaclip_merged_a_release_test.json /path/to/annotations/gold_metaclip_merged_b_release_test.json /path/to/annotations/gold_metaclip_merged_c_release_test.json
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# Results
|
| 110 |
+
Here we collect the segmentation results for SAM3 and some baselines. Note that the baselines that do not produce masks are evaluated by converting the boxes to masks using SAM2
|
| 111 |
+
<table style="border-color:black;border-style:solid;border-width:1px;border-collapse:collapse;border-spacing:0;text-align:right" class="tg"><thead>
|
| 112 |
+
<tr><th style="text-align:center"></th><th style="text-align:center" colspan="3">Average</th><th style="text-align:center" colspan="3">Captioner metaclip</th><th style="text-align:center" colspan="3">Captioner sa1b</th>
|
| 113 |
+
<th style="text-align:center" colspan="3">Crowded</th><th style="text-align:center" colspan="3">FG food</th><th style="text-align:center" colspan="3">FG sport</th><th style="text-align:center" colspan="3">Attributes</th>
|
| 114 |
+
<th style="text-align:center" colspan="3">Wiki common</th></tr>
|
| 115 |
+
</thead>
|
| 116 |
+
<tbody>
|
| 117 |
+
<tr><td ></td><td >cgF1</td><td >IL_MCC</td><td >positive_micro_F1</td>
|
| 118 |
+
<td >cgF1</td><td >IL_MCC</td><td >positive_micro_F1</td><td >cgF1</td>
|
| 119 |
+
<td >IL_MCC</td><td >positive_micro_F1</td><td >cgF1</td><td >IL_MCC</td>
|
| 120 |
+
<td >positive_micro_F1</td><td >cgF1</td><td >IL_MCC</td><td >positive_micro_F1</td>
|
| 121 |
+
<td >cgF1</td><td >IL_MCC</td><td >positive_micro_F1</td><td >cgF1</td>
|
| 122 |
+
<td >IL_MCC</td><td >positive_micro_F1</td><td >cgF1</td><td >IL_MCC</td>
|
| 123 |
+
<td >positive_micro_F1</td></tr>
|
| 124 |
+
<tr><td >gDino-T</td><td >3.25</td><td >0.15</td><td >16.2</td>
|
| 125 |
+
<td >2.89</td><td >0.21</td><td >13.88</td><td >3.07</td>
|
| 126 |
+
<td >0.2</td><td >15.35</td><td >0.28</td><td >0.08</td>
|
| 127 |
+
<td >3.37</td><td >0.96</td><td >0.1</td><td >9.83</td>
|
| 128 |
+
<td >1.12</td><td >0.1</td><td >11.2</td><td >13.75</td>
|
| 129 |
+
<td >0.29</td><td >47.3</td><td >0.7</td><td >0.06</td>
|
| 130 |
+
<td >12.14</td></tr>
|
| 131 |
+
<tr><td >OWLv2*</td><td >24.59</td><td >0.57</td><td >42</td>
|
| 132 |
+
<td >17.69</td><td >0.52</td><td >34.27</td><td >13.32</td>
|
| 133 |
+
<td >0.5</td><td >26.83</td><td >15.8</td><td >0.51</td>
|
| 134 |
+
<td >30.74</td><td >31.96</td><td >0.65</td><td >49.35</td>
|
| 135 |
+
<td >36.01</td><td >0.64</td><td >56.19</td><td >35.61</td>
|
| 136 |
+
<td >0.63</td><td >56.23</td><td >21.73</td><td >0.54</td>
|
| 137 |
+
<td >40.25</td></tr>
|
| 138 |
+
<tr><td >OWLv2</td><td >17.27</td><td >0.46</td><td >36.8</td>
|
| 139 |
+
<td >12.21</td><td >0.39</td><td >31.33</td><td >9.76</td>
|
| 140 |
+
<td >0.45</td><td >21.65</td><td >8.87</td><td >0.36</td>
|
| 141 |
+
<td >24.77</td><td >24.36</td><td >0.51</td><td >47.85</td>
|
| 142 |
+
<td >24.44</td><td >0.52</td><td >46.97</td><td >25.85</td>
|
| 143 |
+
<td >0.54</td><td >48.22</td><td >15.4</td><td >0.42</td>
|
| 144 |
+
<td >36.64</td></tr>
|
| 145 |
+
<tr><td >LLMDet-L</td><td >6.5</td><td >0.21</td><td >27.3</td>
|
| 146 |
+
<td >4.49</td><td >0.23</td><td >19.36</td><td >5.32</td>
|
| 147 |
+
<td >0.23</td><td >22.81</td><td >2.42</td><td >0.18</td>
|
| 148 |
+
<td >13.74</td><td >5.5</td><td >0.19</td><td >29.12</td>
|
| 149 |
+
<td >4.39</td><td >0.17</td><td >25.34</td><td >22.17</td>
|
| 150 |
+
<td >0.39</td><td >57.13</td><td >1.18</td><td >0.05</td>
|
| 151 |
+
<td >23.3</td></tr>
|
| 152 |
+
<tr><td >APE</td><td >16.41</td><td >0.4</td><td >36.9</td>
|
| 153 |
+
<td >12.6</td><td >0.42</td><td >30.11</td><td >2.23</td>
|
| 154 |
+
<td >0.22</td><td >10.01</td><td >7.15</td><td >0.35</td>
|
| 155 |
+
<td >20.3</td><td >22.74</td><td >0.51</td><td >45.01</td>
|
| 156 |
+
<td >31.79</td><td >0.56</td><td >56.45</td><td >26.74</td>
|
| 157 |
+
<td >0.47</td><td >57.27</td><td >11.59</td><td >0.29</td>
|
| 158 |
+
<td >39.46</td></tr>
|
| 159 |
+
<tr><td >DINO-X</td><td >21.26</td><td >0.38</td><td >55.2</td>
|
| 160 |
+
<td >17.21</td><td >0.35</td><td >49.17</td><td >19.66</td>
|
| 161 |
+
<td >0.48</td><td >40.93</td><td >12.86</td><td >0.34</td>
|
| 162 |
+
<td >37.48</td><td >30.07</td><td >0.49</td><td >61.72</td>
|
| 163 |
+
<td >28.36</td><td >0.41</td><td >69.4</td><td >30.97</td>
|
| 164 |
+
<td >0.42</td><td >74.04</td><td >9.72</td><td >0.18</td>
|
| 165 |
+
<td >53.52</td></tr>
|
| 166 |
+
<tr><td >Gemini 2.5</td><td >13.03</td><td >0.29</td><td >46.1</td>
|
| 167 |
+
<td >9.9</td><td >0.29</td><td >33.79</td><td >13.1</td>
|
| 168 |
+
<td >0.41</td><td >32.1</td><td >8.15</td><td >0.27</td>
|
| 169 |
+
<td >30.34</td><td >19.63</td><td >0.33</td><td >59.52</td>
|
| 170 |
+
<td >15.07</td><td >0.28</td><td >53.5</td><td >18.84</td>
|
| 171 |
+
<td >0.3</td><td >63.14</td><td >6.5</td><td >0.13</td>
|
| 172 |
+
<td >50.32</td></tr>
|
| 173 |
+
<tr><td >SAM 3</td><td >54.06</td><td >0.82</td><td >66.11</td>
|
| 174 |
+
<td >47.26</td><td >0.81</td><td >58.58</td><td >53.69</td>
|
| 175 |
+
<td >0.86</td><td >62.55</td><td >61.08</td><td >0.9</td>
|
| 176 |
+
<td >67.73</td><td >53.41</td><td >0.79</td><td >67.28</td>
|
| 177 |
+
<td >65.52</td><td >0.89</td><td >73.75</td><td >54.93</td>
|
| 178 |
+
<td >0.76</td><td >72</td><td >42.53</td><td >0.7</td>
|
| 179 |
+
<td >60.85</td></tr>
|
| 180 |
+
</tbody></table>
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
# Annotation format
|
| 185 |
+
|
| 186 |
+
The annotation format is derived from [COCO format](https://cocodataset.org/#format-data). Notable data fields are:
|
| 187 |
+
|
| 188 |
+
- `images`: a `list` of `dict` features, contains a list of all image-NP pairs. Each entry is related to an image-NP pair and has the following items.
|
| 189 |
+
- `id`: an `int` feature, unique identifier for the image-NP pair
|
| 190 |
+
- `text_input`: a `string` feature, the noun phrase for the image-NP pair
|
| 191 |
+
- `file_name`: a `string` feature, the relative image path in the corresponding data folder.
|
| 192 |
+
- `height`/`width`: dimension of the image
|
| 193 |
+
- `is_instance_exhaustive`: Boolean (0 or 1). If it's 1 then all the instances are correctly annotated. For instance segmentation, we only use those datapoints. Otherwise, there may be either missing instances or crowd segments (a segment covering multiple instances)
|
| 194 |
+
- `is_pixel_exhaustive`: Boolean (0 or 1). If it's 1, then the union of all masks cover all pixels corresponding to the prompt. This is weaker than instance_exhaustive since it allows crowd segments. It can be used for semantic segmentation evaluations.
|
| 195 |
+
|
| 196 |
+
- `annotations`: a `list` of `dict` features, containing a list of all annotations including bounding box, segmentation mask, area etc.
|
| 197 |
+
- `image_id`: an `int` feature, maps to the identifier for the image-np pair in images
|
| 198 |
+
- `bbox`: a `list` of float features, containing bounding box in [x,y,w,h] format, normalized by the image dimensions
|
| 199 |
+
- `segmentation`: a dict feature, containing segmentation mask in RLE format
|
| 200 |
+
- `category_id`: For compatibility with the coco format. Will always be 1 and is unused.
|
| 201 |
+
  - `iscrowd`: Boolean (0 or 1). If 1, then the segment overlaps several instances (used in cases where instances are not separable, for e.g. due to poor image quality)
|
| 202 |
+
|
| 203 |
+
- `categories`: a `list` of `dict` features, containing a list of all categories. Here, we provide the category key for compatibility with the COCO format, but in open-vocabulary detection we do not use it. Instead, the text prompt is stored directly in each image (text_input in images). Note that in our setting, a unique image (id in images) actually corresponds to an (image, text prompt) combination.
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
For `id` in images that have corresponding annotations (i.e. exist as `image_id` in `annotations`), we refer to them as a "positive" NP. And, for `id` in `images` that don't have any annotations (i.e. they do not exist as `image_id` in `annotations`), we refer to them as a "negative" NP.
|
| 207 |
+
|
| 208 |
+
A sample annotation from Wiki-Food/Drink domain looks as follows:
|
| 209 |
+
|
| 210 |
+
#### images
|
| 211 |
+
|
| 212 |
+
```
|
| 213 |
+
[
|
| 214 |
+
{
|
| 215 |
+
"id": 10000000,
|
| 216 |
+
"file_name": "1/1001/metaclip_1_1001_c122868928880ae52b33fae1.jpeg",
|
| 217 |
+
"text_input": "chili",
|
| 218 |
+
"width": 600,
|
| 219 |
+
"height": 600,
|
| 220 |
+
"queried_category": "0",
|
| 221 |
+
"is_instance_exhaustive": 1,
|
| 222 |
+
"is_pixel_exhaustive": 1
|
| 223 |
+
},
|
| 224 |
+
{
|
| 225 |
+
"id": 10000001,
|
| 226 |
+
"file_name": "1/1001/metaclip_1_1001_c122868928880ae52b33fae1.jpeg",
|
| 227 |
+
"text_input": "the fish ball",
|
| 228 |
+
"width": 600,
|
| 229 |
+
"height": 600,
|
| 230 |
+
"queried_category": "2001",
|
| 231 |
+
"is_instance_exhaustive": 1,
|
| 232 |
+
"is_pixel_exhaustive": 1
|
| 233 |
+
}
|
| 234 |
+
]
|
| 235 |
+
```
|
| 236 |
+
|
| 237 |
+
#### annotations
|
| 238 |
+
|
| 239 |
+
```
|
| 240 |
+
[
|
| 241 |
+
{
|
| 242 |
+
"id": 1,
|
| 243 |
+
"image_id": 10000000,
|
| 244 |
+
"source": "manual",
|
| 245 |
+
"area": 0.002477777777777778,
|
| 246 |
+
"bbox": [
|
| 247 |
+
0.44333332777023315,
|
| 248 |
+
0.0,
|
| 249 |
+
0.10833333432674408,
|
| 250 |
+
0.05833333358168602
|
| 251 |
+
],
|
| 252 |
+
"segmentation": {
|
| 253 |
+
"counts": "`kk42fb01O1O1O1O001O1O1O001O1O00001O1O001O001O0000000000O1001000O010O02O001N10001N0100000O10O1000O10O010O100O1O1O1O1O0000001O0O2O1N2N2Nobm4",
|
| 254 |
+
"size": [
|
| 255 |
+
600,
|
| 256 |
+
600
|
| 257 |
+
]
|
| 258 |
+
},
|
| 259 |
+
"category_id": 1,
|
| 260 |
+
"iscrowd": 0
|
| 261 |
+
},
|
| 262 |
+
{
|
| 263 |
+
"id": 2,
|
| 264 |
+
"image_id": 10000000,
|
| 265 |
+
"source": "manual",
|
| 266 |
+
"area": 0.001275,
|
| 267 |
+
"bbox": [
|
| 268 |
+
0.5116666555404663,
|
| 269 |
+
0.5716666579246521,
|
| 270 |
+
0.061666667461395264,
|
| 271 |
+
0.036666665226221085
|
| 272 |
+
],
|
| 273 |
+
"segmentation": {
|
| 274 |
+
"counts": "aWd51db05M1O2N100O1O1O1O1O1O010O100O10O10O010O010O01O100O100O1O00100O1O100O1O2MZee4",
|
| 275 |
+
"size": [
|
| 276 |
+
600,
|
| 277 |
+
600
|
| 278 |
+
]
|
| 279 |
+
},
|
| 280 |
+
"category_id": 1,
|
| 281 |
+
"iscrowd": 0
|
| 282 |
+
}
|
| 283 |
+
]
|
| 284 |
+
```
|
| 285 |
+
|
| 286 |
+
# Data Stats
|
| 287 |
+
|
| 288 |
+
Here are the stats for the 7 annotation domains. The # Image-NPs represent the total number of unique image-NP pairs including both “positive” and “negative” NPs.
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
| Domain | Media | # Image-NPs | # Image-NP-Masks|
|
| 292 |
+
|--------------------------|--------------|---------------| ----------------|
|
| 293 |
+
| MetaCLIP captioner NPs | MetaCLIP | 33393 | 20144 |
|
| 294 |
+
| SA-1B captioner NPs | SA-1B | 13258 | 30306 |
|
| 295 |
+
| Attributes | MetaCLIP | 9245 | 3663 |
|
| 296 |
+
| Crowded Scenes | MetaCLIP | 20687 | 50417 |
|
| 297 |
+
| Wiki-Common1K | MetaCLIP | 65502 | 6448 |
|
| 298 |
+
| Wiki-Food&Drink | MetaCLIP | 13951 | 9825 |
|
| 299 |
+
| Wiki-Sports Equipment | MetaCLIP | 12166 | 5075 |
|
source_code/sam3/scripts/eval/gold/eval_sam3.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
"""Script to run the evaluator offline given the GTs for SAC-Gold test set and SAM3 model prediction files.
|
| 4 |
+
It reports CGF1, IL_MCC, PM_F1 metrics for each subset of SAC-Gold test set.
|
| 5 |
+
|
| 6 |
+
Usage: python eval_sam3.py --gt-folder <folder_with_gts> --pred-folder <folder_with_predictions>
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import argparse
|
| 10 |
+
import os
|
| 11 |
+
|
| 12 |
+
from sam3.eval.cgf1_eval import CGF1Evaluator
|
| 13 |
+
|
| 14 |
+
# Relative file names for GT files for 7 SA-Co/Gold subsets
|
| 15 |
+
|
| 16 |
+
# GT file stems for the 7 SA-Co/Gold subsets. Most subset keys match their
# file stem directly; the two captioner subsets use a shortened stem.
_SUBSET_FILE_STEMS = {
    "metaclip_nps": "metaclip",  # MetaCLIP captioner
    "sa1b_nps": "sa1b",  # SA-1B captioner
    "crowded": "crowded",  # Crowded
    "fg_food": "fg_food",  # FG Food
    "fg_sports_equipment": "fg_sports_equipment",  # FG Sports
    "attributes": "attributes",  # Attributes
    "wiki_common": "wiki_common",  # Wiki common
}

# Relative GT file names for each SA-Co/Gold subset: three annotation files
# (annotators a/b/c) per subset, all following one naming pattern.
saco_gold_gts = {
    subset: [
        f"gold_{stem}_merged_{annotator}_release_test.json"
        for annotator in ("a", "b", "c")
    ]
    for subset, stem in _SUBSET_FILE_STEMS.items()
}
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def main():
    """Evaluate SAM3 predictions on every SA-Co/Gold subset and print metrics.

    For each subset, builds a CGF1Evaluator from the subset's GT annotation
    files, evaluates the corresponding COCO-format prediction file found under
    --pred-folder, and prints CGF1, IL_MCC and PM_F1 per subset. CGF1 and PM_F1
    are reported as percentages rounded to two decimals; IL_MCC is already on a
    [-1, 1] scale and is only rounded.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-g",
        "--gt-folder",
        type=str,
        # Previously optional: a missing value crashed later inside
        # os.path.join(None, ...) with a confusing TypeError.
        required=True,
        help="Path to the folder containing the ground truth json files.",
    )
    parser.add_argument(
        "-p",
        "--pred-folder",
        type=str,
        required=True,
        help="Path to the folder containing the predictions json files.",
    )
    args = parser.parse_args()

    result_lines = []

    for subset_name, gts in saco_gold_gts.items():
        print("Processing subset: ", subset_name)
        gt_paths = [os.path.join(args.gt_folder, gt) for gt in gts]
        # Change iou_type to "bbox" if you want detection (box) performance.
        evaluator = CGF1Evaluator(gt_path=gt_paths, verbose=True, iou_type="segm")

        pred_path = os.path.join(
            args.pred_folder,
            f"gold_{subset_name}/dumps/gold_{subset_name}/coco_predictions_segm.json",
        )
        summary = evaluator.evaluate(pred_path)

        cgf1 = round(summary["cgF1_eval_segm_cgF1"] * 100, 2)
        il_mcc = round(summary["cgF1_eval_segm_IL_MCC"], 2)
        pmf1 = round(summary["cgF1_eval_segm_positive_micro_F1"] * 100, 2)
        result_lines.append(f"{subset_name}: {cgf1},{il_mcc},{pmf1}")

    print("Subset name, CG_F1, IL_MCC, pmF1")
    print("\n".join(result_lines))


if __name__ == "__main__":
    main()
|
source_code/sam3/scripts/eval/silver/README.md
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SA-Co/Silver benchmark
|
| 2 |
+
|
| 3 |
+
SA-Co/Silver is a benchmark for promptable concept segmentation (PCS) in images. The benchmark contains images paired with text labels (also referred as Noun Phrases aka NPs), each annotated exhaustively with masks on all object instances that match the label.
|
| 4 |
+
|
| 5 |
+
SA-Co/Silver comprises 10 subsets, covering a diverse array of domains including food, art, robotics, driving etc. Unlike SA-Co/Gold, there is only a single ground-truth for each datapoint, which means the results may have a bit more variance and tend to underestimate model performance, since they don't account for possible different interpretations of each query.
|
| 6 |
+
|
| 7 |
+
- BDD100k
|
| 8 |
+
- DROID
|
| 9 |
+
- Ego4D
|
| 10 |
+
- MyFoodRepo-273
|
| 11 |
+
- GeoDE
|
| 12 |
+
- iNaturalist-2017
|
| 13 |
+
- National Gallery of Art
|
| 14 |
+
- SA-V
|
| 15 |
+
- YT-Temporal-1B
|
| 16 |
+
- Fathomnet
|
| 17 |
+
|
| 18 |
+
The README contains instructions on how to download and setup the annotations, image data to prepare them for evaluation on SA-Co/Silver.
|
| 19 |
+
|
| 20 |
+
# Preparation
|
| 21 |
+
## Download annotations
|
| 22 |
+
|
| 23 |
+
The GT annotations can be downloaded from [Hugging Face](https://huggingface.co/datasets/facebook/SACo-Silver) or [Roboflow](https://universe.roboflow.com/sa-co-silver)
|
| 24 |
+
|
| 25 |
+
## Download images and video frames
|
| 26 |
+
|
| 27 |
+
### Image Datasets
|
| 28 |
+
|
| 29 |
+
#### GeoDE
|
| 30 |
+
|
| 31 |
+
The processed images needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/geode/) OR follow the below steps to prepare the processed images.
|
| 32 |
+
|
| 33 |
+
1. Download dataset with raw images from [GeoDE](https://geodiverse-data-collection.cs.princeton.edu/).
|
| 34 |
+
2. Extract the downloaded file to a location, say `<RAW_GEODE_IMAGES_FOLDER>`
|
| 35 |
+
|
| 36 |
+
3. Run the below command to pre-process the images and prepare for evaluation. The processed images will be saved to the location specified in `<PROCESSED_GEODE_IMAGES_FOLDER>`
|
| 37 |
+
```
|
| 38 |
+
python preprocess_silver_geode_bdd100k_food_rec.py --annotation_file <FOLDER_WITH_SILVER_ANNOTATIONS>/silver_geode_merged_test.json --raw_images_folder <RAW_GEODE_IMAGES_FOLDER> --processed_images_folder <PROCESSED_GEODE_IMAGES_FOLDER> --dataset_name geode
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
#### National Gallery of Art (NGA)
|
| 42 |
+
|
| 43 |
+
The processed images needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/national-gallery-of-art/) OR follow the below steps to prepare the processed images.
|
| 44 |
+
|
| 45 |
+
1. Run the below command to download raw images and pre-process the images to prepare for evaluation. The processed images will be saved to the location specified in `<PROCESSED_NGA_IMAGES_FOLDER>`.
|
| 46 |
+
```
|
| 47 |
+
python download_preprocess_nga.py --annotation_file <FOLDER_WITH_SILVER_ANNOTATIONS>/silver_nga_art_merged_test.json --raw_images_folder <RAW_NGA_IMAGES_FOLDER> --processed_images_folder <PROCESSED_NGA_IMAGES_FOLDER>
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
#### Berkeley Driving Dataset (BDD) 100k
|
| 51 |
+
|
| 52 |
+
The processed images needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/bdd100k-gwmh6/) OR follow the below steps to prepare the processed images.
|
| 53 |
+
|
| 54 |
+
1. Download data with raw images from the `100K Images` dataset in [BDD100k](http://bdd-data.berkeley.edu/download.html)
|
| 55 |
+
2. Extract the downloaded file to a location, say `<RAW_BDD_IMAGES_FOLDER>`
|
| 56 |
+
3. Run the below command to pre-process the images and prepare for evaluation. The processed images will be saved to the location specified in `<PROCESSED_BDD_IMAGES_FOLDER>`
|
| 57 |
+
```
|
| 58 |
+
python preprocess_silver_geode_bdd100k_food_rec.py --annotation_file <FOLDER_WITH_SILVER_ANNOTATIONS>/silver_bdd100k_merged_test.json --raw_images_folder <RAW_BDD_IMAGES_FOLDER> --processed_images_folder <PROCESSED_BDD_IMAGES_FOLDER> --dataset_name bdd100k
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
#### Food Recognition Challenge 2022
|
| 62 |
+
|
| 63 |
+
1. Download data with raw images from the [website](https://www.aicrowd.com/challenges/food-recognition-benchmark-2022). Download `[Round 2] public_validation_set_2.0.tar.gz` file.
|
| 64 |
+
2. Extract the downloaded file to a location, say `<RAW_FOOD_IMAGES_FOLDER>`
|
| 65 |
+
3. Run the below command to pre-process the images and prepare for evaluation. The processed images will be saved to the location specified in `<PROCESSED_FOOD_IMAGES_FOLDER>`
|
| 66 |
+
```
|
| 67 |
+
python preprocess_silver_geode_bdd100k_food_rec.py --annotation_file <FOLDER_WITH_SILVER_ANNOTATIONS>/silver_food_rec_merged_test.json --raw_images_folder <RAW_FOOD_IMAGES_FOLDER> --processed_images_folder <PROCESSED_FOOD_IMAGES_FOLDER> --dataset_name food_rec
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
#### iNaturalist
|
| 71 |
+
|
| 72 |
+
The processed images needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/inaturalist-2017/) OR follow the below steps to prepare the processed images.
|
| 73 |
+
|
| 74 |
+
1. Run the below command to download, extract images in `<RAW_INATURALIST_IMAGES_FOLDER>` and prepare them for evaluation. The processed images will be saved to the location specified in `<PROCESSED_INATURALIST_IMAGES_FOLDER>`
|
| 75 |
+
```
|
| 76 |
+
python download_inaturalist.py --raw_images_folder <RAW_INATURALIST_IMAGES_FOLDER> --processed_images_folder <PROCESSED_INATURALIST_IMAGES_FOLDER>
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
#### Fathomnet
|
| 80 |
+
|
| 81 |
+
The processed images needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/fathomnet-kmz5d/) OR follow the below steps to prepare the processed images.
|
| 82 |
+
|
| 83 |
+
1. Install the FathomNet API
|
| 84 |
+
```
|
| 85 |
+
pip install fathomnet
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
2. Run the below command to download the images and prepare for evaluation. The processed images will be saved to the location specified in `<PROCESSED_FATHOMNET_IMAGES_FOLDER>`
|
| 89 |
+
```
|
| 90 |
+
python download_fathomnet.py --processed_images_folder <PROCESSED_FATHOMNET_IMAGES_FOLDER>
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
### Frame Datasets
|
| 94 |
+
|
| 95 |
+
These datasets correspond to annotations for individual frames coming from videos. The file `CONFIG_FRAMES.yaml` is used to unify the downloads for the datasets, as explained below.
|
| 96 |
+
|
| 97 |
+
Before following the other dataset steps, update `CONFIG_FRAMES.yaml` with the correct `path_annotations` path where the annotation files are.
|
| 98 |
+
|
| 99 |
+
#### DROID
|
| 100 |
+
|
| 101 |
+
The processed frames needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/droid-cfual/) OR follow the below steps to prepare the processed frames.
|
| 102 |
+
|
| 103 |
+
1. Install the gsutil package:
|
| 104 |
+
```bash
|
| 105 |
+
pip install gsutil
|
| 106 |
+
```
|
| 107 |
+
2. Modify the `droid_path` variable in `CONFIG_FRAMES.yaml`. This is the path where the DROID data will be downloaded.
|
| 108 |
+
3. _\[Optional\]_ Update the variable `remove_downloaded_videos_droid` to (not) remove the videos after the frames have been extracted.
|
| 109 |
+
4. Download the data:
|
| 110 |
+
```bash
|
| 111 |
+
python download_videos.py droid
|
| 112 |
+
```
|
| 113 |
+
5. Extract the frames:
|
| 114 |
+
```bash
|
| 115 |
+
python extract_frames.py droid
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
See the [DROID website](https://droid-dataset.github.io/droid/the-droid-dataset#-using-the-dataset) for more information.
|
| 119 |
+
|
| 120 |
+
#### SA-V
|
| 121 |
+
|
| 122 |
+
The processed frames needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/sa-v) OR follow the below steps to prepare the processed frames.
|
| 123 |
+
|
| 124 |
+
1. Follow instructions in the [Segment Anything official website](https://ai.meta.com/datasets/segment-anything-video-downloads/) to obtain access to the download links (they are dynamic links).
|
| 125 |
+
2. Update `CONFIG_FRAMES.yaml`:
|
| 126 |
+
- Update the `sav_path` variable, where the frames will be saved.
|
| 127 |
+
- Update the `sav_videos_fps_6_download_path` variable. Copy paste the path corresponding to the `videos_fps_6.tar` in the list that you obtained in step 1.
|
| 128 |
+
- _\[Optional\]_ Update the variable `remove_downloaded_videos_sav` to (not) remove the videos after the frames have been extracted.
|
| 129 |
+
3. Download the videos:
|
| 130 |
+
```bash
|
| 131 |
+
python download_videos.py sav
|
| 132 |
+
```
|
| 133 |
+
4. Extract the frames:
|
| 134 |
+
```
|
| 135 |
+
python extract_frames.py sav
|
| 136 |
+
```
|
| 137 |
+
|
| 138 |
+
#### Ego4D
|
| 139 |
+
|
| 140 |
+
The processed frames needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/ego4d-w7fiu/) OR follow the below steps to prepare the processed frames.
|
| 141 |
+
|
| 142 |
+
1. Review and accept the license agreement in the [official Ego4D website](https://ego4d-data.org/docs/start-here/#license-agreement).
|
| 143 |
+
2. Configure AWS credentials. Run:
|
| 144 |
+
```bash
|
| 145 |
+
pip install awscli
|
| 146 |
+
aws configure
|
| 147 |
+
```
|
| 148 |
+
and copy the values shown in the email you received after step 1 (you can leave "region name" and "output format" empty). You can verify that the variables were set up correctly:
|
| 149 |
+
```bash
|
| 150 |
+
cat ~/.aws/credentials
|
| 151 |
+
```
|
| 152 |
+
3. Install the Ego4D library:
|
| 153 |
+
```bash
|
| 154 |
+
pip install ego4d
|
| 155 |
+
```
|
| 156 |
+
4. Update `CONFIG_FRAMES.yaml`:
|
| 157 |
+
- Set up AWS credentials following the instructions in the email you received after step 2. Modify the following variables: `aws_access_key_id` and `aws_secret_access_key`.
|
| 158 |
+
- Update the `ego4d_path` variable, where the frames will be saved.
|
| 159 |
+
  - _\[Optional\]_ Update the variable `remove_downloaded_videos_ego4d` to (not) remove the videos after the frames have been extracted.
|
| 160 |
+
5. Download the `clips` subset of the Ego4D dataset:
|
| 161 |
+
```python
|
| 162 |
+
python download_videos.py ego4d
|
| 163 |
+
```
|
| 164 |
+
6. Extract the frames:
|
| 165 |
+
```
|
| 166 |
+
python extract_frames.py ego4d
|
| 167 |
+
```
|
| 168 |
+
|
| 169 |
+
See the [official CLI](https://ego4d-data.org/docs/CLI/) and the [explanation about the videos](https://ego4d-data.org/docs/data/videos/) for more information.
|
| 170 |
+
|
| 171 |
+
#### YT1B
|
| 172 |
+
|
| 173 |
+
The processed frames needed for evaluation can be downloaded from [Roboflow](https://universe.roboflow.com/sa-co-silver/yt-temporal-1b/) OR follow the below steps to prepare the processed frames.
|
| 174 |
+
|
| 175 |
+
1. Install the yt-dlp library:
|
| 176 |
+
```bash
|
| 177 |
+
python3 -m pip install -U "yt-dlp[default]"
|
| 178 |
+
```
|
| 179 |
+
2. Create a `cookies.txt` file following the instructions from yt-dlp [exporting-youtube-cookies](https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies) and [pass-cookies-to-yt-dlp](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp). This is required to download youtube videos. Then, update the path for that file in the `CONFIG_FRAMES.yaml` file, in the variable `cookies_path`.
|
| 180 |
+
3. Update `CONFIG_FRAMES.yaml`:
|
| 181 |
+
- Update the `yt1b_path`, where the frames will be saved.
|
| 182 |
+
- _\[Optional\]_ Some YouTube videos may not be available on YouTube anymore. Set `update_annotation_yt1b` to `True` in `CONFIG_FRAMES.yaml` to remove the annotations corresponding to such videos. Note that the evaluations will not be directly comparable with other reported evaluations.
|
| 183 |
+
- _\[Optional\]_ Update the variable `remove_downloaded_videos_yt1b` to (not) remove the videos after the frames have been extracted.
|
| 184 |
+
4. Run the following code to download the videos:
|
| 185 |
+
```
|
| 186 |
+
python download_videos.py yt1b
|
| 187 |
+
```
|
| 188 |
+
5. Extract the frames:
|
| 189 |
+
```
|
| 190 |
+
python extract_frames.py yt1b
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
# Usage
|
| 194 |
+
## Visualization
|
| 195 |
+
|
| 196 |
+
- Visualize GT annotations: [saco_gold_silver_vis_example.ipynb](https://github.com/facebookresearch/sam3/blob/main/examples/saco_gold_silver_vis_example.ipynb)
|
| 197 |
+
|
| 198 |
+
## Run evaluation
|
| 199 |
+
|
| 200 |
+
The official metric for SA-Co/Silver is cgF1. Please refer to the SAM3 paper for details.
|
| 201 |
+
Unlike Gold, the silver subset only has a single annotation per image. Therefore, the performance may be underestimated, because the model may be wrongly penalized for choosing an interpretation which is valid but different from that of the human annotator.
|
| 202 |
+
|
| 203 |
+
### Evaluate SAM3
|
| 204 |
+
|
| 205 |
+
We provide inference configurations to reproduce the evaluation of SAM3.
|
| 206 |
+
First, please edit the file [eval_base.yaml](https://github.com/facebookresearch/sam3/blob/main/sam3/train/configs/eval_base.yaml) with the paths where you downloaded the images and annotations above.
|
| 207 |
+
|
| 208 |
+
There are 10 subsets and as many configurations to be run.
|
| 209 |
+
Let's take the first subset as an example. The inference can be run locally using the following command (you can adjust the number of gpus):
|
| 210 |
+
```bash
|
| 211 |
+
python sam3/train/train.py -c configs/silver_image_evals/sam3_gold_image_bdd100k.yaml --use-cluster 0 --num-gpus 1
|
| 212 |
+
```
|
| 213 |
+
The predictions will be dumped in the folder specified in eval_base.yaml.
|
| 214 |
+
|
| 215 |
+
We also provide support for SLURM-based cluster inference. Edit the eval_base.yaml file to reflect your slurm configuration (partition, qos, ...), then run
|
| 216 |
+
|
| 217 |
+
```bash
|
| 218 |
+
python sam3/train/train.py -c configs/silver_image_evals/sam3_gold_image_bdd100k.yaml --use-cluster 1
|
| 219 |
+
```
|
| 220 |
+
|
| 221 |
+
### Offline evaluation
|
| 222 |
+
|
| 223 |
+
If you have the predictions in the COCO result format (see [here](https://cocodataset.org/#format-results)), then we provide scripts to easily run the evaluation.
|
| 224 |
+
|
| 225 |
+
For an example on how to run the evaluator on all subsets and aggregate results, see the following notebook: [saco_gold_silver_eval_example.ipynb](https://github.com/facebookresearch/sam3/blob/main/examples/saco_gold_silver_eval_example.ipynb)
|
| 226 |
+
|
| 227 |
+
If you have a prediction file for a given subset, you can run the evaluator specifically for that one using the standalone script. Example:
|
| 228 |
+
```bash
|
| 229 |
+
python scripts/eval/standalone_cgf1.py --pred_file /path/to/coco_predictions_segm.json --gt_files /path/to/annotations/silver_bdd100k_merged_test.json
|
| 230 |
+
```
|
| 231 |
+
|
| 232 |
+
# Results
|
| 233 |
+
<table style="border-color:black;border-style:solid;border-width:1px;border-collapse:collapse;border-spacing:0;text-align:right" class="tg"><thead>
|
| 234 |
+
<tr style="text-align:center">
|
| 235 |
+
<th></th>
|
| 236 |
+
<th colspan="3">Average</th>
|
| 237 |
+
<th colspan="3">BDD100k</th>
|
| 238 |
+
<th colspan="3">Droids</th>
|
| 239 |
+
<th colspan="3">Ego4d</th>
|
| 240 |
+
<th colspan="3">Food Rec</th>
|
| 241 |
+
<th colspan="3">Geode</th>
|
| 242 |
+
<th colspan="3">iNaturalist</th>
|
| 243 |
+
<th colspan="3">Nga Art</th>
|
| 244 |
+
<th colspan="3">SAV</th>
|
| 245 |
+
<th colspan="3">YT1B</th>
|
| 246 |
+
<th colspan="3">Fathomnet</th>
|
| 247 |
+
</tr></thead>
|
| 248 |
+
<tbody>
|
| 249 |
+
<tr>
|
| 250 |
+
<td></td>
|
| 251 |
+
<td>cgF1</td>
|
| 252 |
+
<td>IL_MCC</td>
|
| 253 |
+
<td>PmF1</td>
|
| 254 |
+
<td>CGF1</td>
|
| 255 |
+
<td>IL_MCC</td>
|
| 256 |
+
<td>pmF1</td>
|
| 257 |
+
<td>CGF1</td>
|
| 258 |
+
<td>IL_MCC</td>
|
| 259 |
+
<td>pmF1</td>
|
| 260 |
+
<td>CGF1</td>
|
| 261 |
+
<td>IL_MCC</td>
|
| 262 |
+
<td>pmF1</td>
|
| 263 |
+
<td>CGF1</td>
|
| 264 |
+
<td>IL_MCC</td>
|
| 265 |
+
<td>pmF1</td>
|
| 266 |
+
<td>CGF1</td>
|
| 267 |
+
<td>IL_MCC</td>
|
| 268 |
+
<td>pmF1</td>
|
| 269 |
+
<td>CGF1</td>
|
| 270 |
+
<td>IL_MCC</td>
|
| 271 |
+
<td>pmF1</td>
|
| 272 |
+
<td>CGF1</td>
|
| 273 |
+
<td>IL_MCC</td>
|
| 274 |
+
<td>pmF1</td>
|
| 275 |
+
<td>CGF1</td>
|
| 276 |
+
<td>IL_MCC</td>
|
| 277 |
+
<td>pmF1</td>
|
| 278 |
+
<td>CGF1</td>
|
| 279 |
+
<td>IL_MCC</td>
|
| 280 |
+
<td>pmF1</td>
|
| 281 |
+
<td>CGF1</td>
|
| 282 |
+
<td>IL_MCC</td>
|
| 283 |
+
<td>pmF1</td>
|
| 284 |
+
</tr>
|
| 285 |
+
<tr>
|
| 286 |
+
<td>gDino-T</td> <td>3.09</td> <td>0.12</td> <td>19.75</td> <td>3.33</td> <td>0.17</td> <td>19.54</td> <td>4.26</td> <td>0.15</td> <td>28.38</td> <td>2.87</td> <td>0.1</td>
|
| 287 |
+
<td>28.72</td> <td>0.69</td> <td>0.05</td> <td>13.88</td> <td>9.61</td> <td>0.24</td> <td>40.03</td> <td>0</td> <td>0</td> <td>1.97</td> <td>1.31</td> <td>0.09</td>
|
| 288 |
+
<td>14.57</td> <td>5.18</td> <td>0.19</td> <td>27.25</td> <td>3.6</td> <td>0.16</td> <td>22.5</td> <td>0</td> <td>0</td> <td>0.64</td>
|
| 289 |
+
</tr>
|
| 290 |
+
<tr>
|
| 291 |
+
<td>OWLv2*</td> <td>11.23</td> <td>0.32</td> <td>31.18</td> <td>14.97</td> <td>0.46</td> <td>32.34</td> <td>10.84</td> <td>0.36</td> <td>30.1</td> <td>7.36</td> <td>0.23</td>
|
| 292 |
+
<td>31.99</td> <td>19.35</td> <td>0.44</td> <td>43.98</td> <td>27.04</td> <td>0.5</td> <td>54.07</td> <td>3.92</td> <td>0.14</td> <td>27.98</td> <td>8.05</td> <td>0.31</td>
|
| 293 |
+
<td>25.98</td> <td>10.59</td> <td>0.32</td> <td>33.1</td> <td>10.15</td> <td>0.38</td> <td>26.7</td> <td>0.04</td> <td>0.01</td> <td>5.57</td>
|
| 294 |
+
</tr>
|
| 295 |
+
<tr>
|
| 296 |
+
<td>OWLv2</td> <td>8.18</td> <td>0.23</td> <td>32.55</td> <td>8.5</td> <td>0.31</td> <td>27.79</td> <td>7.21</td> <td>0.25</td> <td>28.84</td> <td>5.64</td> <td>0.18</td>
|
| 297 |
+
<td>31.35</td> <td>14.18</td> <td>0.32</td> <td>44.32</td> <td>13.04</td> <td>0.28</td> <td>46.58</td> <td>3.62</td> <td>0.1</td> <td>36.23</td> <td>7.22</td> <td>0.25</td>
|
| 298 |
+
<td>28.88</td> <td>10.86</td> <td>0.32</td> <td>33.93</td> <td>11.7</td> <td>0.35</td> <td>33.43</td> <td>-0.14</td> <td>-0.01</td> <td>14.15</td>
|
| 299 |
+
</tr>
|
| 300 |
+
<tr>
|
| 301 |
+
<td>LLMDet-L</td> <td>6.73</td> <td>0.17</td> <td>28.19</td> <td>1.69</td> <td>0.08</td> <td>19.97</td> <td>2.56</td> <td>0.1</td> <td>25.59</td> <td>2.39</td>
|
| 302 |
+
<td>0.08</td> <td>29.92</td> <td>0.98</td> <td>0.06</td> <td>16.26</td> <td>20.82</td> <td>0.37</td> <td>56.26</td> <td>27.37</td> <td>0.46</td> <td>59.5</td>
|
| 303 |
+
<td>2.17</td> <td>0.13</td> <td>16.68</td> <td>5.37</td> <td>0.19</td> <td>28.26</td> <td>3.73</td> <td>0.16</td> <td>23.32</td> <td>0.24</td> <td>0.04</td> <td>6.1</td>
|
| 304 |
+
</tr>
|
| 305 |
+
<tr>
|
| 306 |
+
<td>Gemini 2.5</td> <td>9.67</td> <td>0.19</td> <td>45.51</td> <td>5.83</td> <td>0.19</td> <td>30.66</td> <td>5.61</td> <td>0.14</td> <td>40.07</td>
|
| 307 |
+
<td>0.38</td> <td>0.01</td> <td>38.14</td> <td>10.92</td> <td>0.24</td> <td>45.52</td> <td>18.28</td> <td>0.26</td> <td>70.29</td> <td>26.57</td> <td>0.36</td>
|
| 308 |
+
<td>73.81</td> <td>8.18</td> <td>0.2</td> <td>40.91</td> <td>9.48</td> <td>0.22</td> <td>43.1</td> <td>8.66</td> <td>0.23</td> <td>37.65</td> <td>2.8</td>
|
| 309 |
+
<td>0.08</td> <td>34.99</td>
|
| 310 |
+
</tr>
|
| 311 |
+
<tr> <td>SAM3</td> <td>49.57</td> <td>0.76</td> <td>65.17</td> <td>46.61</td> <td>0.78</td> <td>60.13</td> <td>45.58</td> <td>0.76</td>
|
| 312 |
+
<td>60.35</td> <td>38.64</td> <td>0.62</td> <td>62.56</td> <td>52.96</td> <td>0.79</td> <td>67.21</td> <td>70.07</td> <td>0.89</td>
|
| 313 |
+
<td>78.73</td> <td>65.8</td> <td>0.82</td> <td>80.67</td> <td>38.06</td> <td>0.66</td> <td>57.62</td> <td>44.36</td> <td>0.67</td>
|
| 314 |
+
<td>66.05</td> <td>42.07</td> <td>0.72</td> <td>58.36</td> <td>51.53</td> <td>0.86</td> <td>59.98</td>
|
| 315 |
+
</tr>
|
| 316 |
+
</tbody></table>
|
| 317 |
+
|
| 318 |
+
# Annotation format
|
| 319 |
+
|
| 320 |
+
The annotation format is derived from [COCO format](https://cocodataset.org/#format-data). Notable data fields are:
|
| 321 |
+
|
| 322 |
+
- `images`: a `list` of `dict` features, contains a list of all image-NP pairs. Each entry is related to an image-NP pair and has the following items.
|
| 323 |
+
- `id`: an `int` feature, unique identifier for the image-NP pair
|
| 324 |
+
- `text_input`: a `string` feature, the noun phrase for the image-NP pair
|
| 325 |
+
- `file_name`: a `string` feature, the relative image path in the corresponding data folder.
|
| 326 |
+
- `height`/`width`: dimension of the image
|
| 327 |
+
- `is_instance_exhaustive`: Boolean (0 or 1). If it's 1 then all the instances are correctly annotated. For instance segmentation, we only use those datapoints. Otherwise, there may be either missing instances or crowd segments (a segment covering multiple instances)
|
| 328 |
+
- `is_pixel_exhaustive`: Boolean (0 or 1). If it's 1, then the union of all masks cover all pixels corresponding to the prompt. This is weaker than instance_exhaustive since it allows crowd segments. It can be used for semantic segmentation evaluations.
|
| 329 |
+
|
| 330 |
+
- `annotations`: a `list` of `dict` features, containing a list of all annotations including bounding box, segmentation mask, area etc.
|
| 331 |
+
- `image_id`: an `int` feature, maps to the identifier for the image-np pair in images
|
| 332 |
+
- `bbox`: a `list` of float features, containing bounding box in [x,y,w,h] format, normalized by the image dimensions
|
| 333 |
+
- `segmentation`: a dict feature, containing segmentation mask in RLE format
|
| 334 |
+
- `category_id`: For compatibility with the coco format. Will always be 1 and is unused.
|
| 335 |
+
- `is_crowd`: Boolean (0 or 1). If 1, then the segment overlaps several instances (used in cases where instances are not separable, for e.g. due to poor image quality)
|
| 336 |
+
|
| 337 |
+
- `categories`: a `list` of `dict` features, containing a list of all categories. Here, we provide the category key for compatibility with the COCO format, but in open-vocabulary detection we do not use it. Instead, the text prompt is stored directly in each image (text_input in images). Note that in our setting, a unique image (id in images) actually corresponds to an (image, text prompt) combination.
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
For `id` in images that have corresponding annotations (i.e. exist as `image_id` in `annotations`), we refer to them as a "positive" NP. And, for `id` in `images` that don't have any annotations (i.e. they do not exist as `image_id` in `annotations`), we refer to them as a "negative" NP.
|
| 341 |
+
|
| 342 |
+
A sample annotation from the DROID domain looks as follows:
|
| 343 |
+
|
| 344 |
+
#### images
|
| 345 |
+
|
| 346 |
+
```
|
| 347 |
+
[
|
| 348 |
+
{
|
| 349 |
+
"id": 10000000,
|
| 350 |
+
"file_name": "AUTOLab_failure_2023-07-07_Fri_Jul__7_18:50:36_2023_recordings_MP4_22008760/00002.jpg",
|
| 351 |
+
"text_input": "the large wooden table",
|
| 352 |
+
"width": 1280,
|
| 353 |
+
"height": 720,
|
| 354 |
+
"queried_category": "3",
|
| 355 |
+
"is_instance_exhaustive": 1,
|
| 356 |
+
"is_pixel_exhaustive": 1
|
| 357 |
+
}
|
| 358 |
+
]
|
| 359 |
+
```
|
| 360 |
+
|
| 361 |
+
#### annotations
|
| 362 |
+
|
| 363 |
+
```
|
| 364 |
+
[
|
| 365 |
+
{
|
| 366 |
+
"area": 0.17324327256944444,
|
| 367 |
+
"id": 1,
|
| 368 |
+
"image_id": 10000000,
|
| 369 |
+
"source": "created by SAM3",
|
| 370 |
+
"bbox": [
|
| 371 |
+
0.03750000149011612,
|
| 372 |
+
0.5083333253860474,
|
| 373 |
+
0.8382812738418579,
|
| 374 |
+
0.49166667461395264
|
| 375 |
+
],
|
| 376 |
+
"segmentation": {
|
| 377 |
+
"counts": "[^R11]f03O0O100O2N100O1O100O100O100O100O1O100O100O100O100O100O1O10000O1O10000O1O100O10000O1O100O100O100O100O100O100O100O100O100O100O1O100O100O10000O100O100O100O101N100O1O011O0O1O101OO0010O100O1O100O2OO0100O100O100O100O100O10000O100O100O1O100O10000O1O100O100O100O10000O1O100O100O100O10000O1O10000O1O100O100O100O100O100O100O1O100O100O100O100O100O100O100O100O100O100O100O100O100O100O10000O100O100O1O100O10000O100O100O100O100O1O100O100O100O100O100O100O10O0100O100O2O000O1O10000O1O10000O100O100O100O1O100O100O100O100O100O100O100O100O100O100O100O100O1O100O100O100O10000O100O100O100O100O100O100O100O100O100O100O100O100O100O10000O100O100O100O100O100O100O1O10000O1O10000O100O1O100O100O100O100O100O100O100O100O10000O1O100O100O100O100O1O10000O10\\MP@hNo?W1U@gNk?X1W@gNh?Y1Z@fNf?Y1\\@fNc?[1^@dNb?[1`@dN_?]1b@bN^?]1e@aNZ?_1i@_NW?a1l@\\NS?d1RAXNn>h1TAVNk>k1VATNj>k1XATNg>m1YASNg>m1YASNf>m1[ASNe>m1[ASNd>m1]ASNc>m1]ASNb>l1`ATN`>i1cAWN\\>d1jA\\NV>_1oAaNP>^1RBbNn=\\1TBdNk=\\1VBdNj=1`@dNGO02P2Z1h=L_AfNj0^1g=FmC;R<EoC;Q<DPD<o;DRD<n;DQD=n;DjAnN?^1g=DhAQO?\\1h=DhAUO<W1l=EeAZO:R1P>F]ABa0h0Q>Hd@lNDV1e17S>k1iAWNW>i1hAXNW>j1gAWNY>i1fAXNY>j1eAWNZ>k1dAVN\\>k1bAVN^>k1`AVN_>l1`ATN`>m1^ATNa>o1]AQNc>P2[AQNd>P2\\APNd>Q2[AoMd>R2[AoMd>R2\\AnMd>S2ZAnMe>S2[AmMe>T2YAmMf>T2YAmMg>T2WAmMh>U2VAlMj>U2TAlMl>U2PAnMo>U2j@PNV?e4O100O100O100O100O100O100O100O100O100O100O100O100O101N100O100O10O0100O100O100O100O100O100O1000000O1000000O100O100O1O1O1O100O100O1O100O100O100O100O100O100O100O100O100O1O100O100O100O100O100O10000O100O1O100O100O100O100O100O100OkK_B]Oa=7oBEP=4YCKg<1^CNa<1bCN^<OeC1[<LhC4W<KlC4S<KoC5Q<JPD6o;JRD6n;JSD5l;LTD4l;LTD4k;MUD3k;MUD4j;LWD2i;OWD1i;OWD1h;0XD0h;1WDOh;2XDOg;1ZDNe;3[DMe;3[DNc;3]DLd;4\\DLc;5]DKb;7]DIc;7^DHa;9_DGa;9_DG`;:`DF`;;_DE`;<`DCa;=^DDa;=_DC`;>_DCa;>^DBb;[OUCiMW1n2c;YO[CeMn0V3g;TO^CeMf0[3k;POaCdM>b3Q<iNbCfM7f3V<dNeCeMKQ4`<YNgCfMAX4g<RNiCk2W<SMlCl2S<TMnCl2R<SMoCm2Q<RMQDm2n;TMRDl2n;SMTDl2k;UMUDk2k;UMVDj2i;VMXDj2h;VMXDj2g;VM[Di2e;VM\\Dj2c;VM^Dj2b;TMaDk2^;PMhDP3X;aL`CjM`1e5o:\\L^Ed3b:WLdEh3[:n
KPFR4P:jKTFV4k9hKXFX4h9hKXFX4g9hKYFY4f9hKZFX4f9hKZFX4e9iKZFW4g9iKXFX4g9iKPElN\\O\\5c;iKeDYOEo4f;iK]DAJh4g;iKTDJ3^4i;jKkCO;X4i;hMVDX2j;hMUDY2j;iMUDW2k;iMTDW2l;kMSDU2m;kMRDV2m;lMRDT2n;mMPDT2P<mMoCS2P<oMnCR2R<V4O100O100OiInCR2Q<kMWDQ2i;kM_DQ2`;lMoDi1Q;TNWEg1h:XN^Ed1a:\\NdE`1\\:^NjE^1U:aNPF]1o9aNUF]1k9bNXF\\1g9dN]FY1c9fN`FX1_9hNdFV1\\9iNhFT1W9lNmFQ1S9nNQGo0n8QOTGn0l8ROWGk0h8UO[Gi0e8VO^Gh0a8YO`Gf0`8YOcGe0\\8\\OeGc0[8\\OiGa0V8@lG>T8AnG>Q8BQH=o7CRH<m7DVH:j7FWH9h7HYH7g7H[H7d7J^H4b7L^H4b7K`H4_7MbH2^7NcH1\\7OfH0Z70gHOX72iHMW73jHLV74jHLU74mHKS75mHKS75nHJR76oHIQ77oHIR7jMkDP1U4U1S7RM_D0h0g1f3W1^8hNcGV1_8iNaGX1_8gNaGY1`8fNaGY1_8gNaGY1`8fNaGY1_8gNaGY1`8fNaGY1_8gNaGY1`8fNaGY1_8gNaGY1_8gNaGY1_8gNbGX1_8gNaGY1_8gNaGY1_8fNbGY1`8fNaGY1_8gNaGY1_8gNaGY1_8gNaGY1_8gNbGX1^8hNbGX1^8hNbGX1^8hNbGX1^8hNbGX1^8iNbGV1^8jNbGV1^8jNbGV1^8jNbGV1^8jNbGV1^8jNbGV1^8jNbGV1]8lNbGT1^8lNcGS1\\8nNdGR1\\8nNdGR1[8oNeGQ1Z8POfGP1X8SOhGl0W8UOiGk0U8WOkGi0S8YOmGg0P8\\OPHd0n7_ORH`0l7BTH>j7DVH<g7HYH7d7L\\H4b7N^H2`71_HO^74bHL[77eHIY7:fHFX7<hHDV7>jHBT7a0kH_OT7b0mH]OR7d0nH\\OQ7f0nH]OQ7g0oHZOQ7g0oHYOQ7h0nHXOR7h0nHXOR7h0nHXOR7i0mHWOT7h0kHYOU7h0jHXOV7h0iHYOW7g0iHYOW7h0hHXOY7g0fHZOZ7f0eH[O\\7e0cHhNlKSNa;U3bHeNSLTN\\;W3_HbN]LRNU;\\3]H^Nb8c1\\G\\Ng8c1XG\\Nj8e1TGZNo8e1PGYNS9h1lFUNW9l1gFRN]9m1bFRN`9o1^FPNe9o1[FoMg9R2WFnMj9S2TFmMn9R2RFnMn9S2PFmMR:R2nEmMS:T2kEmMU:T2jEkMX:T2gEmMY:T2fElMZ:U2dEkM^:T2aEmM_:T2`ElM`:U2^ElMc:S2\\EmMe:T2YEmMg:T2WEmMj:S2UEmMk:T2SEmMn:S2PEnMP;S2nDoMQ;R2mDoMT;Q2kDoMU;R2iDoMX;Q2fDQNY;P2eDQN[;P2cDQN^;o1`DSN_;n1^DTNc;l1[DVNd;k1ZDVNg;j1WDXNh;j1UDWNk;j1SDWNn;i1oCZNP<h1mCYNS<h1kCZNU<g1gC\\NX<e1fC\\N[<d1cC^N\\<d1aC^N_<c1^C_Na<b1\\CaNc<a1ZCaNf<_1XCcNg<_1UCeNj<^1oBfNP=]1iBiN?gL^;e4hCkNf0dLb;`8YDcGg;^8VDdGk;^8mChGR<_8bCfG_<U900001N101O00001O001O00001O00001O0O2N1O1O2N1O2N100O2N1O1O2N1O2N1O1O2N1O2M200O2M2O2N1N2O2N1N3N1O1N3N1N3M2O2kMkAkKW>Q4RBiKo=8^AR2j0`Mk=:aAP2i0bMh==eAj1g0eMf=?hAh1f0eMd=?lAg1c0gMc=`0nAe1c0hMa=a0oAd1b0iM`=a0QBc1c0iM]=c0SB`1d0iM\\=e0SB^1e0jMY=g0VB[1e0jMV=k0WBW1V`0gNn_OT1T`0lNo_Oo0S`0POS@i0P`0VOT@d0n?\\OT
@`0n?@T@<o?CR@^OUN6ka0=P@XO\\N6ga0a0j@WOY?i0X3O001O00010O00001O0010O0001O00010O001O00001O001O01O01O00001O001O000O2O0O2O0O2N1O2N1O2M3MYl51fSJ3L3O1O100O1O100000000001O000000001O00000000001O01OO1000000000001O000001O000O10000000000000000O10000O10000O10000O100O1O100O1O1O1O1O1O1N2O1O1O1O1O1O1O1O1O1O1O1O1O1O1O1O1N2O1O1O1O1O1O1O100O100N21O00001O001O2N1O1O2N1O2N1O2M3N4IVT_3",
|
| 378 |
+
"size": [
|
| 379 |
+
720,
|
| 380 |
+
1280
|
| 381 |
+
]
|
| 382 |
+
},
|
| 383 |
+
"category_id": 1,
|
| 384 |
+
"iscrowd": 0
|
| 385 |
+
}
|
| 386 |
+
]
|
| 387 |
+
```
|
| 388 |
+
|
| 389 |
+
### Data Stats
|
| 390 |
+
|
| 391 |
+
Here are the stats for the 10 annotation domains. The # Image-NPs represent the total number of unique image-NP pairs including both “positive” and “negative” NPs.
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
| Domain | # Image-NPs | # Image-NP-Masks|
|
| 395 |
+
|--------------------------|--------------| ----------------|
|
| 396 |
+
| BDD100k | 5546 | 13210 |
|
| 397 |
+
| DROID | 9445 | 11098 |
|
| 398 |
+
| Ego4D | 12608 | 24049 |
|
| 399 |
+
| MyFoodRepo-273 | 20985 | 28347 |
|
| 400 |
+
| GeoDE | 14850 | 7570 |
|
| 401 |
+
| iNaturalist-2017 | 1439051 | 48899 |
|
| 402 |
+
| National Gallery of Art | 22294 | 18991 |
|
| 403 |
+
| SA-V | 18337 | 39683 |
|
| 404 |
+
| YT-Temporal-1B | 7816 | 12221 |
|
| 405 |
+
| Fathomnet | 287193 | 14174 |
|
source_code/sam3/scripts/eval/silver/download_preprocess_nga.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
import argparse
|
| 3 |
+
import os
|
| 4 |
+
from functools import partial
|
| 5 |
+
from multiprocessing import Pool
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pandas as pd
|
| 10 |
+
import requests
|
| 11 |
+
import utils
|
| 12 |
+
from PIL import Image
|
| 13 |
+
from tqdm import tqdm
|
| 14 |
+
|
| 15 |
+
# File name of the NGA open-data CSV listing all published images.
METADATA_FILE = "published_images.csv"
# Base URL for the metadata CSV.
METADATA_URL = "https://raw.githubusercontent.com/NationalGalleryOfArt/opendata/refs/heads/main/data"  # data/published_images.csv from https://github.com/NationalGalleryOfArt/opendata/tree/main
# IIIF endpoint template, filled with (image uuid, size specification).
IMG_URL = "https://api.nga.gov/iiif/%s/full/%s/0/default.jpg"
# Subfolder (under the raw images folder) where the metadata CSV is stored.
METADATA_FOLDER = "metadata"
# File extension used for all downloaded images.
EXTENSION = ".jpg"
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def download_metadata(annotation_folder):
    """Download the NGA published-images metadata CSV.

    The file is saved to ``annotation_folder / METADATA_FOLDER / METADATA_FILE``.

    Args:
        annotation_folder: base folder (a ``Path``) under which the metadata
            subfolder is created.

    Raises:
        RuntimeError: if the HTTP request does not succeed. (Previously a
            failed download was silently ignored, which made the later
            ``pd.read_csv`` fail with a confusing "file not found" error.)
    """
    output_folder = annotation_folder / METADATA_FOLDER
    output_folder.mkdir(exist_ok=True)
    url = f"{METADATA_URL}/{METADATA_FILE}"
    print(url)
    response = requests.get(url)
    if response.status_code != 200:
        raise RuntimeError(
            f"Failed to download metadata from {url} (HTTP {response.status_code})"
        )
    with open(output_folder / METADATA_FILE, "wb") as f:
        f.write(response.content)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def download_url(row):
    """Build the IIIF download URL for one metadata row.

    When the row has no pixel cap (``maxpixels`` is NaN) or the cap exceeds
    both native dimensions, the full-resolution image is requested; otherwise
    a version bounded to ``maxpixels`` on each side is requested.
    """
    unlimited = np.isnan(row.maxpixels) or (
        row.maxpixels > row.width and row.maxpixels > row.height
    )
    size_spec = "full" if unlimited else f"!{row.maxpixels},{row.maxpixels}"
    return IMG_URL % (row.uuid, size_spec)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def download_item(item, output_folder):
    """Download a single (uuid, url) pair into ``output_folder``.

    Existing files are skipped so the script can be re-run to resume an
    interrupted download. Errors are logged and swallowed on purpose so one
    failed item does not abort the whole worker pool.

    Args:
        item: (uuid, download_url) tuple.
        output_folder: destination folder (a ``Path``).
    """
    uuid, url = item
    target = output_folder / f"{uuid}{EXTENSION}"
    try:
        if target.exists():
            print("skipping", uuid, "already downloaded")
            return
        response = requests.get(url)
        if response.status_code == 200:
            with open(target, "wb") as f:
                f.write(response.content)
    # Was a bare `except:`, which also caught KeyboardInterrupt/SystemExit
    # and hid the actual error; keep best-effort behavior but log the cause.
    except Exception as e:
        print("errored", item, e)
    return
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def remove_non_compliant_image(item, output_folder):
    """Delete a downloaded image that exceeds its licensed pixel cap.

    Args:
        item: (uuid, max_pixels) tuple; ``max_pixels`` may be NaN, meaning
            there is no cap for this image.
        output_folder: folder (a ``Path``) holding the downloaded images.

    Returns:
        The uuid if the image was removed, otherwise ``None``.
    """
    uuid, max_pixels = item
    if np.isnan(max_pixels):
        return
    path = output_folder / f"{uuid}{EXTENSION}"
    if not path.exists():
        return
    # Close the image before deleting: Image.open keeps the file handle open,
    # which leaked it and makes os.remove fail on some platforms (e.g. Windows).
    with Image.open(path) as img:
        too_large = img.width > max_pixels or img.height > max_pixels
    if too_large:
        os.remove(path)  # delete image
        return uuid
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def reshape_image(rel_path, filename_size_map, output_folder):
    """Resize the image at ``output_folder / rel_path`` in place to the
    (width, height) recorded for it in ``filename_size_map``.

    Images that already have the expected dimensions are left untouched.
    """
    target_w, target_h = filename_size_map[rel_path]
    path = output_folder / f"{rel_path}"
    img = Image.open(path)
    if img.width != target_w or img.height != target_h:
        img.resize((target_w, target_h)).save(path)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def main(args, workers=20):
    """Download and post-process the NGA images listed in an annotation file.

    Pipeline: fetch the open-data metadata CSV (once), download every image
    referenced by the annotation file, remove images that exceed their
    licensed pixel cap, then resize the survivors to the dimensions recorded
    in the annotations.

    Args:
        args: parsed CLI namespace with ``annotation_file``,
            ``raw_images_folder`` and ``processed_images_folder`` attributes.
        workers: number of parallel worker processes. (This parameter used to
            be ignored — every pool was hard-coded to 20 workers.)
    """
    raw_folder = Path(args.raw_images_folder)
    processed_folder = Path(args.processed_images_folder)
    utils.setup(raw_folder)
    utils.setup(processed_folder)
    uuids = utils.get_image_ids(args.annotation_file)
    filename_size_map = utils.get_filename_size_map(args.annotation_file)
    if not ((raw_folder / METADATA_FOLDER) / METADATA_FILE).exists():
        download_metadata(raw_folder)

    metadata = pd.read_csv((raw_folder / METADATA_FOLDER) / METADATA_FILE)
    metadata["download_url"] = metadata.apply(download_url, axis=1)
    # Only download images that appear both in the annotations and in the
    # metadata catalogue.
    available_uuids = list(uuids.intersection(set(metadata["uuid"].tolist())))
    print(len(available_uuids), "available for download out of", len(uuids), "target")
    url_data = list(
        metadata.set_index("uuid")
        .loc[available_uuids]
        .to_dict()["download_url"]
        .items()
    )

    download_single = partial(download_item, output_folder=processed_folder)

    print("Preparing to download", len(url_data), "items")
    with Pool(workers) as p:
        for _ in tqdm(p.imap(download_single, url_data), total=len(url_data)):
            continue

    # Enforce the per-image licensed pixel caps on everything we downloaded.
    check_img_size = partial(remove_non_compliant_image, output_folder=processed_folder)
    max_pixels_dict_all = metadata.set_index("uuid").to_dict()["maxpixels"]
    max_pixels_dict = {item[0]: max_pixels_dict_all[item[0]] for item in url_data}
    print("Checking all images within size constraints")
    non_compliant = set()
    with Pool(workers) as p:
        for each in tqdm(
            p.imap(check_img_size, max_pixels_dict.items()), total=len(max_pixels_dict)
        ):
            if each is not None:
                non_compliant.add(each)
    print(len(non_compliant), "not compliant size, removed")

    # Resize the remaining images to the dimensions expected by the annotations.
    reshape_single = partial(
        reshape_image,
        filename_size_map=filename_size_map,
        output_folder=processed_folder,
    )
    rel_paths = os.listdir(args.processed_images_folder)
    print("Preparing to reshape", len(rel_paths), "items")
    with Pool(workers) as p:
        for _ in tqdm(p.imap(reshape_single, rel_paths), total=len(rel_paths)):
            continue
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
if __name__ == "__main__":
    # CLI entry point: download and preprocess the NGA image set.
    arg_parser = argparse.ArgumentParser()
    for flag, help_text in [
        ("--annotation_file", "Path to annotation file"),
        ("--raw_images_folder", "Path to downloaded images"),
        ("--processed_images_folder", "Path to processed images"),
    ]:
        arg_parser.add_argument(flag, help=help_text)
    main(arg_parser.parse_args())
|
source_code/sam3/scripts/eval/silver/extract_frames.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
This file extracts the frames for the frame datasets in SA-CO/Gold and Silver.
|
| 4 |
+
|
| 5 |
+
Call like:
|
| 6 |
+
> python extract_frames.py <dataset_name>
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
import os
|
| 11 |
+
import shutil
|
| 12 |
+
import sys
|
| 13 |
+
from multiprocessing import Pool
|
| 14 |
+
|
| 15 |
+
from PIL import Image
|
| 16 |
+
from tqdm import tqdm
|
| 17 |
+
from utils import (
|
| 18 |
+
annotation_files,
|
| 19 |
+
config,
|
| 20 |
+
get_frame_from_video,
|
| 21 |
+
is_valid_image,
|
| 22 |
+
update_annotations,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def extract_frame(path_video, global_frame_idx, path_frame, image_size, file_name):
    """Decode frame ``global_frame_idx`` from ``path_video`` and save it to
    ``path_frame``, creating parent directories as needed.

    When the decoded frame's (height, width) differs from ``image_size`` the
    frame is resized to match before saving.
    """
    frame = get_frame_from_video(path_video, global_frame_idx)
    os.makedirs(os.path.dirname(path_frame), exist_ok=True)
    pil_img = Image.fromarray(frame)
    if frame.shape[:2] != image_size:
        print(f"Resizing image {file_name} from {frame.shape[:2]} to {image_size}")
        height, width = image_size
        # PIL's default resampling filter is used here (nearest).
        pil_img = pil_img.resize((width, height))
    pil_img.save(path_frame)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def process_image(args):
    """Extract one annotated frame from its source video (pool worker).

    Args:
        args: a (image, dataset_name, config) tuple, where ``image`` is a
            (original_video, global_frame_idx, file_name, image_size) tuple.

    Returns:
        The frame's ``file_name`` on success, or ``None`` if the frame could
        not be extracted or the written image is invalid.
    """
    image, dataset_name, config = args
    original_video, global_frame_idx, file_name, image_size = image
    # Each dataset stores its downloaded videos under a slightly different
    # layout / naming scheme.
    extra_subpath = ""
    if dataset_name == "ego4d":
        extra_subpath = "v1/clips"
    elif dataset_name == "yt1b":
        original_video = f"video_{original_video}.mp4"
    elif dataset_name == "sav":
        extra_subpath = "videos_fps_6"
    path_video = os.path.join(
        config[f"{dataset_name}_path"],
        "downloaded_videos",
        extra_subpath,
        original_video,
    )
    path_frame = os.path.join(config[f"{dataset_name}_path"], "frames", file_name)
    to_return = file_name
    try:
        extract_frame(path_video, global_frame_idx, path_frame, image_size, file_name)
        if not is_valid_image(path_frame):
            print(f"Invalid image in {path_frame}")
            to_return = None
    # Was a bare `except:`, which also caught KeyboardInterrupt and hid the
    # failure reason; keep best-effort behavior but log the exception.
    except Exception as e:
        print(f"Invalid image in {path_frame}: {e}")
        to_return = None
    return to_return
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def main():
    """Extract all annotated frames for the dataset named on the command line.

    Usage: ``python extract_frames.py <dataset_name>`` where the name is one
    of the keys of ``annotation_files``. Frames that fail to extract are
    pruned from the annotation files, and the downloaded videos are optionally
    deleted afterwards (controlled by the config).
    """
    assert len(sys.argv) > 1, "You have to provide the name of the dataset"
    dataset_name = sys.argv[1]
    assert (
        dataset_name in annotation_files
    ), f"The dataset can be one of {list(annotation_files.keys())}"
    all_outputs = []
    for file in annotation_files[dataset_name]:
        with open(os.path.join(config["path_annotations"], file), "r") as f:
            annotation = json.load(f)
        images = annotation["images"]
        # Deduplicate frames: several annotations can reference the same frame.
        images = set(
            (
                image["original_video"],
                image["global_frame_idx"],
                image["file_name"],
                tuple(image["image_size"]),
            )
            for image in images
        )
        args_list = [(image, dataset_name, config) for image in images]
        with Pool(os.cpu_count()) as pool:
            outputs = list(
                tqdm(pool.imap_unordered(process_image, args_list), total=len(images))
            )
        all_outputs.extend(outputs)
        # process_image returns None for frames that failed; drop those frames
        # from the annotations. Note: the cumulative all_outputs (which may
        # contain None entries) is passed as the keep-list — None never matches
        # a file_name, so failed frames are filtered out.
        if any(out is None for out in outputs):
            update_annotations(dataset_name, all_outputs, key="file_name")
    if config[f"remove_downloaded_videos_{dataset_name}"]:
        shutil.rmtree(os.path.join(config[f"{dataset_name}_path"], "downloaded_videos"))
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Script entry point: `python extract_frames.py <dataset_name>`.
if __name__ == "__main__":
    main()
|
source_code/sam3/scripts/eval/silver/preprocess_silver_geode_bdd100k_food_rec.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
import argparse
|
| 3 |
+
from multiprocessing import Pool
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import utils
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def main(args, n_workers=20):
    """Copy the images referenced by an annotation file into a flat folder.

    Handles the per-dataset raw layouts of the three supported datasets:
    geode (nested paths mapped via index.csv), bdd100k (100k/train subfolder)
    and food_rec (validation-set images with prefixed file names).

    Args:
        args: parsed CLI namespace with ``annotation_file``,
            ``raw_images_folder``, ``processed_images_folder`` and
            ``dataset_name`` attributes.
        n_workers: number of parallel copy workers. (This parameter used to be
            ignored — the pool size was hard-coded to 20.)

    Raises:
        ValueError: if ``args.dataset_name`` is not a supported dataset.
            (Previously an unknown name crashed later with a NameError on
            ``paths``.)
    """
    raw_folder = Path(args.raw_images_folder)
    processed_folder = Path(args.processed_images_folder)
    utils.setup(processed_folder)
    img_ids = utils.get_image_ids(args.annotation_file)
    if args.dataset_name == "geode":
        # index.csv maps nested file paths to flattened file names.
        metadata = pd.read_csv(raw_folder / "index.csv")
        metadata["flat_filepath"] = metadata.file_path.apply(
            lambda x: x.replace("/", "_")
        )
        metadata["original_absolute_path"] = metadata.file_path.apply(
            lambda x: str((raw_folder / "images") / x)
        )
        metadata["new_absolute_path"] = metadata.flat_filepath.apply(
            lambda x: str(processed_folder / x)
        )
        metadata["filestem"] = metadata.new_absolute_path.apply(lambda x: Path(x).stem)
        img_id_mapping = metadata.set_index("filestem").to_dict()
        paths = [
            (
                img_id_mapping["original_absolute_path"][each],
                img_id_mapping["new_absolute_path"][each],
            )
            for each in img_ids
        ]
    elif args.dataset_name == "bdd100k":
        bdd_subfolder = "100k/train"
        img_filenames = utils.get_filenames(args.annotation_file)
        raw_folder_bdd_images = raw_folder / bdd_subfolder
        paths = [
            (raw_folder_bdd_images / each, processed_folder / each)
            for each in img_filenames
        ]
    elif args.dataset_name == "food_rec":
        food_subfolder = "public_validation_set_2.0/images"
        img_filenames = utils.get_filenames(args.annotation_file)
        raw_folder_food_images = raw_folder / food_subfolder
        paths = [
            (
                # Raw food_rec images are named by the last "_"-separated
                # token of the annotation file name's stem.
                raw_folder_food_images
                / f'{Path(each).stem.split("_")[-1]}{Path(each).suffix}',
                processed_folder / each,
            )
            for each in img_filenames
        ]
    else:
        raise ValueError(
            f"Unknown dataset_name {args.dataset_name!r}; "
            "expected one of: geode, bdd100k, food_rec"
        )
    print("Preparing to copy and flatten filename for", len(paths), "images")
    with Pool(n_workers) as p:
        for _ in tqdm(p.imap(utils.copy_file, paths), total=len(paths)):
            continue
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation_file", help="Path to annotation file")
    parser.add_argument("--raw_images_folder", help="Path to downloaded images")
    parser.add_argument("--processed_images_folder", help="Path to processed images")
    # Help text previously read "Path to processed images" (copy-paste error).
    parser.add_argument(
        "--dataset_name", help="Dataset name: one of geode, bdd100k, food_rec"
    )
    args = parser.parse_args()
    main(args)
|
source_code/sam3/scripts/eval/silver/utils.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import shutil
|
| 5 |
+
import subprocess
|
| 6 |
+
from io import BytesIO
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
import cv2
|
| 10 |
+
import matplotlib.pyplot as plt
|
| 11 |
+
import numpy as np
|
| 12 |
+
import yaml
|
| 13 |
+
from PIL import Image
|
| 14 |
+
from pycocotools import mask as mask_utils
|
| 15 |
+
from tqdm import tqdm
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Merged test-split annotation file(s) for each video-frame dataset, keyed by
# the dataset name accepted on the command line by extract_frames.py.
annotation_files = {
    "droid": [
        "silver_droid_merged_test.json",
    ],
    "sav": [
        "silver_sav_merged_test.json",
    ],
    "yt1b": [
        "silver_yt1b_merged_test.json",
    ],
    "ego4d": [
        "silver_ego4d_merged_test.json",
    ],
}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def load_yaml(filename):
    """Parse and return the YAML document stored at *filename*."""
    with open(filename, "r") as fp:
        return yaml.safe_load(fp)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def load_json(filename):
    """Parse and return the JSON document stored at *filename*."""
    with open(filename, "r") as fp:
        content = json.load(fp)
    return content
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def save_json(content, filename):
    """Serialize *content* as JSON into *filename*, overwriting any file there."""
    with open(filename, "w") as fp:
        json.dump(content, fp)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def run_command(cmd):
    """Run *cmd* through the shell and raise RuntimeError on a non-zero exit."""
    completed = subprocess.run(cmd, shell=True)
    if completed.returncode:
        raise RuntimeError(f"Command failed: {cmd}")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# Global script configuration, loaded once at import time from the current
# working directory. Code below reads keys such as "path_annotations",
# "<dataset>_path" and "remove_downloaded_videos_<dataset>".
config = load_yaml("CONFIG_FRAMES.yaml")
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def is_valid_image(img_path):
    """Return True if *img_path* can be opened and decoded as an RGB image.

    Uses ``with`` so the underlying file handle is closed (the original
    leaked it), and forces a decode via ``convert`` since ``Image.open``
    is lazy.
    """
    try:
        with Image.open(img_path) as img:
            img.convert("RGB")
        return True
    except Exception:
        return False
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def get_frame_from_video(video_path, frame_id):
    """Return frame *frame_id* of *video_path* as an RGB numpy array.

    Tries a random-access read with OpenCV first; if that fails, falls back
    to sequentially decoding the stream with PyAV until the frame is reached.

    Raises:
        ValueError: if the PyAV fallback exhausts the stream before reaching
            *frame_id* (i.e. the video has fewer frames).
    """
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
    ret, frame = cap.read()
    cap.release()
    if not ret:
        # Some videos cannot be opened (or seeked) with OpenCV; decode
        # sequentially with PyAV instead. Imported lazily since PyAV is only
        # needed on this fallback path.
        import av

        container = av.open(video_path)
        stream = container.streams.video[0]
        # NOTE(review): the loop variable rebinds `frame` from the failed
        # OpenCV read above; harmless, since this branch always returns or
        # raises before `frame` is used again.
        for i, frame in tqdm(
            enumerate(container.decode(stream)),
            desc="Decoding with AV",
            total=frame_id + 1,
        ):
            if i == frame_id:
                img = frame.to_ndarray(format="rgb24")
                return img
        raise ValueError(
            f"Could not read frame {frame_id} from video {video_path} (out of frame)"
        )
    # OpenCV decodes to BGR; convert to RGB to match the PyAV path.
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return frame_rgb
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def update_annotations(dataset_name, file_names_keep, key="original_video"):
    """Rewrite a dataset's annotation files, keeping only selected images.

    For each annotation file of *dataset_name*, a backup is saved next to it
    (``*_original.json``), then the file is rewritten with only the images
    whose ``image[key]`` (with any ".mp4" suffix stripped) appears in
    *file_names_keep*, plus the annotations belonging to those images.

    Args:
        dataset_name: key into the module-level ``annotation_files`` dict.
        file_names_keep: collection of identifiers to keep; ``None`` entries
            (from failed extractions) never match and are effectively ignored.
        key: image field to match against, e.g. "original_video" or
            "file_name".
    """
    for annotation_file in annotation_files[dataset_name]:
        path_ann = os.path.join(config["path_annotations"], annotation_file)
        path_original_ann = os.path.join(
            config["path_annotations"],
            annotation_file.replace(".json", "_original.json"),
        )
        ann = load_json(path_ann)
        # Preserve the untouched annotations before overwriting them below.
        shutil.copy(path_ann, path_original_ann)
        new_images = []
        image_ids_keep = set()
        for image in ann["images"]:
            if image[key].replace(".mp4", "") in file_names_keep:
                new_images.append(image)
                image_ids_keep.add(image["id"])
        new_annotations = []
        for annotation in ann["annotations"]:
            if annotation["image_id"] in image_ids_keep:
                new_annotations.append(annotation)
        ann["images"] = new_images
        ann["annotations"] = new_annotations
        save_json(ann, path_ann)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def get_filename_size_map(annotation_path):
    """Map each image file_name in a COCO-style annotation file to its
    (width, height) tuple."""
    with open(annotation_path) as fp:
        images = json.load(fp)["images"]
    return {img["file_name"]: (img["width"], img["height"]) for img in images}
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def get_filenames(annotation_path):
    """Return the set of image file names (as Path objects) listed in a
    COCO-style annotation file."""
    with open(annotation_path) as fp:
        data = json.load(fp)
    return {Path(img["file_name"]) for img in data["images"]}
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def get_image_ids(annotation_path):
    """Return the set of image id stems (file names without extension or
    directory) from a COCO-style annotation file."""
    return {Path(name).stem for name in get_filenames(annotation_path)}
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def setup(folder):
|
| 140 |
+
print("Making dir", folder)
|
| 141 |
+
folder.mkdir(exist_ok=True)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def copy_file(paths):
    """Copy a file from paths[0] to paths[1] (metadata preserved), unless the
    destination already exists."""
    old_path, new_path = paths
    print("Copy from", old_path, "to", new_path)
    if Path(new_path).exists():
        return
    shutil.copy2(old_path, new_path)
|
source_code/sam3/scripts/eval/standalone_cgf1.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
"""Simple script to run the CGF1 evaluator given a prediction file and GT file(s).
|
| 4 |
+
|
| 5 |
+
Usage: python standalone_cgf1.py --pred_file <path_to_prediction_file> --gt_files <path_to_gt_file1> <path_to_gt_file2> ...
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import argparse
|
| 9 |
+
|
| 10 |
+
from sam3.eval.cgf1_eval import CGF1Evaluator
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def main():
    """Run the CGF1 evaluator on a prediction file against GT file(s).

    Parses --pred_file / --gt_files from the command line, warns when a gold
    subset looks incomplete (gold evaluations normally use 3 GT files: a, b,
    c), then evaluates segmentation-mask quality and prints the results.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pred_file",
        type=str,
        required=True,
        help="Path to the prediction file in COCO format.",
    )
    parser.add_argument(
        "--gt_files",
        type=str,
        nargs="+",
        required=True,
        help="Paths to the ground truth files in COCO format.",
    )
    args = parser.parse_args()
    if len(args.gt_files) == 0:
        raise ValueError("At least one GT file must be provided.")

    # Check whether the GT file NAME starts with "gold_". The old expression
    # `split("_")[-1].startswith("gold_")` was always False, since a token
    # produced by splitting on "_" can never contain "_".
    is_gold = args.gt_files[0].split("/")[-1].startswith("gold_")
    if is_gold and len(args.gt_files) < 3:
        print(
            "WARNING: based on the name, it seems you are using gold GT files. Typically, there should be 3 GT files for gold subsets (a, b, c)."
        )

    evaluator = CGF1Evaluator(
        gt_path=args.gt_files, verbose=True, iou_type="segm"
    )  # change to bbox if you want detection performance

    results = evaluator.evaluate(args.pred_file)

    print(results)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Script entry point: python standalone_cgf1.py --pred_file ... --gt_files ...
if __name__ == "__main__":
    main()
|
source_code/sam3/scripts/eval/veval/README.md
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SA-Co/VEval Dataset
|
| 2 |
+
**License** each domain has its own License
|
| 3 |
+
* SA-Co/VEval - SA-V: CC-BY-NC 4.0
|
| 4 |
+
* SA-Co/VEval - YT-Temporal-1B: CC-BY-NC 4.0
|
| 5 |
+
* SA-Co/VEval - SmartGlasses: CC-by-4.0
|
| 6 |
+
|
| 7 |
+
**SA-Co/VEval** is an evaluation dataset comprising 3 domains; each domain has a val and a test split.
|
| 8 |
+
* SA-Co/VEval - SA-V: videos are from the [SA-V dataset](https://ai.meta.com/datasets/segment-anything-video/)
|
| 9 |
+
* SA-Co/VEval - YT-Temporal-1B: videos are from the [YT-Temporal-1B](https://cove.thecvf.com/datasets/704)
|
| 10 |
+
* SA-Co/VEval - SmartGlasses: egocentric videos from [Smart Glasses](https://huggingface.co/datasets/facebook/SACo-VEval/blob/main/media/saco_sg.tar.gz)
|
| 11 |
+
|
| 12 |
+
## Environment
|
| 13 |
+
Install the environment required for SA-Co/VEval
|
| 14 |
+
```
|
| 15 |
+
pip install -e ".[veval]"
|
| 16 |
+
```
|
| 17 |
+
This will allow us to run:
|
| 18 |
+
* `scripts/eval/veval/saco_yt1b_downloader.py` preparing frames for SA-Co/VEval - YT-Temporal-1B
|
| 19 |
+
* `examples/saco_veval_eval_example.ipynb` example of running an offline evaluator
|
| 20 |
+
* `examples/saco_veval_vis_example.ipynb` example of loading and visualizing the data
|
| 21 |
+
|
| 22 |
+
## Download
|
| 23 |
+
### The expected folder structure
|
| 24 |
+
The following folder structure is expected after finishing all the download and pre-processing steps in this section
|
| 25 |
+
```
|
| 26 |
+
data/
|
| 27 |
+
├── annotation/
|
| 28 |
+
│ ├── saco_veval_sav_test.json
|
| 29 |
+
│ ├── saco_veval_sav_val.json
|
| 30 |
+
│ ├── saco_veval_smartglasses_test.json
|
| 31 |
+
│ ├── saco_veval_smartglasses_val.json
|
| 32 |
+
│ ├── saco_veval_yt1b_test.json
|
| 33 |
+
│ ├── saco_veval_yt1b_val.json
|
| 34 |
+
└── media/
|
| 35 |
+
├── saco_sav
|
| 36 |
+
│ └── JPEGImages_24fps
|
| 37 |
+
├── saco_sg
|
| 38 |
+
│ └── JPEGImages_6fps
|
| 39 |
+
└── saco_yt1b
|
| 40 |
+
└── JPEGImages_6fps
|
| 41 |
+
```
|
| 42 |
+
### Download ready-to-use data
|
| 43 |
+
The following links provide ready-to-use data, hosted on Roboflow, after completing the pre-processing steps outlined in the next section.
|
| 44 |
+
|
| 45 |
+
For each domain:
|
| 46 |
+
- [SA-Co/VEval - SA-V](https://universe.roboflow.com/sa-co-veval/sa-v-test/)
|
| 47 |
+
- [SA-Co/VEval - YT-Temporal-1B](https://universe.roboflow.com/sa-co-veval/yt-temporal-1b-test/)
|
| 48 |
+
- [SA-Co/VEval - SmartGlasses](https://universe.roboflow.com/sa-co-veval/smartglasses-test/)
|
| 49 |
+
|
| 50 |
+
For all three domains:
|
| 51 |
+
- [SA-Co/VEval](https://universe.roboflow.com/sa-co-veval)
|
| 52 |
+
|
| 53 |
+
Special note on **SA-Co/VEval - YT-Temporal-1B**:
|
| 54 |
+
* **Frame Shifting Alert!**
|
| 55 |
+
* The ready-to-use data hosted on Roboflow was produced by following the preprocessing steps below. Therefore, the frame-shifting issue for YT-Temporal-1B still exists: due to the nature of Youtube videos, the re-downloaded videos may not be exactly the same as those used during annotation, which can affect eval number reproducibility.
|
| 56 |
+
|
| 57 |
+
### Download via preprocessing steps
|
| 58 |
+
#### Download annotations
|
| 59 |
+
The GT annotations are available at Hugging Face:
|
| 60 |
+
* [SA-Co/VEval](https://huggingface.co/datasets/facebook/SACo-VEval/tree/main)
|
| 61 |
+
* SA-Co/VEval SA-V
|
| 62 |
+
* Test: `annotation/saco_veval_sav_test.json`
|
| 63 |
+
* Val: `annotation/saco_veval_sav_val.json`
|
| 64 |
+
* SA-Co/VEval YT-Temporal-1B
|
| 65 |
+
* Test: `annotation/saco_veval_yt1b_test.json`
|
| 66 |
+
* Val: `annotation/saco_veval_yt1b_val.json`
|
| 67 |
+
* SA-Co/VEval SmartGlasses
|
| 68 |
+
* Test: `annotation/saco_veval_smartglasses_test.json`
|
| 69 |
+
* Val: `annotation/saco_veval_smartglasses_val.json`
|
| 70 |
+
|
| 71 |
+
#### Download videos or frames
|
| 72 |
+
##### SA-Co/VEval - SAV
|
| 73 |
+
Follow instructions in [SA-V dataset](https://ai.meta.com/datasets/segment-anything-video/). Only the following two datasets are needed:
|
| 74 |
+
* sav_test.tar
|
| 75 |
+
* sav_val.tar
|
| 76 |
+
|
| 77 |
+
After untar:
|
| 78 |
+
```
|
| 79 |
+
sav_test/
|
| 80 |
+
├── Annotations_6fps [ignore: this is the SAM 2 annotation]
|
| 81 |
+
├── JPEGImages_24fps
|
| 82 |
+
sav_val/
|
| 83 |
+
├── Annotations_6fps [ignore: this is the SAM 2 annotation]
|
| 84 |
+
└── JPEGImages_24fps
|
| 85 |
+
```
|
| 86 |
+
Then merge the two JPEGImages_24fps folders so that the paths match our annotation json file paths, e.g.
|
| 87 |
+
```
|
| 88 |
+
media/
|
| 89 |
+
└── saco_sav
|
| 90 |
+
└── JPEGImages_24fps [merged from the two JPEGImages_24fps above]
|
| 91 |
+
```
|
| 92 |
+
Example commands to download and merge folders
|
| 93 |
+
```
|
| 94 |
+
cd ../data/media/saco_sav
|
| 95 |
+
wget -O sav_test.tar <sav_test.tar download link from the SA-V dataset page>
|
| 96 |
+
wget -O sav_val.tar <sav_val.tar download link from the SA-V dataset page>
|
| 97 |
+
tar -xf sav_test.tar
|
| 98 |
+
tar -xf sav_val.tar
|
| 99 |
+
mkdir JPEGImages_24fps
|
| 100 |
+
chmod -R u+w sav_test/
|
| 101 |
+
chmod -R u+w sav_val/
|
| 102 |
+
mv sav_test/JPEGImages_24fps/* JPEGImages_24fps/
|
| 103 |
+
mv sav_val/JPEGImages_24fps/* JPEGImages_24fps/
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
##### SA-Co/VEval - YT-Temporal-1B
|
| 107 |
+
Two files are needed to download the SA-Co/VEval - YT-Temporal-1B Youtube videos.
|
| 108 |
+
* Download `media/yt1b_start_end_time.json` from [SA-Co/VEval](https://huggingface.co/datasets/facebook/SACo-VEval/tree/main), which contains the Youtube video ids and the start and end time used in SA-Co/VEval - YT-Temporal-1B.
|
| 109 |
+
* Prepare the `cookies.txt` file. Follow the instructions in yt-dlp [exporting-youtube-cookies](https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies) and [pass-cookies-to-yt-dlp](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp) to prepare the cookies file.
|
| 110 |
+
* Please see the full **WARNINGS** in yt-dlp regarding the risk of Youtube account ban!!
|
| 111 |
+
|
| 112 |
+
Then run `scripts/eval/veval/saco_yt1b_downloader.py` to download the videos and prepare the frames e.g.
|
| 113 |
+
```
|
| 114 |
+
python saco_yt1b_downloader.py \
|
| 115 |
+
--data_dir ../data/media/saco_yt1b \
|
| 116 |
+
--cookies_file ../data/media/saco_yt1b/cookies.txt \
|
| 117 |
+
--yt1b_start_end_time_file ../data/media/saco_yt1b/yt1b_start_end_time.json \
|
| 118 |
+
--yt1b_frame_prep_log_file ../data/media/saco_yt1b/yt1b_frame_prep.log
|
| 119 |
+
```
|
| 120 |
+
* data_dir: The directory where the Youtube videos are downloaded and the extracted frames are stored
|
| 121 |
+
* cookies_file: the `cookies.txt` downloaded above
|
| 122 |
+
* yt1b_start_end_time_file: the `yt1b_start_end_time.json` downloaded above
|
| 123 |
+
* yt1b_frame_prep_log_file: a log file to track the video downloading and frame extracting status
|
| 124 |
+
|
| 125 |
+
Then run `scripts/eval/veval/saco_yt1b_annot_update.py` to update the annotation based on the video availability e.g.
|
| 126 |
+
```
|
| 127 |
+
python saco_yt1b_annot_update.py \
|
| 128 |
+
--yt1b_media_dir ../data/media/saco_yt1b/JPEGImages_6fps \
|
| 129 |
+
--yt1b_input_annot_path ../data/annotation/saco_veval_yt1b_val.json \
|
| 130 |
+
--yt1b_output_annot_path ../data/annotation/saco_veval_yt1b_val_updated.json \
|
| 131 |
+
--yt1b_annot_update_log_path ../data/annotation/saco_veval_yt1b_val_updated.log
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
**NOTE**:
|
| 135 |
+
* Not all Youtube videos might be available as Youtube videos can be deleted or become private. The script `saco_yt1b_annot_update.py` is used to remove the annotations of the unavailable videos.
|
| 136 |
+
* **Frame Shifting Alert!!** Even when the videos are still available, their specifications, such as fps and duration, may differ from those used during annotation when re-downloaded from YouTube. Additionally, sometimes `ffmpeg` seems to find it hard to guarantee consistent frame extraction from the same video across different environments. This may cause the re-downloaded and re-extracted frames to have alignment issues with our annotations due to frame shifting. Please be aware of this caveat when evaluating on SA-Co/VEval - YT-Temporal-1B.
|
| 137 |
+
|
| 138 |
+
##### SA-Co/VEval - SmartGlasses
|
| 139 |
+
Go to [SACo-VEval](https://huggingface.co/datasets/facebook/SACo-VEval/tree/main) download `media/saco_sg.tar.gz`
|
| 140 |
+
```
|
| 141 |
+
cd ../data
|
| 142 |
+
hf download facebook/SACo-VEval media/saco_sg.tar.gz --repo-type dataset --local-dir .
|
| 143 |
+
cd ../data/media
|
| 144 |
+
tar -xzf saco_sg.tar.gz
|
| 145 |
+
```
|
| 146 |
+
|
| 147 |
+
## Annotation Format
|
| 148 |
+
The format is similar to the [YTVIS](https://youtube-vos.org/dataset/vis/) format.
|
| 149 |
+
|
| 150 |
+
In the annotation json, e.g. `saco_veval_sav_test.json` there are 5 fields:
|
| 151 |
+
* info:
|
| 152 |
+
* A dict containing the dataset info
|
| 153 |
+
* E.g. {'version': 'v1', 'date': '2025-09-24', 'description': 'SA-Co/VEval SA-V Test'}
|
| 154 |
+
* videos
|
| 155 |
+
* A list of videos that are used in the current annotation json
|
| 156 |
+
* It contains {id, video_name, file_names, height, width, length}
|
| 157 |
+
* annotations
|
| 158 |
+
* A list of **positive** masklets and their related info
|
| 159 |
+
* It contains {id, segmentations, bboxes, areas, iscrowd, video_id, height, width, category_id, noun_phrase}
|
| 160 |
+
* video_id should match to the `videos - id` field above
|
| 161 |
+
* category_id should match to the `categories - id` field below
|
| 162 |
+
* segmentations is a list of [RLE](https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py)
|
| 163 |
+
* categories
|
| 164 |
+
* A **globally** used noun phrase id map, which is true across all 3 domains.
|
| 165 |
+
* It contains {id, name}
|
| 166 |
+
* name is the noun phrase
|
| 167 |
+
* video_np_pairs
|
| 168 |
+
* A list of video-np pairs, including both **positive** and **negative** used in the current annotation json
|
| 169 |
+
* It contains {id, video_id, category_id, noun_phrase, num_masklets}
|
| 170 |
+
* video_id should match the `videos - id` above
|
| 171 |
+
* category_id should match the `categories - id` above
|
| 172 |
+
* when `num_masklets > 0` it is a positive video-np pair, and the corresponding masklets can be found in the annotations field
|
| 173 |
+
* when `num_masklets = 0` it is a negative video-np pair, meaning no masklet is present at all
|
| 174 |
+
```
|
| 175 |
+
data {
|
| 176 |
+
"info": info
|
| 177 |
+
"videos": [video]
|
| 178 |
+
"annotations": [annotation]
|
| 179 |
+
"categories": [category]
|
| 180 |
+
"video_np_pairs": [video_np_pair]
|
| 181 |
+
}
|
| 182 |
+
video {
|
| 183 |
+
"id": int
|
| 184 |
+
"video_name": str # e.g. sav_000000
|
| 185 |
+
"file_names": List[str]
|
| 186 |
+
"height": int
|
| 187 |
+
"width": int
|
| 188 |
+
"length": int
|
| 189 |
+
}
|
| 190 |
+
annotation {
|
| 191 |
+
"id": int
|
| 192 |
+
"segmentations": List[RLE]
|
| 193 |
+
"bboxes": List[List[int, int, int, int]]
|
| 194 |
+
"areas": List[int]
|
| 195 |
+
"iscrowd": int
|
| 196 |
+
"video_id": str
|
| 197 |
+
"height": int
|
| 198 |
+
"width": int
|
| 199 |
+
"category_id": int
|
| 200 |
+
"noun_phrase": str
|
| 201 |
+
}
|
| 202 |
+
category {
|
| 203 |
+
"id": int
|
| 204 |
+
"name": str
|
| 205 |
+
}
|
| 206 |
+
video_np_pair {
|
| 207 |
+
"id": int
|
| 208 |
+
"video_id": str
|
| 209 |
+
"category_id": int
|
| 210 |
+
"noun_phrase": str
|
| 211 |
+
"num_masklets": int
|
| 212 |
+
}
|
| 213 |
+
```
|
| 214 |
+
[sam3/examples/saco_veval_vis_example.ipynb](https://github.com/facebookresearch/sam3/blob/main/examples/saco_veval_vis_example.ipynb) shows some examples of the data format and data visualization.
|
| 215 |
+
|
| 216 |
+
## Run Offline Eval
|
| 217 |
+
An example notebook and an eval script have been provided for offline evaluation.
|
| 218 |
+
```
|
| 219 |
+
sam3/
|
| 220 |
+
├── examples/
|
| 221 |
+
│ └── saco_veval_eval_example.ipynb # this notebook will load eval res or run the eval on the fly, and print the results
|
| 222 |
+
└── sam3/eval/
|
| 223 |
+
└── saco_veval_eval.py # this script will run the offline evaluator
|
| 224 |
+
```
|
| 225 |
+
`saco_veval_eval.py` supports two modes, `one` and `all`.
|
| 226 |
+
* `one`: will take only one pair of gt and pred files to eval
|
| 227 |
+
* `all`: will eval on all 6 SACo/VEval datasets
|
| 228 |
+
|
| 229 |
+
Example usage
|
| 230 |
+
```
|
| 231 |
+
python saco_veval_eval.py one \
|
| 232 |
+
--gt_annot_file ../sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_gt.json \
|
| 233 |
+
--pred_file ../sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_pred.json \
|
| 234 |
+
--eval_res_file ../sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_eval_res.json
|
| 235 |
+
```
|
| 236 |
+
* `gt_annot_file`: the location of the GT file
|
| 237 |
+
* `pred_file`: the location of the Pred file
|
| 238 |
+
* `eval_res_file`: the location where the eval result will be written to
|
| 239 |
+
|
| 240 |
+
```
|
| 241 |
+
python saco_veval_eval.py all \
|
| 242 |
+
--gt_annot_dir ../data/annotation \
|
| 243 |
+
--pred_dir ../data/pred \
|
| 244 |
+
--eval_res_dir ../data/pred
|
| 245 |
+
```
|
| 246 |
+
* `gt_annot_dir`: the location of the GT files
|
| 247 |
+
* `pred_dir`: the location of the Pred files
|
| 248 |
+
* `eval_res_dir`: the location where the eval results will be written to
|
source_code/sam3/scripts/eval/veval/saco_yt1b_annot_update.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
import argparse
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_available_saco_yt1b_ids(yt1b_media_dir, data):
    """Return the video names from ``data`` that have extracted frames on disk.

    Args:
        yt1b_media_dir: Directory containing one sub-folder of JPEG frames per
            video, e.g. ``media/saco_yt1b/JPEGImages_6fps``. (Parameter renamed
            from the misspelled ``yt1b_meida_dir``; the only caller passes it
            positionally.)
        data: Loaded SA-Co/VEval annotation dict; only ``data["videos"]`` and
            ``data["info"]`` are read.

    Returns:
        List of video names (sub-folder names) present under
        ``yt1b_media_dir`` that contain at least one frame file.
    """
    vdf = pd.DataFrame(data["videos"])
    # Use a set for O(1) membership tests while scanning the media directory.
    expected_saco_yt1b_ids = set(vdf.video_name.tolist())

    yt1b_media_folders = os.listdir(yt1b_media_dir)

    available_saco_yt1b_ids = []
    for yt1b_media_folder in yt1b_media_folders:
        if yt1b_media_folder not in expected_saco_yt1b_ids:
            # Ignore folders that are not part of this annotation file.
            continue
        jpeg_folder_dir = os.path.join(yt1b_media_dir, yt1b_media_folder)
        jpeg_count = len(os.listdir(jpeg_folder_dir))
        if jpeg_count > 0:
            available_saco_yt1b_ids.append(yt1b_media_folder)
        else:
            # The download/extraction step produced an empty folder.
            logger.info(
                f"No JPEG images found for {yt1b_media_folder}. The annotation related to this video will be removed."
            )

    logger.info(
        f"Expected {len(expected_saco_yt1b_ids)} videos for {data['info']}. Found {len(available_saco_yt1b_ids)} videos available in {yt1b_media_dir}."
    )
    return available_saco_yt1b_ids
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def update_yt1b_annot_per_field(data, field, id_col, available_ids):
    """Filter ``data[field]`` down to entries whose ``id_col`` is available.

    Args:
        data: Loaded SA-Co/VEval annotation dict (mutated in place).
        field: Annotation field to filter, e.g. "videos", "annotations",
            or "video_np_pairs".
        id_col: Key within each entry that identifies its video, e.g.
            "video_name" or "video_id".
        available_ids: Iterable of ids of the videos that are available.

    Returns:
        The same ``data`` dict with ``data[field]`` filtered.
    """
    # Convert once to a set: keeps the filter O(n) instead of
    # O(n * len(available_ids)) when available_ids is a list.
    available_id_set = set(available_ids)
    field_data = data[field]
    new_field_data = []
    for data_entry in field_data:
        if data_entry[id_col] not in available_id_set:
            logger.info(
                f"{field}: Removing {data_entry} due to the video being unavailable."
            )
            continue
        new_field_data.append(data_entry)

    data[field] = new_field_data
    logger.info(
        f"Updated {field} by {id_col} - Before: {len(field_data)}, After: {len(new_field_data)}, Removed: {len(field_data) - len(new_field_data)}"
    )
    return data
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def update_yt1b_annot(yt1b_input_annot_path, yt1b_media_dir, yt1b_output_annot_path):
    """Filter a SA-Co/VEval YT1B annotation file down to the available videos.

    Loads the input annotation JSON, removes videos whose frames are missing
    on disk (plus their dependent annotations and video-np pairs), and writes
    the filtered annotation to the output path.

    Returns:
        The filtered annotation dict.
    """
    with open(yt1b_input_annot_path, "r") as f:
        data = json.load(f)

    available_saco_yt1b_ids = get_available_saco_yt1b_ids(yt1b_media_dir, data)

    # First prune the video list by folder name ...
    data = update_yt1b_annot_per_field(
        data=data,
        field="videos",
        id_col="video_name",
        available_ids=available_saco_yt1b_ids,
    )

    # ... then prune the dependent fields by the surviving incremental ids.
    remaining_incremental_ids = [entry["id"] for entry in data["videos"]]
    for dependent_field in ("annotations", "video_np_pairs"):
        data = update_yt1b_annot_per_field(
            data=data,
            field=dependent_field,
            id_col="video_id",
            available_ids=remaining_incremental_ids,
        )

    with open(yt1b_output_annot_path, "w") as f:
        json.dump(data, f)

    return data
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def main():
    """Parse CLI arguments, configure file logging, and update the YT1B annotations."""
    parser = argparse.ArgumentParser(description="Run video grounding evaluators")
    # Declare the four string options data-driven to keep the setup compact.
    cli_options = [
        (
            "--yt1b_media_dir",
            "Path to the directory where the yt1b media is stored e.g media/saco_yt1b/JPEGImages_6fps",
        ),
        (
            "--yt1b_input_annot_path",
            "Path to the saco_veval_yt1b input annotation file e.g annotation/saco_veval_yt1b_test.json or annotation/saco_veval_yt1b_val.json",
        ),
        (
            "--yt1b_output_annot_path",
            "Path to the output annotation file e.g annotation/saco_veval_yt1b_test_updated.json or annotation/saco_veval_yt1b_val_updated.json",
        ),
        (
            "--yt1b_annot_update_log_path",
            "Path to the yt1b annot update log file e.g annotation/yt1b_annot_update_log.log",
        ),
    ]
    for flag, help_text in cli_options:
        parser.add_argument(flag, type=str, help=help_text)

    args = parser.parse_args()

    # Make sure both destination directories exist before writing anything.
    for out_path in (args.yt1b_annot_update_log_path, args.yt1b_output_annot_path):
        os.makedirs(os.path.dirname(out_path), exist_ok=True)

    # Log to a file so the per-entry removal decisions can be audited later.
    logging.basicConfig(
        filename=args.yt1b_annot_update_log_path,
        format="%(asctime)s [%(threadName)s] %(levelname)s: %(message)s",
        level=logging.INFO,
        filemode="w",
    )

    update_yt1b_annot(
        yt1b_input_annot_path=args.yt1b_input_annot_path,
        yt1b_media_dir=args.yt1b_media_dir,
        yt1b_output_annot_path=args.yt1b_output_annot_path,
    )

    print("Done!! Check the log at", args.yt1b_annot_update_log_path)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|