Update Model
- README.md +61 -2
- config.json +195 -6
- model.safetensors +3 -0
- preprocessor_config.json +34 -0
README.md
CHANGED
@@ -1,6 +1,65 @@

---
library_name: transformers
license: mit
tags:
- vision
- image-segmentation
- pytorch
---
# EoMT

[](https://pytorch.org/)

**EoMT (Encoder-only Mask Transformer)** is a Vision Transformer (ViT) architecture designed for high-quality and efficient image segmentation. It was introduced in the CVPR 2025 highlight paper:
**[Your ViT is Secretly an Image Segmentation Model](https://www.tue-mps.org/eomt)**
by Tommie Kerssies, Niccolò Cavagnero, Alexander Hermans, Narges Norouzi, Giuseppe Averta, Bastian Leibe, Gijs Dubbelman, and Daan de Geus.

> **Key Insight**: Given sufficient scale and pretraining, a plain ViT with only a few additional parameters can perform segmentation without task-specific decoders or pixel-fusion modules. The same model backbone supports semantic, instance, and panoptic segmentation with different post-processing 🤗

The original implementation can be found in this [repository](https://github.com/tue-mps/eomt).

---

### How to use

Here is how to use this model for instance segmentation:

```python
import matplotlib.pyplot as plt
import requests
import torch
from PIL import Image

from transformers import EomtForUniversalSegmentation, AutoImageProcessor

model_id = "yaswanthgali/coco_instance_eomt_large_640-hf"
processor = AutoImageProcessor.from_pretrained(model_id)
model = EomtForUniversalSegmentation.from_pretrained(model_id)

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)

inputs = processor(
    images=image,
    return_tensors="pt",
)

with torch.inference_mode():
    outputs = model(**inputs)

# Prepare the original image size in the format (height, width)
original_image_sizes = [(image.height, image.width)]

# Post-process the model outputs to get the final segmentation prediction
preds = processor.post_process_instance_segmentation(
    outputs,
    original_image_sizes=original_image_sizes,
)

# Visualize the segmentation mask
plt.imshow(preds[0]["segmentation"])
plt.axis("off")
plt.title("Instance Segmentation")
plt.show()
```
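The README above notes that the same backbone handles semantic, instance, and panoptic segmentation purely through different post-processing. The sketch below illustrates that point by reusing `processor`, `outputs`, and `original_image_sizes` from the example; the `post_process_panoptic_segmentation` and `post_process_semantic_segmentation` calls follow the usual transformers universal-segmentation API and are assumptions here, and since this checkpoint is trained for COCO instance segmentation you would normally pair them with a checkpoint trained for the matching task.

```python
# Sketch only: reuses `processor`, `outputs`, and `original_image_sizes` from the
# README example above. The post_process_* methods and their arguments are
# assumptions based on the standard transformers universal-segmentation API.

# Panoptic-style post-processing: a segmentation map plus per-segment metadata.
panoptic = processor.post_process_panoptic_segmentation(
    outputs,
    original_image_sizes=original_image_sizes,
)
print(panoptic[0]["segmentation"].shape)
print(panoptic[0]["segments_info"][:3])

# Semantic-style post-processing: a single per-pixel class-id map.
semantic = processor.post_process_semantic_segmentation(
    outputs,
    original_image_sizes=original_image_sizes,
)
print(semantic[0].shape)
```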
config.json
CHANGED
@@ -1,9 +1,198 @@

{
  "architectures": [
    "EomtForUniversalSegmentation"
  ],
  "attention_dropout": 0.0,
  "class_weight": 2.0,
  "dice_weight": 5.0,
  "drop_path_rate": 0.0,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 1024,
  "id2label": {
    "0": "LABEL_0", "1": "LABEL_1", "2": "LABEL_2", "3": "LABEL_3", "4": "LABEL_4",
    "5": "LABEL_5", "6": "LABEL_6", "7": "LABEL_7", "8": "LABEL_8", "9": "LABEL_9",
    "10": "LABEL_10", "11": "LABEL_11", "12": "LABEL_12", "13": "LABEL_13", "14": "LABEL_14",
    "15": "LABEL_15", "16": "LABEL_16", "17": "LABEL_17", "18": "LABEL_18", "19": "LABEL_19",
    "20": "LABEL_20", "21": "LABEL_21", "22": "LABEL_22", "23": "LABEL_23", "24": "LABEL_24",
    "25": "LABEL_25", "26": "LABEL_26", "27": "LABEL_27", "28": "LABEL_28", "29": "LABEL_29",
    "30": "LABEL_30", "31": "LABEL_31", "32": "LABEL_32", "33": "LABEL_33", "34": "LABEL_34",
    "35": "LABEL_35", "36": "LABEL_36", "37": "LABEL_37", "38": "LABEL_38", "39": "LABEL_39",
    "40": "LABEL_40", "41": "LABEL_41", "42": "LABEL_42", "43": "LABEL_43", "44": "LABEL_44",
    "45": "LABEL_45", "46": "LABEL_46", "47": "LABEL_47", "48": "LABEL_48", "49": "LABEL_49",
    "50": "LABEL_50", "51": "LABEL_51", "52": "LABEL_52", "53": "LABEL_53", "54": "LABEL_54",
    "55": "LABEL_55", "56": "LABEL_56", "57": "LABEL_57", "58": "LABEL_58", "59": "LABEL_59",
    "60": "LABEL_60", "61": "LABEL_61", "62": "LABEL_62", "63": "LABEL_63", "64": "LABEL_64",
    "65": "LABEL_65", "66": "LABEL_66", "67": "LABEL_67", "68": "LABEL_68", "69": "LABEL_69",
    "70": "LABEL_70", "71": "LABEL_71", "72": "LABEL_72", "73": "LABEL_73", "74": "LABEL_74",
    "75": "LABEL_75", "76": "LABEL_76", "77": "LABEL_77", "78": "LABEL_78", "79": "LABEL_79"
  },
  "image_size": 640,
  "importance_sample_ratio": 0.75,
  "initializer_range": 0.02,
  "label2id": {
    "LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2, "LABEL_3": 3, "LABEL_4": 4,
    "LABEL_5": 5, "LABEL_6": 6, "LABEL_7": 7, "LABEL_8": 8, "LABEL_9": 9,
    "LABEL_10": 10, "LABEL_11": 11, "LABEL_12": 12, "LABEL_13": 13, "LABEL_14": 14,
    "LABEL_15": 15, "LABEL_16": 16, "LABEL_17": 17, "LABEL_18": 18, "LABEL_19": 19,
    "LABEL_20": 20, "LABEL_21": 21, "LABEL_22": 22, "LABEL_23": 23, "LABEL_24": 24,
    "LABEL_25": 25, "LABEL_26": 26, "LABEL_27": 27, "LABEL_28": 28, "LABEL_29": 29,
    "LABEL_30": 30, "LABEL_31": 31, "LABEL_32": 32, "LABEL_33": 33, "LABEL_34": 34,
    "LABEL_35": 35, "LABEL_36": 36, "LABEL_37": 37, "LABEL_38": 38, "LABEL_39": 39,
    "LABEL_40": 40, "LABEL_41": 41, "LABEL_42": 42, "LABEL_43": 43, "LABEL_44": 44,
    "LABEL_45": 45, "LABEL_46": 46, "LABEL_47": 47, "LABEL_48": 48, "LABEL_49": 49,
    "LABEL_50": 50, "LABEL_51": 51, "LABEL_52": 52, "LABEL_53": 53, "LABEL_54": 54,
    "LABEL_55": 55, "LABEL_56": 56, "LABEL_57": 57, "LABEL_58": 58, "LABEL_59": 59,
    "LABEL_60": 60, "LABEL_61": 61, "LABEL_62": 62, "LABEL_63": 63, "LABEL_64": 64,
    "LABEL_65": 65, "LABEL_66": 66, "LABEL_67": 67, "LABEL_68": 68, "LABEL_69": 69,
    "LABEL_70": 70, "LABEL_71": 71, "LABEL_72": 72, "LABEL_73": 73, "LABEL_74": 74,
    "LABEL_75": 75, "LABEL_76": 76, "LABEL_77": 77, "LABEL_78": 78, "LABEL_79": 79
  },
  "layer_norm_eps": 1e-06,
  "layerscale_value": 1e-05,
  "mask_weight": 5.0,
  "mlp_ratio": 4,
  "model_type": "eomt",
  "no_object_weight": 0.1,
  "num_attention_heads": 16,
  "num_blocks": 4,
  "num_channels": 3,
  "num_hidden_layers": 24,
  "num_queries": 200,
  "num_register_tokens": 4,
  "num_upscale_blocks": 2,
  "oversample_ratio": 3.0,
  "patch_size": 16,
  "torch_dtype": "float32",
  "train_num_points": 12544,
  "transformers_version": "4.53.0.dev0",
  "use_swiglu_ffn": false
}
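For reference, the configuration above can be read back programmatically. Here is a minimal sketch using the generic `AutoConfig` loader (assuming the installed transformers release includes EoMT support, as the `transformers_version` field suggests); the model id is taken from the README example.

```python
from transformers import AutoConfig

# Loads the config.json shown above from the Hub.
config = AutoConfig.from_pretrained("yaswanthgali/coco_instance_eomt_large_640-hf")

print(config.model_type)         # "eomt"
print(config.hidden_size)        # 1024
print(config.num_hidden_layers)  # 24
print(config.num_queries)        # 200 object queries
print(len(config.id2label))      # 80 classes (generic LABEL_* names in this config)
```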
model.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:e8fa9bee24c6a8b41bc98ec3097aa0ab0f758142c57b6a35e5172da62bcc6115
size 1266611816
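The weights themselves are stored with Git LFS, so only the pointer above is committed; `from_pretrained` resolves and downloads the actual file. As a rough sanity check on the stated size, 1266611816 bytes of float32 weights is about 316M parameters, which can be verified after loading (a sketch reusing the checkpoint from the README):

```python
from transformers import EomtForUniversalSegmentation

# Downloading the checkpoint also fetches model.safetensors (the LFS object above).
model = EomtForUniversalSegmentation.from_pretrained("yaswanthgali/coco_instance_eomt_large_640-hf")

num_params = sum(p.numel() for p in model.parameters())
print(num_params)  # expected to be roughly 1266611816 / 4 ≈ 316M float32 parameters
```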
preprocessor_config.json
ADDED
@@ -0,0 +1,34 @@

{
  "crop_size": null,
  "data_format": "channels_first",
  "default_to_square": false,
  "device": null,
  "disable_grouping": null,
  "do_center_crop": null,
  "do_convert_rgb": null,
  "do_normalize": true,
  "do_pad": true,
  "do_rescale": true,
  "do_resize": true,
  "do_split_image": false,
  "ignore_index": null,
  "image_mean": [
    0.485,
    0.456,
    0.406
  ],
  "image_processor_type": "EomtImageProcessorFast",
  "image_std": [
    0.229,
    0.224,
    0.225
  ],
  "input_data_format": null,
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "return_tensors": null,
  "size": {
    "longest_edge": 640,
    "shortest_edge": 640
  }
}
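To see how these settings behave at inference time, here is a minimal sketch; the resize-then-pad to a 640×640 input is an assumption based on the `do_resize`/`do_pad` flags and the `size` entry above (`resample: 2` is bilinear interpolation and `rescale_factor` is 1/255):

```python
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor

# Same checkpoint as in the README; this loads the preprocessor_config.json shown above.
processor = AutoImageProcessor.from_pretrained("yaswanthgali/coco_instance_eomt_large_640-hf")

# Arbitrary non-square RGB test image (height 480, width 640), purely for illustration.
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))

batch = processor(images=image, return_tensors="pt")

# With do_resize/do_pad enabled and a 640x640 target size, pixel_values should
# come out as a (1, 3, 640, 640) tensor, rescaled by 1/255 and normalized with
# the ImageNet mean/std listed above.
print(batch["pixel_values"].shape)
print(batch["pixel_values"].dtype)
```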