jichao committed
Commit · efaf3db · 1 Parent(s): b4c7b26
Model upload, utils
Browse files:
- .gitignore +26 -0
- README.md +55 -0
- checkpoint-1199.pth +3 -0
- config.json +22 -0
- mae/checkpoint-1199.pth +3 -0
- mae/mae_visualize.ipynb +209 -0
- mae/models_mae_1c.py +279 -0
- model.safetensors +3 -0
.gitignore
ADDED
@@ -0,0 +1,26 @@
# Python
__pycache__/
*.py[cod]
*.pyo
*.pyd
env/
venv/
ENV/
*.egg-info/
*.egg
*.log

# Jupyter Notebook
.ipynb_checkpoints

# Pytest
.cache
.pytest_cache/

# VS Code
.vscode/
.idea/

# System files
.DS_Store
Thumbs.db
README.md
ADDED
@@ -0,0 +1,55 @@
# Model Card for Mars ViT Base Model

## Model Architecture
- Architecture: Vision Transformer (ViT) Base
- Input Channels: 1 (grayscale images)
- Number of Classes: 0 (feature extraction)

## Training Method
- Method: Masked Autoencoder (MAE)
- Dataset: 2 million CTX images

## Usage Examples

### Using timm
```python
import timm
import torch

model = timm.create_model(
    'vit_base_patch16_224',
    in_chans=1,
    num_classes=0,
    global_pool='',
    checkpoint_path="https://huggingface.co/jfang/mars-vit-base-ctx2m/resolve/main/checkpoint-1199.pth"
)

model.eval()
x = torch.randn(1, 1, 224, 224)
with torch.no_grad():
    features = model.forward_features(x)  # shape [1, tokens, embed_dim]
print(features.shape)

cls_token = features[:, 0]
patch_tokens = features[:, 1:]
```
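
Note: timm's `checkpoint_path` argument expects a local file, so passing the URL above may fail depending on the timm version. A minimal sketch of the safer route, fetching the file first with `huggingface_hub` (an editorial suggestion, not part of the original card):

```python
from huggingface_hub import hf_hub_download
import timm

# Download the checkpoint into the local HF cache, then point timm at the file.
ckpt_path = hf_hub_download(repo_id="jfang/mars-vit-base-ctx2m",
                            filename="checkpoint-1199.pth")
model = timm.create_model(
    'vit_base_patch16_224',
    in_chans=1,
    num_classes=0,
    global_pool='',
    checkpoint_path=ckpt_path,
)
```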

### Using transformers
```python
from transformers import AutoModel, AutoImageProcessor

model = AutoModel.from_pretrained("jfang/mars-vit-base-ctx2m")
image_processor = AutoImageProcessor.from_pretrained("jfang/mars-vit-base-ctx2m")

# Example usage
from PIL import Image
image = Image.open("some_image.png").convert("L")  # 1-channel
inputs = image_processor(image, return_tensors="pt")
outputs = model(**inputs)
```
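
With this path, the per-token features are on `outputs.last_hidden_state`; a minimal sketch (an editorial addition, not in the original card):

```python
features = outputs.last_hidden_state  # [1, 1 + num_patches, 768]
cls_embedding = features[:, 0]        # [1, 768] global image embedding
```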

### Model Performance
The model is optimized for feature extraction from CTX images. Detailed performance metrics on specific tasks or datasets are not provided in this card.

### Limitations
The model is trained specifically on CTX images and may not generalize well to other types of images without further fine-tuning. The model is designed for feature extraction and does not include a classification head.
checkpoint-1199.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2f3d358fe5106cd9f7604b7b7368da15e14d8585e776d0fa59766a4ae556e48
size 341667862
config.json
ADDED
@@ -0,0 +1,22 @@
{
  "architectures": [
    "ViTModel"
  ],
  "attention_probs_dropout_prob": 0.0,
  "encoder_stride": 16,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "model_type": "vit",
  "num_attention_heads": 12,
  "num_channels": 1,
  "num_hidden_layers": 12,
  "patch_size": 16,
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.47.1"
}
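
For reference, this configuration is a standard ViT-Base encoder (12 layers, 12 heads, hidden size 768, patch size 16) with `num_channels` set to 1 for grayscale input. A minimal sketch of rebuilding the architecture from the config alone (an editorial illustration; use `from_pretrained` as in the README to get the trained weights):

```python
from transformers import ViTConfig, ViTModel

config = ViTConfig.from_pretrained("jfang/mars-vit-base-ctx2m")
model = ViTModel(config)  # architecture only, randomly initialized
```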
mae/checkpoint-1199.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6022020e9ce04b222a644f49059cdd0518d96e261f2d3b4ae93a95d12c8977d9
size 1333326408
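
Note: judging by file sizes alone (about 1.3 GB here, versus about 342 MB for the root `checkpoint-1199.pth` and `model.safetensors`), this appears to be the full MAE training checkpoint (encoder, decoder, and training state) consumed by the notebook below, while the root files hold encoder-only weights. The commit itself does not state this, so treat it as an inference.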
mae/mae_visualize.ipynb
ADDED
@@ -0,0 +1,209 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "15c2148f-c1b0-46e0-87f6-2db29e13d5b8",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "This is a visualization demo using our pre-trained MAE models. Adapted from [MAE Visualize](https://github.com/facebookresearch/mae/blob/main/demo/mae_visualize.ipynb). Modified to work with our MAE models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "df2c7e91-3981-44ae-a00e-1b26efa7aa5c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-27T01:14:13.796746Z",
     "iopub.status.busy": "2025-01-27T01:14:13.796412Z",
     "iopub.status.idle": "2025-01-27T01:14:13.803827Z",
     "shell.execute_reply": "2025-01-27T01:14:13.803400Z",
     "shell.execute_reply.started": "2025-01-27T01:14:13.796730Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from PIL import Image\n",
    "\n",
    "# Define utils\n",
    "# Remove RGB-specific normalization\n",
    "imagenet_mean = np.array([0.5])  # Using only one channel\n",
    "imagenet_std = np.array([0.5])  # Using only one channel\n",
    "\n",
    "def show_image(image, title=''):\n",
    "    # image is [H, W, 1] or [H, W]\n",
    "    if not isinstance(image, torch.Tensor):\n",
    "        image = torch.tensor(image)\n",
    "    plt.imshow(((image * imagenet_std + imagenet_mean) * 255).clip(0, 255).int(), cmap='gray')\n",
    "    plt.title(title, fontsize=16)\n",
    "    plt.axis('off')\n",
    "    return\n",
    "\n",
    "def run_one_image(img, model):\n",
    "    x = torch.tensor(img)\n",
    "\n",
    "    # Add channel dimension if not present\n",
    "    if len(x.shape) == 2:\n",
    "        x = x.unsqueeze(-1)  # Add channel dimension\n",
    "\n",
    "    # make it a batch-like\n",
    "    x = x.unsqueeze(dim=0)\n",
    "    x = torch.einsum('nhwc->nchw', x)\n",
    "\n",
    "    # run MAE\n",
    "    loss, y, mask = model(x.float(), mask_ratio=0.75)\n",
    "    y = model.unpatchify(y)\n",
    "    y = torch.einsum('nchw->nhwc', y).detach().cpu()\n",
    "\n",
    "    # visualize the mask\n",
    "    mask = mask.detach()\n",
    "    mask = mask.unsqueeze(-1).repeat(1, 1, model.patch_embed.patch_size[0]**2 * 1)  # Changed *3 to *1 for single channel\n",
    "    mask = model.unpatchify(mask)\n",
    "    mask = torch.einsum('nchw->nhwc', mask).detach().cpu()\n",
    "\n",
    "    x = torch.einsum('nchw->nhwc', x)\n",
    "\n",
    "    # Rest of the function remains the same\n",
    "    im_masked = x * (1 - mask)\n",
    "    im_paste = x * (1 - mask) + y * mask\n",
    "\n",
    "    plt.rcParams['figure.figsize'] = [24, 24]\n",
    "\n",
    "    plt.subplot(1, 4, 1)\n",
    "    show_image(x[0], \"original\")\n",
    "\n",
    "    plt.subplot(1, 4, 2)\n",
    "    show_image(im_masked[0], \"masked\")\n",
    "\n",
    "    plt.subplot(1, 4, 3)\n",
    "\n",
    "    # Only keep reconstructed pixels in masked region\n",
    "    y_masked = y * mask\n",
    "\n",
    "    black_value = -(imagenet_mean / imagenet_std)\n",
    "    # Convert to a float or a torch Tensor\n",
    "    black_value = torch.tensor(black_value, dtype=y_masked.dtype, device=y_masked.device)\n",
    "    y_masked[mask == 0] = black_value\n",
    "    show_image(y_masked[0], \"reconstruction only\")\n",
    "\n",
    "    # show_image(y[0], \"reconstruction\")\n",
    "\n",
    "    plt.subplot(1, 4, 4)\n",
    "    show_image(im_paste[0], \"reconstruction + visible\")\n",
    "\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a47df54a",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-27T01:14:14.045225Z",
     "iopub.status.busy": "2025-01-27T01:14:14.044902Z",
     "iopub.status.idle": "2025-01-27T01:14:14.064737Z",
     "shell.execute_reply": "2025-01-27T01:14:14.064205Z",
     "shell.execute_reply.started": "2025-01-27T01:14:14.045199Z"
    }
   },
   "outputs": [],
   "source": [
    "from glob import glob\n",
    "import random\n",
    "\n",
    "img_paths = glob('./samples/*.png')\n",
    "\n",
    "img_path = random.choice(img_paths)\n",
    "img = Image.open(img_path)\n",
    "img = img.resize((224, 224))\n",
    "img = np.array(img) / 255.\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b33ab531",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-27T01:14:14.279739Z",
     "iopub.status.busy": "2025-01-27T01:14:14.279422Z",
     "iopub.status.idle": "2025-01-27T01:14:14.904252Z",
     "shell.execute_reply": "2025-01-27T01:14:14.903550Z",
     "shell.execute_reply.started": "2025-01-27T01:14:14.279714Z"
    }
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "import sys\n",
    "import os\n",
    "sys.path.append(os.getcwd())\n",
    "import models_mae_1c\n",
    "\n",
    "def prepare_model(chkpt_dir, arch='mae_vit_base_patch16'):\n",
    "    # build model\n",
    "    model = getattr(models_mae_1c, arch)()\n",
    "    # load model\n",
    "    checkpoint = torch.load(chkpt_dir, map_location='cpu')\n",
    "    msg = model.load_state_dict(checkpoint['model'], strict=False)\n",
    "    print(msg)\n",
    "    return model\n",
    "\n",
    "\n",
    "model = prepare_model(\"./checkpoint-1199.pth\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "05a153d6",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-27T01:14:29.875184Z",
     "iopub.status.busy": "2025-01-27T01:14:29.874954Z",
     "iopub.status.idle": "2025-01-27T01:14:30.229847Z",
     "shell.execute_reply": "2025-01-27T01:14:30.229301Z",
     "shell.execute_reply.started": "2025-01-27T01:14:29.875168Z"
    }
   },
   "outputs": [],
   "source": [
    "img_path = random.choice(img_paths)\n",
    "img = Image.open(img_path).convert(\"L\")\n",
    "img = img.resize((224, 224))\n",
    "img = np.array(img) / 255.\n",
    "# show_image(img)\n",
    "run_one_image(img, model)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "vfms",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
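
The notebook expects `models_mae_1c.py` and `checkpoint-1199.pth` in the working directory and sample CTX crops under `./samples/*.png`. For a headless run of the same flow, a minimal sketch (an editorial addition; `sample.png` is a placeholder path):

```python
import numpy as np
import torch
from PIL import Image

import models_mae_1c  # from this commit's mae/ directory

# Build the 1-channel ViT-Base MAE and load the checkpoint
# (the notebook's checkpoint stores weights under a 'model' key).
model = models_mae_1c.mae_vit_base_patch16()
ckpt = torch.load("checkpoint-1199.pth", map_location="cpu")
model.load_state_dict(ckpt.get("model", ckpt), strict=False)
model.eval()

# Grayscale image in [0, 1], shaped [N, C, H, W].
img = Image.open("sample.png").convert("L").resize((224, 224))
x = torch.from_numpy(np.array(img) / 255.).float()[None, None]

with torch.no_grad():
    loss, pred, mask = model(x, mask_ratio=0.75)
recon = model.unpatchify(pred)  # [1, 1, 224, 224] reconstruction
```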
mae/models_mae_1c.py
ADDED
@@ -0,0 +1,279 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------

from functools import partial

import torch
import torch.nn as nn
import numpy as np

from timm.models.vision_transformer import PatchEmbed, Block

def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
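
# NOTE (editor): get_2d_sincos_pos_embed above calls
# get_2d_sincos_pos_embed_from_grid, which the committed file never defines.
# The standard helpers from the upstream MAE repo (util/pos_embed.py) are
# reproduced below as an assumed completion so the module is importable.

def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0
    # use half of the dimensions to encode grid_h, the other half for grid_w
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: positions to be encoded, size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb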


class MaskedAutoencoderViT(nn.Module):
    """ Masked Autoencoder with VisionTransformer backbone
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=1,
                 embed_dim=1024, depth=24, num_heads=16,
                 decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
                 mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False):
        super().__init__()

        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.blocks = nn.ModuleList([
            # Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer)
            Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # --------------------------------------------------------------------------

        # --------------------------------------------------------------------------
        # MAE decoder specifics
        self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)

        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))

        self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.decoder_blocks = nn.ModuleList([
            # Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer)
            Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(decoder_depth)])

        self.decoder_norm = norm_layer(decoder_embed_dim)
        self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True)  # decoder to patch
        # --------------------------------------------------------------------------

        self.norm_pix_loss = norm_pix_loss

        self.initialize_weights()

    def initialize_weights(self):
        # initialization
        # initialize (and freeze) pos_embed by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))

        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embed.proj.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=.02)
        torch.nn.init.normal_(self.mask_token, std=.02)

        # initialize nn.Linear and nn.LayerNorm
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            # we use xavier_uniform following official JAX ViT:
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def patchify(self, imgs):
        """
        imgs: (N, 1, H, W)
        x: (N, L, patch_size**2 * 1)
        """
        assert imgs.shape[1] == 1, f"Expected 1 channel, got {imgs.shape[1]}"
        p = self.patch_embed.patch_size[0]
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0

        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], 1, h, p, w, p))
        x = torch.einsum('nchpwq->nhwpqc', x)
        x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 1))
        return x

    def unpatchify(self, x):
        """
        x: (N, L, patch_size**2 * 1)
        imgs: (N, 1, H, W)
        """
        assert x.shape[2] == self.patch_embed.patch_size[0]**2, "Incorrect patch dimension"
        p = self.patch_embed.patch_size[0]
        h = w = int(x.shape[1]**.5)
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, 1))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], 1, h * p, h * p))
        return imgs

    def random_masking(self, x, mask_ratio):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - mask_ratio))

        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]

        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)

        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))

        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)

        return x_masked, mask, ids_restore

    def forward_encoder(self, x, mask_ratio):
        # embed patches
        x = self.patch_embed(x)

        # add pos embed w/o cls token
        x = x + self.pos_embed[:, 1:, :]

        # masking: length -> length * mask_ratio
        x, mask, ids_restore = self.random_masking(x, mask_ratio)

        # append cls token
        cls_token = self.cls_token + self.pos_embed[:, :1, :]
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # apply Transformer blocks
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)

        return x, mask, ids_restore

    def forward_decoder(self, x, ids_restore):
        # embed tokens
        x = self.decoder_embed(x)

        # append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))  # unshuffle
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token

        # add pos embed
        x = x + self.decoder_pos_embed

        # apply Transformer blocks
        for blk in self.decoder_blocks:
            x = blk(x)
        x = self.decoder_norm(x)

        # predictor projection
        x = self.decoder_pred(x)

        # remove cls token
        x = x[:, 1:, :]

        return x

    def forward_loss(self, imgs, pred, mask):
        """
        imgs: [N, 1, H, W]
        pred: [N, L, p*p*1]
        mask: [N, L], 0 is keep, 1 is remove,
        """
        target = self.patchify(imgs)
        if self.norm_pix_loss:
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5

        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch

        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    def forward(self, imgs, mask_ratio=0.75):
        latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio)
        pred = self.forward_decoder(latent, ids_restore)  # [N, L, p*p*1]
        loss = self.forward_loss(imgs, pred, mask)
        return loss, pred, mask


def mae_vit_base_patch16_dec512d8b(**kwargs):
    model = MaskedAutoencoderViT(
        patch_size=16, embed_dim=768, depth=12, num_heads=12,
        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def mae_vit_large_patch16_dec512d8b(**kwargs):
    model = MaskedAutoencoderViT(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def mae_vit_huge_patch14_dec512d8b(**kwargs):
    model = MaskedAutoencoderViT(
        patch_size=14, embed_dim=1280, depth=32, num_heads=16,
        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model

def mae_vit_small_patch16_dec512d8b(**kwargs):
    model = MaskedAutoencoderViT(
        patch_size=16, embed_dim=384, depth=12, num_heads=6,
        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


# set recommended archs
mae_vit_base_patch16 = mae_vit_base_patch16_dec512d8b  # decoder: 512 dim, 8 blocks
mae_vit_large_patch16 = mae_vit_large_patch16_dec512d8b  # decoder: 512 dim, 8 blocks
mae_vit_huge_patch14 = mae_vit_huge_patch14_dec512d8b  # decoder: 512 dim, 8 blocks
mae_vit_small_patch16 = mae_vit_small_patch16_dec512d8b
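
The MAE encoder can also serve as a standalone feature extractor: `forward_encoder` with `mask_ratio=0.0` keeps every patch token. A minimal sketch (an editorial addition, not part of the commit):

```python
import torch
import models_mae_1c

# Load as in the notebook's prepare_model(), assuming a 'model' key in the checkpoint.
model = models_mae_1c.mae_vit_base_patch16()
ckpt = torch.load("checkpoint-1199.pth", map_location="cpu")
model.load_state_dict(ckpt.get("model", ckpt), strict=False)
model.eval()

x = torch.randn(1, 1, 224, 224)  # a batch of grayscale 224x224 inputs
with torch.no_grad():
    latent, mask, ids_restore = model.forward_encoder(x, mask_ratio=0.0)
# [1, 197, 768]: cls token + 196 patch tokens (patch tokens come out in the
# random_masking shuffle order; gather with ids_restore to recover raster order).
print(latent.shape)
```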
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:04e7122c161a46314611a6ff20504ad1c7f84b05fe25bb2739e3c9f9ff83a093
size 344006552