Upload 11 files
- .gitattributes +2 -0
- README.md +106 -0
- model_config.json +8 -0
- open_clip_config.json +38 -0
- special_tokens_map.json +30 -0
- text.onnx +3 -0
- text.onnx.data +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +31 -0
- visual.onnx +3 -0
- visual.onnx.data +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+text.onnx.data filter=lfs diff=lfs merge=lfs -text
+visual.onnx.data filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,3 +1,109 @@
 ---
+tags:
+- clip
+- mobileclip2
+pipeline_tag: zero-shot-image-classification
 license: apple-amlr
 ---
+
+# ONNX export of timm/MobileCLIP2-S4-OpenCLIP
+
+This model is an export of [timm/MobileCLIP2-S4-OpenCLIP](https://huggingface.co/timm/MobileCLIP2-S4-OpenCLIP). It can be used with the [`open_clip_inference`](https://crates.io/crates/open_clip_inference) rust crate, or any other ONNX Runtime based implementation.
+
+## Usage with `open_clip_inference` in Rust:
+
+```rust
+use open_clip_inference::Clip;
+use std::path::Path;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let model_id = "RuteNL/MobileCLIP2-S4-OpenCLIP-ONNX";
+    let mut clip = Clip::from_hf(model_id).build().await?;
+
+    let img = image::open(Path::new("assets/img/cat_face.jpg")).expect("Failed to load image");
+    let texts = &[
+        "A photo of a cat",
+        "A photo of a dog",
+        "A photo of a beignet",
+    ];
+
+    let results = clip.classify(&img, texts)?;
+
+    for (text, prob) in results {
+        println!("{}: {:.2}%", text, prob * 100.0);
+    }
+
+    Ok(())
+}
+```
+
+---
+
+# Model card for MobileCLIP2-S4-OpenCLIP
+These weights and model card are adapted from the original Apple model at https://huggingface.co/apple/MobileCLIP2-S4. This version uses canonical OpenCLIP configs and weight naming.
+
+MobileCLIP2 was introduced in [MobileCLIP2: Improving Multi-Modal Reinforced Training](http://arxiv.org/abs/2508.20691) (TMLR August 2025 <mark>Featured</mark>), by Fartash Faghri, Pavan Kumar Anasosalu Vasu, Cem Koc, Vaishaal Shankar, Alexander T Toshev, Oncel Tuzel, Hadi Pouransari.
+
+This repository contains the **MobileCLIP2-S4** checkpoint.
+
+### Highlights
+
+* `MobileCLIP2-S4` matches the accuracy of SigLIP-SO400M/14 with 2x fewer parameters and surpasses DFN ViT-L/14 at 2.5x lower latency measured on iPhone12 Pro Max.
+* `MobileCLIP-S3/S4` are our new architectures trained on MobileCLIP’s training dataset, DataCompDR-1B (dashed lines).
+* Our smallest variant `MobileCLIP-S0` obtains similar zero-shot performance as [OpenAI](https://arxiv.org/abs/2103.00020)'s ViT-B/16 model while being 4.8x faster and 2.8x smaller.
+* `MobileCLIP-S2` obtains better avg zero-shot performance than [SigLIP](https://arxiv.org/abs/2303.15343)'s ViT-B/16 model while being 2.3x faster and 2.1x smaller, and trained with 3x less seen samples.
+* `MobileCLIP-B (LT)` attains zero-shot ImageNet performance of **77.2%** which is significantly better than recent works like [DFN](https://arxiv.org/abs/2309.17425) and [SigLIP](https://arxiv.org/abs/2303.15343) with similar architectures or even [OpenAI's ViT-L/14@336](https://arxiv.org/abs/2103.00020).
+
+
+## Checkpoints and Results (Original Apple links)
+
+| Model | # Seen <BR>Samples (B) | # Params (M) <BR> (img + txt) | Latency (ms) <BR> (img + txt) | IN-1k Zero-Shot <BR> Top-1 Acc. (%) | Avg. Perf. (%) <BR> on 38 datasets |
+|:----------------------------------------------------------|:----------------------:|:-----------------------------:|:-----------------------------:|:-----------------------------------:|:----------------------------------:|
+| [MobileCLIP2-S0](https://hf.co/apple/MobileCLIP2-S0) | 13 | 11.4 + 42.4 | 1.5 + 1.6 | 71.5 | 59.7 |
+| [MobileCLIP2-S2](https://hf.co/apple/MobileCLIP2-S2) | 13 | 35.7 + 63.4 | 3.6 + 3.3 | 77.2 | 64.1 |
+| [MobileCLIP2-B](https://hf.co/apple/MobileCLIP2-B) | 13 | 86.3 + 63.4 | 10.4 + 3.3 | 79.4 | 65.8 |
+| [MobileCLIP2-S3](https://hf.co/apple/MobileCLIP2-S3) | 13 | 125.1 + 123.6 | 8.0 + 6.6 | 80.7 | 66.8 |
+| [MobileCLIP2-L/14](https://hf.co/apple/MobileCLIP2-L-14) | 13 | 304.3 + 123.6 | 57.9 + 6.6 | 81.9 | 67.8 |
+| [MobileCLIP2-S4](https://hf.co/apple/MobileCLIP2-S4) | 13 | 321.6 + 123.6 | 19.6 + 6.6 | 81.9 | 67.5 |
+| [MobileCLIP-S0](https://hf.co/apple/MobileCLIP-S0) | 13 | 11.4 + 42.4 | 1.5 + 1.6 | 67.8 | 58.1 |
+| [MobileCLIP-S1](https://hf.co/apple/MobileCLIP-S1) | 13 | 21.5 + 63.4 | 2.5 + 3.3 | 72.6 | 61.3 |
+| [MobileCLIP-S2](https://hf.co/apple/MobileCLIP-S2) | 13 | 35.7 + 63.4 | 3.6 + 3.3 | 74.4 | 63.7 |
+| [MobileCLIP-B](https://hf.co/apple/MobileCLIP-B) | 13 | 86.3 + 63.4 | 10.4 + 3.3 | 76.8 | 65.2 |
+| [MobileCLIP-B (LT)](https://hf.co/apple/MobileCLIP-B-LT) | 36 | 86.3 + 63.4 | 10.4 + 3.3 | 77.2 | 65.8 |
+| [MobileCLIP-S3](https://hf.co/apple/MobileCLIP-S3) | 13 | 125.1 + 123.6 | 8.0 + 6.6 | 78.3 | 66.3 |
+| [MobileCLIP-L/14](https://hf.co/apple/MobileCLIP-L-14) | 13 | 304.3 + 123.6 | 57.9 + 6.6 | 79.5 | 66.9 |
+| [MobileCLIP-S4](https://hf.co/apple/MobileCLIP-S4) | 13 | 321.6 + 123.6 | 19.6 + 6.6 | 79.4 | 68.1 |
+
+
+## How to Use
+
+```py
+import torch
+import open_clip
+from PIL import Image
+from urllib.request import urlopen
+from timm.utils import reparameterize_model
+
+model, _, preprocess = open_clip.create_model_and_transforms('MobileCLIP2-S4', pretrained='dfndr2b')
+model.eval()
+tokenizer = open_clip.get_tokenizer('MobileCLIP2-S4')
+
+# For inference/model exporting purposes, optionally reparameterize for better performance
+model = reparameterize_model(model)
+
+image = Image.open(urlopen(
+    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
+))
+image = preprocess(image).unsqueeze(0)
+text = tokenizer(["a diagram", "a dog", "a cat", "a doughnut"])
+
+with torch.no_grad(), torch.amp.autocast(image.device.type):
+    image_features = model.encode_image(image)
+    text_features = model.encode_text(text)
+    image_features /= image_features.norm(dim=-1, keepdim=True)
+    text_features /= text_features.norm(dim=-1, keepdim=True)
+    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
+
+print("Label probs:", text_probs)
+```
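For the "any other ONNX Runtime based implementation" route mentioned in the README, a rough editorial sketch in Python is below. It is not part of the uploaded files: the graph input names are read from the sessions rather than assumed, but the input dtypes (int64 token ids, float32 pixels), the fixed 77-token padding, and whether `tokenizer.json` appends the start/end tokens itself are assumptions to verify against the exported graphs. Preprocessing per `open_clip_config.json` is sketched separately under that file further down.

```python
# Editorial sketch: direct inference with onnxruntime; not shipped with this repo.
import numpy as np
import onnxruntime as ort
from tokenizers import Tokenizer

visual = ort.InferenceSession("visual.onnx")   # visual.onnx.data must sit next to it
text = ort.InferenceSession("text.onnx")       # ditto for text.onnx.data

# Tokenize to the fixed 77-token CLIP context (model_max_length in tokenizer_config.json),
# padding with pad_id = 0 from model_config.json.
tok = Tokenizer.from_file("tokenizer.json")
prompts = ["A photo of a cat", "A photo of a dog", "A photo of a beignet"]
ids = np.zeros((len(prompts), 77), dtype=np.int64)
for i, p in enumerate(prompts):
    enc = tok.encode(p).ids[:77]
    ids[i, : len(enc)] = enc

# `pixels` stands in for a (1, 3, 256, 256) float32 image preprocessed per
# open_clip_config.json (see the preprocessing sketch under that file below).
pixels = np.zeros((1, 3, 256, 256), dtype=np.float32)

# Input names are read from the graphs; dtypes are assumptions to confirm.
img_emb = visual.run(None, {visual.get_inputs()[0].name: pixels})[0]
txt_emb = text.run(None, {text.get_inputs()[0].name: ids})[0]

# L2-normalize and score, mirroring the PyTorch example above and the
# logit_scale / softmax settings in model_config.json.
img_emb /= np.linalg.norm(img_emb, axis=-1, keepdims=True)
txt_emb /= np.linalg.norm(txt_emb, axis=-1, keepdims=True)
logits = 57.30369567871094 * img_emb @ txt_emb.T
probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs /= probs.sum(axis=-1, keepdims=True)
print(dict(zip(prompts, probs[0].tolist())))
```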
model_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "logit_scale": 57.30369567871094,
+  "logit_bias": 0.0,
+  "activation_function": "softmax",
+  "tokenizer_needs_lowercase": false,
+  "pad_id": 0,
+  "vocab_size": 49408
+}
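How these fields are consumed is up to the inference runtime; as an illustrative assumption based on standard CLIP zero-shot scoring, `"activation_function": "softmax"` together with this `logit_scale` and zero `logit_bias` would turn L2-normalized image/text embeddings into class probabilities roughly like this:

```python
import numpy as np

def zero_shot_probs(img_emb, txt_emb,
                    logit_scale=57.30369567871094, logit_bias=0.0):
    """Illustrative CLIP-style scoring; embeddings assumed L2-normalized."""
    logits = logit_scale * img_emb @ txt_emb.T + logit_bias
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))  # numerically stable softmax
    return e / e.sum(axis=-1, keepdims=True)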
open_clip_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "model_cfg": {
+    "embed_dim": 768,
+    "vision_cfg": {
+      "timm_model_name": "fastvit_mci4",
+      "timm_model_pretrained": false,
+      "timm_pool": "avg",
+      "timm_proj": null,
+      "timm_drop": 0.0,
+      "timm_drop_path": 0.0,
+      "image_size": 256
+    },
+    "text_cfg": {
+      "context_length": 77,
+      "vocab_size": 49408,
+      "width": 768,
+      "heads": 12,
+      "layers": 12,
+      "no_causal_mask": true,
+      "hf_tokenizer_name": "timm/MobileCLIP2-S4-OpenCLIP"
+    },
+    "custom_text": true
+  },
+  "preprocess_cfg": {
+    "mean": [
+      0.48145466,
+      0.4578275,
+      0.40821073
+    ],
+    "std": [
+      0.26862954,
+      0.26130258,
+      0.27577711
+    ],
+    "interpolation": "bilinear",
+    "resize_mode": "shortest"
+  }
+}
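The `preprocess_cfg` above is the part an ONNX consumer has to reimplement. A minimal PIL/NumPy sketch follows, assuming `"resize_mode": "shortest"` means the usual OpenCLIP shortest-side resize followed by a center crop to `image_size` (256); this is an interpretation, not code from the repo.

```python
import numpy as np
from PIL import Image

MEAN = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
STD = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)

def preprocess(img: Image.Image, size: int = 256) -> np.ndarray:
    """Shortest-side bilinear resize, center crop, normalize -> (1, 3, size, size) float32."""
    img = img.convert("RGB")
    scale = size / min(img.size)
    img = img.resize((round(img.width * scale), round(img.height * scale)),
                     Image.Resampling.BILINEAR)
    left, top = (img.width - size) // 2, (img.height - size) // 2
    img = img.crop((left, top, left + size, top + size))
    x = np.asarray(img, dtype=np.float32) / 255.0  # HWC in [0, 1]
    x = (x - MEAN) / STD                           # per-channel normalization
    return x.transpose(2, 0, 1)[None]              # HWC -> NCHW
```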
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
text.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8af44c17a4087732199e78ab85378a555fc5a97cc4359c9dc26c75ede2fdf91c
+size 1521172
text.onnx.data
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d12023b749daeccb6de14f06f98d306f7d2827b61779bdc86ef9e6da680a781
+size 494665728
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 77,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
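This is a stock `CLIPTokenizer` configuration (77-token context, `<|startoftext|>`/`<|endoftext|>` specials). A hypothetical check with the `transformers` tokenizer is sketched below, using the repo id from the README's Rust example; note that padding here fills with the `<|endoftext|>` pad token (id 49407), whereas `model_config.json` lists `pad_id: 0`, so confirm which convention the consuming runtime expects.

```python
from transformers import CLIPTokenizerFast

# Repo id taken from the README's Rust example; loading it this way is an assumption.
tok = CLIPTokenizerFast.from_pretrained("RuteNL/MobileCLIP2-S4-OpenCLIP-ONNX")
enc = tok(["A photo of a cat"], padding="max_length", max_length=77, return_tensors="np")

print(enc["input_ids"][0][:8])  # 49406 (<|startoftext|>) first, 49407 (<|endoftext|>) after the prompt tokens
print(tok.pad_token_id)         # 49407 here; model_config.json instead lists pad_id 0
```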
visual.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe3d34cf17ac2dc51bb881d3121e329a7628b84c0434e297313ae961b15b9e03
+size 2789209
visual.onnx.data
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65d54e15b15d33f22f36c9f2f790600da1b84531a5033360b9954d1bcf295c51
+size 1286209536