Image-Text-to-Text
Transformers
PyTorch
English
qwen2_vl
Embedding
text-generation-inference
SwyWang committed on
Commit d94b5f6 · 1 Parent(s): 4809c82

first upload

README.md CHANGED
@@ -1,3 +1,62 @@
---
license: cc-by-nc-4.0
+ datasets:
+ - TIGER-Lab/MMEB-train
+ - Sony/SCaR-Train
+ language:
+ - en
+ metrics:
+ - accuracy
+ base_model:
+ - Qwen/Qwen2-VL-7B-Instruct
+ library_name: transformers
+ tags:
+ - Embedding
---
+
+ # VIRTUE Model Card
+
+ [VIRTUE-2B-SCaR](https://huggingface.co/Sony/VIRTUE-2B-SCaR) and [VIRTUE-7B-SCaR](https://huggingface.co/Sony/VIRTUE-7B-SCaR) are the official checkpoints for the paper "[VIRTUE: Visual-Interactive Text-Image Universal Embedder](https://arxiv.org/abs/2510.00523)", trained on MMEB-Train and SCaR-Train.
+ VIRTUE is a visual-interactive text-image universal embedder that couples a VLM with a segmentation model, enabling a visual-interaction modality for human input.
+ In addition, we introduce the SCaR benchmark ([train](https://huggingface.co/datasets/Sony/SCaR-Train), [eval](https://huggingface.co/datasets/Sony/SCaR-Eval)), composed of 1M samples for visual-interactive image-to-text retrieval, to evaluate visual-interactive embedding capabilities.
+ SCaR enables evaluation of advanced reasoning and compositional tasks in multimodal, visual-interaction-aware embedding scenarios that have so far remained unexplored.
+
+ ## Model Details
+
+ - [VIRTUE-2B-SCaR](https://huggingface.co/Sony/VIRTUE-2B-SCaR)
+ - [VIRTUE-7B-SCaR](https://huggingface.co/Sony/VIRTUE-7B-SCaR)
+
+ ## Experimental Results
+
+ ### MMEB
+ - Without SCaR-Train:
+
+ ![MMEB Results](images/MMEB-results.png)
+ - With SCaR-Train:
+
+ ![MMEB Results with SCaR-Train](images/MMEB-results-with-SCaR.png)
+
+ ### SCaR
+ ![SCaR Results](images/SCaR-results.png)
+
+ ## Resources
+ - [Paper](https://arxiv.org/abs/2510.00523)
+ - [Webpage]()
+ - [Repository](https://github.com/sony/virtue)
+
+ ## How to Use
+
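+ Below is a minimal, unofficial loading sketch. It assumes the checkpoint loads through the standard Qwen2-VL classes in `transformers` (as declared in `config.json`); the checkpoint id, prompt, and last-token pooling are illustrative assumptions rather than the official VIRTUE API. See the [repository](https://github.com/sony/virtue) for exact usage, including the visual-interaction (SAM 2.1) path.
+
+ ```python
+ import torch
+ from PIL import Image
+ from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
+
+ # Illustrative checkpoint id; both released sizes are listed under "Model Details".
+ model_name = "Sony/VIRTUE-2B-SCaR"
+ processor = AutoProcessor.from_pretrained(model_name)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     model_name, torch_dtype=torch.bfloat16, device_map="auto"
+ ).eval()
+
+ # Build an image+text query in the Qwen2-VL chat format.
+ image = Image.open("example.jpg")
+ messages = [{
+     "role": "user",
+     "content": [
+         {"type": "image"},
+         {"type": "text", "text": "Represent this image for retrieval."},
+     ],
+ }]
+ prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
+ inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)
+
+ # Assumed pooling: L2-normalized hidden state of the final token.
+ with torch.no_grad():
+     hidden = model(**inputs, output_hidden_states=True).hidden_states[-1]
+ embedding = torch.nn.functional.normalize(hidden[:, -1, :], dim=-1)
+ print(embedding.shape)  # (1, hidden_size)
+ ```
+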
+ ## Citation
+ ```
+ @article{wangICLR2026virtue,
+   author  = {Wei-Yao Wang and
+              Kazuya Tateishi and
+              Qiyu Wu and
+              Shusuke Takahashi and
+              Yuki Mitsufuji},
+   title   = {VIRTUE: Visual-Interactive Text-Image Universal Embedder},
+   journal = {arXiv preprint arXiv:2510.00523},
+   year    = {2025}
+ }
+ ```
config.json ADDED
@@ -0,0 +1,104 @@
+ {
+   "architectures": [
+     "Qwen2VLForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 1536,
+   "image_token_id": 151655,
+   "initializer_range": 0.02,
+   "intermediate_size": 8960,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2_vl",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "mrope_section": [
+       16,
+       24,
+       24
+     ],
+     "rope_type": "default",
+     "type": "default"
+   },
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "text_config": {
+     "architectures": [
+       "Qwen2VLForConditionalGeneration"
+     ],
+     "attention_dropout": 0.0,
+     "bos_token_id": 151643,
+     "eos_token_id": 151645,
+     "hidden_act": "silu",
+     "hidden_size": 1536,
+     "image_token_id": null,
+     "initializer_range": 0.02,
+     "intermediate_size": 8960,
+     "max_position_embeddings": 32768,
+     "max_window_layers": 28,
+     "model_type": "qwen2_vl_text",
+     "num_attention_heads": 12,
+     "num_hidden_layers": 28,
+     "num_key_value_heads": 2,
+     "rms_norm_eps": 1e-06,
+     "rope_scaling": {
+       "mrope_section": [
+         16,
+         24,
+         24
+       ],
+       "rope_type": "default",
+       "type": "default"
+     },
+     "rope_theta": 1000000.0,
+     "sliding_window": 32768,
+     "tie_word_embeddings": true,
+     "torch_dtype": "bfloat16",
+     "use_cache": true,
+     "use_sliding_window": false,
+     "video_token_id": null,
+     "vision_end_token_id": 151653,
+     "vision_start_token_id": 151652,
+     "vision_token_id": 151654,
+     "vocab_size": 151936
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.3",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "video_token_id": 151656,
+   "vision_config": {
+     "depth": 32,
+     "embed_dim": 1280,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 1536,
+     "in_channels": 3,
+     "in_chans": 3,
+     "initializer_range": 0.02,
+     "mlp_ratio": 4,
+     "model_type": "qwen2_vl",
+     "num_heads": 16,
+     "patch_size": 14,
+     "spatial_merge_size": 2,
+     "spatial_patch_size": 14,
+     "temporal_patch_size": 2,
+     "torch_dtype": "bfloat16"
+   },
+   "vision_end_token_id": 151653,
+   "vision_start_token_id": 151652,
+   "vision_token_id": 151654,
+   "vocab_size": 151936,
+   "virtue_sam": {
+     "enabled": true,
+     "config_path": "./sam2.1/sam2.1_hiera_b+.yaml",
+     "checkpoint": "./sam2_checkpoints/sam2.1_hiera_base_plus.pt",
+     "points_per_side": 3,
+     "feature_levels": 2
+   }
+ }
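The non-standard `virtue_sam` block wires SAM 2.1 into the visual-interaction path and appears to reference local files relative to the working directory. A small sanity check for the expected layout (paths copied verbatim from the config above; the SAM 2.1 config and checkpoint come from its official release, not from this repository):

```python
from pathlib import Path

# Files referenced by the "virtue_sam" block in config.json.
expected = [
    Path("./sam2.1/sam2.1_hiera_b+.yaml"),
    Path("./sam2_checkpoints/sam2.1_hiera_base_plus.pt"),
]
for path in expected:
    print(path, "->", "found" if path.exists() else "missing")
```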
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.01,
+   "top_k": 1,
+   "top_p": 0.001,
+   "transformers_version": "4.52.3"
+ }
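Note that although `do_sample` is `true`, `top_k: 1` together with the near-zero `temperature` and `top_p` makes decoding effectively greedy, which suits deterministic embedding and retrieval use. The settings can be inspected directly (repo id illustrative):

```python
from transformers import GenerationConfig

# Illustrative repo id; substitute the checkpoint you are using.
cfg = GenerationConfig.from_pretrained("Sony/VIRTUE-2B-SCaR")
# top_k=1 keeps only the single most likely token at each step.
print(cfg.do_sample, cfg.temperature, cfg.top_k, cfg.top_p)
```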
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "Qwen2VLImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "max_pixels": 1003520,
+   "merge_size": 2,
+   "min_pixels": 3136,
+   "patch_size": 14,
+   "processor_class": "Qwen2VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "longest_edge": 1003520,
+     "shortest_edge": 3136
+   },
+   "temporal_patch_size": 2
+ }
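These are the standard Qwen2-VL dynamic-resolution settings: each image is resized so its pixel count falls within `[min_pixels, max_pixels]`, and every `patch_size × merge_size` (28×28) pixel block becomes one visual token after merging. The implied per-image token budget:

```python
# Each merged patch covers (patch_size * merge_size)^2 = 28 * 28 = 784 pixels.
min_pixels, max_pixels = 3136, 1003520
patch_size, merge_size = 14, 2
pixels_per_token = (patch_size * merge_size) ** 2
print(min_pixels // pixels_per_token, "to", max_pixels // pixels_per_token,
      "visual tokens per image")  # 4 to 1280
```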
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f234e4c9e0ce32e5712c81416200eb3bf9a662132bbd6886055e319dc8d7be33
+ size 4418212954
sam2_reducer_and_vlm_layer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c33204cb8a45108de2719b5dba35f5a8bd2a869aa905679cb2f37db5e4ed847
+ size 6040518
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:091aa7594dc2fcfbfa06b9e3c22a5f0562ac14f30375c13af7309407a0e67b8a
+ size 11420371
tokenizer_config.json ADDED
@@ -0,0 +1,144 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "left",
+   "processor_class": "Qwen2VLProcessor",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff