Files changed (1)
  1. MILVLG_imp-v1-3b.json +158 -0
MILVLG_imp-v1-3b.json ADDED
@@ -0,0 +1,158 @@
+ {
+   "bomFormat": "CycloneDX",
+   "specVersion": "1.6",
+   "serialNumber": "urn:uuid:6c3b467c-c76e-47eb-bf5c-1d97c8f26c4d",
+   "version": 1,
+   "metadata": {
+     "timestamp": "2025-07-14T11:12:32.085748+00:00",
+     "component": {
+       "type": "machine-learning-model",
+       "bom-ref": "MILVLG/imp-v1-3b-c7e93326-9202-5ee4-804d-f351fc4293d5",
+       "name": "MILVLG/imp-v1-3b",
+       "externalReferences": [
+         {
+           "url": "https://huggingface.co/MILVLG/imp-v1-3b",
+           "type": "documentation"
+         }
+       ],
+       "modelCard": {
+         "modelParameters": {
+           "task": "text-generation",
+           "architectureFamily": "imp",
+           "modelArchitecture": "ImpForCausalLM",
+           "datasets": [
+             {
+               "ref": "liuhaotian/LLaVA-Pretrain-8de3d58f-6f2a-556e-9c5a-056d9604987c"
+             },
+             {
+               "ref": "liuhaotian/LLaVA-Instruct-150K-67c7ef13-998b-5b99-8af3-d82417369ce2"
+             }
+           ]
+         },
+         "properties": [
+           {
+             "name": "library_name",
+             "value": "transformers"
+           }
+         ]
+       },
+       "authors": [
+         {
+           "name": "MILVLG"
+         }
+       ],
+       "licenses": [
+         {
+           "license": {
+             "id": "Apache-2.0",
+             "url": "https://spdx.org/licenses/Apache-2.0.html"
+           }
+         }
+       ],
+       "description": "The Imp project aims to provide a family of strong multimodal `small` language models (MSLMs). Our `imp-v1-3b` is a strong MSLM with only **3B** parameters, which is built upon a small yet powerful SLM [Phi-2](https://huggingface.co/microsoft/phi-2) (2.7B) and a powerful visual encoder [SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384) (0.4B), and trained on the [LLaVA-v1.5](https://github.com/haotian-liu/LLaVA) training set. As shown in the image below, `imp-v1-3b` significantly outperforms counterparts of similar model size, and even achieves slightly better performance than the strong LLaVA-7B model on various multimodal benchmarks. ![evaluation](images/evaluation.png) We release our model weights and provide an example below to run our model. A detailed technical report and the corresponding training/evaluation code will be released soon on our [GitHub repo](https://github.com/MILVLG/imp). We will continue to improve our model and release new versions to further improve performance :)",
+       "tags": [
+         "transformers",
+         "safetensors",
+         "imp",
+         "text-generation",
+         "custom_code",
+         "dataset:liuhaotian/LLaVA-Pretrain",
+         "dataset:liuhaotian/LLaVA-Instruct-150K",
+         "arxiv:2405.12107",
+         "license:apache-2.0",
+         "autotrain_compatible",
+         "region:us"
+       ]
+     }
+   },
+   "components": [
+     {
+       "type": "data",
+       "bom-ref": "liuhaotian/LLaVA-Pretrain-8de3d58f-6f2a-556e-9c5a-056d9604987c",
+       "name": "liuhaotian/LLaVA-Pretrain",
+       "data": [
+         {
+           "type": "dataset",
+           "bom-ref": "liuhaotian/LLaVA-Pretrain-8de3d58f-6f2a-556e-9c5a-056d9604987c",
+           "name": "liuhaotian/LLaVA-Pretrain",
+           "contents": {
+             "url": "https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain",
+             "properties": [
+               {
+                 "name": "language",
+                 "value": "en"
+               },
+               {
+                 "name": "pretty_name",
+                 "value": "LLaVA Pretrain"
+               },
+               {
+                 "name": "license",
+                 "value": "other"
+               }
+             ]
+           },
+           "governance": {
+             "owners": [
+               {
+                 "organization": {
+                   "name": "liuhaotian",
+                   "url": "https://huggingface.co/liuhaotian"
+                 }
+               }
+             ]
+           },
+           "description": "LLaVA Visual Instruct Pretrain Dataset Card. Dataset type: LLaVA Visual Instruct Pretrain LCS-558K is a subset of the LAION/CC/SBU dataset, filtered with a more balanced concept coverage distribution. Captions are also associated with BLIP synthetic captions for reference. It is constructed for the pretraining stage for feature alignment in visual instruction tuning. We aim to build large multimodal models towards GPT-4 vision/language capability. Dataset date: LLaVA… See the full description on the dataset page: https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain."
+         }
+       ]
+     },
+     {
+       "type": "data",
+       "bom-ref": "liuhaotian/LLaVA-Instruct-150K-67c7ef13-998b-5b99-8af3-d82417369ce2",
+       "name": "liuhaotian/LLaVA-Instruct-150K",
+       "data": [
+         {
+           "type": "dataset",
+           "bom-ref": "liuhaotian/LLaVA-Instruct-150K-67c7ef13-998b-5b99-8af3-d82417369ce2",
+           "name": "liuhaotian/LLaVA-Instruct-150K",
+           "contents": {
+             "url": "https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K",
+             "properties": [
+               {
+                 "name": "task_categories",
+                 "value": "visual-question-answering, question-answering"
+               },
+               {
+                 "name": "language",
+                 "value": "en"
+               },
+               {
+                 "name": "size_categories",
+                 "value": "100K<n<1M"
+               },
+               {
+                 "name": "pretty_name",
+                 "value": "LLaVA Visual Instruct 150K"
+               },
+               {
+                 "name": "license",
+                 "value": "cc-by-4.0"
+               }
+             ]
+           },
+           "governance": {
+             "owners": [
+               {
+                 "organization": {
+                   "name": "liuhaotian",
+                   "url": "https://huggingface.co/liuhaotian"
+                 }
+               }
+             ]
+           },
+           "description": "LLaVA Visual Instruct 150K Dataset Card. Dataset type: LLaVA Visual Instruct 150K is a set of GPT-generated multimodal instruction-following data. It is constructed for visual instruction tuning and for building large multimodal models towards GPT-4 vision/language capability. Dataset date: LLaVA Visual Instruct 150K was collected in April 2023, by prompting the GPT-4-0314 API. Paper or resources for more information: https://llava-vl.github.io/ License: Creative… See the full description on the dataset page: https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K."
+         }
+       ]
+     }
+   ]
+ }
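
Since the added file is plain CycloneDX 1.6 JSON, it can be consumed with the Python standard library alone. Below is a minimal sketch (the filename is the one added in this change, and every key accessed appears in the diff above) that prints the model, its declared license, and resolves the model card's dataset refs against the top-level `components` list:

```python
import json

# Load the AI-BOM added in this change.
with open("MILVLG_imp-v1-3b.json") as f:
    bom = json.load(f)

model = bom["metadata"]["component"]
params = model["modelCard"]["modelParameters"]
print("model:  ", model["name"])        # MILVLG/imp-v1-3b
print("task:   ", params["task"])       # text-generation

# Licenses declared for the model component.
for entry in model["licenses"]:
    print("license:", entry["license"]["id"])  # Apache-2.0

# Dataset refs in the model card point at bom-refs in `components`.
by_ref = {c["bom-ref"]: c for c in bom["components"]}
for ds in params["datasets"]:
    print("dataset:", by_ref[ds["ref"]]["name"])
```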
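The model description above says an example to run the model is provided; for completeness, here is a hedged sketch of that usage with `transformers`. Because `imp` ships custom modeling code (the `custom_code` tag), `trust_remote_code=True` is required; the `image_preprocess` helper and the `images=` argument to `generate()` come from that custom code as shown on the model card, not from the standard `transformers` API, so treat them as assumptions.

```python
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "MILVLG/imp-v1-3b"

# The `imp` architecture is not built into transformers; the modeling code
# is fetched from the repo, hence trust_remote_code=True.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

# LLaVA-style prompt with an <image> placeholder.
prompt = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "USER: <image>\nWhat is in this picture? ASSISTANT:"
)
image = Image.open("example.jpg")  # any local image; the path is a placeholder

input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
# image_preprocess() and the images= kwarg are part of the repo's custom
# code (assumption based on the model card), not standard transformers.
image_tensor = model.image_preprocess(image)
output_ids = model.generate(
    input_ids, images=image_tensor, max_new_tokens=100, use_cache=True
)[0]
print(tokenizer.decode(output_ids[input_ids.shape[1]:],
                       skip_special_tokens=True).strip())
```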