Ligul committed
Commit fd6509b · verified · Parent: e433270

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,84 @@
1
+ # Capri
2
+
3
+ Capri is a compact image captioning model designed for high-throughput, plain-language descriptions.
4
+ It supports two inference paths: direct image input or precomputed SigLIP2 pooled embeddings.
5
+
6
+ The project started from a practical pipeline constraint: existing captioning models were either too slow or too weak for reliable image understanding. Since SigLIP embeddings were already computed upstream, Capri was built to reuse them with a small LLM decoder, combining strong visual representations with fast text generation.
7
+
8
+ The name comes from the small Italian island of Capri and also hints at the goal of the project: a small CAPtioner with Rapid Inference.
9
+
10
+ ## Model Architecture
11
+
12
+ - Vision encoder: `google/siglip2-base-patch16-224` (pooled embeddings)
13
+ - Projector: MLP `768 -> 3072 -> 896`
14
+ - Decoder: `Qwen/Qwen2.5-0.5B`
15
+ - Adaptation: LoRA on `q_proj` and `v_proj`
16
+
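The pieces fit together simply: the SigLIP2 pooled embedding is projected into the decoder's embedding space and swapped in for the `<image>` token before generation. A minimal sketch of that flow, mirroring `modeling_capri.py` below (shapes illustrative, random tensor as a stand-in for a real embedding):

```python
import torch
import torch.nn as nn

# Projector: SigLIP2 pooled dim (768) -> hidden (3072) -> Qwen2.5-0.5B embedding dim (896)
projector = nn.Sequential(nn.Linear(768, 3072), nn.GELU(), nn.Linear(3072, 896))

pooled = torch.randn(4, 768)       # one pooled SigLIP2 embedding per image
projected = projector(pooled)      # (4, 896), now in the decoder's embedding space

# The prompt "<image> Caption:" is embedded as usual, the embedding at the <image>
# position is overwritten with `projected`, and the LoRA-adapted Qwen2.5-0.5B
# decoder generates the caption from those inputs_embeds.
```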
17
+ ## Load Modes
18
+
19
+ Embedding-only mode keeps SigLIP out of downloads and VRAM:
20
+
21
+ ```python
22
+ from transformers import AutoModel, AutoProcessor
23
+ import torch
24
+
25
+ processor = AutoProcessor.from_pretrained("Ligul/capri", trust_remote_code=True)
26
+ model = AutoModel.from_pretrained(
27
+ "Ligul/capri",
28
+ trust_remote_code=True,
29
+ load_vision_tower=False,
30
+ torch_dtype=torch.bfloat16,
31
+ )
32
+
33
+ inputs = processor(
34
+ pooled_embeddings=torch.randn(2, 768),
35
+ return_tensors="pt",
36
+ )
37
+ captions = model.generate_captions(
38
+ pooled_embeddings=inputs["pooled_embeddings"],
39
+ processor=processor,
40
+ max_new_tokens=32,
41
+ decode_batch_size=2048,
42
+ )
43
+ ```
44
+
45
+ Image mode loads SigLIP lazily:
46
+
47
+ ```python
48
+ from PIL import Image
49
+ from transformers import AutoModel, AutoProcessor
50
+ import torch
51
+
52
+ processor = AutoProcessor.from_pretrained("Ligul/capri", trust_remote_code=True)
53
+ model = AutoModel.from_pretrained(
54
+ "Ligul/capri",
55
+ trust_remote_code=True,
56
+ load_vision_tower=True,
57
+ torch_dtype=torch.bfloat16,
58
+ )
59
+
60
+ image = Image.open("example.jpg").convert("RGB")
61
+ captions = model.generate_captions(
62
+ images=[image],
63
+ processor=processor,
64
+ max_new_tokens=32,
65
+ vision_batch_size=64,
66
+ decode_batch_size=2048,
67
+ )
68
+ ```
69
+
70
+ `generate()` is still available for low-level token generation if you want raw token ids.
71
+
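For example, reusing `model` and `processor` from the snippets above, the processor outputs can be fed straight to `generate()` and decoded manually (a sketch; the random embeddings are placeholders for real SigLIP2 features):

```python
inputs = processor(pooled_embeddings=torch.randn(2, 768), return_tensors="pt")
token_ids = model.generate(
    pooled_embeddings=inputs["pooled_embeddings"],
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=32,
)  # generation runs from inputs_embeds, so the sequences contain only new tokens
captions = processor.batch_decode(token_ids, skip_special_tokens=True)
```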
72
+ ## Batch Guidance
73
+
74
+ Use different knobs for the two stages:
75
+
76
+ - `vision_batch_size`: keep it moderate; image preprocessing plus the SigLIP forward pass is the expensive stage
77
+ - `decode_batch_size`: can be much larger; pooled embeddings are tiny and Qwen generation batches well
78
+
79
+ Reasonable defaults:
80
+
81
+ - `vision_batch_size=64`
82
+ - `decode_batch_size=1024`
83
+
84
+ On larger GPUs, decode often scales to `2048+`.
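The snippets above use random tensors as placeholders. In a real pipeline the pooled embeddings come from SigLIP2 itself; a rough sketch of computing them upstream and reusing the `model`/`processor` objects from the embedding-only example (the SigLIP2 loading details here are an assumption about the `transformers` API, not something shipped in this repo):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

# Assumed upstream encoder; Capri's projector expects its 768-d pooled output.
siglip_processor = AutoImageProcessor.from_pretrained("google/siglip2-base-patch16-224")
siglip = AutoModel.from_pretrained("google/siglip2-base-patch16-224").eval()

images = [Image.open("example.jpg").convert("RGB")]
pixel_values = siglip_processor(images=images, return_tensors="pt")["pixel_values"]

with torch.no_grad():
    # Same pooled output Capri's own vision path uses (pooler_output), shape (N, 768)
    pooled = siglip.vision_model(pixel_values=pixel_values).pooler_output

captions = model.generate_captions(
    pooled_embeddings=pooled,
    processor=processor,
    max_new_tokens=32,
    decode_batch_size=1024,
)
```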
__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from .configuration_capri import CapriConfig
2
+ from .modeling_capri import CapriForConditionalGeneration
3
+ from .processing_capri import CapriProcessor
4
+
5
+ __all__ = ["CapriConfig", "CapriForConditionalGeneration", "CapriProcessor"]
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "architectures": [
3
+ "CapriForConditionalGeneration"
4
+ ],
5
+ "model_type": "capri",
6
+ "text_model_name_or_path": "Qwen/Qwen2.5-0.5B",
7
+ "vision_model_name_or_path": "google/siglip2-base-patch16-224",
8
+ "adapter_subdir": "text_adapter",
9
+ "projector_type": "mlp",
10
+ "projector_in_dim": 768,
11
+ "projector_hidden_dim": 3072,
12
+ "projector_out_dim": 896,
13
+ "image_token": "<image>",
14
+ "image_token_id": 151665,
15
+ "prompt_prefix": "<image> Caption:",
16
+ "max_length": 64,
17
+ "load_vision_tower_by_default": false,
18
+ "processor_class": "CapriProcessor",
19
+ "auto_map": {
20
+ "AutoConfig": "configuration_capri.CapriConfig",
21
+ "AutoModel": "modeling_capri.CapriForConditionalGeneration",
22
+ "AutoModelForCausalLM": "modeling_capri.CapriForConditionalGeneration"
23
+ }
24
+ }
configuration_capri.py ADDED
@@ -0,0 +1,37 @@
1
+ from transformers import PretrainedConfig
2
+
3
+
4
+ class CapriConfig(PretrainedConfig):
5
+ model_type = "capri"
6
+
7
+ def __init__(
8
+ self,
9
+ text_model_name_or_path="Qwen/Qwen2.5-0.5B",
10
+ vision_model_name_or_path="google/siglip2-base-patch16-224",
11
+ adapter_subdir="text_adapter",
12
+ projector_type="mlp",
13
+ projector_in_dim=768,
14
+ projector_hidden_dim=3072,
15
+ projector_out_dim=896,
16
+ image_token="<image>",
17
+ image_token_id=151665,
18
+ prompt_prefix="<image> Caption:",
19
+ max_length=64,
20
+ load_vision_tower_by_default=False,
21
+ processor_class="CapriProcessor",
22
+ **kwargs,
23
+ ):
24
+ self.text_model_name_or_path = text_model_name_or_path
25
+ self.vision_model_name_or_path = vision_model_name_or_path
26
+ self.adapter_subdir = adapter_subdir
27
+ self.projector_type = projector_type
28
+ self.projector_in_dim = projector_in_dim
29
+ self.projector_hidden_dim = projector_hidden_dim
30
+ self.projector_out_dim = projector_out_dim
31
+ self.image_token = image_token
32
+ self.image_token_id = image_token_id
33
+ self.prompt_prefix = prompt_prefix
34
+ self.max_length = max_length
35
+ self.load_vision_tower_by_default = load_vision_tower_by_default
36
+ self.processor_class = processor_class
37
+ super().__init__(**kwargs)
generation_config.json ADDED
@@ -0,0 +1,5 @@
1
+ {
2
+ "_from_model_config": false,
3
+ "do_sample": false,
4
+ "max_new_tokens": 32
5
+ }
modeling_capri.py ADDED
@@ -0,0 +1,334 @@
1
+ import os
2
+ from typing import Any
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from peft import PeftModel
7
+ from safetensors.torch import load_file, save_file
8
+ from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel
9
+ from transformers.utils import cached_file
10
+
11
+ from .configuration_capri import CapriConfig
12
+
13
+
14
+ class MLPProjector(nn.Module):
15
+ def __init__(self, in_dim: int, hidden_dim: int, out_dim: int):
16
+ super().__init__()
17
+ self.net = nn.Sequential(
18
+ nn.Linear(in_dim, hidden_dim),
19
+ nn.GELU(),
20
+ nn.Linear(hidden_dim, out_dim),
21
+ )
22
+
23
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
24
+ return self.net(x)
25
+
26
+
27
+ class CapriForConditionalGeneration(PreTrainedModel):
28
+ config_class = CapriConfig
29
+ base_model_prefix = "capri"
30
+ main_input_name = "input_ids"
31
+
32
+ def __init__(self, config: CapriConfig):
33
+ super().__init__(config)
34
+ self.projector = MLPProjector(
35
+ in_dim=config.projector_in_dim,
36
+ hidden_dim=config.projector_hidden_dim,
37
+ out_dim=config.projector_out_dim,
38
+ )
39
+ self.text_model = None
40
+ self.vision_model = None
41
+ self.tokenizer = None
42
+ self._repo_id_or_path = None
43
+ self._hub_kwargs = {}
44
+ self._text_model_kwargs = {}
45
+ self._vision_model_kwargs = {}
46
+ self.post_init()
47
+
48
+ @classmethod
49
+ def from_pretrained(cls, pretrained_model_name_or_path: str, *model_args, config=None, **kwargs):
50
+ load_vision_tower = kwargs.pop("load_vision_tower", None)
51
+ if config is None:
52
+ config, model_kwargs = CapriConfig.from_pretrained(
53
+ pretrained_model_name_or_path,
54
+ return_unused_kwargs=True,
55
+ **kwargs,
56
+ )
57
+ else:
58
+ model_kwargs = dict(kwargs)
59
+
60
+ model = cls(config, *model_args)
61
+ model._repo_id_or_path = pretrained_model_name_or_path
62
+ model._hub_kwargs = {
63
+ "cache_dir": model_kwargs.get("cache_dir"),
64
+ "force_download": model_kwargs.get("force_download"),
65
+ "local_files_only": model_kwargs.get("local_files_only"),
66
+ "revision": model_kwargs.get("revision"),
67
+ "token": model_kwargs.get("token"),
68
+ "trust_remote_code": model_kwargs.get("trust_remote_code", True),
69
+ }
70
+ base_runtime = {
71
+ "cache_dir": model_kwargs.get("cache_dir"),
72
+ "force_download": model_kwargs.get("force_download"),
73
+ "local_files_only": model_kwargs.get("local_files_only"),
74
+ "revision": model_kwargs.get("revision"),
75
+ "token": model_kwargs.get("token"),
76
+ "torch_dtype": model_kwargs.get("torch_dtype", model_kwargs.get("dtype")),
77
+ "device_map": model_kwargs.get("device_map"),
78
+ "attn_implementation": model_kwargs.get("attn_implementation"),
79
+ }
80
+ model._text_model_kwargs = {k: v for k, v in base_runtime.items() if v is not None}
81
+ model._vision_model_kwargs = {k: v for k, v in base_runtime.items() if k != "attn_implementation" and v is not None}
82
+
83
+ model._load_tokenizer()
84
+ model._load_text_model()
85
+ model._load_projector_weights()
86
+
87
+ should_load_vision = (
88
+ config.load_vision_tower_by_default if load_vision_tower is None else load_vision_tower
89
+ )
90
+ if should_load_vision:
91
+ model._load_vision_model()
92
+
93
+ model.eval()
94
+ return model
95
+
96
+ def save_pretrained(self, save_directory: str, **kwargs):
97
+ os.makedirs(save_directory, exist_ok=True)
98
+ self.config.save_pretrained(save_directory)
99
+ save_file(
100
+ self.projector.state_dict(),
101
+ os.path.join(save_directory, "projector.safetensors"),
102
+ )
103
+ if self.text_model is not None:
104
+ self.text_model.save_pretrained(
105
+ os.path.join(save_directory, self.config.adapter_subdir)
106
+ )
107
+ if self.tokenizer is not None:
108
+ self.tokenizer.save_pretrained(save_directory)
109
+
110
+ def _resolve_repo_file(self, filename: str, subfolder: str | None = None) -> str:
111
+ if os.path.isdir(self._repo_id_or_path):
112
+ parts = [self._repo_id_or_path]
113
+ if subfolder:
114
+ parts.append(subfolder)
115
+ parts.append(filename)
116
+ return os.path.join(*parts)
117
+ return cached_file(self._repo_id_or_path, filename, subfolder=subfolder, **self._hub_kwargs)
118
+
119
+ def _load_tokenizer(self):
120
+ if self.tokenizer is not None:
121
+ return
122
+
123
+ if self.config.image_token_id is None or self.config.image_token is None:
124
+ raise ValueError("`image_token_id` and `image_token` must be set in the config.")
125
+
126
+ self.tokenizer = AutoTokenizer.from_pretrained(
127
+ self._repo_id_or_path,
128
+ **self._hub_kwargs,
129
+ )
130
+ if self.tokenizer.pad_token is None:
131
+ self.tokenizer.pad_token = self.tokenizer.eos_token
132
+
133
+ def _load_text_model(self):
134
+ if self.text_model is not None:
135
+ return
136
+ base_model = AutoModelForCausalLM.from_pretrained(
137
+ self.config.text_model_name_or_path,
138
+ **self._text_model_kwargs,
139
+ )
140
+ self.text_model = PeftModel.from_pretrained(
141
+ base_model,
142
+ self._repo_id_or_path,
143
+ subfolder=self.config.adapter_subdir,
144
+ is_trainable=False,
145
+ **self._hub_kwargs,
146
+ )
147
+ self.text_model.eval()
148
+
149
+ def _load_vision_model(self):
150
+ if self.vision_model is not None:
151
+ return
152
+ model = AutoModel.from_pretrained(
153
+ self.config.vision_model_name_or_path,
154
+ **self._vision_model_kwargs,
155
+ )
156
+ self.vision_model = getattr(model, "vision_model", model)
157
+ self.vision_model.eval()
158
+
159
+ def _load_projector_weights(self):
160
+ projector_path = self._resolve_repo_file("projector.safetensors")
161
+ state_dict = load_file(projector_path)
162
+ self.projector.load_state_dict(state_dict)
163
+
164
+ embed_weight = self.text_model.get_input_embeddings().weight
165
+ self.projector.to(device=embed_weight.device, dtype=embed_weight.dtype)
166
+
167
+ @property
168
+ def vision_loaded(self) -> bool:
169
+ return self.vision_model is not None
170
+
171
+ @staticmethod
172
+ def _module_device_dtype(module: nn.Module) -> tuple[torch.device, torch.dtype]:
173
+ param = next(module.parameters())
174
+ return param.device, param.dtype
175
+
176
+ @staticmethod
177
+ def _chunk_list(items: list[Any], chunk_size: int) -> list[list[Any]]:
178
+ return [items[i : i + chunk_size] for i in range(0, len(items), chunk_size)]
179
+
180
+ def encode_images(self, pixel_values: torch.Tensor) -> torch.Tensor:
181
+ self._load_vision_model()
182
+ vision_device, vision_dtype = self._module_device_dtype(self.vision_model)
183
+ pixel_values = pixel_values.to(device=vision_device, dtype=vision_dtype)
184
+ outputs = self.vision_model(pixel_values=pixel_values)
185
+ pooled = getattr(outputs, "pooler_output", None)
186
+ if pooled is None:
187
+ last_hidden = getattr(outputs, "last_hidden_state", None)
188
+ if last_hidden is None:
189
+ raise ValueError("Vision model did not return pooler_output or last_hidden_state.")
190
+ pooled = last_hidden[:, 0]
191
+ return pooled
192
+
193
+ def _prompt_inputs(self, batch_size: int, device: torch.device) -> tuple[torch.Tensor, torch.Tensor]:
194
+ encoded = self.tokenizer(
195
+ [self.config.prompt_prefix] * batch_size,
196
+ add_special_tokens=False,
197
+ return_tensors="pt",
198
+ padding=True,
199
+ )
200
+ return encoded["input_ids"].to(device), encoded["attention_mask"].to(device)
201
+
202
+ def _prepare_inputs(
203
+ self,
204
+ *,
205
+ input_ids: torch.Tensor | None = None,
206
+ attention_mask: torch.Tensor | None = None,
207
+ pooled_embeddings: torch.Tensor | None = None,
208
+ pixel_values: torch.Tensor | None = None,
209
+ ) -> tuple[torch.Tensor, torch.Tensor]:
210
+ if pooled_embeddings is None:
211
+ if pixel_values is None:
212
+ raise ValueError("Provide either `pooled_embeddings` or `pixel_values`.")
213
+ pooled_embeddings = self.encode_images(pixel_values)
214
+
215
+ if pooled_embeddings.ndim == 1:
216
+ pooled_embeddings = pooled_embeddings.unsqueeze(0)
217
+
218
+ target_device = self.text_model.get_input_embeddings().weight.device
219
+ if input_ids is None:
220
+ input_ids, attention_mask = self._prompt_inputs(pooled_embeddings.size(0), target_device)
221
+ else:
222
+ input_ids = input_ids.to(target_device)
223
+ if attention_mask is None:
224
+ attention_mask = torch.ones_like(input_ids, device=target_device)
225
+ else:
226
+ attention_mask = attention_mask.to(target_device)
227
+
228
+ inputs_embeds = self.text_model.get_input_embeddings()(input_ids)
229
+ pooled_embeddings = pooled_embeddings.to(device=inputs_embeds.device, dtype=inputs_embeds.dtype)
230
+ projected = self.projector(pooled_embeddings)
231
+
232
+ image_mask = input_ids.eq(self.config.image_token_id)
233
+ image_count = image_mask.sum(dim=1)
234
+ if not torch.all(image_count == 1):
235
+ raise ValueError("Each sample must contain exactly one `<image>` token.")
236
+
237
+ token_positions = image_mask.float().argmax(dim=1)
238
+ batch_positions = torch.arange(input_ids.size(0), device=input_ids.device)
239
+ inputs_embeds[batch_positions, token_positions] = projected
240
+ return inputs_embeds, attention_mask
241
+
242
+ def forward(
243
+ self,
244
+ input_ids: torch.Tensor | None = None,
245
+ attention_mask: torch.Tensor | None = None,
246
+ pooled_embeddings: torch.Tensor | None = None,
247
+ pixel_values: torch.Tensor | None = None,
248
+ labels: torch.Tensor | None = None,
249
+ **kwargs: Any,
250
+ ):
251
+ if input_ids is None and labels is not None:
252
+ raise ValueError("`input_ids` are required when passing `labels`.")
253
+
254
+ inputs_embeds, attention_mask = self._prepare_inputs(
255
+ input_ids=input_ids,
256
+ attention_mask=attention_mask,
257
+ pooled_embeddings=pooled_embeddings,
258
+ pixel_values=pixel_values,
259
+ )
260
+ return self.text_model(
261
+ inputs_embeds=inputs_embeds,
262
+ attention_mask=attention_mask,
263
+ labels=labels,
264
+ **kwargs,
265
+ )
266
+
267
+ @torch.no_grad()
268
+ def generate(
269
+ self,
270
+ input_ids: torch.Tensor | None = None,
271
+ attention_mask: torch.Tensor | None = None,
272
+ pooled_embeddings: torch.Tensor | None = None,
273
+ pixel_values: torch.Tensor | None = None,
274
+ **generate_kwargs: Any,
275
+ ) -> torch.Tensor:
276
+ inputs_embeds, attention_mask = self._prepare_inputs(
277
+ input_ids=input_ids,
278
+ attention_mask=attention_mask,
279
+ pooled_embeddings=pooled_embeddings,
280
+ pixel_values=pixel_values,
281
+ )
282
+
283
+ generate_kwargs.setdefault("do_sample", False)
284
+ generate_kwargs.setdefault("eos_token_id", self.tokenizer.eos_token_id)
285
+ generate_kwargs.setdefault("pad_token_id", self.tokenizer.pad_token_id)
286
+ return self.text_model.generate(
287
+ inputs_embeds=inputs_embeds,
288
+ attention_mask=attention_mask,
289
+ **generate_kwargs,
290
+ )
291
+
292
+ @torch.no_grad()
293
+ def generate_captions(
294
+ self,
295
+ *,
296
+ images: Any = None,
297
+ pooled_embeddings: Any = None,
298
+ processor=None,
299
+ vision_batch_size: int = 64,
300
+ decode_batch_size: int = 1024,
301
+ **generate_kwargs: Any,
302
+ ) -> list[str]:
303
+ if processor is None:
304
+ raise ValueError("`processor` is required for `generate_captions()`.")
305
+ if images is None and pooled_embeddings is None:
306
+ raise ValueError("Provide either `images` or `pooled_embeddings`.")
307
+ if images is not None and pooled_embeddings is not None:
308
+ raise ValueError("Provide only one of `images` or `pooled_embeddings`.")
309
+ if vision_batch_size <= 0 or decode_batch_size <= 0:
310
+ raise ValueError("Batch sizes must be positive integers.")
311
+
312
+ if images is not None:
313
+ image_items = processor.normalize_images(images)
314
+ all_pooled = []
315
+ for image_chunk in self._chunk_list(image_items, vision_batch_size):
316
+ image_inputs = processor(images=image_chunk, return_tensors="pt")
317
+ pooled_chunk = self.encode_images(image_inputs["pixel_values"]).detach().cpu()
318
+ all_pooled.append(pooled_chunk)
319
+ pooled_embeddings = torch.cat(all_pooled, dim=0)
320
+ else:
321
+ pooled_embeddings = processor.normalize_pooled_embeddings(pooled_embeddings).detach().cpu()
322
+
323
+ captions = []
324
+ total = pooled_embeddings.shape[0]
325
+ for start in range(0, total, decode_batch_size):
326
+ pooled_chunk = pooled_embeddings[start : start + decode_batch_size]
327
+ model_inputs = dict(processor(
328
+ pooled_embeddings=pooled_chunk,
329
+ return_tensors="pt",
330
+ ))
331
+ sequences = self.generate(**model_inputs, **generate_kwargs)
332
+ captions.extend(processor.batch_decode(sequences, skip_special_tokens=True))
333
+
334
+ return captions
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "do_normalize": true,
3
+ "do_rescale": true,
4
+ "do_resize": true,
5
+ "image_mean": [
6
+ 0.5,
7
+ 0.5,
8
+ 0.5
9
+ ],
10
+ "image_processor_type": "SiglipImageProcessor",
11
+ "image_std": [
12
+ 0.5,
13
+ 0.5,
14
+ 0.5
15
+ ],
16
+ "processor_class": "CapriProcessor",
17
+ "resample": 2,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 224,
21
+ "width": 224
22
+ }
23
+ }
processing_capri.py ADDED
@@ -0,0 +1,114 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+ import torch
6
+ from transformers import AutoConfig
7
+ from transformers.feature_extraction_utils import BatchFeature
8
+ from transformers.image_processing_utils import BaseImageProcessor
9
+ from transformers.processing_utils import ProcessorMixin
10
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
11
+
12
+
13
+ class CapriProcessor(ProcessorMixin):
14
+ attributes = ["image_processor", "tokenizer"]
15
+ image_processor_class = "SiglipImageProcessor"
16
+ tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
17
+
18
+ def __init__(
19
+ self,
20
+ image_processor: BaseImageProcessor,
21
+ tokenizer: PreTrainedTokenizerBase,
22
+ prompt_prefix: str = "<image> Caption:",
23
+ image_token: str = "<image>",
24
+ pooled_embedding_dim: int = 768,
25
+ ):
26
+ self.prompt_prefix = prompt_prefix
27
+ self.image_token = image_token
28
+ self.pooled_embedding_dim = pooled_embedding_dim
29
+ super().__init__(image_processor, tokenizer)
30
+
31
+ @classmethod
32
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
33
+ kwargs.setdefault("use_fast", False)
34
+ processor = super().from_pretrained(pretrained_model_name_or_path, **kwargs)
35
+ config = AutoConfig.from_pretrained(
36
+ pretrained_model_name_or_path,
37
+ trust_remote_code=kwargs.get("trust_remote_code", False),
38
+ )
39
+ processor.prompt_prefix = getattr(config, "prompt_prefix", processor.prompt_prefix)
40
+ processor.image_token = getattr(config, "image_token", processor.image_token)
41
+ processor.pooled_embedding_dim = getattr(config, "projector_in_dim", processor.pooled_embedding_dim)
42
+ return processor
43
+
44
+ def normalize_images(self, images) -> list[Any]:
45
+ if isinstance(images, torch.Tensor):
46
+ if images.ndim == 4:
47
+ return [images[i] for i in range(images.shape[0])]
48
+ return [images]
49
+ if isinstance(images, (list, tuple)):
50
+ return list(images)
51
+ return [images]
52
+
53
+ def normalize_pooled_embeddings(self, pooled_embeddings) -> torch.Tensor:
54
+ pooled = torch.as_tensor(pooled_embeddings)
55
+ if pooled.ndim == 1:
56
+ pooled = pooled.unsqueeze(0)
57
+ if pooled.ndim != 2:
58
+ raise ValueError("`pooled_embeddings` must be a 1D embedding or a 2D batch of embeddings.")
59
+ if pooled.shape[-1] != self.pooled_embedding_dim:
60
+ raise ValueError(
61
+ f"Expected pooled embedding dim {self.pooled_embedding_dim}, got {pooled.shape[-1]}."
62
+ )
63
+ return pooled
64
+
65
+ def __call__(
66
+ self,
67
+ images=None,
68
+ pooled_embeddings=None,
69
+ text=None,
70
+ return_tensors: str | None = "pt",
71
+ padding: bool | str = True,
72
+ truncation: bool = False,
73
+ max_length: int | None = None,
74
+ **kwargs: Any,
75
+ ) -> BatchFeature:
76
+ if images is None and pooled_embeddings is None and text is None:
77
+ raise ValueError("Provide `images`, `pooled_embeddings`, or `text`.")
78
+
79
+ batch = {}
80
+ batch_size = None
81
+
82
+ if images is not None:
83
+ image_features = self.image_processor(images=images, return_tensors=return_tensors, **kwargs)
84
+ batch.update(dict(image_features))
85
+ batch_size = batch["pixel_values"].shape[0]
86
+
87
+ if pooled_embeddings is not None:
88
+ pooled = self.normalize_pooled_embeddings(pooled_embeddings)
89
+ batch["pooled_embeddings"] = pooled
90
+ batch_size = pooled.shape[0]
91
+
92
+ if text is None and batch_size is not None:
93
+ text = [self.prompt_prefix] * batch_size
94
+
95
+ if text is not None:
96
+ if isinstance(text, str):
97
+ text = [text]
98
+ tokenized = self.tokenizer(
99
+ text,
100
+ add_special_tokens=False,
101
+ padding=padding,
102
+ truncation=truncation,
103
+ max_length=max_length,
104
+ return_tensors=return_tensors,
105
+ )
106
+ batch.update(dict(tokenized))
107
+
108
+ return BatchFeature(data=batch, tensor_type=return_tensors)
109
+
110
+ def batch_decode(self, *args, **kwargs):
111
+ return self.tokenizer.batch_decode(*args, **kwargs)
112
+
113
+ def decode(self, *args, **kwargs):
114
+ return self.tokenizer.decode(*args, **kwargs)
processor_config.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "processor_class": "CapriProcessor",
3
+ "prompt_prefix": "<image> Caption:",
4
+ "image_token": "<image>",
5
+ "pooled_embedding_dim": 768,
6
+ "auto_map": {
7
+ "AutoProcessor": "processing_capri.CapriProcessor"
8
+ }
9
+ }
projector.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eef2cd9c916ace570cad3ffc03f8ba0b2ae7a13d79c3cd26b555b53008645415
3
+ size 20463424
text_adapter/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: Qwen/Qwen2.5-0.5B
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Qwen/Qwen2.5-0.5B
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.18.1
text_adapter/adapter_config.json ADDED
@@ -0,0 +1,41 @@
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "Qwen/Qwen2.5-0.5B",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 32,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "megatron_config": null,
23
+ "megatron_core": "megatron.core",
24
+ "modules_to_save": null,
25
+ "peft_type": "LORA",
26
+ "peft_version": "0.18.1",
27
+ "qalora_group_size": 16,
28
+ "r": 16,
29
+ "rank_pattern": {},
30
+ "revision": null,
31
+ "target_modules": [
32
+ "v_proj",
33
+ "q_proj"
34
+ ],
35
+ "target_parameters": null,
36
+ "task_type": "CAUSAL_LM",
37
+ "trainable_token_indices": null,
38
+ "use_dora": false,
39
+ "use_qalora": false,
40
+ "use_rslora": false
41
+ }
text_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:895e9cc5ceaa3b35e74bf764b286f848ee31900859e2b849994f3168a39ebc8e
3
+ size 4337968
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d5cbd0458fdb84541ddde44bf8ad64825062559d7e0ff402d601905ec49941c
3
+ size 11422076
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": null,
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|endoftext|>",
7
+ "errors": "replace",
8
+ "extra_special_tokens": [
9
+ "<image>"
10
+ ],
11
+ "is_local": true,
12
+ "model_max_length": 131072,
13
+ "pad_token": "<|endoftext|>",
14
+ "split_special_tokens": false,
15
+ "tokenizer_class": "Qwen2Tokenizer",
16
+ "unk_token": null
17
+ }