Upload 21 files
- .gitattributes +1 -0
- added_tokens.json +25 -0
- config.json +84 -0
- configuration_minicpm.py +104 -0
- constants.py +190 -0
- generation_config.json +6 -0
- image_processing_minicpmv.py +417 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling_minicpmv.py +476 -0
- modeling_navit_siglip.py +937 -0
- preprocessor_config.json +47 -0
- processing_minicpmv.py +238 -0
- processor_config.json +6 -0
- resampler.py +782 -0
- special_tokens_map.json +52 -0
- tokenization_minicpmv_fast.py +66 -0
- tokenizer.json +3 -0
- tokenizer_config.json +238 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1,25 @@
{
  "</box>": 151651,
  "</image>": 151647,
  "</image_id>": 151659,
  "</point>": 151655,
  "</quad>": 151653,
  "</ref>": 151649,
  "</slice>": 151657,
  "<box>": 151650,
  "<image>": 151646,
  "<image_id>": 151658,
  "<point>": 151654,
  "<quad>": 151652,
  "<ref>": 151648,
  "<slice>": 151656,
  "<|endoftext|>": 151643,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|reserved_special_token_0|>": 151660,
  "<|reserved_special_token_1|>": 151661,
  "<|reserved_special_token_2|>": 151662,
  "<|reserved_special_token_3|>": 151663,
  "<|reserved_special_token_4|>": 151664,
  "<|reserved_special_token_5|>": 151665
}
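Note: this mapping registers MiniCPM-V's vision and grounding markers (<image>, <box>, <ref>, <quad>, <point>, <slice>, <image_id>) plus the Qwen2 chat tokens on top of the base vocabulary. The snippet below is a minimal sketch for checking that the tokenizer resolves them to the IDs listed here; the repo id is a placeholder (the base checkpoint from _name_or_path), and trust_remote_code=True is assumed because the tokenizer class ships with this upload.

from transformers import AutoTokenizer

# Placeholder repo id; substitute the path of this upload.
tok = AutoTokenizer.from_pretrained("openbmb/MiniCPM-V-2_6", trust_remote_code=True)

# Each added token should round-trip to the ID recorded in added_tokens.json,
# e.g. "<image>" -> 151646 and "</image>" -> 151647.
for t in ["<image>", "</image>", "<box>", "</box>", "<|im_start|>", "<|im_end|>"]:
    print(t, tok.convert_tokens_to_ids(t))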
config.json
ADDED
@@ -0,0 +1,84 @@
{
  "_name_or_path": "openbmb/MiniCPM-V-2_6",
  "architectures": [
    "MiniCPMV"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_minicpm.MiniCPMVConfig",
    "AutoModel": "modeling_minicpmv.MiniCPMV",
    "AutoModelForCausalLM": "modeling_minicpmv.MiniCPMV"
  },
  "batch_vision_input": true,
  "bos_token_id": 151643,
  "drop_vision_last_layer": false,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "image_size": 448,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "minicpmv",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "od_model_name": "google/owlv2-base-patch16-ensemble",
  "od_processor_name": "google/owlv2-base-patch16-ensemble",
  "patch_size": 14,
  "quantization_config": {
    "_load_in_4bit": true,
    "_load_in_8bit": false,
    "bnb_4bit_compute_dtype": "bfloat16",
    "bnb_4bit_quant_storage": "uint8",
    "bnb_4bit_quant_type": "nf4",
    "bnb_4bit_use_double_quant": true,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": [
      "out_proj",
      "kv_proj",
      "lm_head"
    ],
    "llm_int8_threshold": 6.0,
    "load_in_4bit": true,
    "load_in_8bit": false,
    "quant_method": "bitsandbytes"
  },
  "query_num": 64,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "slice_config": {
    "max_slice_nums": 9,
    "model_type": "minicpmv",
    "patch_size": 14,
    "scale_resolution": 448
  },
  "slice_mode": true,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.50.0.dev0",
  "use_cache": true,
  "use_image_id": true,
  "use_sliding_window": false,
  "version": 2.6,
  "vision_batch_size": 16,
  "vision_config": {
    "_attn_implementation_autoset": true,
    "attention_dropout": 0.0,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "image_size": 980,
    "intermediate_size": 4304,
    "layer_norm_eps": 1e-06,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 27,
    "patch_size": 14
  },
  "vocab_size": 151666
}
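Note: auto_map wires AutoConfig / AutoModel to the configuration_minicpm.py and modeling_minicpmv.py files in this upload, and quantization_config declares bitsandbytes nf4 4-bit weights with bfloat16 compute, so loading requires trust_remote_code=True and bitsandbytes installed. The following is a minimal loading sketch under those assumptions; the repo id is a placeholder.

import torch
from transformers import AutoModel

repo = "openbmb/MiniCPM-V-2_6"  # placeholder; use the path of this upload

# auto_map routes AutoModel to modeling_minicpmv.MiniCPMV; the stored
# bitsandbytes nf4 quantization_config is applied while the weights load.
model = AutoModel.from_pretrained(
    repo,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # matches bnb_4bit_compute_dtype
    device_map="auto",           # requires accelerate
)
model.eval()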
configuration_minicpm.py
ADDED
@@ -0,0 +1,104 @@
# coding=utf-8
""" MiniCPMV model configuration"""

import os
from typing import Union

from transformers.utils import logging
from transformers import Qwen2Config, PretrainedConfig
from .modeling_navit_siglip import SiglipVisionConfig

logger = logging.get_logger(__name__)


class MiniCPMVSliceConfig(PretrainedConfig):
    model_type = "minicpmv"

    def __init__(
        self,
        patch_size=14,
        max_slice_nums=9,
        scale_resolution=448,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.max_slice_nums = max_slice_nums
        self.scale_resolution = scale_resolution

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "minicpmv":
            config_dict = config_dict["slice_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class MiniCPMVConfig(Qwen2Config):
    model_type = "minicpmv"
    keys_to_ignore_at_inference = ["past_key_values"]

    default_vision_config = {
        "hidden_size": 1152,
        "image_size": 980,
        "intermediate_size": 4304,
        "model_type": "siglip",
        "num_attention_heads": 16,
        "num_hidden_layers": 27,
        "patch_size": 14,
    }

    def __init__(
        self,
        use_cache=True,
        query_num=64,
        image_size=448,
        drop_vision_last_layer=True,
        batch_vision_input=True,
        slice_config=None,
        vision_config=None,
        od_model_name="google/owlv2-base-patch16-ensemble",
        od_processor_name="google/owlv2-base-patch16-ensemble",
        use_image_id=True,
        vision_batch_size=16,
        **kwargs,
    ):
        self.use_cache = use_cache
        self.query_num = query_num
        self.image_size = image_size
        self.drop_vision_last_layer = drop_vision_last_layer
        self.batch_vision_input = batch_vision_input
        self.use_image_id = use_image_id
        self.vision_batch_size = vision_batch_size
        self.od_model_name = od_model_name
        self.od_processor_name = od_processor_name

        if slice_config is None:
            self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
        else:
            self.slice_config = MiniCPMVSliceConfig(**slice_config)
        self.slice_mode = True

        # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit add tgt_sizes
        if vision_config is None:
            self.vision_config = SiglipVisionConfig(**self.default_vision_config)
            logger.info("vision_config is None, using default vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = SiglipVisionConfig(**vision_config)
        elif isinstance(vision_config, SiglipVisionConfig):
            self.vision_config = vision_config

        self.patch_size = self.vision_config.patch_size

        super().__init__(**kwargs)
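Note: MiniCPMVConfig extends Qwen2Config with the vision-side fields (query_num, slice_config, vision_config, and the OWLv2 od_* names) and falls back to a default SigLIP vision config when none is given. A small inspection sketch, assuming a placeholder repo id and trust_remote_code so the class above is resolved through auto_map:

from transformers import AutoConfig

# Placeholder repo id; auto_map resolves this to configuration_minicpm.MiniCPMVConfig.
cfg = AutoConfig.from_pretrained("openbmb/MiniCPM-V-2_6", trust_remote_code=True)

print(type(cfg).__name__)               # MiniCPMVConfig (a Qwen2Config subclass)
print(cfg.query_num, cfg.patch_size)    # 64, 14 for this checkpoint
print(cfg.slice_config.max_slice_nums)  # 9 (the class default is 1 when unset)
print(cfg.vision_config.model_type)     # siglip_vision_model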
constants.py
ADDED
@@ -0,0 +1,190 @@
HOUSE_OBJECTS = [
    'wall', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
    'bookshelf', 'counter', 'desk', 'shelves',
    'dresser', 'pillow', 'mirror', 'books',
    'refrigerator', 'television', 'box',
    'person', 'nightstand', 'toilet', 'lamp', 'bathtub',
    'bag', 'trash can', 'kitchen island', 'microwave', 'oven', 'toaster',
    'plant', 'clock', 'fan', 'radio', 'heater', 'air conditioner',
    'vase', 'remote control', 'laundry basket', 'umbrella', 'broom', 'shoes',
    'closet', 'storage', 'vacuum cleaner',
    'step ladder', 'chandelier'
]

COCO_PANOPTIC_CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other', 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', 'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged', 'food-other-merged', 'building-other-merged', 'rock-merged', 'wall-other-merged', 'rug-merged']
COCO_SEMANTIC_CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'street sign', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella', 'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk', 'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'hair brush']
ADE_PANOPTIC_CLASSES = ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed', 'window', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'tub', 'rail', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'street lamp', 'booth', 'tv', 'airplane', 'dirt track', 'clothes', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'pool', 'stool', 'barrel', 'basket', 'falls', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'trash can', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag']
ADE20K_847 = ['wall', 'building', 'sky', 'tree', 'road', 'floor', 'ceiling', 'bed', 'sidewalk', 'earth', 'cabinet', 'person', 'grass', 'windowpane', 'car', 'mountain', 'plant', 'table', 'chair', 'curtain', 'door', 'sofa', 'sea', 'painting', 'water', 'mirror', 'house', 'rug', 'shelf', 'armchair', 'fence', 'field', 'lamp', 'rock', 'seat', 'river', 'desk', 'bathtub', 'railing', 'signboard', 'cushion', 'path', 'work surface', 'stairs', 'column', 'sink', 'wardrobe', 'snow', 'refrigerator', 'base', 'bridge', 'blind', 'runway', 'cliff', 'sand', 'fireplace', 'pillow', 'screen door', 'toilet', 'skyscraper', 'grandstand', 'box', 'pool table', 'palm', 'double door', 'coffee table', 'counter', 'countertop', 'chest of drawers', 'kitchen island', 'boat', 'waterfall', 'stove', 'flower', 'bookcase', 'controls', 'book', 'stairway', 'streetlight', 'computer', 'bus', 'swivel chair', 'light', 'bench', 'case', 'towel', 'fountain', 'embankment', 'television receiver', 'van', 'hill', 'awning', 'poster', 'truck', 'airplane', 'pole', 'tower', 'court', 'ball', 'aircraft carrier', 'buffet', 'hovel', 'apparel', 'minibike', 'animal', 'chandelier', 'step', 'booth', 'bicycle', 'doorframe', 'sconce', 'pond', 'trade name', 'bannister', 'bag', 'traffic light', 'gazebo', 'escalator', 'land', 'board', 'arcade machine', 'eiderdown', 'bar', 'stall', 'playground', 'ship', 'ottoman', 'ashcan', 'bottle', 'cradle', 'pot', 'conveyer belt', 'train', 'stool', 'lake', 'tank', 'ice', 'basket', 'manhole', 'tent', 'canopy', 'microwave', 'barrel', 'dirt track', 'beam', 'dishwasher', 'plate', 'screen', 'ruins', 'washer', 'blanket', 'plaything', 'food', 'screen', 'oven', 'stage', 'beacon', 'umbrella', 'sculpture', 'aqueduct', 'container', 'scaffolding', 'hood', 'curb', 'roller coaster', 'horse', 'catwalk', 'glass', 'vase', 'central reservation', 'carousel', 'radiator', 'closet', 'machine', 'pier', 'fan', 'inflatable bounce game', 'pitch', 'paper', 'arcade', 'hot tub', 'helicopter', 'tray', 'partition', 'vineyard', 'bowl', 'bullring', 'flag', 'pot', 'footbridge', 'shower', 'bag', 'bulletin board', 'confessional booth', 'trunk', 'forest', 'elevator door', 'laptop', 'instrument panel', 'bucket', 'tapestry', 'platform', 'jacket', 'gate', 'monitor', 'telephone booth', 'spotlight', 'ring', 'control panel', 'blackboard', 'air conditioner', 'chest', 'clock', 'sand dune', 'pipe', 'vault', 'table football', 'cannon', 'swimming pool', 'fluorescent', 'statue', 'loudspeaker', 'exhibitor', 'ladder', 'carport', 'dam', 'pulpit', 'skylight', 'water tower', 'grill', 'display board', 'pane', 'rubbish', 'ice rink', 'fruit', 'patio', 'vending machine', 'telephone', 'net', 'backpack', 'jar', 'track', 'magazine', 'shutter', 'roof', 'banner', 'landfill', 'post', 'altarpiece', 'hat', 'arch', 'table game', 'bag', 'document', 'dome', 'pier', 'shanties', 'forecourt', 'crane', 'dog', 'piano', 'drawing', 'cabin', 'ad', 'amphitheater', 'monument', 'henhouse', 'cockpit', 'heater', 'windmill', 'pool', 'elevator', 'decoration', 'labyrinth', 'text', 'printer', 'mezzanine', 'mattress', 'straw', 'stalls', 'patio', 'billboard', 'bus stop', 'trouser', 'console table', 'rack', 'notebook', 'shrine', 'pantry', 'cart', 'steam shovel', 'porch', 'postbox', 'figurine', 'recycling bin', 'folding screen', 'telescope', 'deck chair', 'kennel', 'coffee maker', 'altar', 'fish', 'easel', 'artificial golf green', 'iceberg', 'candlestick', 'shower stall', 'television stand', 'wall socket', 'skeleton', 'grand piano', 'candy', 'grille door', 'pedestal', 'jersey', 'shoe', 'gravestone', 'shanty', 
'structure', 'rocking chair', 'bird', 'place mat', 'tomb', 'big top', 'gas pump', 'lockers', 'cage', 'finger', 'bleachers', 'ferris wheel', 'hairdresser chair', 'mat', 'stands', 'aquarium', 'streetcar', 'napkin', 'dummy', 'booklet', 'sand trap', 'shop', 'table cloth', 'service station', 'coffin', 'drawer', 'cages', 'slot machine', 'balcony', 'volleyball court', 'table tennis', 'control table', 'shirt', 'merchandise', 'railway', 'parterre', 'chimney', 'can', 'tanks', 'fabric', 'alga', 'system', 'map', 'greenhouse', 'mug', 'barbecue', 'trailer', 'toilet tissue', 'organ', 'dishrag', 'island', 'keyboard', 'trench', 'basket', 'steering wheel', 'pitcher', 'goal', 'bread', 'beds', 'wood', 'file cabinet', 'newspaper', 'motorboat', 'rope', 'guitar', 'rubble', 'scarf', 'barrels', 'cap', 'leaves', 'control tower', 'dashboard', 'bandstand', 'lectern', 'switch', 'baseboard', 'shower room', 'smoke', 'faucet', 'bulldozer', 'saucepan', 'shops', 'meter', 'crevasse', 'gear', 'candelabrum', 'sofa bed', 'tunnel', 'pallet', 'wire', 'kettle', 'bidet', 'baby buggy', 'music stand', 'pipe', 'cup', 'parking meter', 'ice hockey rink', 'shelter', 'weeds', 'temple', 'patty', 'ski slope', 'panel', 'wallet', 'wheel', 'towel rack', 'roundabout', 'canister', 'rod', 'soap dispenser', 'bell', 'canvas', 'box office', 'teacup', 'trellis', 'workbench', 'valley', 'toaster', 'knife', 'podium', 'ramp', 'tumble dryer', 'fireplug', 'gym shoe', 'lab bench', 'equipment', 'rocky formation', 'plastic', 'calendar', 'caravan', 'check-in-desk', 'ticket counter', 'brush', 'mill', 'covered bridge', 'bowling alley', 'hanger', 'excavator', 'trestle', 'revolving door', 'blast furnace', 'scale', 'projector', 'soap', 'locker', 'tractor', 'stretcher', 'frame', 'grating', 'alembic', 'candle', 'barrier', 'cardboard', 'cave', 'puddle', 'tarp', 'price tag', 'watchtower', 'meters', 'light bulb', 'tracks', 'hair dryer', 'skirt', 'viaduct', 'paper towel', 'coat', 'sheet', 'fire extinguisher', 'water wheel', 'pottery', 'magazine rack', 'teapot', 'microphone', 'support', 'forklift', 'canyon', 'cash register', 'leaf', 'remote control', 'soap dish', 'windshield', 'cat', 'cue', 'vent', 'videos', 'shovel', 'eaves', 'antenna', 'shipyard', 'hen', 'traffic cone', 'washing machines', 'truck crane', 'cds', 'niche', 'scoreboard', 'briefcase', 'boot', 'sweater', 'hay', 'pack', 'bottle rack', 'glacier', 'pergola', 'building materials', 'television camera', 'first floor', 'rifle', 'tennis table', 'stadium', 'safety belt', 'cover', 'dish rack', 'synthesizer', 'pumpkin', 'gutter', 'fruit stand', 'ice floe', 'handle', 'wheelchair', 'mousepad', 'diploma', 'fairground ride', 'radio', 'hotplate', 'junk', 'wheelbarrow', 'stream', 'toll plaza', 'punching bag', 'trough', 'throne', 'chair desk', 'weighbridge', 'extractor fan', 'hanging clothes', 'dish', 'alarm clock', 'ski lift', 'chain', 'garage', 'mechanical shovel', 'wine rack', 'tramway', 'treadmill', 'menu', 'block', 'well', 'witness stand', 'branch', 'duck', 'casserole', 'frying pan', 'desk organizer', 'mast', 'spectacles', 'service elevator', 'dollhouse', 'hammock', 'clothes hanging', 'photocopier', 'notepad', 'golf cart', 'footpath', 'cross', 'baptismal font', 'boiler', 'skip', 'rotisserie', 'tables', 'water mill', 'helmet', 'cover curtain', 'brick', 'table runner', 'ashtray', 'street box', 'stick', 'hangers', 'cells', 'urinal', 'centerpiece', 'portable fridge', 'dvds', 'golf club', 'skirting board', 'water cooler', 'clipboard', 'camera', 'pigeonhole', 'chips', 'food processor', 'post box', 'lid', 'drum', 'blender', 
'cave entrance', 'dental chair', 'obelisk', 'canoe', 'mobile', 'monitors', 'pool ball', 'cue rack', 'baggage carts', 'shore', 'fork', 'paper filer', 'bicycle rack', 'coat rack', 'garland', 'sports bag', 'fish tank', 'towel dispenser', 'carriage', 'brochure', 'plaque', 'stringer', 'iron', 'spoon', 'flag pole', 'toilet brush', 'book stand', 'water faucet', 'ticket office', 'broom', 'dvd', 'ice bucket', 'carapace', 'tureen', 'folders', 'chess', 'root', 'sewing machine', 'model', 'pen', 'violin', 'sweatshirt', 'recycling materials', 'mitten', 'chopping board', 'mask', 'log', 'mouse', 'grill', 'hole', 'target', 'trash bag', 'chalk', 'sticks', 'balloon', 'score', 'hair spray', 'roll', 'runner', 'engine', 'inflatable glove', 'games', 'pallets', 'baskets', 'coop', 'dvd player', 'rocking horse', 'buckets', 'bread rolls', 'shawl', 'watering can', 'spotlights', 'post-it', 'bowls', 'security camera', 'runner cloth', 'lock', 'alarm', 'side', 'roulette', 'bone', 'cutlery', 'pool balls', 'wheels', 'spice rack', 'plant pots', 'towel ring', 'bread box', 'video', 'funfair', 'breads', 'tripod', 'ironing board', 'skimmer', 'hollow', 'scratching post', 'tricycle', 'file box', 'mountain pass', 'tombstones', 'cooker', 'card game', 'golf bag', 'towel paper', 'chaise lounge', 'sun', 'toilet paper holder', 'rake', 'key', 'umbrella stand', 'dartboard', 'transformer', 'fireplace utensils', 'sweatshirts', 'cellular telephone', 'tallboy', 'stapler', 'sauna', 'test tube', 'palette', 'shopping carts', 'tools', 'push button', 'star', 'roof rack', 'barbed wire', 'spray', 'ear', 'sponge', 'racket', 'tins', 'eyeglasses', 'file', 'scarfs', 'sugar bowl', 'flip flop', 'headstones', 'laptop bag', 'leash', 'climbing frame', 'suit hanger', 'floor spotlight', 'plate rack', 'sewer', 'hard drive', 'sprinkler', 'tools box', 'necklace', 'bulbs', 'steel industry', 'club', 'jack', 'door bars', 'control panel', 'hairbrush', 'napkin holder', 'office', 'smoke detector', 'utensils', 'apron', 'scissors', 'terminal', 'grinder', 'entry phone', 'newspaper stand', 'pepper shaker', 'onions', 'central processing unit', 'tape', 'bat', 'coaster', 'calculator', 'potatoes', 'luggage rack', 'salt', 'street number', 'viewpoint', 'sword', 'cd', 'rowing machine', 'plug', 'andiron', 'pepper', 'tongs', 'bonfire', 'dog dish', 'belt', 'dumbbells', 'videocassette recorder', 'hook', 'envelopes', 'shower faucet', 'watch', 'padlock', 'swimming pool ladder', 'spanners', 'gravy boat', 'notice board', 'trash bags', 'fire alarm', 'ladle', 'stethoscope', 'rocket', 'funnel', 'bowling pins', 'valve', 'thermometer', 'cups', 'spice jar', 'night light', 'soaps', 'games table', 'slotted spoon', 'reel', 'scourer', 'sleeping robe', 'desk mat', 'dumbbell', 'hammer', 'tie', 'typewriter', 'shaker', 'cheese dish', 'sea star', 'racquet', 'butane gas cylinder', 'paper weight', 'shaving brush', 'sunglasses', 'gear shift', 'towel rail', 'adding machine']
SUN_RGBD_37 = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'blinds', 'desk', 'shelves', 'curtain', 'dresser', 'pillow', 'mirror', 'floor mat', 'clothes', 'ceiling', 'books', 'refridgerator', 'television', 'paper', 'towel', 'shower curtain', 'box', 'whiteboard', 'person', 'night stand', 'toilet', 'sink', 'lamp', 'bathtub', 'bag']
SCAN_37 = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'blinds', 'desk', 'shelves', 'curtain', 'dresser', 'pillow', 'mirror', 'floor mat', 'clothes', 'ceiling', 'books', 'refridgerator', 'television', 'paper', 'towel', 'shower curtain', 'box', 'whiteboard', 'person', 'night stand', 'toilet', 'sink', 'lamp', 'bathtub', 'bag']
SCAN_40 = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'blinds', 'desk', 'shelves', 'curtain', 'dresser', 'pillow', 'mirror', 'floor mat', 'clothes', 'ceiling', 'books', 'refridgerator', 'television', 'paper', 'towel', 'shower curtain', 'box', 'whiteboard', 'person', 'night stand', 'toilet', 'sink', 'lamp', 'bathtub', 'bag', 'otherstructure', 'otherfurniture', 'otherprop']
SCAN_20 = ["wall", "floor", "cabinet", "bed", "chair", "sofa", "table", "door", "window", "bookshelf", "picture", "counter", "desk", "curtain", "refrigerator", "shower curtain", "toilet", "sink", "bathtub", "otherfurniture"]
CITYSCAPES = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
CITYSCAPES_THING = ["person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
BDD_SEM = ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
BDD_PANO = ['dynamic', 'ego vehicle', 'ground', 'static', 'parking', 'rail track', 'road', 'sidewalk', 'bridge', 'building', 'fence', 'garage', 'guard rail', 'tunnel', 'wall', 'banner', 'billboard', 'lane divider', 'parking sign', 'pole', 'polegroup', 'street light', 'traffic cone', 'traffic device', 'traffic light', 'traffic sign', 'traffic sign frame', 'terrain', 'vegetation', 'sky', 'person', 'rider', 'bicycle', 'bus', 'car', 'caravan', 'motorcycle', 'trailer', 'train', 'truck']
IMAGENET_CLASSES = ["tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray", "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco", "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper", "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander", "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog", "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin", "box turtle", "banded gecko", "green iguana", "Carolina anole", "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard", "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile", "American alligator", "triceratops", "worm snake", "ring-necked snake", "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake", "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra", "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake", "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider", "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl", "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck", "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby", "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch", "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab", "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab", "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron", "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot", "bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher", "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion", "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel", "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle", "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound", "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound", "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound", "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier", "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier", "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier", "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier", "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer", "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier", "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier", "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever", "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla", "English 
Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel", "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel", "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard", "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie", "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann", "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog", "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff", "French Bulldog", "Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky", "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog", "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon", "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle", "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf", "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox", "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat", "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger", "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose", "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper", "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper", "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly", "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly", "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit", "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse", "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison", "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)", "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat", "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan", "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque", "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin", "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey", "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda", "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish", "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown", "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance", "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle", "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo", "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel", "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel", "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)", "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", 
"bikini", "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet", "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra", "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest", "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe", "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton", "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw", "storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking", "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker", "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard", "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot", "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer", "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table", "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig", "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder", "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed", "freight car", "French horn", "frying pan", "fur coat", "garbage truck", "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola", "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine", "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer", "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet", "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar", "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep", "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat", "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library", "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion", "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag", "mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask", "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone", "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile", "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor", "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa", "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail", "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina", "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart", "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush", 
"pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench", "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case", "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube", "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag", "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho", "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug", "printer", "prison", "projectile", "projector", "hockey puck", "punching bag", "purse", "quill", "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel", "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator", "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser", "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal", "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard", "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store", "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door", "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock", "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater", "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight", "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf", "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa", "submarine", "suit", "sundial", "sunglasses", "dark glasses", "sunscreen", "suspension bridge", "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe", "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball", "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof", "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store", "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod", "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard", "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling", "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball", "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink", "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing", "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website", "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu", "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette", "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli", "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber", "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange", "lemon", "fig", 
"pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate", "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito", "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef", "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player", "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn", "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom", "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"]
IMAGENET_FOLDER_NAMES = ['n01440764', 'n01443537', 'n01484850', 'n01491361', 'n01494475', 'n01496331', 'n01498041', 'n01514668', 'n01514859', 'n01518878', 'n01530575', 'n01531178', 'n01532829', 'n01534433', 'n01537544', 'n01558993', 'n01560419', 'n01580077', 'n01582220', 'n01592084', 'n01601694', 'n01608432', 'n01614925', 'n01616318', 'n01622779', 'n01629819', 'n01630670', 'n01631663', 'n01632458', 'n01632777', 'n01641577', 'n01644373', 'n01644900', 'n01664065', 'n01665541', 'n01667114', 'n01667778', 'n01669191', 'n01675722', 'n01677366', 'n01682714', 'n01685808', 'n01687978', 'n01688243', 'n01689811', 'n01692333', 'n01693334', 'n01694178', 'n01695060', 'n01697457', 'n01698640', 'n01704323', 'n01728572', 'n01728920', 'n01729322', 'n01729977', 'n01734418', 'n01735189', 'n01737021', 'n01739381', 'n01740131', 'n01742172', 'n01744401', 'n01748264', 'n01749939', 'n01751748', 'n01753488', 'n01755581', 'n01756291', 'n01768244', 'n01770081', 'n01770393', 'n01773157', 'n01773549', 'n01773797', 'n01774384', 'n01774750', 'n01775062', 'n01776313', 'n01784675', 'n01795545', 'n01796340', 'n01797886', 'n01798484', 'n01806143', 'n01806567', 'n01807496', 'n01817953', 'n01818515', 'n01819313', 'n01820546', 'n01824575', 'n01828970', 'n01829413', 'n01833805', 'n01843065', 'n01843383', 'n01847000', 'n01855032', 'n01855672', 'n01860187', 'n01871265', 'n01872401', 'n01873310', 'n01877812', 'n01882714', 'n01883070', 'n01910747', 'n01914609', 'n01917289', 'n01924916', 'n01930112', 'n01943899', 'n01944390', 'n01945685', 'n01950731', 'n01955084', 'n01968897', 'n01978287', 'n01978455', 'n01980166', 'n01981276', 'n01983481', 'n01984695', 'n01985128', 'n01986214', 'n01990800', 'n02002556', 'n02002724', 'n02006656', 'n02007558', 'n02009229', 'n02009912', 'n02011460', 'n02012849', 'n02013706', 'n02017213', 'n02018207', 'n02018795', 'n02025239', 'n02027492', 'n02028035', 'n02033041', 'n02037110', 'n02051845', 'n02056570', 'n02058221', 'n02066245', 'n02071294', 'n02074367', 'n02077923', 'n02085620', 'n02085782', 'n02085936', 'n02086079', 'n02086240', 'n02086646', 'n02086910', 'n02087046', 'n02087394', 'n02088094', 'n02088238', 'n02088364', 'n02088466', 'n02088632', 'n02089078', 'n02089867', 'n02089973', 'n02090379', 'n02090622', 'n02090721', 'n02091032', 'n02091134', 'n02091244', 'n02091467', 'n02091635', 'n02091831', 'n02092002', 'n02092339', 'n02093256', 'n02093428', 'n02093647', 'n02093754', 'n02093859', 'n02093991', 'n02094114', 'n02094258', 'n02094433', 'n02095314', 'n02095570', 'n02095889', 'n02096051', 'n02096177', 'n02096294', 'n02096437', 'n02096585', 'n02097047', 'n02097130', 'n02097209', 'n02097298', 'n02097474', 'n02097658', 'n02098105', 'n02098286', 'n02098413', 'n02099267', 'n02099429', 'n02099601', 'n02099712', 'n02099849', 'n02100236', 'n02100583', 'n02100735', 'n02100877', 'n02101006', 'n02101388', 'n02101556', 'n02102040', 'n02102177', 'n02102318', 'n02102480', 'n02102973', 'n02104029', 'n02104365', 'n02105056', 'n02105162', 'n02105251', 'n02105412', 'n02105505', 'n02105641', 'n02105855', 'n02106030', 'n02106166', 'n02106382', 'n02106550', 'n02106662', 'n02107142', 'n02107312', 'n02107574', 'n02107683', 'n02107908', 'n02108000', 'n02108089', 'n02108422', 'n02108551', 'n02108915', 'n02109047', 'n02109525', 'n02109961', 'n02110063', 'n02110185', 'n02110341', 'n02110627', 'n02110806', 'n02110958', 'n02111129', 'n02111277', 'n02111500', 'n02111889', 'n02112018', 'n02112137', 'n02112350', 'n02112706', 'n02113023', 'n02113186', 'n02113624', 'n02113712', 'n02113799', 'n02113978', 'n02114367', 'n02114548', 
'n02114712', 'n02114855', 'n02115641', 'n02115913', 'n02116738', 'n02117135', 'n02119022', 'n02119789', 'n02120079', 'n02120505', 'n02123045', 'n02123159', 'n02123394', 'n02123597', 'n02124075', 'n02125311', 'n02127052', 'n02128385', 'n02128757', 'n02128925', 'n02129165', 'n02129604', 'n02130308', 'n02132136', 'n02133161', 'n02134084', 'n02134418', 'n02137549', 'n02138441', 'n02165105', 'n02165456', 'n02167151', 'n02168699', 'n02169497', 'n02172182', 'n02174001', 'n02177972', 'n02190166', 'n02206856', 'n02219486', 'n02226429', 'n02229544', 'n02231487', 'n02233338', 'n02236044', 'n02256656', 'n02259212', 'n02264363', 'n02268443', 'n02268853', 'n02276258', 'n02277742', 'n02279972', 'n02280649', 'n02281406', 'n02281787', 'n02317335', 'n02319095', 'n02321529', 'n02325366', 'n02326432', 'n02328150', 'n02342885', 'n02346627', 'n02356798', 'n02361337', 'n02363005', 'n02364673', 'n02389026', 'n02391049', 'n02395406', 'n02396427', 'n02397096', 'n02398521', 'n02403003', 'n02408429', 'n02410509', 'n02412080', 'n02415577', 'n02417914', 'n02422106', 'n02422699', 'n02423022', 'n02437312', 'n02437616', 'n02441942', 'n02442845', 'n02443114', 'n02443484', 'n02444819', 'n02445715', 'n02447366', 'n02454379', 'n02457408', 'n02480495', 'n02480855', 'n02481823', 'n02483362', 'n02483708', 'n02484975', 'n02486261', 'n02486410', 'n02487347', 'n02488291', 'n02488702', 'n02489166', 'n02490219', 'n02492035', 'n02492660', 'n02493509', 'n02493793', 'n02494079', 'n02497673', 'n02500267', 'n02504013', 'n02504458', 'n02509815', 'n02510455', 'n02514041', 'n02526121', 'n02536864', 'n02606052', 'n02607072', 'n02640242', 'n02641379', 'n02643566', 'n02655020', 'n02666196', 'n02667093', 'n02669723', 'n02672831', 'n02676566', 'n02687172', 'n02690373', 'n02692877', 'n02699494', 'n02701002', 'n02704792', 'n02708093', 'n02727426', 'n02730930', 'n02747177', 'n02749479', 'n02769748', 'n02776631', 'n02777292', 'n02782093', 'n02783161', 'n02786058', 'n02787622', 'n02788148', 'n02790996', 'n02791124', 'n02791270', 'n02793495', 'n02794156', 'n02795169', 'n02797295', 'n02799071', 'n02802426', 'n02804414', 'n02804610', 'n02807133', 'n02808304', 'n02808440', 'n02814533', 'n02814860', 'n02815834', 'n02817516', 'n02823428', 'n02823750', 'n02825657', 'n02834397', 'n02835271', 'n02837789', 'n02840245', 'n02841315', 'n02843684', 'n02859443', 'n02860847', 'n02865351', 'n02869837', 'n02870880', 'n02871525', 'n02877765', 'n02879718', 'n02883205', 'n02892201', 'n02892767', 'n02894605', 'n02895154', 'n02906734', 'n02909870', 'n02910353', 'n02916936', 'n02917067', 'n02927161', 'n02930766', 'n02939185', 'n02948072', 'n02950826', 'n02951358', 'n02951585', 'n02963159', 'n02965783', 'n02966193', 'n02966687', 'n02971356', 'n02974003', 'n02977058', 'n02978881', 'n02979186', 'n02980441', 'n02981792', 'n02988304', 'n02992211', 'n02992529', 'n02999410', 'n03000134', 'n03000247', 'n03000684', 'n03014705', 'n03016953', 'n03017168', 'n03018349', 'n03026506', 'n03028079', 'n03032252', 'n03041632', 'n03042490', 'n03045698', 'n03047690', 'n03062245', 'n03063599', 'n03063689', 'n03065424', 'n03075370', 'n03085013', 'n03089624', 'n03095699', 'n03100240', 'n03109150', 'n03110669', 'n03124043', 'n03124170', 'n03125729', 'n03126707', 'n03127747', 'n03127925', 'n03131574', 'n03133878', 'n03134739', 'n03141823', 'n03146219', 'n03160309', 'n03179701', 'n03180011', 'n03187595', 'n03188531', 'n03196217', 'n03197337', 'n03201208', 'n03207743', 'n03207941', 'n03208938', 'n03216828', 'n03218198', 'n03220513', 'n03223299', 'n03240683', 'n03249569', 'n03250847', 'n03255030', 
'n03259280', 'n03271574', 'n03272010', 'n03272562', 'n03290653', 'n03291819', 'n03297495', 'n03314780', 'n03325584', 'n03337140', 'n03344393', 'n03345487', 'n03347037', 'n03355925', 'n03372029', 'n03376595', 'n03379051', 'n03384352', 'n03388043', 'n03388183', 'n03388549', 'n03393912', 'n03394916', 'n03400231', 'n03404251', 'n03417042', 'n03424325', 'n03425413', 'n03443371', 'n03444034', 'n03445777', 'n03445924', 'n03447447', 'n03447721', 'n03450230', 'n03452741', 'n03457902', 'n03459775', 'n03461385', 'n03467068', 'n03476684', 'n03476991', 'n03478589', 'n03481172', 'n03482405', 'n03483316', 'n03485407', 'n03485794', 'n03492542', 'n03494278', 'n03495258', 'n03496892', 'n03498962', 'n03527444', 'n03529860', 'n03530642', 'n03532672', 'n03534580', 'n03535780', 'n03538406', 'n03544143', 'n03584254', 'n03584829', 'n03590841', 'n03594734', 'n03594945', 'n03595614', 'n03598930', 'n03599486', 'n03602883', 'n03617480', 'n03623198', 'n03627232', 'n03630383', 'n03633091', 'n03637318', 'n03642806', 'n03649909', 'n03657121', 'n03658185', 'n03661043', 'n03662601', 'n03666591', 'n03670208', 'n03673027', 'n03676483', 'n03680355', 'n03690938', 'n03691459', 'n03692522', 'n03697007', 'n03706229', 'n03709823', 'n03710193', 'n03710637', 'n03710721', 'n03717622', 'n03720891', 'n03721384', 'n03724870', 'n03729826', 'n03733131', 'n03733281', 'n03733805', 'n03742115', 'n03743016', 'n03759954', 'n03761084', 'n03763968', 'n03764736', 'n03769881', 'n03770439', 'n03770679', 'n03773504', 'n03775071', 'n03775546', 'n03776460', 'n03777568', 'n03777754', 'n03781244', 'n03782006', 'n03785016', 'n03786901', 'n03787032', 'n03788195', 'n03788365', 'n03791053', 'n03792782', 'n03792972', 'n03793489', 'n03794056', 'n03796401', 'n03803284', 'n03804744', 'n03814639', 'n03814906', 'n03825788', 'n03832673', 'n03837869', 'n03838899', 'n03840681', 'n03841143', 'n03843555', 'n03854065', 'n03857828', 'n03866082', 'n03868242', 'n03868863', 'n03871628', 'n03873416', 'n03874293', 'n03874599', 'n03876231', 'n03877472', 'n03877845', 'n03884397', 'n03887697', 'n03888257', 'n03888605', 'n03891251', 'n03891332', 'n03895866', 'n03899768', 'n03902125', 'n03903868', 'n03908618', 'n03908714', 'n03916031', 'n03920288', 'n03924679', 'n03929660', 'n03929855', 'n03930313', 'n03930630', 'n03933933', 'n03935335', 'n03937543', 'n03938244', 'n03942813', 'n03944341', 'n03947888', 'n03950228', 'n03954731', 'n03956157', 'n03958227', 'n03961711', 'n03967562', 'n03970156', 'n03976467', 'n03976657', 'n03977966', 'n03980874', 'n03982430', 'n03983396', 'n03991062', 'n03992509', 'n03995372', 'n03998194', 'n04004767', 'n04005630', 'n04008634', 'n04009552', 'n04019541', 'n04023962', 'n04026417', 'n04033901', 'n04033995', 'n04037443', 'n04039381', 'n04040759', 'n04041544', 'n04044716', 'n04049303', 'n04065272', 'n04067472', 'n04069434', 'n04070727', 'n04074963', 'n04081281', 'n04086273', 'n04090263', 'n04099969', 'n04111531', 'n04116512', 'n04118538', 'n04118776', 'n04120489', 'n04125021', 'n04127249', 'n04131690', 'n04133789', 'n04136333', 'n04141076', 'n04141327', 'n04141975', 'n04146614', 'n04147183', 'n04149813', 'n04152593', 'n04153751', 'n04154565', 'n04162706', 'n04179913', 'n04192698', 'n04200800', 'n04201297', 'n04204238', 'n04204347', 'n04208210', 'n04209133', 'n04209239', 'n04228054', 'n04229816', 'n04235860', 'n04238763', 'n04239074', 'n04243546', 'n04251144', 'n04252077', 'n04252225', 'n04254120', 'n04254680', 'n04254777', 'n04258138', 'n04259630', 'n04263257', 'n04264628', 'n04265275', 'n04266014', 'n04270147', 'n04273569', 'n04275548', 'n04277352', 
'n04285008', 'n04286575', 'n04296562', 'n04310018', 'n04311004', 'n04311174', 'n04317175', 'n04325704', 'n04326547', 'n04328186', 'n04330267', 'n04332243', 'n04335435', 'n04336792', 'n04344873', 'n04346328', 'n04347754', 'n04350905', 'n04355338', 'n04355933', 'n04356056', 'n04357314', 'n04366367', 'n04367480', 'n04370456', 'n04371430', 'n04371774', 'n04372370', 'n04376876', 'n04380533', 'n04389033', 'n04392985', 'n04398044', 'n04399382', 'n04404412', 'n04409515', 'n04417672', 'n04418357', 'n04423845', 'n04428191', 'n04429376', 'n04435653', 'n04442312', 'n04443257', 'n04447861', 'n04456115', 'n04458633', 'n04461696', 'n04462240', 'n04465501', 'n04467665', 'n04476259', 'n04479046', 'n04482393', 'n04483307', 'n04485082', 'n04486054', 'n04487081', 'n04487394', 'n04493381', 'n04501370', 'n04505470', 'n04507155', 'n04509417', 'n04515003', 'n04517823', 'n04522168', 'n04523525', 'n04525038', 'n04525305', 'n04532106', 'n04532670', 'n04536866', 'n04540053', 'n04542943', 'n04548280', 'n04548362', 'n04550184', 'n04552348', 'n04553703', 'n04554684', 'n04557648', 'n04560804', 'n04562935', 'n04579145', 'n04579432', 'n04584207', 'n04589890', 'n04590129', 'n04591157', 'n04591713', 'n04592741', 'n04596742', 'n04597913', 'n04599235', 'n04604644', 'n04606251', 'n04612504', 'n04613696', 'n06359193', 'n06596364', 'n06785654', 'n06794110', 'n06874185', 'n07248320', 'n07565083', 'n07579787', 'n07583066', 'n07584110', 'n07590611', 'n07613480', 'n07614500', 'n07615774', 'n07684084', 'n07693725', 'n07695742', 'n07697313', 'n07697537', 'n07711569', 'n07714571', 'n07714990', 'n07715103', 'n07716358', 'n07716906', 'n07717410', 'n07717556', 'n07718472', 'n07718747', 'n07720875', 'n07730033', 'n07734744', 'n07742313', 'n07745940', 'n07747607', 'n07749582', 'n07753113', 'n07753275', 'n07753592', 'n07754684', 'n07760859', 'n07768694', 'n07802026', 'n07831146', 'n07836838', 'n07860988', 'n07871810', 'n07873807', 'n07875152', 'n07880968', 'n07892512', 'n07920052', 'n07930864', 'n07932039', 'n09193705', 'n09229709', 'n09246464', 'n09256479', 'n09288635', 'n09332890', 'n09399592', 'n09421951', 'n09428293', 'n09468604', 'n09472597', 'n09835506', 'n10148035', 'n10565667', 'n11879895', 'n11939491', 'n12057211', 'n12144580', 'n12267677', 'n12620546', 'n12768682', 'n12985857', 'n12998815', 'n13037406', 'n13040303', 'n13044778', 'n13052670', 'n13054560', 'n13133613', 'n15075141']

PREDICATES = [
    'over', 'in front of', 'beside', 'on', 'in', 'attached to', 'hanging from',
    'on back of', 'falling off', 'going down', 'painted on', 'walking on',
    'running on', 'crossing', 'standing on', 'lying on', 'sitting on',
    'flying over', 'jumping over', 'jumping from', 'wearing', 'holding',
    'carrying', 'looking at', 'guiding', 'kissing', 'eating', 'drinking',
    'feeding', 'biting', 'catching', 'picking', 'playing with', 'chasing',
    'climbing', 'cleaning', 'playing', 'touching', 'pushing', 'pulling',
    'opening', 'cooking', 'talking to', 'throwing', 'slicing', 'driving',
    'riding', 'parked on', 'driving on', 'about to hit', 'kicking', 'swinging',
    'entering', 'exiting', 'enclosing', 'leaning on',
]

IMAGENET_DEFAULT_TEMPLATES = [
    '{}.',
    'a bad photo of a {}.', 'a photo of many {}.', 'a sculpture of a {}.',
    'a photo of the hard to see {}.', 'a low resolution photo of the {}.',
    'a rendering of a {}.', 'graffiti of a {}.', 'a bad photo of the {}.',
    'a cropped photo of the {}.', 'a tattoo of a {}.', 'the embroidered {}.',
    'a photo of a hard to see {}.', 'a bright photo of a {}.', 'a photo of a clean {}.',
    'a photo of a dirty {}.', 'a dark photo of the {}.', 'a drawing of a {}.',
    'a photo of my {}.', 'the plastic {}.', 'a photo of the cool {}.',
    'a close-up photo of a {}.', 'a black and white photo of the {}.',
    'a painting of the {}.', 'a painting of a {}.', 'a pixelated photo of the {}.',
    'a sculpture of the {}.', 'a bright photo of the {}.', 'a cropped photo of a {}.',
    'a plastic {}.', 'a photo of the dirty {}.', 'a jpeg corrupted photo of a {}.',
    'a blurry photo of the {}.', 'a photo of the {}.', 'a good photo of the {}.',
    'a rendering of the {}.', 'a {} in a video game.', 'a photo of one {}.',
    'a doodle of a {}.', 'a close-up photo of the {}.', 'a photo of a {}.',
    'the origami {}.', 'the {} in a video game.', 'a sketch of a {}.',
    'a doodle of the {}.', 'a origami {}.', 'a low resolution photo of a {}.',
    'the toy {}.', 'a rendition of the {}.', 'a photo of the clean {}.',
    'a photo of a large {}.', 'a rendition of a {}.', 'a photo of a nice {}.',
    'a photo of a weird {}.', 'a blurry photo of a {}.', 'a cartoon {}.',
    'art of a {}.', 'a sketch of the {}.', 'a embroidered {}.',
    'a pixelated photo of a {}.', 'itap of the {}.', 'a jpeg corrupted photo of the {}.',
    'a good photo of a {}.', 'a plushie {}.', 'a photo of the nice {}.',
    'a photo of the small {}.', 'a photo of the weird {}.', 'the cartoon {}.',
    'art of the {}.', 'a drawing of the {}.', 'a photo of the large {}.',
    'a black and white photo of a {}.', 'the plushie {}.', 'a dark photo of a {}.',
    'itap of a {}.', 'graffiti of the {}.', 'a toy {}.', 'itap of my {}.',
    'a photo of a cool {}.', 'a photo of a small {}.', 'a tattoo of the {}.',
]

IMAGENET_SIMPLE_TEMPLATES = [
    'a photo of {}.',
]

PASCAL_CLASSES = [
    "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
    "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
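The `{}` placeholders in the template lists above are meant to be filled with a class name (CLIP/SigLIP-style prompt ensembling). A minimal sketch of how these constants are typically expanded, assuming `constants.py` is importable from the working directory; the `build_prompts` helper is illustrative and not part of the upload:

from constants import IMAGENET_SIMPLE_TEMPLATES, PASCAL_CLASSES

def build_prompts(class_names, templates):
    # one text prompt per (class, template) pair, e.g. "a photo of dog."
    return {name: [t.format(name) for t in templates] for name in class_names}

prompts = build_prompts(PASCAL_CLASSES, IMAGENET_SIMPLE_TEMPLATES)
print(prompts["dog"])  # ['a photo of dog.']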
generation_config.json
ADDED
|
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "transformers_version": "4.50.0.dev0"
}
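`generation_config.json` only pins the BOS/EOS token ids and the `transformers` version; decoding parameters are supplied at call time (see the `chat()` method in `modeling_minicpmv.py` below). A small sketch, with a placeholder checkpoint path, of how `transformers` exposes these defaults:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/this/checkpoint")  # placeholder path
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 151643 151645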
image_processing_minicpmv.py
ADDED
|
@@ -0,0 +1,417 @@
from typing import Optional, Union, Dict, Any, List

import torch
import math
import PIL.Image
import PIL.ImageSequence
import numpy as np
import PIL
from PIL import Image

from transformers.utils import TensorType, requires_backends, is_torch_dtype, is_torch_device
from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
from transformers import AutoImageProcessor
from transformers.image_transforms import to_channel_dimension_format
from transformers.image_utils import (
    ImageInput,
    make_list_of_images,
    valid_images,
    is_torch_tensor,
    is_batched,
    to_numpy_array,
    infer_channel_dimension_format,
    ChannelDimension
)


def recursive_converter(converter, value):
    if isinstance(value, list):
        new_value = []
        for v in value:
            new_value += [recursive_converter(converter, v)]
        return new_value
    else:
        return converter(value)


class MiniCPMVBatchFeature(BatchFeature):
    r"""
    Extend from BatchFeature for supporting various image size
    """
    def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None):
        super().__init__(data)
        self.convert_to_tensors(tensor_type=tensor_type)

    def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
        if tensor_type is None:
            return self

        is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)

        def converter(value):
            try:
                if not is_tensor(value):
                    tensor = as_tensor(value)
                    return tensor
            except:  # noqa E722
                if key == "overflowing_values":
                    raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
                raise ValueError(
                    "Unable to create tensor, you should probably activate padding "
                    "with 'padding=True' to have batched tensors with the same length."
                )

        for key, value in self.items():
            self[key] = recursive_converter(converter, value)
        return self

    def to(self, *args, **kwargs) -> "MiniCPMVBatchFeature":
        requires_backends(self, ["torch"])
        import torch

        def cast_tensor(v):
            # check if v is a floating point
            if torch.is_floating_point(v):
                # cast and send to device
                return v.to(*args, **kwargs)
            elif device is not None:
                return v.to(device=device)
            else:
                return v

        new_data = {}
        device = kwargs.get("device")
        # Check if the args are a device or a dtype
        if device is None and len(args) > 0:
            # device should be always the first argument
            arg = args[0]
            if is_torch_dtype(arg):
                # The first argument is a dtype
                pass
            elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
                device = arg
            else:
                # it's something else
                raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
        # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
        for k, v in self.items():
            new_data[k] = recursive_converter(cast_tensor, v)
        self.data = new_data
        return self


class MiniCPMVImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
            self,
            max_slice_nums=9,
            scale_resolution=448,
            patch_size=14,
            **kwargs):
        super().__init__(**kwargs)
        self.max_slice_nums = max_slice_nums
        self.scale_resolution = scale_resolution
        self.patch_size = patch_size
        self.use_image_id = kwargs.pop("use_image_id", False)
        self.image_feature_size = kwargs.pop("image_feature_size", 64)
        self.im_start_token = kwargs.pop("im_start", "<image>")
        self.im_end_token = kwargs.pop("im_end", "</image>")
        self.slice_start_token = kwargs.pop("slice_start", "<slice>")
        self.slice_end_token = kwargs.pop("slice_end", "</slice>")
        self.unk_token = kwargs.pop("unk", "<unk>")
        self.im_id_start = kwargs.pop("im_id_start", "<image_id>")
        self.im_id_end = kwargs.pop("im_id_end", "</image_id>")
        self.slice_mode = kwargs.pop("slice_mode", True)
        self.mean = np.array(kwargs.pop("norm_mean", [0.5, 0.5, 0.5]))
        self.std = np.array(kwargs.pop("norm_std", [0.5, 0.5, 0.5]))
        self.version = kwargs.pop("version", 2.0)

    def ensure_divide(self, length, patch_size):
        return max(round(length / patch_size) * patch_size, patch_size)

    def find_best_resize(self,
                         original_size,
                         scale_resolution,
                         patch_size,
                         allow_upscale=False):
        width, height = original_size
        if (width * height >
                scale_resolution * scale_resolution) or allow_upscale:
            r = width / height
            height = int(scale_resolution / math.sqrt(r))
            width = int(height * r)
        best_width = self.ensure_divide(width, patch_size)
        best_height = self.ensure_divide(height, patch_size)
        return (best_width, best_height)

    def get_refine_size(self,
                        original_size,
                        grid,
                        scale_resolution,
                        patch_size,
                        allow_upscale=False):
        width, height = original_size
        grid_x, grid_y = grid

        refine_width = self.ensure_divide(width, grid_x)
        refine_height = self.ensure_divide(height, grid_y)

        grid_width = refine_width / grid_x
        grid_height = refine_height / grid_y

        best_grid_size = self.find_best_resize((grid_width, grid_height),
                                               scale_resolution,
                                               patch_size,
                                               allow_upscale=allow_upscale)
        refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)
        return refine_size

    def split_to_patches(self, image, grid):
        patches = []
        width, height = image.size
        grid_x = int(width / grid[0])
        grid_y = int(height / grid[1])
        for i in range(0, height, grid_y):
            images = []
            for j in range(0, width, grid_x):
                box = (j, i, j + grid_x, i + grid_y)
                patch = image.crop(box)
                images.append(patch)
            patches.append(images)
        return patches

    def slice_image(
        self, image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False
    ):
        original_size = image.size
        source_image = None
        best_grid = self.get_sliced_grid(original_size, max_slice_nums, never_split)
        patches = []

        if best_grid is None:
            # don't need to slice, upsample
            best_size = self.find_best_resize(
                original_size, scale_resolution, patch_size, allow_upscale=True
            )
            source_image = image.resize(best_size, resample=Image.Resampling.BICUBIC)
        else:
            # source image, down-sampling and ensure divided by patch_size
            best_resize = self.find_best_resize(original_size, scale_resolution, patch_size)
            source_image = image.copy().resize(best_resize, resample=Image.Resampling.BICUBIC)
            refine_size = self.get_refine_size(
                original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
            )
            refine_image = image.resize(refine_size, resample=Image.Resampling.BICUBIC)
            patches = self.split_to_patches(refine_image, best_grid)

        return source_image, patches, best_grid

    def get_grid_placeholder(self, grid):
        if grid is None:
            return ""
        slice_image_placeholder = (
            self.slice_start_token
            + self.unk_token * self.image_feature_size
            + self.slice_end_token
        )

        cols = grid[0]
        rows = grid[1]
        slices = []
        for i in range(rows):
            lines = []
            for j in range(cols):
                lines.append(slice_image_placeholder)
            slices.append("".join(lines))

        slice_placeholder = "\n".join(slices)
        return slice_placeholder

    def get_image_id_placeholder(self, idx=0):
        return f"{self.im_id_start}{idx}{self.im_id_end}"

    def get_sliced_images(self, image, max_slice_nums=None):
        slice_images = []

        if not self.slice_mode:
            return [image]

        max_slice_nums = self.max_slice_nums if max_slice_nums is None else int(max_slice_nums)
        assert max_slice_nums > 0
        source_image, patches, sliced_grid = self.slice_image(
            image,
            max_slice_nums,  # default: 9
            self.scale_resolution,  # default: 448
            self.patch_size  # default: 14
        )

        slice_images.append(source_image)
        if len(patches) > 0:
            for i in range(len(patches)):
                for j in range(len(patches[0])):
                    slice_images.append(patches[i][j])
        return slice_images

    def get_sliced_grid(self, image_size, max_slice_nums, nerver_split=False):
        original_width, original_height = image_size
        log_ratio = math.log(original_width / original_height)
        ratio = original_width * original_height / (self.scale_resolution * self.scale_resolution)
        multiple = min(math.ceil(ratio), max_slice_nums)
        if multiple <= 1 or nerver_split:
            return None
        candidate_split_grids_nums = []
        for i in [multiple - 1, multiple, multiple + 1]:
            if i == 1 or i > max_slice_nums:
                continue
            candidate_split_grids_nums.append(i)

        candidate_grids = []
        for split_grids_nums in candidate_split_grids_nums:
            m = 1
            while m <= split_grids_nums:
                if split_grids_nums % m == 0:
                    candidate_grids.append([m, split_grids_nums // m])
                m += 1

        best_grid = [1, 1]
        min_error = float("inf")
        for grid in candidate_grids:
            error = abs(log_ratio - math.log(grid[0] / grid[1]))
            if error < min_error:
                best_grid = grid
                min_error = error

        return best_grid

    def get_slice_image_placeholder(self, image_size, image_idx=0, max_slice_nums=None, use_image_id=None):
        max_slice_nums = self.max_slice_nums if max_slice_nums is None else int(max_slice_nums)
        assert max_slice_nums > 0
        grid = self.get_sliced_grid(image_size=image_size, max_slice_nums=max_slice_nums)

        image_placeholder = (
            self.im_start_token
            + self.unk_token * self.image_feature_size
            + self.im_end_token
        )
        use_image_id = self.use_image_id if use_image_id is None else bool(use_image_id)
        if use_image_id:
            final_placeholder = self.get_image_id_placeholder(image_idx) + image_placeholder
        else:
            final_placeholder = image_placeholder

        if self.slice_mode:
            final_placeholder = final_placeholder + self.get_grid_placeholder(grid=grid)
        return final_placeholder

    def to_pil_image(self, image, rescale=None) -> PIL.Image.Image:
        """
        Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
        needed.

        Args:
            image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
                The image to convert to the PIL Image format.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
                default to `True` if the image type is a floating type, `False` otherwise.
        """
        if isinstance(image, PIL.Image.Image):
            return image
        if is_torch_tensor(image):
            image = image.numpy()

        if isinstance(image, np.ndarray):
            if rescale is None:
                # rescale default to the array being of floating type.
                rescale = isinstance(image.flat[0], np.floating)
            # If the channel has been moved to first dim, we put it back at the end.
            if image.ndim == 3 and image.shape[0] in [1, 3]:
                image = image.transpose(1, 2, 0)
            if rescale:
                image = image * 255
            image = image.astype(np.uint8)
            return PIL.Image.fromarray(image)
        return image

    def reshape_by_patch(self, image):
        """
        :param image: shape [3, H, W]
        :param patch_size:
        :return: [3, patch_size, HW/patch_size]
        """
        image = torch.from_numpy(image)
        patch_size = self.patch_size
        patches = torch.nn.functional.unfold(
            image,
            (patch_size, patch_size),
            stride=(patch_size, patch_size)
        )

        patches = patches.reshape(image.size(0), patch_size, patch_size, -1)
        patches = patches.permute(0, 1, 3, 2).reshape(image.size(0), patch_size, -1)
        return patches.numpy()

    def preprocess(
            self,
            images: Union[Image.Image, List[Image.Image], List[List[Image.Image]]],
            do_pad: Optional[bool] = True,  # TODO: add pad for MiniCPM-Llama3-V-2_5
            max_slice_nums: int = None,
            return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> MiniCPMVBatchFeature:
        if isinstance(images, Image.Image):
            images_list = [[images]]
        elif isinstance(images[0], Image.Image):
            images_list = [images]
        else:
            images_list = images

        new_images_list = []
        image_sizes_list = []
        tgt_sizes_list = []

        for _images in images_list:
            if _images is None or len(_images) == 0:
                new_images_list.append([])
                image_sizes_list.append([])
                tgt_sizes_list.append([])
                continue
            if not valid_images(_images):
                raise ValueError(
                    "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                    "torch.Tensor, tf.Tensor or jax.ndarray."
                )

            _images = [self.to_pil_image(image).convert("RGB") for image in _images]
            input_data_format = infer_channel_dimension_format(np.array(_images[0]))

            new_images = []
            image_sizes = [image.size for image in _images]
            tgt_sizes = []
            for image in _images:
                image_patches = self.get_sliced_images(image, max_slice_nums)
                image_patches = [to_numpy_array(image).astype(np.float32) / 255 for image in image_patches]
                image_patches = [
                    self.normalize(image=image, mean=self.mean, std=self.std, input_data_format=input_data_format)
                    for image in image_patches
                ]
                image_patches = [
                    to_channel_dimension_format(image, ChannelDimension.FIRST, input_channel_dim=input_data_format)
                    for image in image_patches
                ]
                for slice_image in image_patches:
                    new_images.append(self.reshape_by_patch(slice_image))
                    tgt_sizes.append(np.array((slice_image.shape[1] // self.patch_size, slice_image.shape[2] // self.patch_size)))

            if tgt_sizes:
                tgt_sizes = np.vstack(tgt_sizes)

            new_images_list.append(new_images)
            image_sizes_list.append(image_sizes)
            tgt_sizes_list.append(tgt_sizes)
        return MiniCPMVBatchFeature(
            data={"pixel_values": new_images_list, "image_sizes": image_sizes_list, "tgt_sizes": tgt_sizes_list}, tensor_type=return_tensors
        )

AutoImageProcessor.register("MiniCPMVImageProcessor", MiniCPMVImageProcessor)
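A short usage sketch for the processor defined above (not shipped in the repo): it resizes the source image, optionally slices it into an aspect-ratio-matched grid, and flattens every slice with `reshape_by_patch`. The image path is a placeholder:

from PIL import Image
from image_processing_minicpmv import MiniCPMVImageProcessor

processor = MiniCPMVImageProcessor(max_slice_nums=9, scale_resolution=448, patch_size=14)
image = Image.open("example.jpg")  # placeholder

batch = processor.preprocess(image, return_tensors="pt")
# one entry per slice: the resized source image plus the grid patches,
# each reshaped to [3, patch_size, H*W/patch_size]
print(len(batch["pixel_values"][0]))
print(batch["tgt_sizes"][0].shape)  # (num_slices, 2) patch-grid sizes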
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00002.safetensors
ADDED
|
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b03769dddb5c25fb915dcd378ff491504f9569ae3407d8c66b6aea0f01f6aec
size 4454412044
model-00002-of-00002.safetensors
ADDED
|
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:33f7a0fd12e7e1be3c8b2659ff4583c3d47b6e1c292653badd17aeac6b80cf25
size 1503366314
model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.
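Both `model-*.safetensors` entries above are git-lfs pointer files (spec version, sha256 oid, byte size); the actual weight shards live in LFS, and `model.safetensors.index.json` maps each parameter name to its shard. A sketch of inspecting one shard once the LFS objects have been pulled; the filename matches the pointer above:

from safetensors import safe_open

with safe_open("model-00001-of-00002.safetensors", framework="pt") as f:
    for name in list(f.keys())[:5]:  # first few tensors only
        print(name, f.get_tensor(name).shape)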
modeling_minicpmv.py
ADDED
|
@@ -0,0 +1,476 @@
import math
from typing import List, Optional
import json
import torch
import torchvision

from threading import Thread
from copy import deepcopy
from PIL import Image
from transformers import AutoProcessor, Qwen2PreTrainedModel, Qwen2ForCausalLM, TextIteratorStreamer, \
    Owlv2ForObjectDetection
from .constants import ADE20K_847, IMAGENET_CLASSES, HOUSE_OBJECTS

from .configuration_minicpm import MiniCPMVConfig
from .modeling_navit_siglip import SiglipVisionTransformer
from .resampler import Resampler


class MiniCPMVPreTrainedModel(Qwen2PreTrainedModel):
    config_class = MiniCPMVConfig


class MiniCPMV(MiniCPMVPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.llm = Qwen2ForCausalLM(config)
        self.vpm = self.init_vision_module()
        self.od_model = None
        self.od_processor = None
        self.vision_dim = self.vpm.embed_dim
        self.embed_dim = self.llm.config.hidden_size
        self.resampler = self.init_resampler(self.embed_dim, self.vision_dim)
        self.processor = None

        self.terminators = ['<|im_end|>', '<|endoftext|>']
        self._generate = self.generate

    def init_vision_module(self):
        # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit add tgt_sizes
        if self.config._attn_implementation == 'flash_attention_2':
            self.config.vision_config._attn_implementation = 'flash_attention_2'
        else:
            # does not support sdpa
            self.config.vision_config._attn_implementation = 'eager'
        model = SiglipVisionTransformer(self.config.vision_config)
        if self.config.drop_vision_last_layer:
            model.encoder.layers = model.encoder.layers[:-1]

        setattr(model, 'embed_dim', model.embeddings.embed_dim)
        setattr(model, 'patch_size', model.embeddings.patch_size)

        return model

    def init_od_model(self):
        # google/owlv2-base-patch16-ensemble
        if self.od_model is None:
            self.od_model = Owlv2ForObjectDetection.from_pretrained(self.config.od_model_name).cuda()
        return self.od_model

    def init_od_processor(self):
        # google/owlv2-base-patch16-ensemble
        if self.od_processor is None:
            self.od_processor = AutoProcessor.from_pretrained(self.config.od_processor_name)
        return self.od_processor

    def init_resampler(self, embed_dim, vision_dim):
        return Resampler(
            num_queries=self.config.query_num,
            embed_dim=embed_dim,
            num_heads=embed_dim // 128,
            kv_dim=vision_dim,
            adaptive=True
        )

    def get_input_embeddings(self):
        return self.llm.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.llm.embed_tokens = value

    def get_output_embeddings(self):
        return self.llm.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.llm.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.llm = decoder

    def get_decoder(self):
        return self.llm

    def get_vllm_embedding(self, data):
        if 'vision_hidden_states' not in data:
            dtype = self.llm.model.embed_tokens.weight.dtype
            device = self.llm.model.embed_tokens.weight.device
            tgt_sizes = data['tgt_sizes']
            pixel_values_list = data['pixel_values']
            vision_hidden_states = []
            all_pixel_values = []
            img_cnt = []
            for pixel_values in pixel_values_list:
                img_cnt.append(len(pixel_values))
                all_pixel_values.extend([i.flatten(end_dim=1).permute(1, 0) for i in pixel_values])

            # exist image
            if all_pixel_values:
                tgt_sizes = [tgt_size for tgt_size in tgt_sizes if isinstance(tgt_size, torch.Tensor)]
                tgt_sizes = torch.vstack(tgt_sizes).type(torch.int32)

                max_patches = torch.max(tgt_sizes[:, 0] * tgt_sizes[:, 1])

                all_pixel_values = torch.nn.utils.rnn.pad_sequence(all_pixel_values, batch_first=True,
                                                                   padding_value=0.0)
                B, L, _ = all_pixel_values.shape
                all_pixel_values = all_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)

                patch_attn_mask = torch.zeros((B, 1, max_patches), dtype=torch.bool, device=device)
                for i in range(B):
                    patch_attn_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1]] = True

                vision_batch_size = self.config.vision_batch_size
                all_pixel_values = all_pixel_values.type(dtype)
                if B > vision_batch_size:
                    hs = []
                    for i in range(0, B, vision_batch_size):
                        start_idx = i
                        end_idx = i + vision_batch_size
                        tmp_hs = self.vpm(all_pixel_values[start_idx:end_idx],
                                          patch_attention_mask=patch_attn_mask[start_idx:end_idx],
                                          tgt_sizes=tgt_sizes[start_idx:end_idx]).last_hidden_state
                        hs.append(tmp_hs)
                    vision_embedding = torch.cat(hs, dim=0)
                else:
                    vision_embedding = self.vpm(all_pixel_values, patch_attention_mask=patch_attn_mask,
                                                tgt_sizes=tgt_sizes).last_hidden_state
                vision_embedding = self.resampler(vision_embedding, tgt_sizes)

                start = 0
                for pixel_values in pixel_values_list:
                    img_cnt = len(pixel_values)
                    if img_cnt > 0:
                        vision_hidden_states.append(vision_embedding[start: start + img_cnt])
                        start += img_cnt
                    else:
                        vision_hidden_states.append([])
            else:  # no image
                if self.training:
                    dummy_image = torch.zeros(
                        (1, 3, 224, 224),
                        device=device, dtype=dtype
                    )
                    tgt_sizes = torch.Tensor(
                        [[(224 // self.config.patch_size), math.ceil(224 / self.config.patch_size)]]).type(torch.int32)
                    dummy_feature = self.resampler(self.vpm(dummy_image).last_hidden_state, tgt_sizes)
                else:
                    dummy_feature = []
                for _ in range(len(pixel_values_list)):
                    vision_hidden_states.append(dummy_feature)

        else:
            vision_hidden_states = data['vision_hidden_states']

        if hasattr(self.llm.config, 'scale_emb'):
            vllm_embedding = self.llm.model.embed_tokens(data['input_ids']) * self.llm.config.scale_emb
        else:
            vllm_embedding = self.llm.model.embed_tokens(data['input_ids'])

        new_vllm_embedding = vllm_embedding.clone()

        vision_hidden_states = [i.type(vllm_embedding.dtype) if isinstance(
            i, torch.Tensor) else i for i in vision_hidden_states]

        bs = len(data['input_ids'])
        for i in range(bs):
            cur_vs_hs = vision_hidden_states[i]
            if len(cur_vs_hs) > 0:
                cur_vllm_emb = vllm_embedding[i]
                cur_image_bound = data['image_bound'][i]
                if len(cur_image_bound) > 0:
                    image_indices = torch.stack(
                        [torch.arange(r[0], r[1], dtype=torch.long) for r in cur_image_bound]
                    ).to(vllm_embedding.device)

                    new_vllm_embedding[i] = cur_vllm_emb.scatter(
                        0,
                        image_indices.view(-1, 1).repeat(1, cur_vllm_emb.shape[-1]),
                        cur_vs_hs.view(-1, cur_vs_hs.shape[-1])
                    )
                elif self.training:
                    new_vllm_embedding[i] += cur_vs_hs[0].mean() * 0

        return new_vllm_embedding, vision_hidden_states

    def forward(self, data, **kwargs):
        vllm_embedding, vision_hidden_states = self.get_vllm_embedding(data)
        position_ids = data["position_ids"]
        if position_ids.dtype != torch.int64:
            position_ids = position_ids.long()

        for key in ['input_ids', 'inputs_embeds', 'position_ids']:
            if key in kwargs:
                del kwargs[key]

        return self.llm(
            input_ids=None,
            position_ids=position_ids,
            inputs_embeds=vllm_embedding,
            **kwargs
        )

    def _decode(self, inputs_embeds, tokenizer, attention_mask, decode_text=False, **kwargs):
        terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
        output = self.llm.generate(
            inputs_embeds=inputs_embeds,
            pad_token_id=0,
            eos_token_id=terminators,
            attention_mask=attention_mask,
            **kwargs
        )
        if decode_text:
            return self._decode_text(output, tokenizer)
        return output

    def _decode_stream(self, inputs_embeds, tokenizer, **kwargs):
        terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
        streamer = TextIteratorStreamer(tokenizer=tokenizer)
        generation_kwargs = {
            'inputs_embeds': inputs_embeds,
            'pad_token_id': 0,
            'eos_token_id': terminators,
            'streamer': streamer
        }
        generation_kwargs.update(kwargs)

        thread = Thread(target=self.llm.generate, kwargs=generation_kwargs)
        thread.start()

        return streamer

    def _decode_text(self, result_ids, tokenizer):
        terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
        result_text = []
        for result in result_ids:
            result = result[result != 0]
            if result[0] == tokenizer.bos_id:
                result = result[1:]
            if result[-1] in terminators:
                result = result[:-1]
            result_text.append(tokenizer.decode(result).strip())
        return result_text

    def generate(
        self,
        input_ids=None,
        pixel_values=None,
        tgt_sizes=None,
        image_bound=None,
        attention_mask=None,
        tokenizer=None,
        vision_hidden_states=None,
        return_vision_hidden_states=False,
        stream=False,
        decode_text=False,
        **kwargs
    ):
        assert input_ids is not None
        assert len(input_ids) == len(pixel_values)

        model_inputs = {
            "input_ids": input_ids,
            "image_bound": image_bound,
        }

        if vision_hidden_states is None:
            model_inputs["pixel_values"] = pixel_values
            model_inputs['tgt_sizes'] = tgt_sizes
        else:
            model_inputs["vision_hidden_states"] = vision_hidden_states

        with torch.inference_mode():
            (
                model_inputs["inputs_embeds"],
                vision_hidden_states,
            ) = self.get_vllm_embedding(model_inputs)

            if stream:
                result = self._decode_stream(model_inputs["inputs_embeds"], tokenizer, **kwargs)
            else:
                result = self._decode(model_inputs["inputs_embeds"], tokenizer, attention_mask, decode_text=decode_text,
                                      **kwargs)

        if return_vision_hidden_states:
            return result, vision_hidden_states

        return result

    @staticmethod
    def box2string(box):
        out = '['
        for i, x in enumerate(box):
            out += f"{round(x.item(), 2):.2f}"
            if i != len(box) - 1: out += ', '
        out += ']'
        return out

    def make_od_prompt(self, od_result):
        od_scores = od_result['scores']
        od_index = torch.where(od_scores >= 0.5)
        od_boxes = od_result['boxes'][od_index] / 490
        od_labels = [(HOUSE_OBJECTS)[ind] for ind in od_result['labels'][od_index]]

        if len(od_boxes) != 0:
            verbalization_od = 'The image includes bounding box coordinates and their objects: '
            for i, (box, label) in enumerate(zip(od_boxes, od_labels)):
                verbalization_od += f'{self.box2string(box)} {label}'
                if i != len(od_boxes) - 1: verbalization_od += ', and '
            verbalization_od += '.'
        else:
            verbalization_od = ''
        return verbalization_od, od_boxes, od_labels

    def od_to_prompt(self, image):
        od_inputs = self.init_od_processor()(text=[HOUSE_OBJECTS], images=[image], return_tensors="pt")

        # OWOD outputs
        with torch.inference_mode():
            self.init_od_model().eval()
            od_results = self.init_od_processor().post_process_object_detection(
                self.init_od_model()(**{k: v.to(self.device) for k, v in od_inputs.items()}),
                threshold=0.1,
                target_sizes=[(490, 490)])

        verbalization_od, od_boxes, od_labels = self.make_od_prompt(od_results[0])

        return verbalization_od

    def chat(
        self,
        image,
        msgs,
        tokenizer,
        processor=None,
        vision_hidden_states=None,
        max_new_tokens=2048,
        min_new_tokens=0,
        sampling=True,
        max_inp_length=8192,
        system_prompt='',
        stream=False,
        max_slice_nums=None,
        use_image_id=None,
        **kwargs
    ):
        if isinstance(msgs[0], list):
            batched = True
        else:
            batched = False
        msgs_list = msgs
        images_list = image

        if batched is False:
            images_list, msgs_list = [images_list], [msgs_list]
        else:
            assert images_list is None, "Please integrate image to msgs when using batch inference."
            images_list = [None] * len(msgs_list)
        assert len(images_list) == len(msgs_list), "The batch dim of images_list and msgs_list should be the same."

        if processor is None:
            if self.processor is None:
                self.processor = AutoProcessor.from_pretrained(self.config._name_or_path, trust_remote_code=True)
            processor = self.processor

        assert self.config.query_num == processor.image_processor.image_feature_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
        assert self.config.patch_size == processor.image_processor.patch_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
        assert self.config.use_image_id == processor.image_processor.use_image_id, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
        assert self.config.slice_config.max_slice_nums == processor.image_processor.max_slice_nums, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
        assert self.config.slice_mode == processor.image_processor.slice_mode, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."

        prompts_lists = []
        input_images_lists = []
        for image, msgs in zip(images_list, msgs_list):
            if isinstance(msgs, str):
                msgs = json.loads(msgs)
            copy_msgs = deepcopy(msgs)

            assert len(msgs) > 0, "msgs is empty"
            assert sampling or not stream, "if use stream mode, make sure sampling=True"

            if image is not None and isinstance(copy_msgs[0]["content"], str):
                copy_msgs[0]["content"] = [image, copy_msgs[0]["content"]]

            images = []
            for i, msg in enumerate(copy_msgs):
                role = msg["role"]
                content = msg["content"]
                assert role in ["user", "assistant"]
                if i == 0:
                    assert role == "user", "The role of first msg should be user"
                if isinstance(content, str):
                    content = [content]
                cur_msgs = []
                for c in content:
                    if isinstance(c, Image.Image):
                        images.append(c)
                        cur_msgs.append("(<image>./</image>)")
                        cur_msgs.append(self.od_to_prompt(c))
                    elif isinstance(c, str):
                        cur_msgs.append(c)
                msg["content"] = "\n".join(cur_msgs)

            if system_prompt:
                sys_msg = {'role': 'system', 'content': system_prompt}
                copy_msgs = [sys_msg] + copy_msgs

            prompts_lists.append(
                processor.tokenizer.apply_chat_template(copy_msgs, tokenize=False, add_generation_prompt=True))
            input_images_lists.append(images)
        print(prompts_lists)
        inputs = processor(
            prompts_lists,
            input_images_lists,
            max_slice_nums=max_slice_nums,
            use_image_id=use_image_id,
            return_tensors="pt",
            max_length=max_inp_length
        ).to(self.device)

        if sampling:
            generation_config = {
                "top_p": 0.8,
                "top_k": 100,
                "temperature": 0.7,
                "do_sample": True,
                "repetition_penalty": 1.05
            }
        else:
            generation_config = {
                "num_beams": 3,
                "repetition_penalty": 1.2,
            }

        if min_new_tokens > 0:
            generation_config['min_new_tokens'] = min_new_tokens

        generation_config.update(
            (k, kwargs[k]) for k in generation_config.keys() & kwargs.keys()
        )

        inputs.pop("image_sizes")
        with torch.inference_mode():
            res = self.generate(
                **inputs,
                tokenizer=tokenizer,
                max_new_tokens=max_new_tokens,
                vision_hidden_states=vision_hidden_states,
                stream=stream,
                decode_text=True,
                **generation_config
            )

        if stream:
            def stream_gen():
                for text in res:
                    for term in self.terminators:
                        text = text.replace(term, '')
                    yield text

            return stream_gen()

        else:
            if batched:
                answer = res
            else:
                answer = res[0]
            return answer
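A single-image chat sketch for the model class above (paths are placeholders, not part of the upload). `config.json` maps `AutoModel` to `MiniCPMV`, so `trust_remote_code=True` loads this file; note that `chat()` also calls `od_to_prompt()`, which assumes `od_model_name` / `od_processor_name` are set in the config and loads an OWLv2 detector on the fly:

import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

path = "path/to/this/checkpoint"  # placeholder
model = AutoModel.from_pretrained(path, trust_remote_code=True, torch_dtype=torch.bfloat16).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

image = Image.open("room.jpg").convert("RGB")  # placeholder
msgs = [{"role": "user", "content": "What objects are on the table?"}]

# chat() inserts the image placeholder tokens and appends the OWLv2-verbalized
# bounding boxes from od_to_prompt() to the user turn before generation.
answer = model.chat(image=image, msgs=msgs, tokenizer=tokenizer)
print(answer)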
modeling_navit_siglip.py
ADDED
|
@@ -0,0 +1,937 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" PyTorch Siglip model. """
|
| 16 |
+
# Copied from HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit and add tgt_sizes
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
import os
|
| 20 |
+
import math
|
| 21 |
+
import warnings
|
| 22 |
+
from dataclasses import dataclass
|
| 23 |
+
from typing import Any, Optional, Tuple, Union
|
| 24 |
+
|
| 25 |
+
import numpy as np
|
| 26 |
+
import torch
|
| 27 |
+
import torch.nn.functional as F
|
| 28 |
+
import torch.utils.checkpoint
|
| 29 |
+
from torch import nn
|
| 30 |
+
from torch.nn.init import _calculate_fan_in_and_fan_out
|
| 31 |
+
|
| 32 |
+
from transformers.activations import ACT2FN
|
| 33 |
+
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
|
| 34 |
+
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
|
| 35 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 36 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 37 |
+
from transformers.utils import (
|
| 38 |
+
ModelOutput,
|
| 39 |
+
add_start_docstrings,
|
| 40 |
+
add_start_docstrings_to_model_forward,
|
| 41 |
+
is_flash_attn_2_available,
|
| 42 |
+
logging,
|
| 43 |
+
replace_return_docstrings,
|
| 44 |
+
)
|
| 45 |
+
from transformers.utils import logging
|
| 46 |
+
|
| 47 |
+
logger = logging.get_logger(__name__)
|
| 48 |
+
|
| 49 |
+
class SiglipVisionConfig(PretrainedConfig):
|
| 50 |
+
r"""
|
| 51 |
+
This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
|
| 52 |
+
Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
|
| 53 |
+
configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
|
| 54 |
+
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
|
| 55 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 56 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 57 |
+
Args:
|
| 58 |
+
hidden_size (`int`, *optional*, defaults to 768):
|
| 59 |
+
Dimensionality of the encoder layers and the pooler layer.
|
| 60 |
+
intermediate_size (`int`, *optional*, defaults to 3072):
|
| 61 |
+
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
|
| 62 |
+
num_hidden_layers (`int`, *optional*, defaults to 12):
|
| 63 |
+
Number of hidden layers in the Transformer encoder.
|
| 64 |
+
num_attention_heads (`int`, *optional*, defaults to 12):
|
| 65 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 66 |
+
num_channels (`int`, *optional*, defaults to 3):
|
| 67 |
+
Number of channels in the input images.
|
| 68 |
+
image_size (`int`, *optional*, defaults to 224):
|
| 69 |
+
The size (resolution) of each image.
|
| 70 |
+
patch_size (`int`, *optional*, defaults to 16):
|
| 71 |
+
The size (resolution) of each patch.
|
| 72 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
|
| 73 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
| 74 |
+
`"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
|
| 75 |
+
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
|
| 76 |
+
The epsilon used by the layer normalization layers.
|
| 77 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
| 78 |
+
The dropout ratio for the attention probabilities.
|
| 79 |
+
Example:
|
| 80 |
+
```python
|
| 81 |
+
>>> from transformers import SiglipVisionConfig, SiglipVisionModel
|
| 82 |
+
>>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
|
| 83 |
+
>>> configuration = SiglipVisionConfig()
|
| 84 |
+
>>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
|
| 85 |
+
>>> model = SiglipVisionModel(configuration)
|
| 86 |
+
>>> # Accessing the model configuration
|
| 87 |
+
>>> configuration = model.config
|
| 88 |
+
```"""
|
| 89 |
+
|
| 90 |
+
model_type = "siglip_vision_model"
|
| 91 |
+
|
| 92 |
+
def __init__(
|
| 93 |
+
self,
|
| 94 |
+
hidden_size=768,
|
| 95 |
+
intermediate_size=3072,
|
| 96 |
+
num_hidden_layers=12,
|
| 97 |
+
num_attention_heads=12,
|
| 98 |
+
num_channels=3,
|
| 99 |
+
image_size=224,
|
| 100 |
+
patch_size=16,
|
| 101 |
+
hidden_act="gelu_pytorch_tanh",
|
| 102 |
+
layer_norm_eps=1e-6,
|
| 103 |
+
attention_dropout=0.0,
|
| 104 |
+
**kwargs,
|
| 105 |
+
):
|
| 106 |
+
super().__init__(**kwargs)
|
| 107 |
+
|
| 108 |
+
self.hidden_size = hidden_size
|
| 109 |
+
self.intermediate_size = intermediate_size
|
| 110 |
+
self.num_hidden_layers = num_hidden_layers
|
| 111 |
+
self.num_attention_heads = num_attention_heads
|
| 112 |
+
self.num_channels = num_channels
|
| 113 |
+
self.patch_size = patch_size
|
| 114 |
+
self.image_size = image_size
|
| 115 |
+
self.attention_dropout = attention_dropout
|
| 116 |
+
self.layer_norm_eps = layer_norm_eps
|
| 117 |
+
self.hidden_act = hidden_act
|
| 118 |
+
|
| 119 |
+
@classmethod
|
| 120 |
+
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
|
| 121 |
+
cls._set_token_in_kwargs(kwargs)
|
| 122 |
+
|
| 123 |
+
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
|
| 124 |
+
|
| 125 |
+
# get the vision config dict if we are loading from SiglipConfig
|
| 126 |
+
if config_dict.get("model_type") == "siglip":
|
| 127 |
+
config_dict = config_dict["vision_config"]
|
| 128 |
+
|
| 129 |
+
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
|
| 130 |
+
logger.warning(
|
| 131 |
+
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
|
| 132 |
+
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
return cls.from_dict(config_dict, **kwargs)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
_CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
|
| 139 |
+
|
| 140 |
+
SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
| 141 |
+
"google/siglip-base-patch16-224",
|
| 142 |
+
# See all SigLIP models at https://huggingface.co/models?filter=siglip
|
| 143 |
+
]
|
| 144 |
+
|
| 145 |
+
if is_flash_attn_2_available():
|
| 146 |
+
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
| 147 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
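# Given a (batch, seq_len) padding mask, this helper returns the flat indices of the
# non-padding tokens, the cumulative per-sample sequence lengths (cu_seqlens), and the
# longest sequence length -- exactly the inputs flash_attn_varlen_func expects.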
| 151 |
+
def _get_unpad_data(attention_mask):
|
| 152 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
| 153 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
| 154 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
| 155 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
|
| 156 |
+
return (
|
| 157 |
+
indices,
|
| 158 |
+
cu_seqlens,
|
| 159 |
+
max_seqlen_in_batch,
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def _trunc_normal_(tensor, mean, std, a, b):
|
| 164 |
+
# Cut & paste from PyTorch official master until it's in a few official releases - RW
|
| 165 |
+
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
|
| 166 |
+
def norm_cdf(x):
|
| 167 |
+
# Computes standard normal cumulative distribution function
|
| 168 |
+
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
|
| 169 |
+
|
| 170 |
+
if (mean < a - 2 * std) or (mean > b + 2 * std):
|
| 171 |
+
warnings.warn(
|
| 172 |
+
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
|
| 173 |
+
"The distribution of values may be incorrect.",
|
| 174 |
+
stacklevel=2,
|
| 175 |
+
)
|
| 176 |
+
|
| 177 |
+
# Values are generated by using a truncated uniform distribution and
|
| 178 |
+
# then using the inverse CDF for the normal distribution.
|
| 179 |
+
# Get upper and lower cdf values
|
| 180 |
+
l = norm_cdf((a - mean) / std)
|
| 181 |
+
u = norm_cdf((b - mean) / std)
|
| 182 |
+
|
| 183 |
+
# Uniformly fill tensor with values from [l, u], then translate to
|
| 184 |
+
# [2l-1, 2u-1].
|
| 185 |
+
tensor.uniform_(2 * l - 1, 2 * u - 1)
|
| 186 |
+
|
| 187 |
+
# Use inverse cdf transform for normal distribution to get truncated
|
| 188 |
+
# standard normal
|
| 189 |
+
if tensor.dtype in [torch.float16, torch.bfloat16]:
|
| 190 |
+
# The `erfinv_` op is not (yet?) defined in float16+cpu, bfloat16+gpu
|
| 191 |
+
og_dtype = tensor.dtype
|
| 192 |
+
tensor = tensor.to(torch.float32)
|
| 193 |
+
tensor.erfinv_()
|
| 194 |
+
tensor = tensor.to(og_dtype)
|
| 195 |
+
else:
|
| 196 |
+
tensor.erfinv_()
|
| 197 |
+
|
| 198 |
+
# Transform to proper mean, std
|
| 199 |
+
tensor.mul_(std * math.sqrt(2.0))
|
| 200 |
+
tensor.add_(mean)
|
| 201 |
+
|
| 202 |
+
# Clamp to ensure it's in the proper range
|
| 203 |
+
if tensor.dtype == torch.float16:
|
| 204 |
+
# The `clamp_` op is not (yet?) defined in float16+cpu
|
| 205 |
+
tensor = tensor.to(torch.float32)
|
| 206 |
+
tensor.clamp_(min=a, max=b)
|
| 207 |
+
tensor = tensor.to(torch.float16)
|
| 208 |
+
else:
|
| 209 |
+
tensor.clamp_(min=a, max=b)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def trunc_normal_tf_(
|
| 213 |
+
tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
|
| 214 |
+
) -> torch.Tensor:
|
| 215 |
+
"""Fills the input Tensor with values drawn from a truncated
|
| 216 |
+
normal distribution. The values are effectively drawn from the
|
| 217 |
+
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
|
| 218 |
+
with values outside :math:`[a, b]` redrawn until they are within
|
| 219 |
+
the bounds. The method used for generating the random values works
|
| 220 |
+
best when :math:`a \leq \text{mean} \leq b`.
|
| 221 |
+
NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
|
| 222 |
+
bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
|
| 223 |
+
and the result is subsequently scaled and shifted by the mean and std args.
|
| 224 |
+
Args:
|
| 225 |
+
tensor: an n-dimensional `torch.Tensor`
|
| 226 |
+
mean: the mean of the normal distribution
|
| 227 |
+
std: the standard deviation of the normal distribution
|
| 228 |
+
a: the minimum cutoff value
|
| 229 |
+
b: the maximum cutoff value
|
| 230 |
+
"""
|
| 231 |
+
with torch.no_grad():
|
| 232 |
+
_trunc_normal_(tensor, 0, 1.0, a, b)
|
| 233 |
+
tensor.mul_(std).add_(mean)
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
|
| 237 |
+
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
|
| 238 |
+
if mode == "fan_in":
|
| 239 |
+
denom = fan_in
|
| 240 |
+
elif mode == "fan_out":
|
| 241 |
+
denom = fan_out
|
| 242 |
+
elif mode == "fan_avg":
|
| 243 |
+
denom = (fan_in + fan_out) / 2
|
| 244 |
+
|
| 245 |
+
variance = scale / denom
|
| 246 |
+
|
| 247 |
+
if distribution == "truncated_normal":
|
| 248 |
+
# constant is stddev of standard normal truncated to (-2, 2)
|
| 249 |
+
trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
|
| 250 |
+
elif distribution == "normal":
|
| 251 |
+
with torch.no_grad():
|
| 252 |
+
tensor.normal_(std=math.sqrt(variance))
|
| 253 |
+
elif distribution == "uniform":
|
| 254 |
+
bound = math.sqrt(3 * variance)
|
| 255 |
+
with torch.no_grad():
|
| 256 |
+
tensor.uniform_(-bound, bound)
|
| 257 |
+
else:
|
| 258 |
+
raise ValueError(f"invalid distribution {distribution}")
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def lecun_normal_(tensor):
|
| 262 |
+
variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def default_flax_embed_init(tensor):
|
| 266 |
+
variance_scaling_(tensor, mode="fan_in", distribution="normal")
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
@dataclass
|
| 270 |
+
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
|
| 271 |
+
class SiglipVisionModelOutput(ModelOutput):
|
| 272 |
+
"""
|
| 273 |
+
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
|
| 274 |
+
Args:
|
| 275 |
+
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
|
| 276 |
+
The image embeddings obtained by applying the projection layer to the pooler_output.
|
| 277 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 278 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 279 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 280 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 281 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 282 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 283 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
| 284 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
| 285 |
+
sequence_length)`.
|
| 286 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
| 287 |
+
heads.
|
| 288 |
+
"""
|
| 289 |
+
|
| 290 |
+
image_embeds: Optional[torch.FloatTensor] = None
|
| 291 |
+
last_hidden_state: torch.FloatTensor = None
|
| 292 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 293 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class SiglipVisionEmbeddings(nn.Module):
|
| 297 |
+
def __init__(self, config: SiglipVisionConfig):
|
| 298 |
+
super().__init__()
|
| 299 |
+
self.config = config
|
| 300 |
+
self.embed_dim = config.hidden_size
|
| 301 |
+
self.image_size = config.image_size
|
| 302 |
+
self.patch_size = config.patch_size
|
| 303 |
+
|
| 304 |
+
self.patch_embedding = nn.Conv2d(
|
| 305 |
+
in_channels=config.num_channels,
|
| 306 |
+
out_channels=self.embed_dim,
|
| 307 |
+
kernel_size=self.patch_size,
|
| 308 |
+
stride=self.patch_size,
|
| 309 |
+
padding="valid",
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
self.num_patches_per_side = self.image_size // self.patch_size
|
| 313 |
+
self.num_patches = self.num_patches_per_side**2
|
| 314 |
+
self.num_positions = self.num_patches
|
| 315 |
+
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
|
| 316 |
+
|
| 317 |
+
def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor, tgt_sizes: Optional[torch.IntTensor]=None) -> torch.Tensor:
|
| 318 |
+
batch_size = pixel_values.size(0)
|
| 319 |
+
|
| 320 |
+
patch_embeds = self.patch_embedding(pixel_values)
|
| 321 |
+
embeddings = patch_embeds.flatten(2).transpose(1, 2)
|
| 322 |
+
|
| 323 |
+
max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
|
| 324 |
+
max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
|
| 325 |
+
boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
|
| 326 |
+
position_ids = torch.full(
|
| 327 |
+
size=(
|
| 328 |
+
batch_size,
|
| 329 |
+
max_nb_patches_h * max_nb_patches_w,
|
| 330 |
+
),
|
| 331 |
+
fill_value=0,
|
| 332 |
+
)
|
| 333 |
+
|
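# Patch coordinates are expressed as fractions of each image's own grid and then
# bucketized onto the fixed num_patches_per_side axis, so variable-resolution inputs
# reuse the same learned position table without interpolating its weights.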
| 334 |
+
for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
|
| 335 |
+
if tgt_sizes is not None:
|
| 336 |
+
nb_patches_h = tgt_sizes[batch_idx][0]
|
| 337 |
+
nb_patches_w = tgt_sizes[batch_idx][1]
|
| 338 |
+
else:
|
| 339 |
+
nb_patches_h = p_attn_mask[:, 0].sum()
|
| 340 |
+
nb_patches_w = p_attn_mask[0].sum()
|
| 341 |
+
|
| 342 |
+
fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
|
| 343 |
+
fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
|
| 344 |
+
|
| 345 |
+
bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
|
| 346 |
+
bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
|
| 347 |
+
|
| 348 |
+
pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
|
| 349 |
+
position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
|
| 350 |
+
|
| 351 |
+
position_ids = position_ids.to(self.position_embedding.weight.device)
|
| 352 |
+
|
| 353 |
+
embeddings = embeddings + self.position_embedding(position_ids)
|
| 354 |
+
return embeddings
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class SiglipAttention(nn.Module):
|
| 358 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 359 |
+
|
| 360 |
+
# Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
|
| 361 |
+
def __init__(self, config):
|
| 362 |
+
super().__init__()
|
| 363 |
+
self.config = config
|
| 364 |
+
self.embed_dim = config.hidden_size
|
| 365 |
+
self.num_heads = config.num_attention_heads
|
| 366 |
+
self.head_dim = self.embed_dim // self.num_heads
|
| 367 |
+
if self.head_dim * self.num_heads != self.embed_dim:
|
| 368 |
+
raise ValueError(
|
| 369 |
+
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
|
| 370 |
+
f" {self.num_heads})."
|
| 371 |
+
)
|
| 372 |
+
self.scale = self.head_dim**-0.5
|
| 373 |
+
self.dropout = config.attention_dropout
|
| 374 |
+
|
| 375 |
+
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 376 |
+
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 377 |
+
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 378 |
+
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 379 |
+
|
| 380 |
+
def forward(
|
| 381 |
+
self,
|
| 382 |
+
hidden_states: torch.Tensor,
|
| 383 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 384 |
+
output_attentions: Optional[bool] = False,
|
| 385 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 386 |
+
"""Input shape: Batch x Time x Channel"""
|
| 387 |
+
|
| 388 |
+
batch_size, q_len, _ = hidden_states.size()
|
| 389 |
+
|
| 390 |
+
query_states = self.q_proj(hidden_states)
|
| 391 |
+
key_states = self.k_proj(hidden_states)
|
| 392 |
+
value_states = self.v_proj(hidden_states)
|
| 393 |
+
|
| 394 |
+
query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 395 |
+
key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 396 |
+
value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 397 |
+
|
| 398 |
+
k_v_seq_len = key_states.shape[-2]
|
| 399 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
|
| 400 |
+
|
| 401 |
+
if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
|
| 402 |
+
raise ValueError(
|
| 403 |
+
f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
|
| 404 |
+
f" {attn_weights.size()}"
|
| 405 |
+
)
|
| 406 |
+
|
| 407 |
+
if attention_mask is not None:
|
| 408 |
+
if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
|
| 409 |
+
raise ValueError(
|
| 410 |
+
f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
|
| 411 |
+
)
|
| 412 |
+
attn_weights = attn_weights + attention_mask
|
| 413 |
+
|
| 414 |
+
# upcast attention to fp32
|
| 415 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
| 416 |
+
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
|
| 417 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
| 418 |
+
|
| 419 |
+
if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
|
| 420 |
+
raise ValueError(
|
| 421 |
+
f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
|
| 422 |
+
f" {attn_output.size()}"
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 426 |
+
attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
|
| 427 |
+
|
| 428 |
+
attn_output = self.out_proj(attn_output)
|
| 429 |
+
|
| 430 |
+
return attn_output, attn_weights
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
class SiglipFlashAttention2(SiglipAttention):
|
| 434 |
+
"""
|
| 435 |
+
Siglip flash attention module. This module inherits from `SiglipAttention` as the weights of the module stay
|
| 436 |
+
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
|
| 437 |
+
flash attention and deal with padding tokens in case the input contains any of them.
|
| 438 |
+
"""
|
| 439 |
+
|
| 440 |
+
def __init__(self, *args, **kwargs):
|
| 441 |
+
super().__init__(*args, **kwargs)
|
| 442 |
+
self.is_causal = False # Hack to make sure we don't use a causal mask
|
| 443 |
+
|
| 444 |
+
def forward(
|
| 445 |
+
self,
|
| 446 |
+
hidden_states: torch.Tensor,
|
| 447 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 448 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 449 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 450 |
+
output_attentions: bool = False,
|
| 451 |
+
use_cache: bool = False,
|
| 452 |
+
**kwargs,
|
| 453 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 454 |
+
output_attentions = False
|
| 455 |
+
|
| 456 |
+
bsz, q_len, _ = hidden_states.size()
|
| 457 |
+
|
| 458 |
+
query_states = self.q_proj(hidden_states)
|
| 459 |
+
key_states = self.k_proj(hidden_states)
|
| 460 |
+
value_states = self.v_proj(hidden_states)
|
| 461 |
+
|
| 462 |
+
# Flash attention requires the input to have the shape
|
| 463 |
+
# batch_size x seq_length x head_dim x hidden_dim
|
| 464 |
+
# therefore we just need to keep the original shape
|
| 465 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 466 |
+
key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 467 |
+
value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 468 |
+
|
| 469 |
+
kv_seq_len = key_states.shape[-2]
|
| 470 |
+
if past_key_value is not None:
|
| 471 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
| 472 |
+
# cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
| 473 |
+
# query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
| 474 |
+
|
| 475 |
+
# if past_key_value is not None:
|
| 476 |
+
# cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
|
| 477 |
+
# key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 478 |
+
|
| 479 |
+
# TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
|
| 480 |
+
# to be able to avoid many of these transpose/reshape/view.
|
| 481 |
+
query_states = query_states.transpose(1, 2)
|
| 482 |
+
key_states = key_states.transpose(1, 2)
|
| 483 |
+
value_states = value_states.transpose(1, 2)
|
| 484 |
+
|
| 485 |
+
dropout_rate = self.dropout if self.training else 0.0
|
| 486 |
+
|
| 487 |
+
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
|
| 488 |
+
# therefore the input hidden states gets silently casted in float32. Hence, we need
|
| 489 |
+
# cast them back in the correct dtype just to be sure everything works as expected.
|
| 490 |
+
# This might slowdown training & inference so it is recommended to not cast the LayerNorms
|
| 491 |
+
# in fp32. (LlamaRMSNorm handles it correctly)
|
| 492 |
+
|
| 493 |
+
input_dtype = query_states.dtype
|
| 494 |
+
if input_dtype == torch.float32:
|
| 495 |
+
if torch.is_autocast_enabled():
|
| 496 |
+
target_dtype = torch.get_autocast_gpu_dtype()
|
| 497 |
+
# Handle the case where the model is quantized
|
| 498 |
+
elif hasattr(self.config, "_pre_quantization_dtype"):
|
| 499 |
+
target_dtype = self.config._pre_quantization_dtype
|
| 500 |
+
else:
|
| 501 |
+
target_dtype = self.q_proj.weight.dtype
|
| 502 |
+
|
| 503 |
+
logger.warning_once(
|
| 504 |
+
"The input hidden states seems to be silently casted in float32, this might be related to the fact"
|
| 505 |
+
" you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
| 506 |
+
f" {target_dtype}."
|
| 507 |
+
)
|
| 508 |
+
|
| 509 |
+
query_states = query_states.to(target_dtype)
|
| 510 |
+
key_states = key_states.to(target_dtype)
|
| 511 |
+
value_states = value_states.to(target_dtype)
|
| 512 |
+
|
| 513 |
+
attn_output = self._flash_attention_forward(
|
| 514 |
+
query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
|
| 518 |
+
attn_output = self.out_proj(attn_output)
|
| 519 |
+
|
| 520 |
+
if not output_attentions:
|
| 521 |
+
attn_weights = None
|
| 522 |
+
|
| 523 |
+
return attn_output, attn_weights
|
| 524 |
+
|
| 525 |
+
def _flash_attention_forward(
|
| 526 |
+
self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
|
| 527 |
+
):
|
| 528 |
+
"""
|
| 529 |
+
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
|
| 530 |
+
first unpad the input, then computes the attention scores and pad the final attention scores.
|
| 531 |
+
Args:
|
| 532 |
+
query_states (`torch.Tensor`):
|
| 533 |
+
Input query states to be passed to Flash Attention API
|
| 534 |
+
key_states (`torch.Tensor`):
|
| 535 |
+
Input key states to be passed to Flash Attention API
|
| 536 |
+
value_states (`torch.Tensor`):
|
| 537 |
+
Input value states to be passed to Flash Attention API
|
| 538 |
+
attention_mask (`torch.Tensor`):
|
| 539 |
+
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
|
| 540 |
+
position of padding tokens and 1 for the position of non-padding tokens.
|
| 541 |
+
dropout (`float`, *optional*):
|
| 542 |
+
Attention dropout
|
| 543 |
+
softmax_scale (`float`, *optional*):
|
| 544 |
+
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
|
| 545 |
+
"""
|
| 546 |
+
|
| 547 |
+
# TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
|
| 548 |
+
causal = self.is_causal and query_length != 1
|
| 549 |
+
|
| 550 |
+
# Contains at least one padding token in the sequence
|
| 551 |
+
if attention_mask is not None:
|
| 552 |
+
batch_size = query_states.shape[0]
|
| 553 |
+
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
| 554 |
+
query_states, key_states, value_states, attention_mask, query_length
|
| 555 |
+
)
|
| 556 |
+
|
| 557 |
+
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
| 558 |
+
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
| 559 |
+
|
| 560 |
+
attn_output_unpad = flash_attn_varlen_func(
|
| 561 |
+
query_states,
|
| 562 |
+
key_states,
|
| 563 |
+
value_states,
|
| 564 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 565 |
+
cu_seqlens_k=cu_seqlens_k,
|
| 566 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
| 567 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
| 568 |
+
dropout_p=dropout,
|
| 569 |
+
softmax_scale=softmax_scale,
|
| 570 |
+
causal=causal,
|
| 571 |
+
)
|
| 572 |
+
|
| 573 |
+
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
| 574 |
+
else:
|
| 575 |
+
attn_output = flash_attn_func(
|
| 576 |
+
query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
return attn_output
|
| 580 |
+
|
| 581 |
+
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
|
| 582 |
+
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
|
| 583 |
+
batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
|
| 584 |
+
|
| 585 |
+
key_layer = index_first_axis(
|
| 586 |
+
key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
|
| 587 |
+
)
|
| 588 |
+
value_layer = index_first_axis(
|
| 589 |
+
value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
|
| 590 |
+
)
|
| 591 |
+
if query_length == kv_seq_len:
|
| 592 |
+
query_layer = index_first_axis(
|
| 593 |
+
query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
|
| 594 |
+
)
|
| 595 |
+
cu_seqlens_q = cu_seqlens_k
|
| 596 |
+
max_seqlen_in_batch_q = max_seqlen_in_batch_k
|
| 597 |
+
indices_q = indices_k
|
| 598 |
+
elif query_length == 1:
|
| 599 |
+
max_seqlen_in_batch_q = 1
|
| 600 |
+
cu_seqlens_q = torch.arange(
|
| 601 |
+
batch_size + 1, dtype=torch.int32, device=query_layer.device
|
| 602 |
+
) # There is a memcpy here, that is very bad.
|
| 603 |
+
indices_q = cu_seqlens_q[:-1]
|
| 604 |
+
query_layer = query_layer.squeeze(1)
|
| 605 |
+
else:
|
| 606 |
+
# The -q_len: slice assumes left padding.
|
| 607 |
+
attention_mask = attention_mask[:, -query_length:]
|
| 608 |
+
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
|
| 609 |
+
|
| 610 |
+
return (
|
| 611 |
+
query_layer,
|
| 612 |
+
key_layer,
|
| 613 |
+
value_layer,
|
| 614 |
+
indices_q,
|
| 615 |
+
(cu_seqlens_q, cu_seqlens_k),
|
| 616 |
+
(max_seqlen_in_batch_q, max_seqlen_in_batch_k),
|
| 617 |
+
)
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
|
| 621 |
+
class SiglipMLP(nn.Module):
|
| 622 |
+
def __init__(self, config):
|
| 623 |
+
super().__init__()
|
| 624 |
+
self.config = config
|
| 625 |
+
self.activation_fn = ACT2FN[config.hidden_act]
|
| 626 |
+
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
|
| 627 |
+
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
|
| 628 |
+
|
| 629 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 630 |
+
hidden_states = self.fc1(hidden_states)
|
| 631 |
+
hidden_states = self.activation_fn(hidden_states)
|
| 632 |
+
hidden_states = self.fc2(hidden_states)
|
| 633 |
+
return hidden_states
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
|
| 637 |
+
class SiglipEncoderLayer(nn.Module):
|
| 638 |
+
def __init__(self, config: SiglipVisionConfig):
|
| 639 |
+
super().__init__()
|
| 640 |
+
self.embed_dim = config.hidden_size
|
| 641 |
+
self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
|
| 642 |
+
self.self_attn = (
|
| 643 |
+
SiglipAttention(config)
|
| 644 |
+
if not self._use_flash_attention_2
|
| 645 |
+
else SiglipFlashAttention2(config)
|
| 646 |
+
)
|
| 647 |
+
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
|
| 648 |
+
self.mlp = SiglipMLP(config)
|
| 649 |
+
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
|
| 650 |
+
|
| 651 |
+
def forward(
|
| 652 |
+
self,
|
| 653 |
+
hidden_states: torch.Tensor,
|
| 654 |
+
attention_mask: torch.Tensor,
|
| 655 |
+
output_attentions: Optional[bool] = False,
|
| 656 |
+
) -> Tuple[torch.FloatTensor]:
|
| 657 |
+
"""
|
| 658 |
+
Args:
|
| 659 |
+
hidden_states (`torch.FloatTensor`):
|
| 660 |
+
Input to the layer of shape `(batch, seq_len, embed_dim)`.
|
| 661 |
+
attention_mask (`torch.FloatTensor`):
|
| 662 |
+
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
|
| 663 |
+
output_attentions (`bool`, *optional*, defaults to `False`):
|
| 664 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 665 |
+
returned tensors for more detail.
|
| 666 |
+
"""
|
| 667 |
+
residual = hidden_states
|
| 668 |
+
|
| 669 |
+
hidden_states = self.layer_norm1(hidden_states)
|
| 670 |
+
hidden_states, attn_weights = self.self_attn(
|
| 671 |
+
hidden_states=hidden_states,
|
| 672 |
+
attention_mask=attention_mask,
|
| 673 |
+
output_attentions=output_attentions,
|
| 674 |
+
)
|
| 675 |
+
hidden_states = residual + hidden_states
|
| 676 |
+
|
| 677 |
+
residual = hidden_states
|
| 678 |
+
hidden_states = self.layer_norm2(hidden_states)
|
| 679 |
+
hidden_states = self.mlp(hidden_states)
|
| 680 |
+
hidden_states = residual + hidden_states
|
| 681 |
+
|
| 682 |
+
outputs = (hidden_states,)
|
| 683 |
+
|
| 684 |
+
if output_attentions:
|
| 685 |
+
outputs += (attn_weights,)
|
| 686 |
+
|
| 687 |
+
return outputs
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
class SiglipPreTrainedModel(PreTrainedModel):
|
| 691 |
+
"""
|
| 692 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 693 |
+
models.
|
| 694 |
+
"""
|
| 695 |
+
|
| 696 |
+
config_class = SiglipVisionConfig
|
| 697 |
+
base_model_prefix = "siglip"
|
| 698 |
+
supports_gradient_checkpointing = True
|
| 699 |
+
|
| 700 |
+
def _init_weights(self, module):
|
| 701 |
+
"""Initialize the weights"""
|
| 702 |
+
|
| 703 |
+
if isinstance(module, SiglipVisionEmbeddings):
|
| 704 |
+
width = self.config.hidden_size
|
| 705 |
+
nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
|
| 706 |
+
elif isinstance(module, nn.Embedding):
|
| 707 |
+
default_flax_embed_init(module.weight)
|
| 708 |
+
elif isinstance(module, SiglipAttention):
|
| 709 |
+
nn.init.normal_(module.q_proj.weight)
|
| 710 |
+
nn.init.normal_(module.k_proj.weight)
|
| 711 |
+
nn.init.normal_(module.v_proj.weight)
|
| 712 |
+
nn.init.normal_(module.out_proj.weight)
|
| 713 |
+
nn.init.zeros_(module.q_proj.bias)
|
| 714 |
+
nn.init.zeros_(module.k_proj.bias)
|
| 715 |
+
nn.init.zeros_(module.v_proj.bias)
|
| 716 |
+
nn.init.zeros_(module.out_proj.bias)
|
| 717 |
+
elif isinstance(module, SiglipMLP):
|
| 718 |
+
nn.init.normal_(module.fc1.weight)
|
| 719 |
+
nn.init.normal_(module.fc2.weight)
|
| 720 |
+
nn.init.normal_(module.fc1.bias, std=1e-6)
|
| 721 |
+
nn.init.normal_(module.fc2.bias, std=1e-6)
|
| 722 |
+
elif isinstance(module, (nn.Linear, nn.Conv2d)):
|
| 723 |
+
lecun_normal_(module.weight)
|
| 724 |
+
if module.bias is not None:
|
| 725 |
+
nn.init.zeros_(module.bias)
|
| 726 |
+
elif isinstance(module, nn.LayerNorm):
|
| 727 |
+
module.bias.data.zero_()
|
| 728 |
+
module.weight.data.fill_(1.0)
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
SIGLIP_START_DOCSTRING = r"""
|
| 732 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 733 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 734 |
+
etc.)
|
| 735 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 736 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 737 |
+
and behavior.
|
| 738 |
+
Parameters:
|
| 739 |
+
config ([`SiglipVisionConfig`]): Model configuration class with all the parameters of the model.
|
| 740 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 741 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 742 |
+
"""
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
SIGLIP_VISION_INPUTS_DOCSTRING = r"""
|
| 746 |
+
Args:
|
| 747 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 748 |
+
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
|
| 749 |
+
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
|
| 750 |
+
output_attentions (`bool`, *optional*):
|
| 751 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 752 |
+
tensors for more detail.
|
| 753 |
+
output_hidden_states (`bool`, *optional*):
|
| 754 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 755 |
+
more detail.
|
| 756 |
+
return_dict (`bool`, *optional*):
|
| 757 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 758 |
+
"""
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
|
| 762 |
+
class SiglipEncoder(nn.Module):
|
| 763 |
+
"""
|
| 764 |
+
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
|
| 765 |
+
[`SiglipEncoderLayer`].
|
| 766 |
+
Args:
|
| 767 |
+
config: SiglipConfig
|
| 768 |
+
"""
|
| 769 |
+
|
| 770 |
+
def __init__(self, config: SiglipVisionConfig):
|
| 771 |
+
super().__init__()
|
| 772 |
+
self.config = config
|
| 773 |
+
self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
|
| 774 |
+
self.gradient_checkpointing = False
|
| 775 |
+
|
| 776 |
+
# Ignore copy
|
| 777 |
+
def forward(
|
| 778 |
+
self,
|
| 779 |
+
inputs_embeds,
|
| 780 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 781 |
+
output_attentions: Optional[bool] = None,
|
| 782 |
+
output_hidden_states: Optional[bool] = None,
|
| 783 |
+
return_dict: Optional[bool] = None,
|
| 784 |
+
) -> Union[Tuple, BaseModelOutput]:
|
| 785 |
+
r"""
|
| 786 |
+
Args:
|
| 787 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 788 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
|
| 789 |
+
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
|
| 790 |
+
than the model's internal embedding lookup matrix.
|
| 791 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 792 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 793 |
+
- 1 for tokens that are **not masked**,
|
| 794 |
+
- 0 for tokens that are **masked**.
|
| 795 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 796 |
+
output_attentions (`bool`, *optional*):
|
| 797 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 798 |
+
returned tensors for more detail.
|
| 799 |
+
output_hidden_states (`bool`, *optional*):
|
| 800 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
| 801 |
+
for more detail.
|
| 802 |
+
return_dict (`bool`, *optional*):
|
| 803 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 804 |
+
"""
|
| 805 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 806 |
+
output_hidden_states = (
|
| 807 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 808 |
+
)
|
| 809 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 810 |
+
|
| 811 |
+
encoder_states = () if output_hidden_states else None
|
| 812 |
+
all_attentions = () if output_attentions else None
|
| 813 |
+
|
| 814 |
+
hidden_states = inputs_embeds
|
| 815 |
+
for encoder_layer in self.layers:
|
| 816 |
+
if output_hidden_states:
|
| 817 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 818 |
+
if self.gradient_checkpointing and self.training:
|
| 819 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 820 |
+
encoder_layer.__call__,
|
| 821 |
+
hidden_states,
|
| 822 |
+
attention_mask,
|
| 823 |
+
output_attentions,
|
| 824 |
+
)
|
| 825 |
+
else:
|
| 826 |
+
layer_outputs = encoder_layer(
|
| 827 |
+
hidden_states,
|
| 828 |
+
attention_mask,
|
| 829 |
+
output_attentions=output_attentions,
|
| 830 |
+
)
|
| 831 |
+
|
| 832 |
+
hidden_states = layer_outputs[0]
|
| 833 |
+
|
| 834 |
+
if output_attentions:
|
| 835 |
+
all_attentions = all_attentions + (layer_outputs[1],)
|
| 836 |
+
|
| 837 |
+
if output_hidden_states:
|
| 838 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 839 |
+
|
| 840 |
+
if not return_dict:
|
| 841 |
+
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
|
| 842 |
+
return BaseModelOutput(
|
| 843 |
+
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
|
| 844 |
+
)
|
| 845 |
+
|
| 846 |
+
@add_start_docstrings(
|
| 847 |
+
"""The vision model from SigLIP without any head or projection on top.""",
|
| 848 |
+
SIGLIP_START_DOCSTRING
|
| 849 |
+
)
|
| 850 |
+
class SiglipVisionTransformer(SiglipPreTrainedModel):
|
| 851 |
+
config_class = SiglipVisionConfig
|
| 852 |
+
main_input_name = "pixel_values"
|
| 853 |
+
_supports_flash_attn_2 = True
|
| 854 |
+
|
| 855 |
+
def __init__(self, config: SiglipVisionConfig):
|
| 856 |
+
super().__init__(config)
|
| 857 |
+
self.config = config
|
| 858 |
+
embed_dim = config.hidden_size
|
| 859 |
+
|
| 860 |
+
self.embeddings = SiglipVisionEmbeddings(config)
|
| 861 |
+
self.encoder = SiglipEncoder(config)
|
| 862 |
+
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
|
| 863 |
+
self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
|
| 864 |
+
|
| 865 |
+
# Initialize weights and apply final processing
|
| 866 |
+
self.post_init()
|
| 867 |
+
|
| 868 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 869 |
+
return self.embeddings.patch_embedding
|
| 870 |
+
|
| 871 |
+
@add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
|
| 872 |
+
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
|
| 873 |
+
def forward(
|
| 874 |
+
self,
|
| 875 |
+
pixel_values,
|
| 876 |
+
patch_attention_mask: Optional[torch.BoolTensor] = None,
|
| 877 |
+
tgt_sizes: Optional[torch.IntTensor] = None,
|
| 878 |
+
output_attentions: Optional[bool] = None,
|
| 879 |
+
output_hidden_states: Optional[bool] = None,
|
| 880 |
+
return_dict: Optional[bool] = None,
|
| 881 |
+
) -> Union[Tuple, BaseModelOutputWithPooling]:
|
| 882 |
+
r"""
|
| 883 |
+
Returns:
|
| 884 |
+
"""
|
| 885 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 886 |
+
output_hidden_states = (
|
| 887 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 888 |
+
)
|
| 889 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 890 |
+
|
| 891 |
+
batch_size = pixel_values.size(0)
|
| 892 |
+
if patch_attention_mask is None:
|
| 893 |
+
patch_attention_mask = torch.ones(
|
| 894 |
+
size=(
|
| 895 |
+
batch_size,
|
| 896 |
+
pixel_values.size(2) // self.config.patch_size,
|
| 897 |
+
pixel_values.size(3) // self.config.patch_size,
|
| 898 |
+
),
|
| 899 |
+
dtype=torch.bool,
|
| 900 |
+
device=pixel_values.device,
|
| 901 |
+
)
|
| 902 |
+
|
| 903 |
+
hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, tgt_sizes=tgt_sizes)
|
| 904 |
+
|
| 905 |
+
patch_attention_mask = patch_attention_mask.view(batch_size, -1)
|
| 906 |
+
# The call to `_upad_input` in `_flash_attention_forward` is expensive
|
| 907 |
+
# So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
|
| 908 |
+
# we avoid passing the attention_mask, which is equivalent to attending to the full sequence
|
| 909 |
+
if not torch.any(~patch_attention_mask):
|
| 910 |
+
attention_mask=None
|
| 911 |
+
else:
|
| 912 |
+
attention_mask = (
|
| 913 |
+
_prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
|
| 914 |
+
if not self._use_flash_attention_2
|
| 915 |
+
else patch_attention_mask
|
| 916 |
+
)
|
| 917 |
+
|
| 918 |
+
encoder_outputs = self.encoder(
|
| 919 |
+
inputs_embeds=hidden_states,
|
| 920 |
+
attention_mask=attention_mask,
|
| 921 |
+
output_attentions=output_attentions,
|
| 922 |
+
output_hidden_states=output_hidden_states,
|
| 923 |
+
return_dict=return_dict,
|
| 924 |
+
)
|
| 925 |
+
|
| 926 |
+
last_hidden_state = encoder_outputs[0]
|
| 927 |
+
last_hidden_state = self.post_layernorm(last_hidden_state)
|
| 928 |
+
|
| 929 |
+
if not return_dict:
|
| 930 |
+
return (last_hidden_state, None) + encoder_outputs[1:]
|
| 931 |
+
|
| 932 |
+
return BaseModelOutputWithPooling(
|
| 933 |
+
last_hidden_state=last_hidden_state,
|
| 934 |
+
pooler_output=None,
|
| 935 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 936 |
+
attentions=encoder_outputs.attentions,
|
| 937 |
+
)
|
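As a quick sanity check of the vision tower defined above, a minimal sketch of running it standalone; the module name `modeling_navit_siglip` and the tiny config values are illustrative assumptions, not anything this upload prescribes:

import torch
from modeling_navit_siglip import SiglipVisionConfig, SiglipVisionTransformer

# Deliberately small config so the forward pass runs quickly on CPU.
config = SiglipVisionConfig(
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    image_size=224,
    patch_size=16,
)
model = SiglipVisionTransformer(config).eval()

pixel_values = torch.randn(1, 3, 224, 224)  # one RGB image
with torch.no_grad():
    # patch_attention_mask defaults to all ones, i.e. every patch is real
    out = model(pixel_values=pixel_values)

print(out.last_hidden_state.shape)  # torch.Size([1, 196, 64]): (224 // 16) ** 2 patches

The pooler_output field stays None here; this navit-style variant returns per-patch states only and leaves pooling/resampling to the caller.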
preprocessor_config.json
ADDED
|
@@ -0,0 +1,47 @@
|
| 1 |
+
{
|
| 2 |
+
"auto_map": {
|
| 3 |
+
"AutoImageProcessor": "image_processing_minicpmv.MiniCPMVImageProcessor",
|
| 4 |
+
"AutoProcessor": "processing_minicpmv.MiniCPMVProcessor"
|
| 5 |
+
},
|
| 6 |
+
"im_end": "</image>",
|
| 7 |
+
"im_end_token": "</image>",
|
| 8 |
+
"im_id_end": "</image_id>",
|
| 9 |
+
"im_id_start": "<image_id>",
|
| 10 |
+
"im_start": "<image>",
|
| 11 |
+
"im_start_token": "<image>",
|
| 12 |
+
"image_feature_size": 64,
|
| 13 |
+
"image_processor_type": "MiniCPMVImageProcessor",
|
| 14 |
+
"max_slice_nums": 9,
|
| 15 |
+
"mean": [
|
| 16 |
+
0.5,
|
| 17 |
+
0.5,
|
| 18 |
+
0.5
|
| 19 |
+
],
|
| 20 |
+
"norm_mean": [
|
| 21 |
+
0.5,
|
| 22 |
+
0.5,
|
| 23 |
+
0.5
|
| 24 |
+
],
|
| 25 |
+
"norm_std": [
|
| 26 |
+
0.5,
|
| 27 |
+
0.5,
|
| 28 |
+
0.5
|
| 29 |
+
],
|
| 30 |
+
"patch_size": 14,
|
| 31 |
+
"processor_class": "MiniCPMVProcessor",
|
| 32 |
+
"scale_resolution": 448,
|
| 33 |
+
"slice_end": "</slice>",
|
| 34 |
+
"slice_end_token": "</slice>",
|
| 35 |
+
"slice_mode": true,
|
| 36 |
+
"slice_start": "<slice>",
|
| 37 |
+
"slice_start_token": "<slice>",
|
| 38 |
+
"std": [
|
| 39 |
+
0.5,
|
| 40 |
+
0.5,
|
| 41 |
+
0.5
|
| 42 |
+
],
|
| 43 |
+
"unk": "<unk>",
|
| 44 |
+
"unk_token": "<unk>",
|
| 45 |
+
"use_image_id": true,
|
| 46 |
+
"version": 2.6
|
| 47 |
+
}
|
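The `auto_map` entries above are what let `transformers` resolve these repo-local classes at load time; a minimal sketch, assuming you load from this repo (the id below follows `_name_or_path` in config.json; `trust_remote_code=True` is required for custom classes):

from transformers import AutoImageProcessor, AutoProcessor

image_processor = AutoImageProcessor.from_pretrained("openbmb/MiniCPM-V-2_6", trust_remote_code=True)
processor = AutoProcessor.from_pretrained("openbmb/MiniCPM-V-2_6", trust_remote_code=True)

print(type(image_processor).__name__)  # MiniCPMVImageProcessor
print(type(processor).__name__)        # MiniCPMVProcessor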
processing_minicpmv.py
ADDED
|
@@ -0,0 +1,238 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""
|
| 16 |
+
Processor class for MiniCPMV.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
from typing import List, Optional, Union, Dict, Any
|
| 20 |
+
import torch
|
| 21 |
+
import re
|
| 22 |
+
|
| 23 |
+
from transformers.image_processing_utils import BatchFeature
|
| 24 |
+
from transformers.image_utils import ImageInput
|
| 25 |
+
from transformers.processing_utils import ProcessorMixin
|
| 26 |
+
from transformers.tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
|
| 27 |
+
from transformers.utils import TensorType, requires_backends, is_torch_dtype, is_torch_device
|
| 28 |
+
|
| 29 |
+
from .image_processing_minicpmv import MiniCPMVBatchFeature
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class MiniCPMVProcessor(ProcessorMixin):
|
| 33 |
+
r"""
|
| 34 |
+
Constructs a MiniCPMV processor which wraps a MiniCPMV image processor and a MiniCPMV tokenizer into a single processor.
|
| 35 |
+
|
| 36 |
+
[`MiniCPMVProcessor`] offers all the functionalities of [`MiniCPMVImageProcessor`] and [`LlamaTokenizerWrapper`]. See the
|
| 37 |
+
[`~MiniCPMVProcessor.__call__`] and [`~MiniCPMVProcessor.decode`] for more information.
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
image_processor ([`MiniCPMVImageProcessor`], *optional*):
|
| 41 |
+
The image processor is a required input.
|
| 42 |
+
tokenizer ([`LlamaTokenizerWrapper`], *optional*):
|
| 43 |
+
The tokenizer is a required input.
|
| 44 |
+
"""
|
| 45 |
+
attributes = ["image_processor", "tokenizer"]
|
| 46 |
+
image_processor_class = "AutoImageProcessor"
|
| 47 |
+
tokenizer_class = "AutoTokenizer"
|
| 48 |
+
|
| 49 |
+
def __init__(self, image_processor=None, tokenizer=None):
|
| 50 |
+
super().__init__(image_processor, tokenizer)
|
| 51 |
+
self.version = image_processor.version
|
| 52 |
+
|
| 53 |
+
def __call__(
|
| 54 |
+
self,
|
| 55 |
+
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
|
| 56 |
+
images: ImageInput = None,
|
| 57 |
+
max_length: Optional[int] = None,
|
| 58 |
+
do_pad: Optional[bool] = True,
|
| 59 |
+
max_slice_nums: int = None,
|
| 60 |
+
use_image_id: bool = None,
|
| 61 |
+
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
|
| 62 |
+
) -> MiniCPMVBatchFeature:
|
| 63 |
+
|
| 64 |
+
if images is not None:
|
| 65 |
+
image_inputs = self.image_processor(images, do_pad=do_pad, max_slice_nums=max_slice_nums, return_tensors=return_tensors)
|
| 66 |
+
return self._convert_images_texts_to_inputs(image_inputs, text, max_slice_nums=max_slice_nums, use_image_id=use_image_id, max_length=max_length)
|
| 67 |
+
|
| 68 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
|
| 69 |
+
def batch_decode(self, *args, **kwargs):
|
| 70 |
+
"""
|
| 71 |
+
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
|
| 72 |
+
refer to the docstring of this method for more information.
|
| 73 |
+
"""
|
| 74 |
+
output_ids = args[0]
|
| 75 |
+
result_text = []
|
| 76 |
+
for result in output_ids:
|
| 77 |
+
result = result[result != 0]
|
| 78 |
+
if result[0] == self.tokenizer.bos_id:
|
| 79 |
+
result = result[1:]
|
| 80 |
+
if result[-1] == self.tokenizer.eos_id:
|
| 81 |
+
result = result[:-1]
|
| 82 |
+
result_text.append(self.tokenizer.decode(result, *args[1:], **kwargs).strip())
|
| 83 |
+
return result_text
|
| 84 |
+
# return self.tokenizer.batch_decode(*args, **kwargs)
|
| 85 |
+
|
| 86 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
|
| 87 |
+
def decode(self, *args, **kwargs):
|
| 88 |
+
"""
|
| 89 |
+
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
|
| 90 |
+
the docstring of this method for more information.
|
| 91 |
+
"""
|
| 92 |
+
result = args[0]
|
| 93 |
+
result = result[result != 0]
|
| 94 |
+
if result[0] == self.tokenizer.bos_id:
|
| 95 |
+
result = result[1:]
|
| 96 |
+
if result[-1] == self.tokenizer.eos_id or (hasattr(self.tokenizer, "eot_id") and result[-1] == self.tokenizer.eot_id):
|
| 97 |
+
result = result[:-1]
|
| 98 |
+
return self.tokenizer.decode(result, *args[1:], **kwargs).strip()
|
| 99 |
+
|
| 100 |
+
def _convert(
|
| 101 |
+
self, input_str, max_inp_length: Optional[int] = None
|
| 102 |
+
):
|
| 103 |
+
if self.version > 2.5 or not getattr(self.tokenizer, "add_bos_token", False):
|
| 104 |
+
input_ids = self.tokenizer.encode(input_str)
|
| 105 |
+
else:
|
| 106 |
+
input_ids = [self.tokenizer.bos_id] + self.tokenizer.encode(input_str)
|
| 107 |
+
if max_inp_length is not None:
|
| 108 |
+
input_ids = input_ids[:max_inp_length]
|
| 109 |
+
input_ids = torch.tensor(input_ids, dtype=torch.int32)
|
| 110 |
+
|
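# The ids are scanned below for <image>/<slice> delimiters; each bound stores the
# index right after the opening tag and the index of the closing tag, i.e. the span
# of placeholder tokens that will later be overwritten with vision embeddings.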
| 111 |
+
start_cond = (input_ids == self.tokenizer.im_start_id) | (input_ids == self.tokenizer.slice_start_id)
|
| 112 |
+
end_cond = (input_ids == self.tokenizer.im_end_id) | (input_ids == self.tokenizer.slice_end_id)
|
| 113 |
+
|
| 114 |
+
image_start_tokens = torch.where(start_cond)[0]
|
| 115 |
+
image_start_tokens += 1
|
| 116 |
+
image_end_tokens = torch.where(end_cond)[0]
|
| 117 |
+
|
| 118 |
+
valid_image_nums = max(len(image_start_tokens), len(image_end_tokens))
|
| 119 |
+
|
| 120 |
+
image_bounds = torch.hstack(
|
| 121 |
+
[
|
| 122 |
+
image_start_tokens[:valid_image_nums].unsqueeze(-1),
|
| 123 |
+
image_end_tokens[:valid_image_nums].unsqueeze(-1),
|
| 124 |
+
]
|
| 125 |
+
)
|
| 126 |
+
return input_ids, image_bounds
|
| 127 |
+
|
| 128 |
+
def _convert_images_texts_to_inputs(
|
| 129 |
+
self,
|
| 130 |
+
images,
|
| 131 |
+
texts: Union[str, List[str]],
|
| 132 |
+
truncation=None,
|
| 133 |
+
max_length=None,
|
| 134 |
+
max_slice_nums=None,
|
| 135 |
+
use_image_id=None,
|
| 136 |
+
return_tensors=None
|
| 137 |
+
):
|
| 138 |
+
if images is None or not len(images):
|
| 139 |
+
model_inputs = self.tokenizer(texts, return_tensors=return_tensors, truncation=truncation, max_length=max_length)
|
| 140 |
+
return MiniCPMVBatchFeature(data={**model_inputs})
|
| 141 |
+
|
| 142 |
+
pattern = "(<image>./</image>)"
|
| 143 |
+
images, image_sizes, tgt_sizes = images["pixel_values"], images["image_sizes"], images["tgt_sizes"]
|
| 144 |
+
|
| 145 |
+
if isinstance(texts, str):
|
| 146 |
+
texts = [texts]
|
| 147 |
+
input_ids_list = []
|
| 148 |
+
image_bounds_list = []
|
| 149 |
+
for index, text in enumerate(texts):
|
| 150 |
+
image_tags = re.findall(pattern, text)
|
| 151 |
+
assert len(image_tags) == len(image_sizes[index])
|
| 152 |
+
text_chunks = text.split(pattern)
|
| 153 |
+
final_text = ""
|
| 154 |
+
for i in range(len(image_tags)):
|
| 155 |
+
final_text = final_text + text_chunks[i] + \
|
| 156 |
+
self.image_processor.get_slice_image_placeholder(
|
| 157 |
+
image_sizes[index][i],
|
| 158 |
+
i,
|
| 159 |
+
max_slice_nums,
|
| 160 |
+
use_image_id
|
| 161 |
+
)
|
| 162 |
+
final_text += text_chunks[-1]
|
| 163 |
+
input_ids, image_bounds = self._convert(final_text, max_length)
|
| 164 |
+
input_ids_list.append(input_ids)
|
| 165 |
+
image_bounds_list.append(image_bounds)
|
| 166 |
+
padded_input_ids, padding_lengths = self.pad(
|
| 167 |
+
input_ids_list,
|
| 168 |
+
padding_side="left"
|
| 169 |
+
)
|
| 170 |
+
for i, length in enumerate(padding_lengths):
|
| 171 |
+
image_bounds_list[i] = image_bounds_list[i] + length
|
| 172 |
+
attention_mask = padded_input_ids.ne(0)
|
| 173 |
+
|
| 174 |
+
return MiniCPMVBatchFeature(data={
|
| 175 |
+
"input_ids": padded_input_ids,
|
| 176 |
+
"attention_mask": attention_mask,
|
| 177 |
+
"pixel_values": images,
|
| 178 |
+
"image_sizes": image_sizes,
|
| 179 |
+
"image_bound": image_bounds_list,
|
| 180 |
+
"tgt_sizes": tgt_sizes
|
| 181 |
+
})
|
| 182 |
+
|
| 183 |
+
@property
|
| 184 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
|
| 185 |
+
def model_input_names(self):
|
| 186 |
+
tokenizer_input_names = self.tokenizer.model_input_names
|
| 187 |
+
image_processor_input_names = self.image_processor.model_input_names
|
| 188 |
+
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def pad(self, inputs, max_length=None, padding_value=0, padding_side="left"):
|
| 192 |
+
items = []
|
| 193 |
+
if isinstance(inputs[0], list):
|
| 194 |
+
assert isinstance(inputs[0][0], torch.Tensor)
|
| 195 |
+
for it in inputs:
|
| 196 |
+
for tr in it:
|
| 197 |
+
items.append(tr)
|
| 198 |
+
else:
|
| 199 |
+
assert isinstance(inputs[0], torch.Tensor)
|
| 200 |
+
items = inputs
|
| 201 |
+
|
| 202 |
+
batch_size = len(items)
|
| 203 |
+
shape = items[0].shape
|
| 204 |
+
dim = len(shape)
|
| 205 |
+
assert dim <= 2
|
| 206 |
+
if max_length is None:
|
| 207 |
+
max_length = 0
|
| 208 |
+
max_length = max(max_length, max(item.shape[-1] for item in items))
|
| 209 |
+
min_length = min(item.shape[-1] for item in items)
|
| 210 |
+
dtype = items[0].dtype
|
| 211 |
+
|
| 212 |
+
if dim == 0:
|
| 213 |
+
return torch.stack([item for item in items], dim=0), [0]
|
| 214 |
+
elif dim == 1:
|
| 215 |
+
if max_length == min_length:
|
| 216 |
+
return torch.stack([item for item in items], dim=0), [0] * batch_size
|
| 217 |
+
tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
|
| 218 |
+
else:
|
| 219 |
+
tensor = (
|
| 220 |
+
torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype)
|
| 221 |
+
+ padding_value
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
padding_length = []
|
| 225 |
+
for i, item in enumerate(items):
|
| 226 |
+
if dim == 1:
|
| 227 |
+
if padding_side == "left":
|
| 228 |
+
tensor[i, -len(item) :] = item.clone()
|
| 229 |
+
else:
|
| 230 |
+
tensor[i, : len(item)] = item.clone()
|
| 231 |
+
elif dim == 2:
|
| 232 |
+
if padding_side == "left":
|
| 233 |
+
tensor[i, -len(item) :, :] = item.clone()
|
| 234 |
+
else:
|
| 235 |
+
tensor[i, : len(item), :] = item.clone()
|
| 236 |
+
padding_length.append(tensor.shape[-1] - len(item))
|
| 237 |
+
|
| 238 |
+
return tensor, padding_length
|
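Note on the padding step above: `pad` right-aligns each sequence when padding_side="left", so every `image_bound` offset has to be shifted by the per-sample padding length before it can index into `padded_input_ids`. A minimal sketch of that behaviour, assuming only that torch is installed; the token ids are made-up illustrative values, not real vocabulary entries.

import torch

# two 1-D input_ids of unequal length (illustrative ids only)
ids_a = torch.tensor([11, 22, 33])
ids_b = torch.tensor([11, 22, 33, 44])

max_len = max(ids_a.shape[-1], ids_b.shape[-1])
padded = torch.zeros((2, max_len), dtype=ids_a.dtype)            # padding_value = 0
padded[0, -len(ids_a):] = ids_a                                  # left padding: content is right-aligned
padded[1, -len(ids_b):] = ids_b
padding_lengths = [max_len - len(ids_a), max_len - len(ids_b)]   # [1, 0]

# image_bound offsets are computed on the unpadded sequence, so each one is
# shifted right by the padding length, mirroring
# `image_bounds_list[i] = image_bounds_list[i] + length` above.
attention_mask = padded.ne(0)                                    # pad positions (id 0) are masked out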
processor_config.json
ADDED
|
@@ -0,0 +1,6 @@
|
| 1 |
+
{
|
| 2 |
+
"auto_map": {
|
| 3 |
+
"AutoProcessor": "processing_minicpmv.MiniCPMVProcessor"
|
| 4 |
+
},
|
| 5 |
+
"processor_class": "MiniCPMVProcessor"
|
| 6 |
+
}
|
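processor_config.json only carries the auto_map entry, which is what lets AutoProcessor resolve the custom MiniCPMVProcessor class shipped in processing_minicpmv.py. A minimal loading sketch, assuming a local clone of this repo (the path below is a placeholder, not a real repo id):

from transformers import AutoProcessor

# trust_remote_code is required so that processing_minicpmv.py from this repo
# is imported and MiniCPMVProcessor is used instead of a built-in processor.
processor = AutoProcessor.from_pretrained(
    "path/to/this/checkpoint",      # placeholder path (assumption)
    trust_remote_code=True,
)
print(type(processor).__name__)     # expected: MiniCPMVProcessor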
resampler.py
ADDED
|
@@ -0,0 +1,782 @@
|
| 1 |
+
from functools import partial
|
| 2 |
+
from typing import Optional, Tuple
|
| 3 |
+
import numpy as np
|
| 4 |
+
import warnings
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
from torch.nn.functional import *
|
| 11 |
+
from torch.nn.modules.activation import *
|
| 12 |
+
from torch.nn.init import trunc_normal_, constant_, xavier_normal_, xavier_uniform_
|
| 13 |
+
|
| 14 |
+
from transformers.integrations import is_deepspeed_zero3_enabled
|
| 15 |
+
|
| 16 |
+
def get_2d_sincos_pos_embed(embed_dim, image_size):
|
| 17 |
+
"""
|
| 18 |
+
image_size: image_size or (image_height, image_width)
|
| 19 |
+
return:
|
| 20 |
+
pos_embed: [image_height, image_width, embed_dim]
|
| 21 |
+
"""
|
| 22 |
+
if isinstance(image_size, int):
|
| 23 |
+
grid_h_size, grid_w_size = image_size, image_size
|
| 24 |
+
else:
|
| 25 |
+
grid_h_size, grid_w_size = image_size[0], image_size[1]
|
| 26 |
+
|
| 27 |
+
grid_h = np.arange(grid_h_size, dtype=np.float32)
|
| 28 |
+
grid_w = np.arange(grid_w_size, dtype=np.float32)
|
| 29 |
+
grid = np.meshgrid(grid_w, grid_h) # here w goes first
|
| 30 |
+
grid = np.stack(grid, axis=0)
|
| 31 |
+
|
| 32 |
+
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
|
| 33 |
+
return pos_embed
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
|
| 37 |
+
assert embed_dim % 2 == 0
|
| 38 |
+
|
| 39 |
+
# use half of dimensions to encode grid_h
|
| 40 |
+
emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[0]) # (H, W, D/2)
|
| 41 |
+
emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[1]) # (H, W, D/2)
|
| 42 |
+
|
| 43 |
+
emb = np.concatenate([emb_h, emb_w], axis=-1) # (H, W, D)
|
| 44 |
+
return emb
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def get_1d_sincos_pos_embed_from_grid_new(embed_dim, pos):
|
| 48 |
+
"""
|
| 49 |
+
embed_dim: output dimension for each position
|
| 50 |
+
pos: a list of positions to be encoded: size (H, W)
|
| 51 |
+
out: (H, W, D)
|
| 52 |
+
"""
|
| 53 |
+
assert embed_dim % 2 == 0
|
| 54 |
+
omega = np.arange(embed_dim // 2, dtype=np.float32)
|
| 55 |
+
omega /= embed_dim / 2.
|
| 56 |
+
omega = 1. / 10000 ** omega # (D/2,)
|
| 57 |
+
|
| 58 |
+
out = np.einsum('hw,d->hwd', pos, omega) # (H, W, D/2), outer product
|
| 59 |
+
|
| 60 |
+
emb_sin = np.sin(out) # (H, W, D/2)
|
| 61 |
+
emb_cos = np.cos(out) # (H, W, D/2)
|
| 62 |
+
|
| 63 |
+
emb = np.concatenate([emb_sin, emb_cos], axis=-1) # (H, W, D)
|
| 64 |
+
return emb
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class Resampler(nn.Module):
|
| 68 |
+
"""
|
| 69 |
+
A 2D perceiver-resampler network with a single cross-attention layer, using
|
| 70 |
+
given learnable queries and a 2D sincos pos_emb
|
| 71 |
+
Outputs:
|
| 72 |
+
A tensor with the shape of (batch_size, num_queries, embed_dim)
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
def __init__(
|
| 76 |
+
self,
|
| 77 |
+
num_queries,
|
| 78 |
+
embed_dim,
|
| 79 |
+
num_heads,
|
| 80 |
+
kv_dim=None,
|
| 81 |
+
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
| 82 |
+
adaptive=False,
|
| 83 |
+
max_size=(70, 70),
|
| 84 |
+
):
|
| 85 |
+
super().__init__()
|
| 86 |
+
self.num_queries = num_queries
|
| 87 |
+
self.embed_dim = embed_dim
|
| 88 |
+
self.num_heads = num_heads
|
| 89 |
+
self.adaptive = adaptive
|
| 90 |
+
self.max_size = max_size
|
| 91 |
+
|
| 92 |
+
self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
| 93 |
+
|
| 94 |
+
if kv_dim is not None and kv_dim != embed_dim:
|
| 95 |
+
self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
|
| 96 |
+
else:
|
| 97 |
+
self.kv_proj = nn.Identity()
|
| 98 |
+
|
| 99 |
+
self.attn = MultiheadAttention(embed_dim, num_heads)
|
| 100 |
+
self.ln_q = norm_layer(embed_dim)
|
| 101 |
+
self.ln_kv = norm_layer(embed_dim)
|
| 102 |
+
|
| 103 |
+
self.ln_post = norm_layer(embed_dim)
|
| 104 |
+
self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
|
| 105 |
+
|
| 106 |
+
self._set_2d_pos_cache(self.max_size)
|
| 107 |
+
|
| 108 |
+
def _set_2d_pos_cache(self, max_size, device='cpu'):
|
| 109 |
+
if is_deepspeed_zero3_enabled():
|
| 110 |
+
device='cuda'
|
| 111 |
+
pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.embed_dim, max_size)).float().to(device)
|
| 112 |
+
self.register_buffer("pos_embed", pos_embed, persistent=False)
|
| 113 |
+
|
| 114 |
+
def _adjust_pos_cache(self, tgt_sizes, device):
|
| 115 |
+
max_h = torch.max(tgt_sizes[:, 0])
|
| 116 |
+
max_w = torch.max(tgt_sizes[:, 1])
|
| 117 |
+
if max_h > self.max_size[0] or max_w > self.max_size[1]:
|
| 118 |
+
self.max_size = [max(max_h, self.max_size[0]), max(max_w, self.max_size[1])]
|
| 119 |
+
self._set_2d_pos_cache(self.max_size, device)
|
| 120 |
+
|
| 121 |
+
def _init_weights(self, m):
|
| 122 |
+
if isinstance(m, nn.Linear):
|
| 123 |
+
trunc_normal_(m.weight, std=.02)
|
| 124 |
+
if isinstance(m, nn.Linear) and m.bias is not None:
|
| 125 |
+
nn.init.constant_(m.bias, 0)
|
| 126 |
+
elif isinstance(m, nn.LayerNorm):
|
| 127 |
+
nn.init.constant_(m.bias, 0)
|
| 128 |
+
nn.init.constant_(m.weight, 1.0)
|
| 129 |
+
|
| 130 |
+
def forward(self, x, tgt_sizes=None):
|
| 131 |
+
assert x.shape[0] == tgt_sizes.shape[0]
|
| 132 |
+
bs = x.shape[0]
|
| 133 |
+
|
| 134 |
+
device = x.device
|
| 135 |
+
dtype = x.dtype
|
| 136 |
+
|
| 137 |
+
patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]
|
| 138 |
+
|
| 139 |
+
self._adjust_pos_cache(tgt_sizes, device=device)
|
| 140 |
+
|
| 141 |
+
max_patch_len = torch.max(patch_len)
|
| 142 |
+
key_padding_mask = torch.zeros((bs, max_patch_len), dtype=torch.bool, device=device)
|
| 143 |
+
|
| 144 |
+
pos_embed = []
|
| 145 |
+
for i in range(bs):
|
| 146 |
+
tgt_h, tgt_w = tgt_sizes[i]
|
| 147 |
+
pos_embed.append(self.pos_embed[:tgt_h, :tgt_w, :].reshape((tgt_h * tgt_w, -1)).to(dtype)) # patches * D
|
| 148 |
+
key_padding_mask[i, patch_len[i]:] = True
|
| 149 |
+
|
| 150 |
+
pos_embed = torch.nn.utils.rnn.pad_sequence(
|
| 151 |
+
pos_embed, batch_first=True, padding_value=0.0).permute(1, 0, 2) # BLD => L * B * D
|
| 152 |
+
|
| 153 |
+
x = self.kv_proj(x) # B * L * D
|
| 154 |
+
x = self.ln_kv(x).permute(1, 0, 2) # L * B * D
|
| 155 |
+
|
| 156 |
+
q = self.ln_q(self.query) # Q * D
|
| 157 |
+
|
| 158 |
+
out = self.attn(
|
| 159 |
+
self._repeat(q, bs), # Q * B * D
|
| 160 |
+
x + pos_embed, # L * B * D + L * B * D
|
| 161 |
+
x,
|
| 162 |
+
key_padding_mask=key_padding_mask)[0]
|
| 163 |
+
# out: Q * B * D
|
| 164 |
+
x = out.permute(1, 0, 2) # B * Q * D
|
| 165 |
+
|
| 166 |
+
x = self.ln_post(x)
|
| 167 |
+
x = x @ self.proj
|
| 168 |
+
return x
|
| 169 |
+
|
| 170 |
+
def _repeat(self, query, N: int):
|
| 171 |
+
return query.unsqueeze(1).repeat(1, N, 1)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
class MultiheadAttention(nn.MultiheadAttention):
|
| 175 |
+
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
|
| 176 |
+
add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
|
| 177 |
+
super().__init__(embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype)
|
| 178 |
+
|
| 179 |
+
# rewrite the out_proj layer with nn.Linear
|
| 180 |
+
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype)
|
| 181 |
+
|
| 182 |
+
def forward(
|
| 183 |
+
self,
|
| 184 |
+
query: Tensor,
|
| 185 |
+
key: Tensor,
|
| 186 |
+
value: Tensor,
|
| 187 |
+
key_padding_mask: Optional[Tensor] = None,
|
| 188 |
+
need_weights: bool = True,
|
| 189 |
+
attn_mask: Optional[Tensor] = None,
|
| 190 |
+
average_attn_weights: bool = True,
|
| 191 |
+
is_causal : bool = False) -> Tuple[Tensor, Optional[Tensor]]:
|
| 192 |
+
why_not_fast_path = ''
|
| 193 |
+
if ((attn_mask is not None and torch.is_floating_point(attn_mask))
|
| 194 |
+
or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
|
| 195 |
+
why_not_fast_path = "floating-point masks are not supported for fast path."
|
| 196 |
+
|
| 197 |
+
is_batched = query.dim() == 3
|
| 198 |
+
|
| 199 |
+
key_padding_mask = _canonical_mask(
|
| 200 |
+
mask=key_padding_mask,
|
| 201 |
+
mask_name="key_padding_mask",
|
| 202 |
+
other_type=F._none_or_dtype(attn_mask),
|
| 203 |
+
other_name="attn_mask",
|
| 204 |
+
target_type=query.dtype
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
attn_mask = _canonical_mask(
|
| 208 |
+
mask=attn_mask,
|
| 209 |
+
mask_name="attn_mask",
|
| 210 |
+
other_type=None,
|
| 211 |
+
other_name="",
|
| 212 |
+
target_type=query.dtype,
|
| 213 |
+
check_other=False,
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
if not is_batched:
|
| 218 |
+
why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
|
| 219 |
+
elif query is not key or key is not value:
|
| 220 |
+
# When lifting this restriction, don't forget to either
|
| 221 |
+
# enforce that the dtypes all match or test cases where
|
| 222 |
+
# they don't!
|
| 223 |
+
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
|
| 224 |
+
elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
|
| 225 |
+
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
|
| 226 |
+
elif self.in_proj_weight is None:
|
| 227 |
+
why_not_fast_path = "in_proj_weight was None"
|
| 228 |
+
elif query.dtype != self.in_proj_weight.dtype:
|
| 229 |
+
# this case will fail anyway, but at least they'll get a useful error message.
|
| 230 |
+
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
|
| 231 |
+
elif self.training:
|
| 232 |
+
why_not_fast_path = "training is enabled"
|
| 233 |
+
elif (self.num_heads % 2) != 0:
|
| 234 |
+
why_not_fast_path = "self.num_heads is not even"
|
| 235 |
+
elif not self.batch_first:
|
| 236 |
+
why_not_fast_path = "batch_first was not True"
|
| 237 |
+
elif self.bias_k is not None:
|
| 238 |
+
why_not_fast_path = "self.bias_k was not None"
|
| 239 |
+
elif self.bias_v is not None:
|
| 240 |
+
why_not_fast_path = "self.bias_v was not None"
|
| 241 |
+
elif self.add_zero_attn:
|
| 242 |
+
why_not_fast_path = "add_zero_attn was enabled"
|
| 243 |
+
elif not self._qkv_same_embed_dim:
|
| 244 |
+
why_not_fast_path = "_qkv_same_embed_dim was not True"
|
| 245 |
+
elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
|
| 246 |
+
why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
|
| 247 |
+
is not supported with NestedTensor input"
|
| 248 |
+
elif torch.is_autocast_enabled():
|
| 249 |
+
why_not_fast_path = "autocast is enabled"
|
| 250 |
+
|
| 251 |
+
if not why_not_fast_path:
|
| 252 |
+
tensor_args = (
|
| 253 |
+
query,
|
| 254 |
+
key,
|
| 255 |
+
value,
|
| 256 |
+
self.in_proj_weight,
|
| 257 |
+
self.in_proj_bias,
|
| 258 |
+
self.out_proj.weight,
|
| 259 |
+
self.out_proj.bias,
|
| 260 |
+
)
|
| 261 |
+
# We have to use list comprehensions below because TorchScript does not support
|
| 262 |
+
# generator expressions.
|
| 263 |
+
if torch.overrides.has_torch_function(tensor_args):
|
| 264 |
+
why_not_fast_path = "some Tensor argument has_torch_function"
|
| 265 |
+
elif _is_make_fx_tracing():
|
| 266 |
+
why_not_fast_path = "we are running make_fx tracing"
|
| 267 |
+
elif not all(_check_arg_device(x) for x in tensor_args):
|
| 268 |
+
why_not_fast_path = ("some Tensor argument's device is neither one of "
|
| 269 |
+
f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
|
| 270 |
+
elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
|
| 271 |
+
why_not_fast_path = ("grad is enabled and at least one of query or the "
|
| 272 |
+
"input/output projection weights or biases requires_grad")
|
| 273 |
+
if not why_not_fast_path:
|
| 274 |
+
merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
|
| 275 |
+
|
| 276 |
+
if self.in_proj_bias is not None and self.in_proj_weight is not None:
|
| 277 |
+
return torch._native_multi_head_attention(
|
| 278 |
+
query,
|
| 279 |
+
key,
|
| 280 |
+
value,
|
| 281 |
+
self.embed_dim,
|
| 282 |
+
self.num_heads,
|
| 283 |
+
self.in_proj_weight,
|
| 284 |
+
self.in_proj_bias,
|
| 285 |
+
self.out_proj.weight,
|
| 286 |
+
self.out_proj.bias,
|
| 287 |
+
merged_mask,
|
| 288 |
+
need_weights,
|
| 289 |
+
average_attn_weights,
|
| 290 |
+
mask_type)
|
| 291 |
+
|
| 292 |
+
any_nested = query.is_nested or key.is_nested or value.is_nested
|
| 293 |
+
assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
|
| 294 |
+
f"The fast path was not hit because {why_not_fast_path}")
|
| 295 |
+
|
| 296 |
+
if self.batch_first and is_batched:
|
| 297 |
+
# make sure that the transpose op does not affect the "is" property
|
| 298 |
+
if key is value:
|
| 299 |
+
if query is key:
|
| 300 |
+
query = key = value = query.transpose(1, 0)
|
| 301 |
+
else:
|
| 302 |
+
query, key = (x.transpose(1, 0) for x in (query, key))
|
| 303 |
+
value = key
|
| 304 |
+
else:
|
| 305 |
+
query, key, value = (x.transpose(1, 0) for x in (query, key, value))
|
| 306 |
+
|
| 307 |
+
if not self._qkv_same_embed_dim:
|
| 308 |
+
attn_output, attn_output_weights = self.multi_head_attention_forward(
|
| 309 |
+
query, key, value, self.embed_dim, self.num_heads,
|
| 310 |
+
self.in_proj_weight, self.in_proj_bias,
|
| 311 |
+
self.bias_k, self.bias_v, self.add_zero_attn,
|
| 312 |
+
self.dropout, self.out_proj.weight, self.out_proj.bias,
|
| 313 |
+
training=self.training,
|
| 314 |
+
key_padding_mask=key_padding_mask, need_weights=need_weights,
|
| 315 |
+
attn_mask=attn_mask,
|
| 316 |
+
use_separate_proj_weight=True,
|
| 317 |
+
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
|
| 318 |
+
v_proj_weight=self.v_proj_weight,
|
| 319 |
+
average_attn_weights=average_attn_weights,
|
| 320 |
+
is_causal=is_causal)
|
| 321 |
+
else:
|
| 322 |
+
attn_output, attn_output_weights = self.multi_head_attention_forward(
|
| 323 |
+
query, key, value, self.embed_dim, self.num_heads,
|
| 324 |
+
self.in_proj_weight, self.in_proj_bias,
|
| 325 |
+
self.bias_k, self.bias_v, self.add_zero_attn,
|
| 326 |
+
self.dropout, self.out_proj.weight, self.out_proj.bias,
|
| 327 |
+
training=self.training,
|
| 328 |
+
key_padding_mask=key_padding_mask,
|
| 329 |
+
need_weights=need_weights,
|
| 330 |
+
attn_mask=attn_mask,
|
| 331 |
+
average_attn_weights=average_attn_weights,
|
| 332 |
+
is_causal=is_causal)
|
| 333 |
+
if self.batch_first and is_batched:
|
| 334 |
+
return attn_output.transpose(1, 0), attn_output_weights
|
| 335 |
+
else:
|
| 336 |
+
return attn_output, attn_output_weights
|
| 337 |
+
|
| 338 |
+
def multi_head_attention_forward(
|
| 339 |
+
self,
|
| 340 |
+
query: Tensor,
|
| 341 |
+
key: Tensor,
|
| 342 |
+
value: Tensor,
|
| 343 |
+
embed_dim_to_check: int,
|
| 344 |
+
num_heads: int,
|
| 345 |
+
in_proj_weight: Optional[Tensor],
|
| 346 |
+
in_proj_bias: Optional[Tensor],
|
| 347 |
+
bias_k: Optional[Tensor],
|
| 348 |
+
bias_v: Optional[Tensor],
|
| 349 |
+
add_zero_attn: bool,
|
| 350 |
+
dropout_p: float,
|
| 351 |
+
out_proj_weight: Tensor,
|
| 352 |
+
out_proj_bias: Optional[Tensor],
|
| 353 |
+
training: bool = True,
|
| 354 |
+
key_padding_mask: Optional[Tensor] = None,
|
| 355 |
+
need_weights: bool = True,
|
| 356 |
+
attn_mask: Optional[Tensor] = None,
|
| 357 |
+
use_separate_proj_weight: bool = False,
|
| 358 |
+
q_proj_weight: Optional[Tensor] = None,
|
| 359 |
+
k_proj_weight: Optional[Tensor] = None,
|
| 360 |
+
v_proj_weight: Optional[Tensor] = None,
|
| 361 |
+
static_k: Optional[Tensor] = None,
|
| 362 |
+
static_v: Optional[Tensor] = None,
|
| 363 |
+
average_attn_weights: bool = True,
|
| 364 |
+
is_causal: bool = False,
|
| 365 |
+
) -> Tuple[Tensor, Optional[Tensor]]:
|
| 366 |
+
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
|
| 367 |
+
|
| 368 |
+
is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
|
| 369 |
+
|
| 370 |
+
# For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
|
| 371 |
+
# is batched, run the computation and before returning squeeze the
|
| 372 |
+
# batch dimension so that the output doesn't carry this temporary batch dimension.
|
| 373 |
+
if not is_batched:
|
| 374 |
+
# unsqueeze if the input is unbatched
|
| 375 |
+
query = query.unsqueeze(1)
|
| 376 |
+
key = key.unsqueeze(1)
|
| 377 |
+
value = value.unsqueeze(1)
|
| 378 |
+
if key_padding_mask is not None:
|
| 379 |
+
key_padding_mask = key_padding_mask.unsqueeze(0)
|
| 380 |
+
|
| 381 |
+
# set up shape vars
|
| 382 |
+
tgt_len, bsz, embed_dim = query.shape
|
| 383 |
+
src_len, _, _ = key.shape
|
| 384 |
+
|
| 385 |
+
key_padding_mask = _canonical_mask(
|
| 386 |
+
mask=key_padding_mask,
|
| 387 |
+
mask_name="key_padding_mask",
|
| 388 |
+
other_type=_none_or_dtype(attn_mask),
|
| 389 |
+
other_name="attn_mask",
|
| 390 |
+
target_type=query.dtype
|
| 391 |
+
)
|
| 392 |
+
|
| 393 |
+
if is_causal and attn_mask is None:
|
| 394 |
+
raise RuntimeError(
|
| 395 |
+
"Need attn_mask if specifying the is_causal hint. "
|
| 396 |
+
"You may use the Transformer module method "
|
| 397 |
+
"`generate_square_subsequent_mask` to create this mask."
|
| 398 |
+
)
|
| 399 |
+
|
| 400 |
+
if is_causal and key_padding_mask is None and not need_weights:
|
| 401 |
+
# when we have a kpm or need weights, we need attn_mask
|
| 402 |
+
# Otherwise, we use the is_causal hint go as is_causal
|
| 403 |
+
# indicator to SDPA.
|
| 404 |
+
attn_mask = None
|
| 405 |
+
else:
|
| 406 |
+
attn_mask = _canonical_mask(
|
| 407 |
+
mask=attn_mask,
|
| 408 |
+
mask_name="attn_mask",
|
| 409 |
+
other_type=None,
|
| 410 |
+
other_name="",
|
| 411 |
+
target_type=query.dtype,
|
| 412 |
+
check_other=False,
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
if key_padding_mask is not None:
|
| 416 |
+
# We have the attn_mask, and use that to merge kpm into it.
|
| 417 |
+
# Turn off use of is_causal hint, as the merged mask is no
|
| 418 |
+
# longer causal.
|
| 419 |
+
is_causal = False
|
| 420 |
+
|
| 421 |
+
assert embed_dim == embed_dim_to_check, \
|
| 422 |
+
f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
|
| 423 |
+
if isinstance(embed_dim, torch.Tensor):
|
| 424 |
+
# embed_dim can be a tensor when JIT tracing
|
| 425 |
+
head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
|
| 426 |
+
else:
|
| 427 |
+
head_dim = embed_dim // num_heads
|
| 428 |
+
assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
|
| 429 |
+
if use_separate_proj_weight:
|
| 430 |
+
# allow MHA to have different embedding dimensions when separate projection weights are used
|
| 431 |
+
assert key.shape[:2] == value.shape[:2], \
|
| 432 |
+
f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
|
| 433 |
+
else:
|
| 434 |
+
assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
|
| 435 |
+
|
| 436 |
+
#
|
| 437 |
+
# compute in-projection
|
| 438 |
+
#
|
| 439 |
+
if not use_separate_proj_weight:
|
| 440 |
+
assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
|
| 441 |
+
q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
|
| 442 |
+
else:
|
| 443 |
+
assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
|
| 444 |
+
assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
|
| 445 |
+
assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
|
| 446 |
+
if in_proj_bias is None:
|
| 447 |
+
b_q = b_k = b_v = None
|
| 448 |
+
else:
|
| 449 |
+
b_q, b_k, b_v = in_proj_bias.chunk(3)
|
| 450 |
+
q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
|
| 451 |
+
|
| 452 |
+
# prep attention mask
|
| 453 |
+
|
| 454 |
+
if attn_mask is not None:
|
| 455 |
+
# ensure attn_mask's dim is 3
|
| 456 |
+
if attn_mask.dim() == 2:
|
| 457 |
+
correct_2d_size = (tgt_len, src_len)
|
| 458 |
+
if attn_mask.shape != correct_2d_size:
|
| 459 |
+
raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
|
| 460 |
+
attn_mask = attn_mask.unsqueeze(0)
|
| 461 |
+
elif attn_mask.dim() == 3:
|
| 462 |
+
correct_3d_size = (bsz * num_heads, tgt_len, src_len)
|
| 463 |
+
if attn_mask.shape != correct_3d_size:
|
| 464 |
+
raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
|
| 465 |
+
else:
|
| 466 |
+
raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
|
| 467 |
+
|
| 468 |
+
# add bias along batch dimension (currently second)
|
| 469 |
+
if bias_k is not None and bias_v is not None:
|
| 470 |
+
assert static_k is None, "bias cannot be added to static key."
|
| 471 |
+
assert static_v is None, "bias cannot be added to static value."
|
| 472 |
+
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
|
| 473 |
+
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
|
| 474 |
+
if attn_mask is not None:
|
| 475 |
+
attn_mask = pad(attn_mask, (0, 1))
|
| 476 |
+
if key_padding_mask is not None:
|
| 477 |
+
key_padding_mask = pad(key_padding_mask, (0, 1))
|
| 478 |
+
else:
|
| 479 |
+
assert bias_k is None
|
| 480 |
+
assert bias_v is None
|
| 481 |
+
|
| 482 |
+
#
|
| 483 |
+
# reshape q, k, v for multihead attention and make em batch first
|
| 484 |
+
#
|
| 485 |
+
q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
|
| 486 |
+
if static_k is None:
|
| 487 |
+
k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
|
| 488 |
+
else:
|
| 489 |
+
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
|
| 490 |
+
assert static_k.size(0) == bsz * num_heads, \
|
| 491 |
+
f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
|
| 492 |
+
assert static_k.size(2) == head_dim, \
|
| 493 |
+
f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
|
| 494 |
+
k = static_k
|
| 495 |
+
if static_v is None:
|
| 496 |
+
v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
|
| 497 |
+
else:
|
| 498 |
+
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
|
| 499 |
+
assert static_v.size(0) == bsz * num_heads, \
|
| 500 |
+
f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
|
| 501 |
+
assert static_v.size(2) == head_dim, \
|
| 502 |
+
f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
|
| 503 |
+
v = static_v
|
| 504 |
+
|
| 505 |
+
# add zero attention along batch dimension (now first)
|
| 506 |
+
if add_zero_attn:
|
| 507 |
+
zero_attn_shape = (bsz * num_heads, 1, head_dim)
|
| 508 |
+
k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
|
| 509 |
+
v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
|
| 510 |
+
if attn_mask is not None:
|
| 511 |
+
attn_mask = pad(attn_mask, (0, 1))
|
| 512 |
+
if key_padding_mask is not None:
|
| 513 |
+
key_padding_mask = pad(key_padding_mask, (0, 1))
|
| 514 |
+
|
| 515 |
+
# update source sequence length after adjustments
|
| 516 |
+
src_len = k.size(1)
|
| 517 |
+
|
| 518 |
+
# merge key padding and attention masks
|
| 519 |
+
if key_padding_mask is not None:
|
| 520 |
+
assert key_padding_mask.shape == (bsz, src_len), \
|
| 521 |
+
f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
|
| 522 |
+
key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
|
| 523 |
+
expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
|
| 524 |
+
if attn_mask is None:
|
| 525 |
+
attn_mask = key_padding_mask
|
| 526 |
+
else:
|
| 527 |
+
attn_mask = attn_mask + key_padding_mask
|
| 528 |
+
|
| 529 |
+
# adjust dropout probability
|
| 530 |
+
if not training:
|
| 531 |
+
dropout_p = 0.0
|
| 532 |
+
|
| 533 |
+
#
|
| 534 |
+
# (deep breath) calculate attention and out projection
|
| 535 |
+
#
|
| 536 |
+
|
| 537 |
+
if need_weights:
|
| 538 |
+
B, Nt, E = q.shape
|
| 539 |
+
q_scaled = q / math.sqrt(E)
|
| 540 |
+
|
| 541 |
+
assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
|
| 542 |
+
|
| 543 |
+
if attn_mask is not None:
|
| 544 |
+
attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
|
| 545 |
+
else:
|
| 546 |
+
attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
|
| 547 |
+
attn_output_weights = softmax(attn_output_weights, dim=-1)
|
| 548 |
+
if dropout_p > 0.0:
|
| 549 |
+
attn_output_weights = dropout(attn_output_weights, p=dropout_p)
|
| 550 |
+
|
| 551 |
+
attn_output = torch.bmm(attn_output_weights, v)
|
| 552 |
+
|
| 553 |
+
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
|
| 554 |
+
attn_output = self.out_proj(attn_output)
|
| 555 |
+
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
|
| 556 |
+
|
| 557 |
+
# optionally average attention weights over heads
|
| 558 |
+
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
|
| 559 |
+
if average_attn_weights:
|
| 560 |
+
attn_output_weights = attn_output_weights.mean(dim=1)
|
| 561 |
+
|
| 562 |
+
if not is_batched:
|
| 563 |
+
# squeeze the output if input was unbatched
|
| 564 |
+
attn_output = attn_output.squeeze(1)
|
| 565 |
+
attn_output_weights = attn_output_weights.squeeze(0)
|
| 566 |
+
return attn_output, attn_output_weights
|
| 567 |
+
else:
|
| 568 |
+
# attn_mask can be either (L,S) or (N*num_heads, L, S)
|
| 569 |
+
# if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
|
| 570 |
+
# in order to match the input for SDPA of (N, num_heads, L, S)
|
| 571 |
+
if attn_mask is not None:
|
| 572 |
+
if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
|
| 573 |
+
attn_mask = attn_mask.unsqueeze(0)
|
| 574 |
+
else:
|
| 575 |
+
attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
|
| 576 |
+
|
| 577 |
+
q = q.view(bsz, num_heads, tgt_len, head_dim)
|
| 578 |
+
k = k.view(bsz, num_heads, src_len, head_dim)
|
| 579 |
+
v = v.view(bsz, num_heads, src_len, head_dim)
|
| 580 |
+
|
| 581 |
+
attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
|
| 582 |
+
attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
|
| 583 |
+
|
| 584 |
+
attn_output = self.out_proj(attn_output)
|
| 585 |
+
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
|
| 586 |
+
if not is_batched:
|
| 587 |
+
# squeeze the output if input was unbatched
|
| 588 |
+
attn_output = attn_output.squeeze(1)
|
| 589 |
+
return attn_output, None
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
|
| 593 |
+
key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
|
| 594 |
+
# Verifies the expected shape for `query`, `key`, `value`, `key_padding_mask` and `attn_mask`
|
| 595 |
+
# and returns if the input is batched or not.
|
| 596 |
+
# Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
|
| 597 |
+
|
| 598 |
+
# Shape check.
|
| 599 |
+
if query.dim() == 3:
|
| 600 |
+
# Batched Inputs
|
| 601 |
+
is_batched = True
|
| 602 |
+
assert key.dim() == 3 and value.dim() == 3, \
|
| 603 |
+
("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
|
| 604 |
+
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
|
| 605 |
+
if key_padding_mask is not None:
|
| 606 |
+
assert key_padding_mask.dim() == 2, \
|
| 607 |
+
("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
|
| 608 |
+
f" but found {key_padding_mask.dim()}-D tensor instead")
|
| 609 |
+
if attn_mask is not None:
|
| 610 |
+
assert attn_mask.dim() in (2, 3), \
|
| 611 |
+
("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
|
| 612 |
+
f" but found {attn_mask.dim()}-D tensor instead")
|
| 613 |
+
elif query.dim() == 2:
|
| 614 |
+
# Unbatched Inputs
|
| 615 |
+
is_batched = False
|
| 616 |
+
assert key.dim() == 2 and value.dim() == 2, \
|
| 617 |
+
("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
|
| 618 |
+
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
|
| 619 |
+
|
| 620 |
+
if key_padding_mask is not None:
|
| 621 |
+
assert key_padding_mask.dim() == 1, \
|
| 622 |
+
("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
|
| 623 |
+
f" but found {key_padding_mask.dim()}-D tensor instead")
|
| 624 |
+
|
| 625 |
+
if attn_mask is not None:
|
| 626 |
+
assert attn_mask.dim() in (2, 3), \
|
| 627 |
+
("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
|
| 628 |
+
f" but found {attn_mask.dim()}-D tensor instead")
|
| 629 |
+
if attn_mask.dim() == 3:
|
| 630 |
+
expected_shape = (num_heads, query.shape[0], key.shape[0])
|
| 631 |
+
assert attn_mask.shape == expected_shape, \
|
| 632 |
+
(f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
|
| 633 |
+
else:
|
| 634 |
+
raise AssertionError(
|
| 635 |
+
f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
|
| 636 |
+
|
| 637 |
+
return is_batched
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def _canonical_mask(
|
| 641 |
+
mask: Optional[Tensor],
|
| 642 |
+
mask_name: str,
|
| 643 |
+
other_type: Optional[DType],
|
| 644 |
+
other_name: str,
|
| 645 |
+
target_type: DType,
|
| 646 |
+
check_other: bool = True,
|
| 647 |
+
) -> Optional[Tensor]:
|
| 648 |
+
|
| 649 |
+
if mask is not None:
|
| 650 |
+
_mask_dtype = mask.dtype
|
| 651 |
+
_mask_is_float = torch.is_floating_point(mask)
|
| 652 |
+
if _mask_dtype != torch.bool and not _mask_is_float:
|
| 653 |
+
raise AssertionError(
|
| 654 |
+
f"only bool and floating types of {mask_name} are supported")
|
| 655 |
+
if check_other and other_type is not None:
|
| 656 |
+
if _mask_dtype != other_type:
|
| 657 |
+
warnings.warn(
|
| 658 |
+
f"Support for mismatched {mask_name} and {other_name} "
|
| 659 |
+
"is deprecated. Use same type for both instead."
|
| 660 |
+
)
|
| 661 |
+
if not _mask_is_float:
|
| 662 |
+
mask = (
|
| 663 |
+
torch.zeros_like(mask, dtype=target_type)
|
| 664 |
+
.masked_fill_(mask, float("-inf"))
|
| 665 |
+
)
|
| 666 |
+
return mask
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
|
| 670 |
+
if input is None:
|
| 671 |
+
return None
|
| 672 |
+
elif isinstance(input, torch.Tensor):
|
| 673 |
+
return input.dtype
|
| 674 |
+
raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
|
| 675 |
+
|
| 676 |
+
def _in_projection_packed(
|
| 677 |
+
q: Tensor,
|
| 678 |
+
k: Tensor,
|
| 679 |
+
v: Tensor,
|
| 680 |
+
w: Tensor,
|
| 681 |
+
b: Optional[Tensor] = None,
|
| 682 |
+
) -> List[Tensor]:
|
| 683 |
+
r"""
|
| 684 |
+
Performs the in-projection step of the attention operation, using packed weights.
|
| 685 |
+
Output is a triple containing projection tensors for query, key and value.
|
| 686 |
+
Args:
|
| 687 |
+
q, k, v: query, key and value tensors to be projected. For self-attention,
|
| 688 |
+
these are typically the same tensor; for encoder-decoder attention,
|
| 689 |
+
k and v are typically the same tensor. (We take advantage of these
|
| 690 |
+
identities for performance if they are present.) Regardless, q, k and v
|
| 691 |
+
must share a common embedding dimension; otherwise their shapes may vary.
|
| 692 |
+
w: projection weights for q, k and v, packed into a single tensor. Weights
|
| 693 |
+
are packed along dimension 0, in q, k, v order.
|
| 694 |
+
b: optional projection biases for q, k and v, packed into a single tensor
|
| 695 |
+
in q, k, v order.
|
| 696 |
+
Shape:
|
| 697 |
+
Inputs:
|
| 698 |
+
- q: :math:`(..., E)` where E is the embedding dimension
|
| 699 |
+
- k: :math:`(..., E)` where E is the embedding dimension
|
| 700 |
+
- v: :math:`(..., E)` where E is the embedding dimension
|
| 701 |
+
- w: :math:`(E * 3, E)` where E is the embedding dimension
|
| 702 |
+
- b: :math:`E * 3` where E is the embedding dimension
|
| 703 |
+
Output:
|
| 704 |
+
- in output list :math:`[q', k', v']`, each output tensor will have the
|
| 705 |
+
same shape as the corresponding input tensor.
|
| 706 |
+
"""
|
| 707 |
+
E = q.size(-1)
|
| 708 |
+
if k is v:
|
| 709 |
+
if q is k:
|
| 710 |
+
# self-attention
|
| 711 |
+
proj = linear(q, w, b)
|
| 712 |
+
# reshape to 3, E and not E, 3 is deliberate for better memory coalescing and keeping same order as chunk()
|
| 713 |
+
proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
|
| 714 |
+
return proj[0], proj[1], proj[2]
|
| 715 |
+
else:
|
| 716 |
+
# encoder-decoder attention
|
| 717 |
+
w_q, w_kv = w.split([E, E * 2])
|
| 718 |
+
if b is None:
|
| 719 |
+
b_q = b_kv = None
|
| 720 |
+
else:
|
| 721 |
+
b_q, b_kv = b.split([E, E * 2])
|
| 722 |
+
q_proj = linear(q, w_q, b_q)
|
| 723 |
+
kv_proj = linear(k, w_kv, b_kv)
|
| 724 |
+
# reshape to 2, E and not E, 2 is deliberate for better memory coalescing and keeping same order as chunk()
|
| 725 |
+
kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
|
| 726 |
+
return (q_proj, kv_proj[0], kv_proj[1])
|
| 727 |
+
else:
|
| 728 |
+
w_q, w_k, w_v = w.chunk(3)
|
| 729 |
+
if b is None:
|
| 730 |
+
b_q = b_k = b_v = None
|
| 731 |
+
else:
|
| 732 |
+
b_q, b_k, b_v = b.chunk(3)
|
| 733 |
+
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
def _in_projection(
|
| 737 |
+
q: Tensor,
|
| 738 |
+
k: Tensor,
|
| 739 |
+
v: Tensor,
|
| 740 |
+
w_q: Tensor,
|
| 741 |
+
w_k: Tensor,
|
| 742 |
+
w_v: Tensor,
|
| 743 |
+
b_q: Optional[Tensor] = None,
|
| 744 |
+
b_k: Optional[Tensor] = None,
|
| 745 |
+
b_v: Optional[Tensor] = None,
|
| 746 |
+
) -> Tuple[Tensor, Tensor, Tensor]:
|
| 747 |
+
r"""
|
| 748 |
+
Performs the in-projection step of the attention operation. This is simply
|
| 749 |
+
a triple of linear projections, with shape constraints on the weights which
|
| 750 |
+
ensure embedding dimension uniformity in the projected outputs.
|
| 751 |
+
Output is a triple containing projection tensors for query, key and value.
|
| 752 |
+
Args:
|
| 753 |
+
q, k, v: query, key and value tensors to be projected.
|
| 754 |
+
w_q, w_k, w_v: weights for q, k and v, respectively.
|
| 755 |
+
b_q, b_k, b_v: optional biases for q, k and v, respectively.
|
| 756 |
+
Shape:
|
| 757 |
+
Inputs:
|
| 758 |
+
- q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
|
| 759 |
+
number of leading dimensions.
|
| 760 |
+
- k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
|
| 761 |
+
number of leading dimensions.
|
| 762 |
+
- v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
|
| 763 |
+
number of leading dimensions.
|
| 764 |
+
- w_q: :math:`(Eq, Eq)`
|
| 765 |
+
- w_k: :math:`(Eq, Ek)`
|
| 766 |
+
- w_v: :math:`(Eq, Ev)`
|
| 767 |
+
- b_q: :math:`(Eq)`
|
| 768 |
+
- b_k: :math:`(Eq)`
|
| 769 |
+
- b_v: :math:`(Eq)`
|
| 770 |
+
Output: in output triple :math:`(q', k', v')`,
|
| 771 |
+
- q': :math:`[Qdims..., Eq]`
|
| 772 |
+
- k': :math:`[Kdims..., Eq]`
|
| 773 |
+
- v': :math:`[Vdims..., Eq]`
|
| 774 |
+
"""
|
| 775 |
+
Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
|
| 776 |
+
assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
|
| 777 |
+
assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
|
| 778 |
+
assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
|
| 779 |
+
assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
|
| 780 |
+
assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
|
| 781 |
+
assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
|
| 782 |
+
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
|
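resampler.py above implements the perceiver-style resampler: get_2d_sincos_pos_embed builds a fixed 2D sin/cos position grid, Resampler.forward cross-attends a set of learned queries over variable-sized patch grids (masking padded positions via key_padding_mask), and the MultiheadAttention subclass re-exposes PyTorch's attention internals with an nn.Linear out_proj. A toy forward-pass sketch, assuming the file above is importable and using made-up small sizes rather than the checkpoint's real configuration:

import torch
from resampler import Resampler    # assumes resampler.py above is on the path

resampler = Resampler(
    num_queries=64,                # toy values, not the real model settings
    embed_dim=128,
    num_heads=8,
    kv_dim=96,                     # kv_dim != embed_dim -> kv_proj is a Linear
    adaptive=True,
).eval()

# two images whose patch grids are 12x16 and 8x10; features are zero-padded
# to the longest sequence, as the processor does upstream
tgt_sizes = torch.tensor([[12, 16], [8, 10]])
max_patches = int((tgt_sizes[:, 0] * tgt_sizes[:, 1]).max())
x = torch.randn(2, max_patches, 96)

with torch.no_grad():
    out = resampler(x, tgt_sizes=tgt_sizes)
print(out.shape)                   # torch.Size([2, 64, 128]) = (batch, num_queries, embed_dim)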
special_tokens_map.json
ADDED
|
@@ -0,0 +1,52 @@
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<image>",
|
| 4 |
+
"</image>",
|
| 5 |
+
"<ref>",
|
| 6 |
+
"</ref>",
|
| 7 |
+
"<box>",
|
| 8 |
+
"</box>",
|
| 9 |
+
"<quad>",
|
| 10 |
+
"</quad>",
|
| 11 |
+
"<point>",
|
| 12 |
+
"</point>",
|
| 13 |
+
"<slice>",
|
| 14 |
+
"</slice>",
|
| 15 |
+
"<image_id>",
|
| 16 |
+
"</image_id>",
|
| 17 |
+
"<|reserved_special_token_0|>",
|
| 18 |
+
"<|reserved_special_token_1|>",
|
| 19 |
+
"<|reserved_special_token_2|>",
|
| 20 |
+
"<|reserved_special_token_3|>",
|
| 21 |
+
"<|reserved_special_token_4|>",
|
| 22 |
+
"<|reserved_special_token_5|>"
|
| 23 |
+
],
|
| 24 |
+
"bos_token": {
|
| 25 |
+
"content": "<|im_start|>",
|
| 26 |
+
"lstrip": false,
|
| 27 |
+
"normalized": false,
|
| 28 |
+
"rstrip": false,
|
| 29 |
+
"single_word": false
|
| 30 |
+
},
|
| 31 |
+
"eos_token": {
|
| 32 |
+
"content": "<|im_end|>",
|
| 33 |
+
"lstrip": false,
|
| 34 |
+
"normalized": false,
|
| 35 |
+
"rstrip": false,
|
| 36 |
+
"single_word": false
|
| 37 |
+
},
|
| 38 |
+
"pad_token": {
|
| 39 |
+
"content": "<|endoftext|>",
|
| 40 |
+
"lstrip": false,
|
| 41 |
+
"normalized": false,
|
| 42 |
+
"rstrip": false,
|
| 43 |
+
"single_word": false
|
| 44 |
+
},
|
| 45 |
+
"unk_token": {
|
| 46 |
+
"content": "<unk>",
|
| 47 |
+
"lstrip": false,
|
| 48 |
+
"normalized": false,
|
| 49 |
+
"rstrip": false,
|
| 50 |
+
"single_word": false
|
| 51 |
+
}
|
| 52 |
+
}
|
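special_tokens_map.json registers the image and region markers as additional special tokens, so the tokenizer keeps each of them as a single, unsplittable token. A short sketch of what that means in practice (placeholder path, assuming the tokenizer files in this repo are loadable):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint", trust_remote_code=True)
ids = tok.encode("<image></image>", add_special_tokens=False)
print(tok.convert_ids_to_tokens(ids))   # ['<image>', '</image>'] - one id per marker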
tokenization_minicpmv_fast.py
ADDED
|
@@ -0,0 +1,66 @@
|
| 1 |
+
from transformers.models.qwen2 import Qwen2TokenizerFast
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class MiniCPMVTokenizerFast(Qwen2TokenizerFast):
|
| 5 |
+
def __init__(self, **kwargs):
|
| 6 |
+
super().__init__(**kwargs)
|
| 7 |
+
self.im_start = "<image>"
|
| 8 |
+
self.im_end = "</image>"
|
| 9 |
+
self.ref_start = "<ref>"
|
| 10 |
+
self.ref_end = "</ref>"
|
| 11 |
+
self.box_start = "<box>"
|
| 12 |
+
self.box_end = "</box>"
|
| 13 |
+
self.quad_start = "<quad>"
|
| 14 |
+
self.quad_end = "</quad>"
|
| 15 |
+
self.slice_start = "<slice>"
|
| 16 |
+
self.slice_end = "</slice>"
|
| 17 |
+
self.im_id_start = "<image_id>"
|
| 18 |
+
self.im_id_end = "</image_id>"
|
| 19 |
+
|
| 20 |
+
@property
|
| 21 |
+
def eos_id(self):
|
| 22 |
+
return self.eos_token_id
|
| 23 |
+
|
| 24 |
+
@property
|
| 25 |
+
def bos_id(self):
|
| 26 |
+
return self.bos_token_id
|
| 27 |
+
|
| 28 |
+
@property
|
| 29 |
+
def unk_id(self):
|
| 30 |
+
return self.unk_token_id
|
| 31 |
+
|
| 32 |
+
@property
|
| 33 |
+
def im_start_id(self):
|
| 34 |
+
return self.convert_tokens_to_ids(self.im_start)
|
| 35 |
+
|
| 36 |
+
@property
|
| 37 |
+
def im_end_id(self):
|
| 38 |
+
return self.convert_tokens_to_ids(self.im_end)
|
| 39 |
+
|
| 40 |
+
@property
|
| 41 |
+
def slice_start_id(self):
|
| 42 |
+
return self.convert_tokens_to_ids(self.slice_start)
|
| 43 |
+
|
| 44 |
+
@property
|
| 45 |
+
def slice_end_id(self):
|
| 46 |
+
return self.convert_tokens_to_ids(self.slice_end)
|
| 47 |
+
|
| 48 |
+
@property
|
| 49 |
+
def im_id_start_id(self):
|
| 50 |
+
return self.convert_tokens_to_ids(self.im_id_start)
|
| 51 |
+
|
| 52 |
+
@property
|
| 53 |
+
def im_id_end_id(self):
|
| 54 |
+
return self.convert_tokens_to_ids(self.im_id_end)
|
| 55 |
+
|
| 56 |
+
@property
|
| 57 |
+
def newline_id(self):
|
| 58 |
+
return self.convert_tokens_to_ids('\n')
|
| 59 |
+
|
| 60 |
+
@staticmethod
|
| 61 |
+
def escape(text: str) -> str:
|
| 62 |
+
return text
|
| 63 |
+
|
| 64 |
+
@staticmethod
|
| 65 |
+
def unescape(text: str) -> str:
|
| 66 |
+
return text
|
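MiniCPMVTokenizerFast only adds convenience accessors on top of Qwen2TokenizerFast: the marker strings themselves plus *_id properties that resolve them through convert_tokens_to_ids. A hedged usage sketch (placeholder path; the printed ids follow the added-token table in tokenizer_config.json below):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "path/to/this/checkpoint",      # placeholder path (assumption)
    trust_remote_code=True,         # loads MiniCPMVTokenizerFast via the auto_map
)
print(tokenizer.im_start_id)        # id of "<image>"  (151646 per tokenizer_config.json)
print(tokenizer.slice_end_id)       # id of "</slice>" (151657 per tokenizer_config.json)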
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9de76ce95f90e336b4d2b0ec11d37f3d5404f2dad0f7ac95298405474b2a3a90
|
| 3 |
+
size 11422257
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,238 @@
|
| 1 |
+
{
|
| 2 |
+
"add_prefix_space": false,
|
| 3 |
+
"added_tokens_decoder": {
|
| 4 |
+
"128244": {
|
| 5 |
+
"content": "<unk>",
|
| 6 |
+
"lstrip": false,
|
| 7 |
+
"normalized": false,
|
| 8 |
+
"rstrip": false,
|
| 9 |
+
"single_word": false,
|
| 10 |
+
"special": true
|
| 11 |
+
},
|
| 12 |
+
"151643": {
|
| 13 |
+
"content": "<|endoftext|>",
|
| 14 |
+
"lstrip": false,
|
| 15 |
+
"normalized": false,
|
| 16 |
+
"rstrip": false,
|
| 17 |
+
"single_word": false,
|
| 18 |
+
"special": true
|
| 19 |
+
},
|
| 20 |
+
"151644": {
|
| 21 |
+
"content": "<|im_start|>",
|
| 22 |
+
"lstrip": false,
|
| 23 |
+
"normalized": false,
|
| 24 |
+
"rstrip": false,
|
| 25 |
+
"single_word": false,
|
| 26 |
+
"special": true
|
| 27 |
+
},
|
| 28 |
+
"151645": {
|
| 29 |
+
"content": "<|im_end|>",
|
| 30 |
+
"lstrip": false,
|
| 31 |
+
"normalized": false,
|
| 32 |
+
"rstrip": false,
|
| 33 |
+
"single_word": false,
|
| 34 |
+
"special": true
|
| 35 |
+
},
|
| 36 |
+
"151646": {
|
| 37 |
+
"content": "<image>",
|
| 38 |
+
"lstrip": false,
|
| 39 |
+
"normalized": false,
|
| 40 |
+
"rstrip": false,
|
| 41 |
+
"single_word": false,
|
| 42 |
+
"special": true
|
| 43 |
+
},
|
| 44 |
+
"151647": {
|
| 45 |
+
"content": "</image>",
|
| 46 |
+
"lstrip": false,
|
| 47 |
+
"normalized": false,
|
| 48 |
+
"rstrip": false,
|
| 49 |
+
"single_word": false,
|
| 50 |
+
"special": true
|
| 51 |
+
},
|
| 52 |
+
"151648": {
|
| 53 |
+
"content": "<ref>",
|
| 54 |
+
"lstrip": false,
|
| 55 |
+
"normalized": false,
|
| 56 |
+
"rstrip": false,
|
| 57 |
+
"single_word": false,
|
| 58 |
+
"special": true
|
| 59 |
+
},
|
| 60 |
+
"151649": {
|
| 61 |
+
"content": "</ref>",
|
| 62 |
+
"lstrip": false,
|
| 63 |
+
"normalized": false,
|
| 64 |
+
"rstrip": false,
|
| 65 |
+
"single_word": false,
|
| 66 |
+
"special": true
|
| 67 |
+
},
|
| 68 |
+
"151650": {
|
| 69 |
+
"content": "<box>",
|
| 70 |
+
"lstrip": false,
|
| 71 |
+
"normalized": false,
|
| 72 |
+
"rstrip": false,
|
| 73 |
+
"single_word": false,
|
| 74 |
+
"special": true
|
| 75 |
+
},
|
| 76 |
+
"151651": {
|
| 77 |
+
"content": "</box>",
|
| 78 |
+
"lstrip": false,
|
| 79 |
+
"normalized": false,
|
| 80 |
+
"rstrip": false,
|
| 81 |
+
"single_word": false,
|
| 82 |
+
"special": true
|
| 83 |
+
},
|
| 84 |
+
"151652": {
|
| 85 |
+
"content": "<quad>",
|
| 86 |
+
"lstrip": false,
|
| 87 |
+
"normalized": false,
|
| 88 |
+
"rstrip": false,
|
| 89 |
+
"single_word": false,
|
| 90 |
+
"special": true
|
| 91 |
+
},
|
| 92 |
+
"151653": {
|
| 93 |
+
"content": "</quad>",
|
| 94 |
+
"lstrip": false,
|
| 95 |
+
"normalized": false,
|
| 96 |
+
"rstrip": false,
|
| 97 |
+
"single_word": false,
|
| 98 |
+
"special": true
|
| 99 |
+
},
|
| 100 |
+
"151654": {
|
| 101 |
+
"content": "<point>",
|
| 102 |
+
"lstrip": false,
|
| 103 |
+
"normalized": false,
|
| 104 |
+
"rstrip": false,
|
| 105 |
+
"single_word": false,
|
| 106 |
+
"special": true
|
| 107 |
+
},
|
| 108 |
+
"151655": {
|
| 109 |
+
"content": "</point>",
|
| 110 |
+
"lstrip": false,
|
| 111 |
+
"normalized": false,
|
| 112 |
+
"rstrip": false,
|
| 113 |
+
"single_word": false,
|
| 114 |
+
"special": true
|
| 115 |
+
},
|
| 116 |
+
"151656": {
|
| 117 |
+
"content": "<slice>",
|
| 118 |
+
"lstrip": false,
|
| 119 |
+
"normalized": false,
|
| 120 |
+
"rstrip": false,
|
| 121 |
+
"single_word": false,
|
| 122 |
+
"special": true
|
| 123 |
+
},
|
| 124 |
+
"151657": {
|
| 125 |
+
"content": "</slice>",
|
| 126 |
+
"lstrip": false,
|
| 127 |
+
"normalized": false,
|
| 128 |
+
"rstrip": false,
|
| 129 |
+
"single_word": false,
|
| 130 |
+
"special": true
|
| 131 |
+
},
|
| 132 |
+
"151658": {
|
| 133 |
+
"content": "<image_id>",
|
| 134 |
+
"lstrip": false,
|
| 135 |
+
"normalized": false,
|
| 136 |
+
"rstrip": false,
|
| 137 |
+
"single_word": false,
|
| 138 |
+
"special": true
|
| 139 |
+
},
|
| 140 |
+
"151659": {
|
| 141 |
+
"content": "</image_id>",
|
| 142 |
+
"lstrip": false,
|
| 143 |
+
"normalized": false,
|
| 144 |
+
"rstrip": false,
|
| 145 |
+
"single_word": false,
|
| 146 |
+
"special": true
|
| 147 |
+
},
|
| 148 |
+
"151660": {
|
| 149 |
+
"content": "<|reserved_special_token_0|>",
|
| 150 |
+
"lstrip": false,
|
| 151 |
+
"normalized": false,
|
| 152 |
+
"rstrip": false,
|
| 153 |
+
"single_word": false,
|
| 154 |
+
"special": true
|
| 155 |
+
},
|
| 156 |
+
"151661": {
|
| 157 |
+
"content": "<|reserved_special_token_1|>",
|
| 158 |
+
"lstrip": false,
|
| 159 |
+
"normalized": false,
|
| 160 |
+
"rstrip": false,
|
| 161 |
+
"single_word": false,
|
| 162 |
+
"special": true
|
| 163 |
+
},
|
| 164 |
+
"151662": {
|
| 165 |
+
"content": "<|reserved_special_token_2|>",
|
| 166 |
+
"lstrip": false,
|
| 167 |
+
"normalized": false,
|
| 168 |
+
"rstrip": false,
|
| 169 |
+
"single_word": false,
|
| 170 |
+
"special": true
|
| 171 |
+
},
|
| 172 |
+
"151663": {
|
| 173 |
+
"content": "<|reserved_special_token_3|>",
|
| 174 |
+
"lstrip": false,
|
| 175 |
+
"normalized": false,
|
| 176 |
+
"rstrip": false,
|
| 177 |
+
"single_word": false,
|
| 178 |
+
"special": true
|
| 179 |
+
},
|
| 180 |
+
"151664": {
|
| 181 |
+
"content": "<|reserved_special_token_4|>",
|
| 182 |
+
"lstrip": false,
|
| 183 |
+
"normalized": false,
|
| 184 |
+
"rstrip": false,
|
| 185 |
+
"single_word": false,
|
| 186 |
+
"special": true
|
| 187 |
+
},
|
| 188 |
+
"151665": {
|
| 189 |
+
"content": "<|reserved_special_token_5|>",
|
| 190 |
+
"lstrip": false,
|
| 191 |
+
"normalized": false,
|
| 192 |
+
"rstrip": false,
|
| 193 |
+
"single_word": false,
|
| 194 |
+
"special": true
|
| 195 |
+
}
|
| 196 |
+
},
|
| 197 |
+
"additional_special_tokens": [
|
| 198 |
+
"<image>",
|
| 199 |
+
"</image>",
|
| 200 |
+
"<ref>",
|
| 201 |
+
"</ref>",
|
| 202 |
+
"<box>",
|
| 203 |
+
"</box>",
|
| 204 |
+
"<quad>",
|
| 205 |
+
"</quad>",
|
| 206 |
+
"<point>",
|
| 207 |
+
"</point>",
|
| 208 |
+
"<slice>",
|
| 209 |
+
"</slice>",
|
| 210 |
+
"<image_id>",
|
| 211 |
+
"</image_id>",
|
| 212 |
+
"<|reserved_special_token_0|>",
|
| 213 |
+
"<|reserved_special_token_1|>",
|
| 214 |
+
"<|reserved_special_token_2|>",
|
| 215 |
+
"<|reserved_special_token_3|>",
|
| 216 |
+
"<|reserved_special_token_4|>",
|
| 217 |
+
"<|reserved_special_token_5|>"
|
| 218 |
+
],
|
| 219 |
+
"auto_map": {
|
| 220 |
+
"AutoProcessor": "processing_minicpmv.MiniCPMVProcessor",
|
| 221 |
+
"AutoTokenizer": [
|
| 222 |
+
"tokenization_qwen2.Qwen2Tokenizer",
|
| 223 |
+
"tokenization_minicpmv_fast.MiniCPMVTokenizerFast"
|
| 224 |
+
]
|
| 225 |
+
},
|
| 226 |
+
"bos_token": "<|im_start|>",
|
| 227 |
+
"chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
|
| 228 |
+
"clean_up_tokenization_spaces": false,
|
| 229 |
+
"eos_token": "<|im_end|>",
|
| 230 |
+
"errors": "replace",
|
| 231 |
+
"extra_special_tokens": {},
|
| 232 |
+
"model_max_length": 1000000000000000019884624838656,
|
| 233 |
+
"pad_token": "<|endoftext|>",
|
| 234 |
+
"processor_class": "MiniCPMVProcessor",
|
| 235 |
+
"split_special_tokens": false,
|
| 236 |
+
"tokenizer_class": "MiniCPMVTokenizer",
|
| 237 |
+
"unk_token": "<unk>"
|
| 238 |
+
}
|
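The chat_template above wraps each message in <|im_start|>/<|im_end|> blocks, injects a default system prompt when none is given, and appends an assistant header when generation is requested. A minimal sketch of rendering a prompt with it (placeholder path, text-only message for brevity):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint", trust_remote_code=True)
messages = [{"role": "user", "content": "Describe this image."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Describe this image.<|im_end|>
# <|im_start|>assistant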
vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|